hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
7c0cafb7420b4dff2285632378e53bcfe6b2396a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "filters.hpp"
#include "packed_data_struct.hpp"
#include "macros.hpp"
#include "cuda_globals.hpp"
// Some parameters to tweak the bloom effect
#define BASE_BLOOM (0.017f*0.5f)
#define BLOOM_MUL (5.5f)
#define BLOOM_INCR (0.0f)
// -----------------------------------------------------------------------------
#define znew (z=36969*(z&65535)+(z>>16))
#define wnew (w=18000*(w&65535)+(w>>16))
#define MWC ((z<<16)+w)
__device__
int pack (const float4& s)
{
int nr = ((int)(fminf(s.x*255.f,255.f)));
int ng = ((int)(fminf(s.y*255.f,255.f)))<<8;
int nb = ((int)(fminf(s.z*255.f,255.f)))<<16;
return nr |ng |nb;
}
// -----------------------------------------------------------------------------
__device__
int pack_dither (const float4& s, int px, int py)
{
int z = px, w = py;
z = 1103515245 * z + 12345;
w = 1103515245 * w + 12345;
znew; wnew;
float x0 = MWC * 1.f / (0xFFFFFFFF);
znew; wnew;
float x1 = MWC * 1.f / (0xFFFFFFFF);
znew; wnew;
float x2 = MWC * 1.f / (0xFFFFFFFF);
float fr = fminf(s.x * 255.f,255.f);
float fg = fminf(s.y * 255.f,255.f);
float fb = fminf(s.z * 255.f,255.f);
int ir = fr;
int ig = fg;
int ib = fb;
int nr = (((fr - ir) > x0)?ir+1:ir);
int ng = ((((fg - ig) > x1)?ig+1:ig)<<8);
int nb = ((((fb - ib) > x2)?ib+1:ib)<<16);
return nr |ng |nb;
}
// -----------------------------------------------------------------------------
/// Additive filter.
/// Given an image stored linearly in 'img' of size (width, height)
/// We add every pixels of the square window with length 'filter_size'
/// centered at (px, py).
/// @note negative values in 'img' are ignored.
/// @param out_depth : the mean value of the depth inside the window.
/// @return the mean value of each color channel inside the window
__device__
float4 additive_filter(const float4* img,
const float* depth_buf,
int px, int py,
int width, int height,
int filter_size,
float& out_depth)
{
// TODO: we can accelerate the filtering by loading into shared memory the
// pixel block corresponding to the thread block.
// Load px py in shared memory
// synchronize threads of the block
// Do the filtering using the shared memory when possible
px -= (filter_size/2);
py -= (filter_size/2);
float4 new_val = {0.f, 0.f, 0.f, 0.f};
float new_depth_val = 0.f;
float contrib = 0.f;
for(int i=0; i < filter_size; i++)
{
for(int j=0; j < filter_size; j++)
{
int idx = (px+i) + (py+j) * width;
if(((px+i) < width) & ((px+i) >= 0) & ((py+j) < height) & ((py+j) >=0))
{
if(img[idx].x >=0.f)
{
new_val.x += img[idx].x;
new_val.y += img[idx].y;
new_val.z += img[idx].z;
new_val.w += img[idx].w;
new_depth_val += depth_buf[idx];
contrib += 1.f;
}
}
}
}
out_depth = new_depth_val/contrib, 0.f;
contrib *= (MULTISAMPX*MULTISAMPY);
new_val.x = fmaxf(new_val.x/contrib, 0.f);
new_val.y = fmaxf(new_val.y/contrib, 0.f);
new_val.z = fmaxf(new_val.z/contrib, 0.f);
new_val.w = fmaxf(new_val.w/contrib, 0.f);
return new_val;
}
// -----------------------------------------------------------------------------
__global__
void flatten_image(const float4* in_color,
const float* in_depth,
int* out_rgb24,
unsigned* out_depth,
int width,
int height,
bool do_filter,
int filter_size,
bool dither)
{
const int px = blockIdx.x * BLOCK_SIZE_X + threadIdx.x;
const int py = blockIdx.y * BLOCK_SIZE_Y + threadIdx.y;
const int idx = py * width + px;
if(idx < width * height)
{
float depth;
float4 pix_val;
if(do_filter)
pix_val = additive_filter(in_color, in_depth, px, py, width, height, filter_size, depth);
else
{
pix_val = in_color[idx];
depth = in_depth[idx];
}
out_rgb24[idx] = dither ? pack_dither(pix_val, px, py) : pack(pix_val);
out_depth[idx] = *reinterpret_cast<unsigned*>(&depth);
}
}
// -----------------------------------------------------------------------------
__global__
void clean_pbo(int* color, unsigned* depth, int n, float4 val)
{
const int p = threadIdx.x + blockDim.x * blockDim.y * blockIdx.x;
if(p < n){
color[p] = int(val.x *255) + (int(val.y*255)<<8) + (int(val.z*255)<<16);
float depth_value = 1.f;
depth[p] = *reinterpret_cast<unsigned*>(&depth_value);
}
}
// -----------------------------------------------------------------------------
void clean_pbos(int* color,
unsigned* depth,
int width,
int height,
float4 cl_color)
{
int nb_pixels = width * MULTISAMPX * height * MULTISAMPY;
int dimblock = BLOCK_SIZE_X * BLOCK_SIZE_Y;
int dimgrid = (nb_pixels + dimblock -1) / dimblock;
hipLaunchKernelGGL(( clean_pbo), dim3(dimgrid), dim3(dimblock) , 0, 0, color, depth, nb_pixels, cl_color);
}
// -----------------------------------------------------------------------------
__global__
void clean_buff(float4* buff, float* depth, int n, float4 val, float depth_val)
{
const int p = threadIdx.x + blockDim.x * blockDim.y * blockIdx.x;
if(p < n){
buff[p] = make_float4(val.x, val.y, val.z, val.w);
depth[p] = depth_val;
}
}
// -----------------------------------------------------------------------------
/// Fill 'd_buff' with zeros
void clean_buffers(float4* d_buff_, float* d_depth_, float far_, int width_, int height_)
{
int nb_pixels = width_ * MULTISAMPX * height_ * MULTISAMPY;
int dimblock = BLOCK_SIZE_X * BLOCK_SIZE_Y;
int dimgrid = (nb_pixels + dimblock -1) / dimblock;
float4 clear_val = make_float4(-1.f, -1.f, -1.f, -1.f);
hipLaunchKernelGGL(( clean_buff), dim3(dimgrid), dim3(dimblock) , 0, 0, d_buff_, d_depth_, nb_pixels, clear_val, far_);
}
// -----------------------------------------------------------------------------
// =============================================================================
namespace Bloom {
// =============================================================================
__device__
float4 operator+(float4 a, float4 b){
return make_float4(a.x+b.x, a.y+b.y,a.z+b.z, a.w +b.w);
}
__device__
float4 operator*(float4 a, float f){
return make_float4(a.x*f, a.y*f, a.z*f, a.w*f);
}
__device__
float4 operator*(float4 a, float4 b){
return make_float4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w);
}
__device__ float4
lerp(const float4& a, const float4& b, float f){
return a * f + b * (1.f - f);
}
// -----------------------------------------------------------------------------
__global__
void copy_pbo_to_img(PBO_data pbo, float4* img)
{
const int px = pbo.start_x + blockIdx.x*BLOCK_SIZE_X + threadIdx.x;
const int py = pbo.start_y + blockIdx.y*BLOCK_SIZE_Y + threadIdx.y;
const int p = py * pbo.width + px;
img[p] = pbo.d_rendu[p];
}
// -----------------------------------------------------------------------------
__global__
void copy_img_to_pbo(PBO_data pbo, float4* img)
{
const int px = pbo.start_x + blockIdx.x*BLOCK_SIZE_X + threadIdx.x;
const int py = pbo.start_y + blockIdx.y*BLOCK_SIZE_Y + threadIdx.y;
const int p = py * pbo.width + px;
pbo.d_rendu[p] = img[p];
}
// -----------------------------------------------------------------------------
__global__
void clean_img(PBO_data pbo, float4* img)
{
const int px = pbo.start_x + blockIdx.x*BLOCK_SIZE_X + threadIdx.x;
const int py = pbo.start_y + blockIdx.y*BLOCK_SIZE_Y + threadIdx.y;
const int p = py * pbo.width + px;
img[2*p] = make_float4(0.f, 0.f, 0.f, 0.f);
img[2*p+1] = make_float4(0.f, 0.f, 0.f, 0.f);
}
// -----------------------------------------------------------------------------
__global__
void pre_compute_bloom(PBO_data pbo, float4* img, float4* bloom, int lvl)
{
const int px = pbo.start_x + blockIdx.x*BLOCK_SIZE_X + threadIdx.x;
const int py = pbo.start_y + blockIdx.y*BLOCK_SIZE_Y + threadIdx.y;
int w = pbo.width;
int h = pbo.height;
int offset = 0;
for(int i = 0; i < lvl; i++){
offset += w*h;
w /= 2;
h /= 2;
}
float4* img_src = &(img[offset]);
float4* img_dst = &(img[offset+w*h]);
w /= 2;
h /= 2;
if(px < w & py < h){
float4 a = img_src[2*px + 4 * w * py];
float4 b = img_src[2*px + 1 + 4 * w * py];
float4 c = img_src[2*px + (2 * py + 1 )* 2*w];
float4 d = img_src[2*px + 1 + (2 * py + 1)*2*w];
img_dst[px + w*py] = (a+b+c+d)*0.25f;
}
}
// -----------------------------------------------------------------------------
__global__
void compute_bloom0(PBO_data pbo, float4* img, float4* bloom, int lvls)
{
const int px = pbo.start_x + blockIdx.x*BLOCK_SIZE_X + threadIdx.x;
const int py = pbo.start_y + blockIdx.y*BLOCK_SIZE_Y + threadIdx.y;
int w = pbo.width;
int h = pbo.height;
float4* img_src = img;
float4* bm_dst = bloom;
for(int i = 0; i < lvls; i++){
if(px < w & py < h){
float4 e = img_src[px + w*py];
float4 a = (px>0)?img_src[px-1 + w*py]:e;
float4 b = (px<w-1)?img_src[px+1 + w*py]:e;
float4 c = (py>0)?img_src[px + w*(py-1)]:e;
float4 d = (py<w-1)?img_src[px + w*(py+1)]:e;
float4 f = (px>0 & py>0)?img_src[px-1 + w*(py-1)]:e;
float4 g = (px>0 & py<w-1)?img_src[px-1 + w*(py+1)]:e;
float4 h = (px<w-1 & py>0)?img_src[px+1 + w*(py-1)]:e;
float4 k = (px<w-1 & py<w-1)?img_src[px+1 + w*(py+1)]:e;
float4 res = (f + g + h + k)*(1.f/16.f)+ (a + b + c + d)*(1.f/8.f) + e*(1.f/4.f);
bm_dst[px + w*py] = res;
}
bm_dst = &bm_dst[w*h];
img_src = &img_src[w*h];
w /= 2;
h /= 2;
}
}
// -----------------------------------------------------------------------------
__global__
void compute_bloom(PBO_data pbo, float4* img, float4* bloom, int lvls)
{
const int px = pbo.start_x + blockIdx.x*BLOCK_SIZE_X + threadIdx.x;
const int py = pbo.start_y + blockIdx.y*BLOCK_SIZE_Y + threadIdx.y;
int w = pbo.width;
int h = pbo.height;
float pqx = px;
float pqy = py;
float factor = BASE_BLOOM;
float4 res = make_float4(0.f,0.f,0.f,0.f);
float4* bm_src = bloom;
//res = bm_src[qx/2 + (w/2) * (qy/2) + w*h];
//res = bm_src[qx + w*qy];
int offset = 0;
for(int i = 0; i < lvls; i++){
factor *= BLOOM_MUL;
factor += BLOOM_INCR;
int qx = pqx;
int qy = pqy;
float dx = pqx-qx;
float dy = pqy-qy;
float4 color_a = bm_src[qx + w*qy + offset];
float4 color_b = (qx<w-1)?bm_src[qx+1 + w*qy + offset]:color_a;
float4 color_c = (qy<h-1)?bm_src[qx + w*(qy+1) + offset]:color_a;
float4 color_d = (qx<w-1)?((qy<h-1)?bm_src[qx+1 + w*(qy+1) + offset]:color_b):
((qy<h-1)?color_c:color_a);
float4 color_lvl = lerp(lerp(color_d, color_c, dx),lerp(color_b, color_a, dx),dy);
color_lvl.x *= color_lvl.x;
color_lvl.y *= color_lvl.y;
color_lvl.z *= color_lvl.z;
color_lvl.w *= color_lvl.w;
res = res+ color_lvl * factor;
offset += w*h;
pqx = (pqx-0.5f)*0.5f;
pqy = (pqy-0.5f)*0.5f;
w /= 2;
h /= 2;
}
/*int result;
w = pbo.width;
h = pbo.height;
int qx = px;
int qy = py;
offset = 0;
for(int i = 0; i < 6; i++){
offset += w*h;
w /= 2;
h /= 2;
qx /= 2;
qy /= 2;
}
*/
const int p = py * pbo.width + px;
img[p] = img[p] + res;//
//img[p] = img[qx + w*qy + offset];
}
}// END BLOOM NAMESPACE =========================================================
void do_bloom(float4* d_rendu,
float4* d_img_buff,
float4* d_bloom_buff,
int width, int height)
{
using namespace Bloom;
PBO_data pbo_d(d_rendu, NULL, 0, 0, width, height);
dim3 dimBlock(BLOCK_SIZE_X,BLOCK_SIZE_Y);
dim3 dimGrid(width/BLOCK_SIZE_X, height/BLOCK_SIZE_Y);
dim3 dimGrid0((width + 2*BLOCK_SIZE_X-1)/(2*BLOCK_SIZE_X),
(height + 2*BLOCK_SIZE_Y-1)/(2*BLOCK_SIZE_Y));
dim3 dimGrid1((width + 4*BLOCK_SIZE_X-1)/(4*BLOCK_SIZE_X),
(height + 4*BLOCK_SIZE_Y-1)/(4*BLOCK_SIZE_Y));
dim3 dimGrid2((width + 8*BLOCK_SIZE_X-1)/(8*BLOCK_SIZE_X),
(height + 8*BLOCK_SIZE_Y-1)/(8*BLOCK_SIZE_Y));
dim3 dimGrid3((width + 16*BLOCK_SIZE_X-1)/(16*BLOCK_SIZE_X),
(height + 16*BLOCK_SIZE_Y-1)/(16*BLOCK_SIZE_Y));
dim3 dimGrid4((width + 32*BLOCK_SIZE_X-1)/(32*BLOCK_SIZE_X),
(height + 32*BLOCK_SIZE_Y-1)/(32*BLOCK_SIZE_Y));
dim3 dimGrid5((width + 64*BLOCK_SIZE_X-1)/(64*BLOCK_SIZE_X),
(height + 64*BLOCK_SIZE_Y-1)/(64*BLOCK_SIZE_Y));
dim3 dimGrid6((width + 128*BLOCK_SIZE_X-1)/(128*BLOCK_SIZE_X),
(height + 128*BLOCK_SIZE_Y-1)/(128*BLOCK_SIZE_Y));
dim3 dimGrid7((width + 256*BLOCK_SIZE_X-1)/(256*BLOCK_SIZE_X),
(height + 256*BLOCK_SIZE_Y-1)/(256*BLOCK_SIZE_Y));
hipLaunchKernelGGL(( clean_img), dim3(dimGrid), dim3(dimBlock) , 0, 0, pbo_d, d_img_buff);
hipLaunchKernelGGL(( copy_pbo_to_img), dim3(dimGrid), dim3(dimBlock) , 0, 0, pbo_d, d_img_buff);
int k = 2;
hipLaunchKernelGGL(( pre_compute_bloom), dim3(dimGrid0), dim3(dimBlock) , 0, 0, pbo_d, d_img_buff,d_bloom_buff, 0);
hipLaunchKernelGGL(( pre_compute_bloom), dim3(dimGrid1), dim3(dimBlock) , 0, 0, pbo_d, d_img_buff,d_bloom_buff, 1);
if(width % 8 == 0 && height % 8 == 0){
hipLaunchKernelGGL(( pre_compute_bloom), dim3(dimGrid2), dim3(dimBlock) , 0, 0, pbo_d, d_img_buff,d_bloom_buff, 2);
k++;
}
if(width % 16 == 0 && height % 16 == 0){
hipLaunchKernelGGL(( pre_compute_bloom), dim3(dimGrid3), dim3(dimBlock) , 0, 0, pbo_d, d_img_buff,d_bloom_buff, 3);
k++;
}
if(width % 32 == 0 && height % 32 == 0){
hipLaunchKernelGGL(( pre_compute_bloom), dim3(dimGrid4), dim3(dimBlock) , 0, 0, pbo_d, d_img_buff,d_bloom_buff, 4);
k++;
}
if(width % 64 == 0 && height % 64 == 0){
hipLaunchKernelGGL(( pre_compute_bloom), dim3(dimGrid5), dim3(dimBlock) , 0, 0, pbo_d, d_img_buff,d_bloom_buff, 5);
k++;
}
if(width % 128 == 0 && height % 128 == 0){
hipLaunchKernelGGL(( pre_compute_bloom), dim3(dimGrid4), dim3(dimBlock) , 0, 0, pbo_d, d_img_buff,d_bloom_buff, 6);
k++;
}
if(width % 256 == 0 && height % 256 == 0){
hipLaunchKernelGGL(( pre_compute_bloom), dim3(dimGrid5), dim3(dimBlock) , 0, 0, pbo_d, d_img_buff,d_bloom_buff, 7);
k++;
}
hipLaunchKernelGGL(( compute_bloom0), dim3(dimGrid), dim3(dimBlock) , 0, 0, pbo_d, d_img_buff, d_bloom_buff, k);
hipLaunchKernelGGL(( compute_bloom), dim3(dimGrid), dim3(dimBlock) , 0, 0, pbo_d, d_img_buff, d_bloom_buff, k);
hipLaunchKernelGGL(( copy_img_to_pbo), dim3(dimGrid), dim3(dimBlock) , 0, 0, pbo_d, d_img_buff);
}
| 7c0cafb7420b4dff2285632378e53bcfe6b2396a.cu | #include "filters.hpp"
#include "packed_data_struct.hpp"
#include "macros.hpp"
#include "cuda_globals.hpp"
// Some parameters to tweak the bloom effect
#define BASE_BLOOM (0.017f*0.5f)
#define BLOOM_MUL (5.5f)
#define BLOOM_INCR (0.0f)
// -----------------------------------------------------------------------------
#define znew (z=36969*(z&65535)+(z>>16))
#define wnew (w=18000*(w&65535)+(w>>16))
#define MWC ((z<<16)+w)
__device__
int pack (const float4& s)
{
int nr = ((int)(fminf(s.x*255.f,255.f)));
int ng = ((int)(fminf(s.y*255.f,255.f)))<<8;
int nb = ((int)(fminf(s.z*255.f,255.f)))<<16;
return nr |ng |nb;
}
// -----------------------------------------------------------------------------
__device__
int pack_dither (const float4& s, int px, int py)
{
int z = px, w = py;
z = 1103515245 * z + 12345;
w = 1103515245 * w + 12345;
znew; wnew;
float x0 = MWC * 1.f / (0xFFFFFFFF);
znew; wnew;
float x1 = MWC * 1.f / (0xFFFFFFFF);
znew; wnew;
float x2 = MWC * 1.f / (0xFFFFFFFF);
float fr = fminf(s.x * 255.f,255.f);
float fg = fminf(s.y * 255.f,255.f);
float fb = fminf(s.z * 255.f,255.f);
int ir = fr;
int ig = fg;
int ib = fb;
int nr = (((fr - ir) > x0)?ir+1:ir);
int ng = ((((fg - ig) > x1)?ig+1:ig)<<8);
int nb = ((((fb - ib) > x2)?ib+1:ib)<<16);
return nr |ng |nb;
}
// -----------------------------------------------------------------------------
/// Additive filter.
/// Given an image stored linearly in 'img' of size (width, height)
/// We add every pixels of the square window with length 'filter_size'
/// centered at (px, py).
/// @note negative values in 'img' are ignored.
/// @param out_depth : the mean value of the depth inside the window.
/// @return the mean value of each color channel inside the window
__device__
float4 additive_filter(const float4* img,
const float* depth_buf,
int px, int py,
int width, int height,
int filter_size,
float& out_depth)
{
// TODO: we can accelerate the filtering by loading into shared memory the
// pixel block corresponding to the thread block.
// Load px py in shared memory
// synchronize threads of the block
// Do the filtering using the shared memory when possible
px -= (filter_size/2);
py -= (filter_size/2);
float4 new_val = {0.f, 0.f, 0.f, 0.f};
float new_depth_val = 0.f;
float contrib = 0.f;
for(int i=0; i < filter_size; i++)
{
for(int j=0; j < filter_size; j++)
{
int idx = (px+i) + (py+j) * width;
if(((px+i) < width) & ((px+i) >= 0) & ((py+j) < height) & ((py+j) >=0))
{
if(img[idx].x >=0.f)
{
new_val.x += img[idx].x;
new_val.y += img[idx].y;
new_val.z += img[idx].z;
new_val.w += img[idx].w;
new_depth_val += depth_buf[idx];
contrib += 1.f;
}
}
}
}
out_depth = new_depth_val/contrib, 0.f;
contrib *= (MULTISAMPX*MULTISAMPY);
new_val.x = fmaxf(new_val.x/contrib, 0.f);
new_val.y = fmaxf(new_val.y/contrib, 0.f);
new_val.z = fmaxf(new_val.z/contrib, 0.f);
new_val.w = fmaxf(new_val.w/contrib, 0.f);
return new_val;
}
// -----------------------------------------------------------------------------
__global__
void flatten_image(const float4* in_color,
const float* in_depth,
int* out_rgb24,
unsigned* out_depth,
int width,
int height,
bool do_filter,
int filter_size,
bool dither)
{
const int px = blockIdx.x * BLOCK_SIZE_X + threadIdx.x;
const int py = blockIdx.y * BLOCK_SIZE_Y + threadIdx.y;
const int idx = py * width + px;
if(idx < width * height)
{
float depth;
float4 pix_val;
if(do_filter)
pix_val = additive_filter(in_color, in_depth, px, py, width, height, filter_size, depth);
else
{
pix_val = in_color[idx];
depth = in_depth[idx];
}
out_rgb24[idx] = dither ? pack_dither(pix_val, px, py) : pack(pix_val);
out_depth[idx] = *reinterpret_cast<unsigned*>(&depth);
}
}
// -----------------------------------------------------------------------------
__global__
void clean_pbo(int* color, unsigned* depth, int n, float4 val)
{
const int p = threadIdx.x + blockDim.x * blockDim.y * blockIdx.x;
if(p < n){
color[p] = int(val.x *255) + (int(val.y*255)<<8) + (int(val.z*255)<<16);
float depth_value = 1.f;
depth[p] = *reinterpret_cast<unsigned*>(&depth_value);
}
}
// -----------------------------------------------------------------------------
void clean_pbos(int* color,
unsigned* depth,
int width,
int height,
float4 cl_color)
{
int nb_pixels = width * MULTISAMPX * height * MULTISAMPY;
int dimblock = BLOCK_SIZE_X * BLOCK_SIZE_Y;
int dimgrid = (nb_pixels + dimblock -1) / dimblock;
clean_pbo<<<dimgrid, dimblock >>>(color, depth, nb_pixels, cl_color);
}
// -----------------------------------------------------------------------------
__global__
void clean_buff(float4* buff, float* depth, int n, float4 val, float depth_val)
{
const int p = threadIdx.x + blockDim.x * blockDim.y * blockIdx.x;
if(p < n){
buff[p] = make_float4(val.x, val.y, val.z, val.w);
depth[p] = depth_val;
}
}
// -----------------------------------------------------------------------------
/// Fill 'd_buff' with zeros
void clean_buffers(float4* d_buff_, float* d_depth_, float far_, int width_, int height_)
{
int nb_pixels = width_ * MULTISAMPX * height_ * MULTISAMPY;
int dimblock = BLOCK_SIZE_X * BLOCK_SIZE_Y;
int dimgrid = (nb_pixels + dimblock -1) / dimblock;
float4 clear_val = make_float4(-1.f, -1.f, -1.f, -1.f);
clean_buff<<<dimgrid, dimblock >>>(d_buff_, d_depth_, nb_pixels, clear_val, far_);
}
// -----------------------------------------------------------------------------
// =============================================================================
namespace Bloom {
// =============================================================================
__device__
float4 operator+(float4 a, float4 b){
return make_float4(a.x+b.x, a.y+b.y,a.z+b.z, a.w +b.w);
}
__device__
float4 operator*(float4 a, float f){
return make_float4(a.x*f, a.y*f, a.z*f, a.w*f);
}
__device__
float4 operator*(float4 a, float4 b){
return make_float4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w);
}
__device__ float4
lerp(const float4& a, const float4& b, float f){
return a * f + b * (1.f - f);
}
// -----------------------------------------------------------------------------
__global__
void copy_pbo_to_img(PBO_data pbo, float4* img)
{
const int px = pbo.start_x + blockIdx.x*BLOCK_SIZE_X + threadIdx.x;
const int py = pbo.start_y + blockIdx.y*BLOCK_SIZE_Y + threadIdx.y;
const int p = py * pbo.width + px;
img[p] = pbo.d_rendu[p];
}
// -----------------------------------------------------------------------------
__global__
void copy_img_to_pbo(PBO_data pbo, float4* img)
{
const int px = pbo.start_x + blockIdx.x*BLOCK_SIZE_X + threadIdx.x;
const int py = pbo.start_y + blockIdx.y*BLOCK_SIZE_Y + threadIdx.y;
const int p = py * pbo.width + px;
pbo.d_rendu[p] = img[p];
}
// -----------------------------------------------------------------------------
__global__
void clean_img(PBO_data pbo, float4* img)
{
const int px = pbo.start_x + blockIdx.x*BLOCK_SIZE_X + threadIdx.x;
const int py = pbo.start_y + blockIdx.y*BLOCK_SIZE_Y + threadIdx.y;
const int p = py * pbo.width + px;
img[2*p] = make_float4(0.f, 0.f, 0.f, 0.f);
img[2*p+1] = make_float4(0.f, 0.f, 0.f, 0.f);
}
// -----------------------------------------------------------------------------
__global__
void pre_compute_bloom(PBO_data pbo, float4* img, float4* bloom, int lvl)
{
const int px = pbo.start_x + blockIdx.x*BLOCK_SIZE_X + threadIdx.x;
const int py = pbo.start_y + blockIdx.y*BLOCK_SIZE_Y + threadIdx.y;
int w = pbo.width;
int h = pbo.height;
int offset = 0;
for(int i = 0; i < lvl; i++){
offset += w*h;
w /= 2;
h /= 2;
}
float4* img_src = &(img[offset]);
float4* img_dst = &(img[offset+w*h]);
w /= 2;
h /= 2;
if(px < w & py < h){
float4 a = img_src[2*px + 4 * w * py];
float4 b = img_src[2*px + 1 + 4 * w * py];
float4 c = img_src[2*px + (2 * py + 1 )* 2*w];
float4 d = img_src[2*px + 1 + (2 * py + 1)*2*w];
img_dst[px + w*py] = (a+b+c+d)*0.25f;
}
}
// -----------------------------------------------------------------------------
__global__
void compute_bloom0(PBO_data pbo, float4* img, float4* bloom, int lvls)
{
const int px = pbo.start_x + blockIdx.x*BLOCK_SIZE_X + threadIdx.x;
const int py = pbo.start_y + blockIdx.y*BLOCK_SIZE_Y + threadIdx.y;
int w = pbo.width;
int h = pbo.height;
float4* img_src = img;
float4* bm_dst = bloom;
for(int i = 0; i < lvls; i++){
if(px < w & py < h){
float4 e = img_src[px + w*py];
float4 a = (px>0)?img_src[px-1 + w*py]:e;
float4 b = (px<w-1)?img_src[px+1 + w*py]:e;
float4 c = (py>0)?img_src[px + w*(py-1)]:e;
float4 d = (py<w-1)?img_src[px + w*(py+1)]:e;
float4 f = (px>0 & py>0)?img_src[px-1 + w*(py-1)]:e;
float4 g = (px>0 & py<w-1)?img_src[px-1 + w*(py+1)]:e;
float4 h = (px<w-1 & py>0)?img_src[px+1 + w*(py-1)]:e;
float4 k = (px<w-1 & py<w-1)?img_src[px+1 + w*(py+1)]:e;
float4 res = (f + g + h + k)*(1.f/16.f)+ (a + b + c + d)*(1.f/8.f) + e*(1.f/4.f);
bm_dst[px + w*py] = res;
}
bm_dst = &bm_dst[w*h];
img_src = &img_src[w*h];
w /= 2;
h /= 2;
}
}
// -----------------------------------------------------------------------------
__global__
void compute_bloom(PBO_data pbo, float4* img, float4* bloom, int lvls)
{
const int px = pbo.start_x + blockIdx.x*BLOCK_SIZE_X + threadIdx.x;
const int py = pbo.start_y + blockIdx.y*BLOCK_SIZE_Y + threadIdx.y;
int w = pbo.width;
int h = pbo.height;
float pqx = px;
float pqy = py;
float factor = BASE_BLOOM;
float4 res = make_float4(0.f,0.f,0.f,0.f);
float4* bm_src = bloom;
//res = bm_src[qx/2 + (w/2) * (qy/2) + w*h];
//res = bm_src[qx + w*qy];
int offset = 0;
for(int i = 0; i < lvls; i++){
factor *= BLOOM_MUL;
factor += BLOOM_INCR;
int qx = pqx;
int qy = pqy;
float dx = pqx-qx;
float dy = pqy-qy;
float4 color_a = bm_src[qx + w*qy + offset];
float4 color_b = (qx<w-1)?bm_src[qx+1 + w*qy + offset]:color_a;
float4 color_c = (qy<h-1)?bm_src[qx + w*(qy+1) + offset]:color_a;
float4 color_d = (qx<w-1)?((qy<h-1)?bm_src[qx+1 + w*(qy+1) + offset]:color_b):
((qy<h-1)?color_c:color_a);
float4 color_lvl = lerp(lerp(color_d, color_c, dx),lerp(color_b, color_a, dx),dy);
color_lvl.x *= color_lvl.x;
color_lvl.y *= color_lvl.y;
color_lvl.z *= color_lvl.z;
color_lvl.w *= color_lvl.w;
res = res+ color_lvl * factor;
offset += w*h;
pqx = (pqx-0.5f)*0.5f;
pqy = (pqy-0.5f)*0.5f;
w /= 2;
h /= 2;
}
/*int result;
w = pbo.width;
h = pbo.height;
int qx = px;
int qy = py;
offset = 0;
for(int i = 0; i < 6; i++){
offset += w*h;
w /= 2;
h /= 2;
qx /= 2;
qy /= 2;
}
*/
const int p = py * pbo.width + px;
img[p] = img[p] + res;//
//img[p] = img[qx + w*qy + offset];
}
}// END BLOOM NAMESPACE =========================================================
void do_bloom(float4* d_rendu,
float4* d_img_buff,
float4* d_bloom_buff,
int width, int height)
{
using namespace Bloom;
PBO_data pbo_d(d_rendu, NULL, 0, 0, width, height);
dim3 dimBlock(BLOCK_SIZE_X,BLOCK_SIZE_Y);
dim3 dimGrid(width/BLOCK_SIZE_X, height/BLOCK_SIZE_Y);
dim3 dimGrid0((width + 2*BLOCK_SIZE_X-1)/(2*BLOCK_SIZE_X),
(height + 2*BLOCK_SIZE_Y-1)/(2*BLOCK_SIZE_Y));
dim3 dimGrid1((width + 4*BLOCK_SIZE_X-1)/(4*BLOCK_SIZE_X),
(height + 4*BLOCK_SIZE_Y-1)/(4*BLOCK_SIZE_Y));
dim3 dimGrid2((width + 8*BLOCK_SIZE_X-1)/(8*BLOCK_SIZE_X),
(height + 8*BLOCK_SIZE_Y-1)/(8*BLOCK_SIZE_Y));
dim3 dimGrid3((width + 16*BLOCK_SIZE_X-1)/(16*BLOCK_SIZE_X),
(height + 16*BLOCK_SIZE_Y-1)/(16*BLOCK_SIZE_Y));
dim3 dimGrid4((width + 32*BLOCK_SIZE_X-1)/(32*BLOCK_SIZE_X),
(height + 32*BLOCK_SIZE_Y-1)/(32*BLOCK_SIZE_Y));
dim3 dimGrid5((width + 64*BLOCK_SIZE_X-1)/(64*BLOCK_SIZE_X),
(height + 64*BLOCK_SIZE_Y-1)/(64*BLOCK_SIZE_Y));
dim3 dimGrid6((width + 128*BLOCK_SIZE_X-1)/(128*BLOCK_SIZE_X),
(height + 128*BLOCK_SIZE_Y-1)/(128*BLOCK_SIZE_Y));
dim3 dimGrid7((width + 256*BLOCK_SIZE_X-1)/(256*BLOCK_SIZE_X),
(height + 256*BLOCK_SIZE_Y-1)/(256*BLOCK_SIZE_Y));
clean_img<<<dimGrid, dimBlock >>>(pbo_d, d_img_buff);
copy_pbo_to_img<<<dimGrid, dimBlock >>>(pbo_d, d_img_buff);
int k = 2;
pre_compute_bloom<<<dimGrid0, dimBlock >>>(pbo_d, d_img_buff,d_bloom_buff, 0);
pre_compute_bloom<<<dimGrid1, dimBlock >>>(pbo_d, d_img_buff,d_bloom_buff, 1);
if(width % 8 == 0 && height % 8 == 0){
pre_compute_bloom<<<dimGrid2, dimBlock >>>(pbo_d, d_img_buff,d_bloom_buff, 2);
k++;
}
if(width % 16 == 0 && height % 16 == 0){
pre_compute_bloom<<<dimGrid3, dimBlock >>>(pbo_d, d_img_buff,d_bloom_buff, 3);
k++;
}
if(width % 32 == 0 && height % 32 == 0){
pre_compute_bloom<<<dimGrid4, dimBlock >>>(pbo_d, d_img_buff,d_bloom_buff, 4);
k++;
}
if(width % 64 == 0 && height % 64 == 0){
pre_compute_bloom<<<dimGrid5, dimBlock >>>(pbo_d, d_img_buff,d_bloom_buff, 5);
k++;
}
if(width % 128 == 0 && height % 128 == 0){
pre_compute_bloom<<<dimGrid4, dimBlock >>>(pbo_d, d_img_buff,d_bloom_buff, 6);
k++;
}
if(width % 256 == 0 && height % 256 == 0){
pre_compute_bloom<<<dimGrid5, dimBlock >>>(pbo_d, d_img_buff,d_bloom_buff, 7);
k++;
}
compute_bloom0<<<dimGrid, dimBlock >>>(pbo_d, d_img_buff, d_bloom_buff, k);
compute_bloom<<<dimGrid, dimBlock >>>(pbo_d, d_img_buff, d_bloom_buff, k);
copy_img_to_pbo<<<dimGrid, dimBlock >>>(pbo_d, d_img_buff);
}
|
c06ccde797b31ae7621bd84828bebd845f6d3e10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Name: H.G. Manesha Washani
// Student Id: 1432289
#include <stdio.h>
/* this one of the header file. in this code need
dynamically allocated array function. library code can use
malloc, free option */
#include <stdlib.h>
#define N 4
/* The __global__ indicates that this is an entry-point function running on the device. is called from host code */
__global__ void Matrixadd(int A[][N], int B[][N], int C[][N]){
int g = threadIdx.x;
int h = threadIdx.y;
C[g][h] = A[g][h] + B[g][h];
}
/* This is randam function of assessment gave*/
void randmatfunc(int newmat[N][N]){ //int change to void mode and added newmate parameter to the function
int i, j, k;
for(i=0;i<N;i++){
for(j=0;j<N;j++){
k = rand() % 100 + 1;;
printf("%d ", k);
newmat[i][j] =k;
}
printf("\n");
}
}
/*Remove matrix numbers and, insert function to A and B matrix and create number automatically */
int main()
{
int A[N][N];
randmatfunc(A);
int B[N][N];
randmatfunc(B);
int C[N][N];
//device copies of A, B,C
int (*d_A)[N], (*d_B)[N], (*d_C)[N];
/* Device copies of A, B and C allovated space for device aopies of A, B and C. in lecture CUDA part 1 explanation have allocate memory on the device. */
hipMalloc((void**)&d_A, (N*N)*sizeof(int));
hipMalloc((void**)&d_B, (N*N)*sizeof(int));
hipMalloc((void**)&d_C, (N*N)*sizeof(int));
/* Copy input to device. the memory areas may not overlap calling cuda Memcpy()*/
hipMemcpy(d_A, A, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_C, C, (N*N)*sizeof(int), hipMemcpyHostToDevice);
//Launch add() kernel on GPU
int numBlocks = 1;
dim3 threadsPerBlock(N,N);
hipLaunchKernelGGL(( Matrixadd), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_A,d_B,d_C);
// Copy result back to the host
hipMemcpy(C, d_C, (N*N)*sizeof(int), hipMemcpyDeviceToHost);
int g, h; printf("C = \n");
for(g=0;g<N;g++){
for(h=0;h<N;h++){
printf("%d ", C[g][h]);
}
printf("\n");
}
// This is cleanup
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
printf("\n");
return 0;
}
| c06ccde797b31ae7621bd84828bebd845f6d3e10.cu | // Name: H.G. Manesha Washani
// Student Id: 1432289
#include <stdio.h>
/* this one of the header file. in this code need
dynamically allocated array function. library code can use
malloc, free option */
#include <stdlib.h>
#define N 4
/* The __global__ indicates that this is an entry-point function running on the device. is called from host code */
__global__ void Matrixadd(int A[][N], int B[][N], int C[][N]){
int g = threadIdx.x;
int h = threadIdx.y;
C[g][h] = A[g][h] + B[g][h];
}
/* This is randam function of assessment gave*/
void randmatfunc(int newmat[N][N]){ //int change to void mode and added newmate parameter to the function
int i, j, k;
for(i=0;i<N;i++){
for(j=0;j<N;j++){
k = rand() % 100 + 1;;
printf("%d ", k);
newmat[i][j] =k;
}
printf("\n");
}
}
/*Remove matrix numbers and, insert function to A and B matrix and create number automatically */
int main()
{
int A[N][N];
randmatfunc(A);
int B[N][N];
randmatfunc(B);
int C[N][N];
//device copies of A, B,C
int (*d_A)[N], (*d_B)[N], (*d_C)[N];
/* Device copies of A, B and C allovated space for device aopies of A, B and C. in lecture CUDA part 1 explanation have allocate memory on the device. */
cudaMalloc((void**)&d_A, (N*N)*sizeof(int));
cudaMalloc((void**)&d_B, (N*N)*sizeof(int));
cudaMalloc((void**)&d_C, (N*N)*sizeof(int));
/* Copy input to device. the memory areas may not overlap calling cuda Memcpy()*/
cudaMemcpy(d_A, A, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_C, C, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
//Launch add() kernel on GPU
int numBlocks = 1;
dim3 threadsPerBlock(N,N);
Matrixadd<<<numBlocks,threadsPerBlock>>>(d_A,d_B,d_C);
// Copy result back to the host
cudaMemcpy(C, d_C, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
int g, h; printf("C = \n");
for(g=0;g<N;g++){
for(h=0;h<N;h++){
printf("%d ", C[g][h]);
}
printf("\n");
}
// This is cleanup
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\n");
return 0;
}
|
bc1820ffc1f981d71ac4d979a0ecc98a27081a9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by gxl on 2020/12/30.
//
#include "bfs.cuh"
/// Conventional partition-streamed BFS.
/// Loads a CSR graph from the binary file at 'bfsPath' (layout: uint vertex
/// count, uint edge count, vertex-count node pointers, edge-count edge
/// targets), greedily packs consecutive vertices into edge partitions no
/// larger than the GPU budget reported by getMaxPartitionSize(), then
/// iterates: host-side frontier check per partition, copy each partition
/// containing active vertices to the device, and run
/// bfsKernel_CommonPartition on it.
/// Fixes applied: restored the mangled "&degreeD" argument ("°reeD" was an
/// HTML-entity round-trip artifact), released new[] arrays with delete[]
/// instead of free() (mismatched deallocation is undefined behavior), and
/// released the previously-leaked value/partitionInfoList/
/// needTransferPartition arrays and degreeD/valueD device buffers.
/// @param bfsPath          path to the binary CSR graph file
/// @param sampleSourceNode BFS source vertex (replaces the random pick)
void conventionParticipateBFS(string bfsPath, int sampleSourceNode) {
    cout << "===============conventionParticipateBFS==============" << endl;
    uint testNumNodes = 0;
    ulong testNumEdge = 0;
    ulong traverseSum = 0;      // total bytes of edge data copied to the GPU
    uint *nodePointersI;
    uint *edgeList;
    auto startReadGraph = std::chrono::steady_clock::now();
    ifstream infile(bfsPath, ios::in | ios::binary);
    infile.read((char *) &testNumNodes, sizeof(uint));
    uint numEdge = 0;
    infile.read((char *) &numEdge, sizeof(uint));
    testNumEdge = numEdge;
    cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl;
    nodePointersI = new uint[testNumNodes];
    infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes);
    edgeList = new uint[testNumEdge];
    infile.read((char *) edgeList, sizeof(uint) * testNumEdge);
    infile.close();
    unsigned long max_partition_size;
    unsigned long total_gpu_size;
    getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, 0.9, sizeof(uint), 5);
    // Number of edge partitions needed so that each fits on the device.
    uint partitionNum;
    if (testNumEdge > max_partition_size) {
        partitionNum = testNumEdge / max_partition_size + 1;
    } else {
        partitionNum = 1;
    }
    uint *degree = new uint[testNumNodes];
    uint *value = new uint[testNumNodes];
    bool *isActiveNodeList = new bool[testNumNodes];
    CommonPartitionInfo *partitionInfoList = new CommonPartitionInfo[partitionNum];
    bool *needTransferPartition = new bool[partitionNum];
    for (uint i = 0; i < testNumNodes; i++) {
        isActiveNodeList[i] = false;
        value[i] = UINT_MAX;        // UINT_MAX marks "not yet visited"
        // degree[i] from consecutive CSR node pointers; last vertex uses the
        // edge count as its end pointer.
        if (i + 1 < testNumNodes) {
            degree[i] = nodePointersI[i + 1] - nodePointersI[i];
        } else {
            degree[i] = testNumEdge - nodePointersI[i];
        }
        // A vertex whose adjacency list exceeds one partition cannot be
        // handled by this partitioning scheme.
        if (degree[i] > max_partition_size) {
            cout << "node " << i << " degree > maxPartition " << endl;
            return;
        }
    }
    for (uint i = 0; i < partitionNum; i++) {
        partitionInfoList[i].startVertex = -1;
        partitionInfoList[i].endVertex = -1;
        partitionInfoList[i].nodePointerOffset = -1;
        partitionInfoList[i].partitionEdgeSize = -1;
    }
    // Greedily pack consecutive vertices into partitions until adding the
    // next vertex would overflow max_partition_size.
    int tempPartitionIndex = 0;
    uint tempNodeIndex = 0;
    while (tempNodeIndex < testNumNodes) {
        if (partitionInfoList[tempPartitionIndex].startVertex == -1) {
            partitionInfoList[tempPartitionIndex].startVertex = tempNodeIndex;
            partitionInfoList[tempPartitionIndex].endVertex = tempNodeIndex;
            partitionInfoList[tempPartitionIndex].nodePointerOffset = nodePointersI[tempNodeIndex];
            partitionInfoList[tempPartitionIndex].partitionEdgeSize = degree[tempNodeIndex];
            tempNodeIndex++;
        } else {
            if (partitionInfoList[tempPartitionIndex].partitionEdgeSize + degree[tempNodeIndex] > max_partition_size) {
                tempPartitionIndex++;
            } else {
                partitionInfoList[tempPartitionIndex].endVertex = tempNodeIndex;
                partitionInfoList[tempPartitionIndex].partitionEdgeSize += degree[tempNodeIndex];
                tempNodeIndex++;
            }
        }
    }
    // Device-side buffers; partitionEdgeListD is the staging area reused for
    // every transferred partition.
    uint *degreeD;
    bool *isActiveNodeListD;
    bool *nextActiveNodeListD;
    uint *nodePointerListD;
    uint *partitionEdgeListD;
    uint *valueD;
    hipMalloc(&degreeD, testNumNodes * sizeof(uint));
    hipMalloc(&valueD, testNumNodes * sizeof(uint));
    hipMalloc(&isActiveNodeListD, testNumNodes * sizeof(bool));
    hipMalloc(&nextActiveNodeListD, testNumNodes * sizeof(bool));
    hipMalloc(&nodePointerListD, testNumNodes * sizeof(uint));
    hipMalloc(&partitionEdgeListD, max_partition_size * sizeof(uint));
    hipMemcpy(degreeD, degree, testNumNodes * sizeof(uint), hipMemcpyHostToDevice);
    hipMemcpy(nodePointerListD, nodePointersI, testNumNodes * sizeof(uint), hipMemcpyHostToDevice);
    hipMemset(nextActiveNodeListD, 0, testNumNodes * sizeof(bool));
    // Launch configuration shared by every kernel invocation below.
    dim3 grid = dim3(56, 1, 1);
    dim3 block = dim3(1024, 1, 1);
    int testTimes = 1;
    long timeSum = 0;
    for (int i = 0; i < testTimes; i++) {
        uint sourceNode = rand() % testNumNodes;
        sourceNode = sampleSourceNode;   // deterministic source for benchmarking
        //sourceNode = 25838548;
        //sourceNode = 26890152;
        //sourceNode = 47235513;
        cout << "sourceNode " << sourceNode << endl;
        for (int j = 0; j < testNumNodes; j++) {
            isActiveNodeList[j] = false;
            value[j] = UINT_MAX;
        }
        isActiveNodeList[sourceNode] = true;
        value[sourceNode] = 1;          // levels are 1-based in this variant
        hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice);
        uint activeSum = 0;
        int iteration = 0;
        auto startProcessing = std::chrono::steady_clock::now();
        while (true) {
            // Host-side scan: which partitions hold active vertices, and how
            // many vertices are active overall.
            uint activeNodeNum = 0;
            checkNeedTransferPartitionOpt(needTransferPartition, partitionInfoList, isActiveNodeList, partitionNum,
                                          testNumNodes, activeNodeNum);
            if (activeNodeNum <= 0) {
                break;      // frontier exhausted -> BFS done
            } else {
                //cout << "iteration " << iteration << " activeNodes " << activeNodeNum << endl;
                activeSum += activeNodeNum;
            }
            hipMemcpy(isActiveNodeListD, isActiveNodeList, testNumNodes * sizeof(bool), hipMemcpyHostToDevice);
            for (int j = 0; j < partitionNum; j++) {
                if (needTransferPartition[j]) {
                    // Stream this partition's edges to the device, then expand
                    // its active vertices.
                    hipMemcpy(partitionEdgeListD, edgeList + partitionInfoList[j].nodePointerOffset,
                              partitionInfoList[j].partitionEdgeSize * sizeof(uint), hipMemcpyHostToDevice);
                    traverseSum += partitionInfoList[j].partitionEdgeSize * sizeof(uint);
                    hipLaunchKernelGGL(( bfsKernel_CommonPartition), dim3(grid), dim3(block), 0, 0, partitionInfoList[j].startVertex,
                                                                 partitionInfoList[j].endVertex,
                                                                 partitionInfoList[j].nodePointerOffset,
                                                                 isActiveNodeListD, nodePointerListD,
                                                                 partitionEdgeListD, degreeD, valueD,
                                                                 nextActiveNodeListD);
                    hipDeviceSynchronize();
                    gpuErrorcheck(hipPeekAtLastError())
                }
            }
            // Next frontier becomes the current one; clear the device-side
            // next-frontier buffer for the following iteration.
            hipMemcpy(isActiveNodeList, nextActiveNodeListD, testNumNodes * sizeof(bool), hipMemcpyDeviceToHost);
            hipMemset(nextActiveNodeListD, 0, testNumNodes * sizeof(bool));
            iteration++;
        }
        cout << " activeSum " << activeSum << endl;
        auto endRead = std::chrono::steady_clock::now();
        long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
        cout << " finish time : " << durationRead << " ms" << endl;
        cout << "traverseSum " << traverseSum << endl;
    }
    // Cleanup. Host arrays were allocated with new[], so they must be
    // released with delete[] (the previous free() calls were undefined
    // behavior); all device buffers are released as well.
    delete[] nodePointersI;
    delete[] edgeList;
    delete[] degree;
    delete[] value;
    delete[] isActiveNodeList;
    delete[] partitionInfoList;
    delete[] needTransferPartition;
    hipFree(degreeD);
    hipFree(valueD);
    hipFree(isActiveNodeListD);
    hipFree(nextActiveNodeListD);
    hipFree(nodePointerListD);
    hipFree(partitionEdgeListD);
}
/// BFS over a CSR graph held in unified (managed) memory, recording the
/// per-vertex result into bfsValue[valueIndex].
/// value[] holds the BFS level of every vertex (source = 0 in this variant,
/// unreached = UINT_MAX); label[] marks the current frontier. Each iteration
/// compacts the frontier with a thrust exclusive scan into activeNodeList,
/// then expands it with bfs_kernel.
/// Fix applied: restored the mangled "&degree" argument ("°ree" was an
/// HTML-entity round-trip artifact).
/// @param bfsValue   destination table; row valueIndex receives the levels
/// @return processing time in milliseconds (traversal only, excluding setup)
long
bfsCaculateInShareReturnValue(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, uint sourceNode,
                              uint **bfsValue, int valueIndex) {
    auto start = std::chrono::steady_clock::now();
    uint *degree;
    uint *value;
    uint sourceCode = 0;
    gpuErrorcheck(hipMallocManaged(&degree, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMallocManaged(&value, testNumNodes * sizeof(uint)));
    auto startPreCaculate = std::chrono::steady_clock::now();
    // Compute out-degrees from consecutive CSR node pointers; a pointer past
    // the edge count indicates a corrupt graph file.
    for (uint i = 0; i < testNumNodes - 1; i++) {
        if (nodePointersI[i] > testNumEdge) {
            cout << i << " " << nodePointersI[i] << endl;
            break;
        }
        degree[i] = nodePointersI[i + 1] - nodePointersI[i];
    }
    degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1];
    sourceCode = sourceNode;
    cout << "sourceNode " << sourceNode << " degree " << degree[sourceNode] << endl;
    bool *label;
    gpuErrorcheck(hipMallocManaged(&label, testNumNodes * sizeof(bool)));
    for (uint i = 0; i < testNumNodes; i++) {
        label[i] = false;
        value[i] = UINT_MAX;    // UINT_MAX marks "not yet visited"
    }
    auto endPreCaculate = std::chrono::steady_clock::now();
    long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
            endPreCaculate - startPreCaculate).count();
    // Seed the frontier with the source vertex; level 0 in this variant.
    label[sourceCode] = true;
    value[sourceCode] = 0;
    uint *activeNodeList;
    hipMallocManaged(&activeNodeList, testNumNodes * sizeof(uint));
    // Per-vertex 0/1 activity flags and their prefix sums, used to compact
    // the frontier into a dense activeNodeList.
    uint *activeNodeLabelingD;
    gpuErrorcheck(hipMallocManaged(&activeNodeLabelingD, testNumNodes * sizeof(unsigned int)));
    uint *activeNodeLabelingPrefixD;
    gpuErrorcheck(hipMallocManaged(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
    dim3 grid = dim3(56, 1, 1);
    dim3 block = dim3(1024, 1, 1);
    hipLaunchKernelGGL(( setLabeling), dim3(grid), dim3(block), 0, 0, testNumNodes, label, activeNodeLabelingD);
    thrust::device_ptr<unsigned int> ptr_labeling(activeNodeLabelingD);
    thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
    uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
    int iter = 0;
    uint nodeSum = activeNodesNum;
    auto startProcessing = std::chrono::steady_clock::now();
    while (activeNodesNum > 0) {
        iter++;
        // Compact frontier -> activeNodeList, clear labels, expand frontier.
        thrust::exclusive_scan(ptr_labeling, ptr_labeling + testNumNodes, ptr_labeling_prefixsum);
        hipLaunchKernelGGL(( setActiveNodeArray), dim3(grid), dim3(block), 0, 0, testNumNodes, activeNodeList, label, activeNodeLabelingPrefixD);
        hipLaunchKernelGGL(( setLabelDefault), dim3(grid), dim3(block), 0, 0, activeNodesNum, activeNodeList, label);
        hipLaunchKernelGGL(( bfs_kernel), dim3(grid), dim3(block), 0, 0, activeNodesNum, activeNodeList, nodePointersI, degree, edgeList, value, label);
        hipDeviceSynchronize();
        gpuErrorcheck(hipPeekAtLastError());
        hipLaunchKernelGGL(( setLabeling), dim3(grid), dim3(block), 0, 0, testNumNodes, label, activeNodeLabelingD);
        activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
        nodeSum += activeNodesNum;
    }
    hipDeviceSynchronize();
    // Persist the computed levels into the caller-supplied table.
    for (int i = 0; i < testNumNodes; i++) {
        bfsValue[valueIndex][i] = value[i];
    }
    auto endRead = std::chrono::steady_clock::now();
    long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
    hipFree(degree);
    hipFree(label);
    hipFree(value);
    hipFree(activeNodeList);
    hipFree(activeNodeLabelingD);
    hipFree(activeNodeLabelingPrefixD);
    return durationRead;
}
/// BFS driver using unified (managed) memory.
/// Reads the CSR graph at 'bfsPath' into hipMallocManaged buffers, advises
/// them read-mostly, and runs bfsCaculateInShare from 'sampleSourceNode'.
/// Fix applied: the managed graph buffers were previously leaked; they are
/// now released with hipFree before returning (matching testBFS()).
/// @param bfsPath          path to the binary CSR graph file
/// @param sampleSourceNode BFS source vertex (replaces the random pick)
void bfsShare(string bfsPath, int sampleSourceNode) {
    uint testNumNodes = 0;
    ulong testNumEdge = 0;
    uint *nodePointersI;
    uint *edgeList;
    bool isUseShare = true;     // kept for parity with testBFS(); always true here
    auto startReadGraph = std::chrono::steady_clock::now();
    ifstream infile(bfsPath, ios::in | ios::binary);
    infile.read((char *) &testNumNodes, sizeof(uint));
    uint numEdge = 0;
    infile.read((char *) &numEdge, sizeof(uint));
    testNumEdge = numEdge;
    cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl;
    // +1 slot is allocated but only testNumNodes entries are read from the
    // file; the last slot stays uninitialized -- TODO confirm no caller
    // reads nodePointersI[testNumNodes].
    gpuErrorcheck(hipMallocManaged(&nodePointersI, (testNumNodes + 1) * sizeof(uint)));
    infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes);
    gpuErrorcheck(hipMallocManaged(&edgeList, (numEdge) * sizeof(uint)));
    // The graph is only read during traversal, so advise read-mostly.
    hipMemAdvise(nodePointersI, (testNumNodes + 1) * sizeof(uint), hipMemAdviseSetReadMostly, 0);
    hipMemAdvise(edgeList, (numEdge) * sizeof(uint), hipMemAdviseSetReadMostly, 0);
    infile.read((char *) edgeList, sizeof(uint) * testNumEdge);
    infile.close();
    //preprocessData(nodePointersI, edgeList, testNumNodes, testNumEdge);
    auto endReadGraph = std::chrono::steady_clock::now();
    long durationReadGraph = std::chrono::duration_cast<std::chrono::milliseconds>(
            endReadGraph - startReadGraph).count();
    cout << "read graph time : " << durationReadGraph << "ms" << endl;
    int testTimes = 1;
    long timeSum = 0;
    for (int i = 0; i < testTimes; i++) {
        uint sourceNode = rand() % testNumNodes;
        sourceNode = sampleSourceNode;  // deterministic source for benchmarking
        cout << "sourceNode " << sourceNode << endl;
        timeSum += bfsCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList, sourceNode);
        //timeSum += bfsCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList, 53037907);
        break;
    }
    // Release the managed graph buffers (previously leaked).
    hipFree(nodePointersI);
    hipFree(edgeList);
}
/// BFS driver for the explicit-transfer (no-UVM) path.
/// Reads the CSR graph at 'bfsPath' into host heap buffers and runs
/// bfsCaculateInAsyncNoUVMRandom.
/// Fix applied: the host arrays are allocated with new[], so they are now
/// released with delete[] -- the previous hipFree() calls on host heap
/// pointers were undefined behavior.
/// NOTE(review): sampleSourceNode is currently shadowed by the hard-coded
/// source vertices below -- confirm whether the debug override is still
/// wanted (bfsShare/conventionParticipateBFS do use the parameter).
/// @param adviseK tuning knob forwarded to the BFS routine
void bfsOpt(string bfsPath, int sampleSourceNode, float adviseK) {
    uint testNumNodes = 0;
    ulong testNumEdge = 0;
    uint *nodePointersI;
    uint *edgeList;
    bool isUseShare = true;     // unused on this path; kept for parity with testBFS()
    auto startReadGraph = std::chrono::steady_clock::now();
    ifstream infile(bfsPath, ios::in | ios::binary);
    infile.read((char *) &testNumNodes, sizeof(uint));
    uint numEdge = 0;
    infile.read((char *) &numEdge, sizeof(uint));
    testNumEdge = numEdge;
    cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl;
    nodePointersI = new uint[testNumNodes + 1];
    infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes);
    edgeList = new uint[testNumEdge + 1];
    infile.read((char *) edgeList, sizeof(uint) * testNumEdge);
    infile.close();
    auto endReadGraph = std::chrono::steady_clock::now();
    long durationReadGraph = std::chrono::duration_cast<std::chrono::milliseconds>(
            endReadGraph - startReadGraph).count();
    cout << "read graph time : " << durationReadGraph << "ms" << endl;
    int testTimes = 1;
    long timeSum = 0;
    for (int i = 0; i < testTimes; i++) {
        uint sourceNode = rand() % testNumNodes;
        sourceNode = 47235513;
        sourceNode = 25838548;  // hard-coded override wins; sampleSourceNode ignored
        cout << "sourceNode " << sourceNode << endl;
        //timeSum += bfsCaculateInOpt(testNumNodes, testNumEdge, nodePointersI, edgeList, 25838548);
        //caculateInOptChooseByDegree(testNumNodes, testNumEdge, nodePointersI, edgeList);
        //timeSum += bfsCaculateInAsync(testNumNodes, testNumEdge, nodePointersI, edgeList, 53037907);
        timeSum += bfsCaculateInAsyncNoUVMRandom(testNumNodes, testNumEdge, nodePointersI, edgeList, sourceNode,
                                                 adviseK);
        //timeSum += bfsCaculateInAsyncSwapOpt(testNumNodes, testNumEdge, nodePointersI, edgeList, 25838548);
        //timeSum += bfsCaculateInAsyncSwapOptWithOverload(testNumNodes, testNumEdge, nodePointersI, edgeList, 25838548);
        //timeSum += bfsCaculateInAsyncSwapManage(testNumNodes, testNumEdge, nodePointersI, edgeList, 25838548);
        //break;
        cout << i << "========================================" << endl;
    }
    // Host heap buffers: release with delete[], not hipFree.
    delete[] nodePointersI;
    delete[] edgeList;
}
// Test harness for BFS: loads the graph at the global testGraphPath and runs
// either the unified-memory path (isUseShare == true, hipMallocManaged +
// read-mostly advice) or the explicit host-buffer path (new[] arrays).
// NOTE(review): isUseShare is hard-coded to true, so the else branches below
// are currently dead code kept for experimentation.
void testBFS() {
    uint testNumNodes = 0;
    ulong testNumEdge = 0;
    uint *nodePointersI;
    uint *edgeList;
    bool isUseShare = true;     // selects managed-memory vs host-heap path
    auto startReadGraph = std::chrono::steady_clock::now();
    // Binary graph layout: uint vertex count, uint edge count, then the CSR
    // node pointers and edge targets.
    ifstream infile(testGraphPath, ios::in | ios::binary);
    infile.read((char *) &testNumNodes, sizeof(uint));
    uint numEdge = 0;
    infile.read((char *) &numEdge, sizeof(uint));
    testNumEdge = numEdge;
    cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl;
    if (isUseShare) {
        // Managed (unified) memory path; +1 slot is allocated for the node
        // pointers but only testNumNodes entries are read from the file.
        gpuErrorcheck(hipMallocManaged(&nodePointersI, (testNumNodes + 1) * sizeof(uint)));
        infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes);
        gpuErrorcheck(hipMallocManaged(&edgeList, (numEdge) * sizeof(uint)));
        // The graph is read-only during traversal, so advise read-mostly.
        hipMemAdvise(nodePointersI, (testNumNodes + 1) * sizeof(uint), hipMemAdviseSetReadMostly, 0);
        hipMemAdvise(edgeList, (numEdge) * sizeof(uint), hipMemAdviseSetReadMostly, 0);
        infile.read((char *) edgeList, sizeof(uint) * testNumEdge);
        infile.close();
        //preprocessData(nodePointersI, edgeList, testNumNodes, testNumEdge);
    } else {
        // Host heap path used by the no-UVM BFS variants.
        nodePointersI = new uint[testNumNodes + 1];
        infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes);
        edgeList = new uint[testNumEdge + 1];
        infile.read((char *) edgeList, sizeof(uint) * testNumEdge);
        infile.close();
    }
    auto endReadGraph = std::chrono::steady_clock::now();
    long durationReadGraph = std::chrono::duration_cast<std::chrono::milliseconds>(
            endReadGraph - startReadGraph).count();
    cout << "read graph time : " << durationReadGraph << "ms" << endl;
    int testTimes = 1;
    long timeSum = 0;
    for (int i = 0; i < testTimes; i++) {
        // Random source is computed but immediately overridden by fixed
        // sources below for reproducible runs.
        uint sourceNode = rand() % testNumNodes;
        cout << "sourceNode " << sourceNode << endl;
        if (isUseShare) {
            //timeSum += bfsCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList, 25838548);
            timeSum += bfsCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList, 53037907);
            break;
        } else {
            //timeSum += bfsCaculateInOpt(testNumNodes, testNumEdge, nodePointersI, edgeList, 25838548);
            //caculateInOptChooseByDegree(testNumNodes, testNumEdge, nodePointersI, edgeList);
            //timeSum += bfsCaculateInAsync(testNumNodes, testNumEdge, nodePointersI, edgeList, 53037907);
            timeSum += bfsCaculateInAsyncNoUVMSwap(testNumNodes, testNumEdge, nodePointersI, edgeList, 25838548);
            //timeSum += bfsCaculateInAsyncSwapOpt(testNumNodes, testNumEdge, nodePointersI, edgeList, 25838548);
            //timeSum += bfsCaculateInAsyncSwapOptWithOverload(testNumNodes, testNumEdge, nodePointersI, edgeList, 25838548);
            //timeSum += bfsCaculateInAsyncSwapManage(testNumNodes, testNumEdge, nodePointersI, edgeList, 25838548);
            //break;
        }
        cout << i << "========================================" << endl;
    }
    // Release with the deallocator matching each branch's allocator.
    if (isUseShare) {
        hipFree(nodePointersI);
        hipFree(edgeList);
    } else {
        delete[]nodePointersI;
        delete[]edgeList;
    }
}
/// Frontier-based BFS over a CSR graph held in unified (managed) memory.
/// value[] holds the BFS level of every vertex (source = 1 in this variant,
/// unreached = UINT_MAX); label[] marks the current frontier. Each iteration
/// compacts the frontier with a thrust exclusive scan into activeNodeList,
/// clears the labels, then expands with bfs_kernel.
/// Fix applied: restored the mangled "&degree" argument ("°ree" was an
/// HTML-entity round-trip artifact).
/// @return processing time in milliseconds (traversal only, excluding setup)
long bfsCaculateInShare(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, uint sourceNode) {
    auto start = std::chrono::steady_clock::now();
    uint *degree;
    uint *value;
    uint sourceCode = 0;
    gpuErrorcheck(hipMallocManaged(&degree, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMallocManaged(&value, testNumNodes * sizeof(uint)));
    auto startPreCaculate = std::chrono::steady_clock::now();
    // Compute out-degrees from consecutive CSR node pointers; a pointer past
    // the edge count indicates a corrupt graph file.
    for (uint i = 0; i < testNumNodes - 1; i++) {
        if (nodePointersI[i] > testNumEdge) {
            cout << i << " " << nodePointersI[i] << endl;
            break;
        }
        degree[i] = nodePointersI[i + 1] - nodePointersI[i];
    }
    degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1];
    sourceCode = sourceNode;
    cout << "sourceNode " << sourceNode << " degree " << degree[sourceNode] << endl;
    bool *label;
    gpuErrorcheck(hipMallocManaged(&label, testNumNodes * sizeof(bool)));
    for (uint i = 0; i < testNumNodes; i++) {
        label[i] = false;
        value[i] = UINT_MAX;    // UINT_MAX marks "not yet visited"
    }
    auto endPreCaculate = std::chrono::steady_clock::now();
    long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
            endPreCaculate - startPreCaculate).count();
    cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl;
    // Seed the frontier with the source vertex; levels are 1-based here.
    label[sourceCode] = true;
    value[sourceCode] = 1;
    uint *activeNodeList;
    hipMallocManaged(&activeNodeList, testNumNodes * sizeof(uint));
    // Per-vertex 0/1 activity flags and their prefix sums, used to compact
    // the frontier into a dense activeNodeList.
    uint *activeNodeLabelingD;
    gpuErrorcheck(hipMallocManaged(&activeNodeLabelingD, testNumNodes * sizeof(unsigned int)));
    uint *activeNodeLabelingPrefixD;
    gpuErrorcheck(hipMallocManaged(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
    dim3 grid = dim3(56, 1, 1);
    dim3 block = dim3(1024, 1, 1);
    hipLaunchKernelGGL(( setLabeling), dim3(grid), dim3(block), 0, 0, testNumNodes, label, activeNodeLabelingD);
    thrust::device_ptr<unsigned int> ptr_labeling(activeNodeLabelingD);
    thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
    uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
    int iter = 0;
    uint nodeSum = activeNodesNum;
    auto startProcessing = std::chrono::steady_clock::now();
    //vector<vector<uint>> visitRecordByIteration;
    while (activeNodesNum > 0) {
        iter++;
        // Compact frontier -> activeNodeList, clear labels, expand frontier.
        thrust::exclusive_scan(ptr_labeling, ptr_labeling + testNumNodes, ptr_labeling_prefixsum);
        hipLaunchKernelGGL(( setActiveNodeArray), dim3(grid), dim3(block), 0, 0, testNumNodes, activeNodeList, label, activeNodeLabelingPrefixD);
        hipLaunchKernelGGL(( setLabelDefault), dim3(grid), dim3(block), 0, 0, activeNodesNum, activeNodeList, label);
        hipLaunchKernelGGL(( bfs_kernel), dim3(grid), dim3(block), 0, 0, activeNodesNum, activeNodeList, nodePointersI, degree, edgeList, value, label);
        hipDeviceSynchronize();
        gpuErrorcheck(hipPeekAtLastError());
        //visitRecordByIteration.push_back(countDataByIteration(testNumEdge, testNumNodes, nodePointersI, degree, activeNodeLabelingD));
        hipLaunchKernelGGL(( setLabeling), dim3(grid), dim3(block), 0, 0, testNumNodes, label, activeNodeLabelingD);
        activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
        nodeSum += activeNodesNum;
        cout << "iter: " << iter << " activeNodes: " << activeNodesNum << endl;
    }
    hipDeviceSynchronize();
    //writeTrunkVistInIteration(visitRecordByIteration, "./CountByIterationbfs.txt");
    cout << "nodeSum: " << nodeSum << endl;
    auto endRead = std::chrono::steady_clock::now();
    long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
    cout << "iter sum is " << iter << " finish time : " << durationRead << " ms" << endl;
    //cout << "range min " << rangeMin << " range max " << rangeMax << " range sum " << rangeSum << endl;
    cout << "source node pointer " << nodePointersI[sourceNode] << endl;
    hipFree(degree);
    hipFree(label);
    hipFree(value);
    hipFree(activeNodeList);
    hipFree(activeNodeLabelingD);
    hipFree(activeNodeLabelingPrefixD);
    return durationRead;
}
long
bfsCaculateInAsyncNoUVMSwap(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, uint sourceNode) {
cout << "=========bfsCaculateInAsyncNoUVM========" << endl;
auto start = std::chrono::steady_clock::now();
auto startPreCaculate = std::chrono::steady_clock::now();
//CPU
long durationRead;
uint fragmentNum = testNumEdge / fragment_size;
unsigned long max_partition_size;
unsigned long total_gpu_size;
uint staticFragmentNum;
uint maxStaticNode = 0;
uint *degree;
uint *value;
uint *label;
uint *staticFragmentToNormalMap;
bool *isInStatic;
uint *overloadNodeList;
uint *staticNodePointer;
uint *staticFragmentData;
uint *overloadFragmentData;
uint *activeNodeList;
uint *activeOverloadNodePointers;
vector<PartEdgeListInfo> partEdgeListInfoArr;
/*
* overloadEdgeList overload edge list in every iteration
* */
uint *overloadEdgeList;
FragmentData *fragmentData;
bool isFromTail = false;
//GPU
uint *staticEdgeListD;
uint *overloadEdgeListD;
bool *isInStaticD;
uint *overloadNodeListD;
uint *staticNodePointerD;
uint *staticFragmentVisitRecordsD;
uint *staticFragmentDataD;
uint *canSwapStaticFragmentDataD;
uint *canSwapFragmentPrefixSumD;
uint *degreeD;
// async need two labels
uint *isActiveD1;
uint *isStaticActive;
uint *isOverloadActive;
uint *valueD;
uint *activeNodeListD;
uint *activeNodeLabelingPrefixD;
uint *overloadLabelingPrefixD;
uint *activeOverloadNodePointersD;
uint *activeOverloadDegreeD;
degree = new uint[testNumNodes];
value = new uint[testNumNodes];
label = new uint[testNumNodes];
isInStatic = new bool[testNumNodes];
overloadNodeList = new uint[testNumNodes];
staticNodePointer = new uint[testNumNodes];
activeNodeList = new uint[testNumNodes];
activeOverloadNodePointers = new uint[testNumNodes];
fragmentData = new FragmentData[fragmentNum];
//getMaxPartitionSize(max_partition_size, testNumNodes);
getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, 0.97, sizeof(uint));
staticFragmentNum = max_partition_size / fragment_size;
staticFragmentToNormalMap = new uint[staticFragmentNum];
staticFragmentData = new uint[staticFragmentNum];
overloadFragmentData = new uint[fragmentNum];
//caculate degree
uint meanDegree = testNumEdge / testNumNodes;
cout << " meanDegree " << meanDegree << endl;
uint degree0Sum = 0;
for (uint i = 0; i < testNumNodes - 1; i++) {
if (nodePointersI[i] > testNumEdge) {
cout << i << " " << nodePointersI[i] << endl;
break;
}
degree[i] = nodePointersI[i + 1] - nodePointersI[i];
}
degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1];
memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint));
//caculate static staticEdgeListD
gpuErrorcheck(hipMalloc(&staticEdgeListD, max_partition_size * sizeof(uint)));
auto startmove = std::chrono::steady_clock::now();
gpuErrorcheck(
hipMemcpy(staticEdgeListD, edgeList, max_partition_size * sizeof(uint), hipMemcpyHostToDevice));
auto endMove = std::chrono::steady_clock::now();
long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>(
endMove - startmove).count();
gpuErrorcheck(hipMalloc(&isInStaticD, testNumNodes * sizeof(bool)))
gpuErrorcheck(hipMalloc(&overloadNodeListD, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&staticNodePointerD, testNumNodes * sizeof(uint)))
gpuErrorcheck(hipMemcpy(staticNodePointerD, nodePointersI, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
for (uint i = 0; i < testNumNodes; i++) {
label[i] = 0;
value[i] = UINT_MAX - 1;
uint pointStartFragmentIndex = nodePointersI[i] / fragment_size;
uint pointEndFragmentIndex =
degree[i] == 0 ? pointStartFragmentIndex : (nodePointersI[i] + degree[i] - 1) / fragment_size;
if (pointStartFragmentIndex == pointEndFragmentIndex && pointStartFragmentIndex >= 0 &&
pointStartFragmentIndex < fragmentNum) {
if (fragmentData[pointStartFragmentIndex].vertexNum == 0) {
fragmentData[pointStartFragmentIndex].startVertex = i;
} else if (fragmentData[pointStartFragmentIndex].startVertex > i) {
fragmentData[pointStartFragmentIndex].startVertex = i;
}
fragmentData[pointStartFragmentIndex].vertexNum++;
}
if (nodePointersI[i] < max_partition_size && (nodePointersI[i] + degree[i] - 1) < max_partition_size) {
isInStatic[i] = true;
if (i > maxStaticNode) maxStaticNode = i;
} else {
isInStatic[i] = false;
}
}
label[sourceNode] = 1;
value[sourceNode] = 1;
hipMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), hipMemcpyHostToDevice);
cout << "max_partition_size: " << max_partition_size << " maxStaticNode: " << maxStaticNode << endl;
cout << "fragmentNum " << fragmentNum << " staticFragmentNum " << staticFragmentNum << endl;
for (int i = 0; i < staticFragmentNum; i++) {
fragmentData[i].isIn = true;
}
for (uint i = 0; i < staticFragmentNum; i++) {
staticFragmentToNormalMap[i] = i;
}
//uint partOverloadSize = max_partition_size / 2;
uint partOverloadSize = total_gpu_size - max_partition_size;
uint overloadSize = testNumEdge - nodePointersI[maxStaticNode + 1];
cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl;
overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint));
if (overloadEdgeList == NULL) {
cout << "overloadEdgeList is null" << endl;
return 0;
}
gpuErrorcheck(hipMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint)));
//gpuErrorcheck(hipMallocManaged(&edgeListOverloadManage, overloadSize * sizeof(uint)));
gpuErrorcheck(hipMalloc(&staticFragmentDataD, staticFragmentNum * sizeof(uint)));
gpuErrorcheck(hipMalloc(&staticFragmentVisitRecordsD, staticFragmentNum * sizeof(uint)));
gpuErrorcheck(hipMalloc(&canSwapStaticFragmentDataD, staticFragmentNum * sizeof(uint)));
gpuErrorcheck(hipMalloc(&canSwapFragmentPrefixSumD, staticFragmentNum * sizeof(uint)));
thrust::device_ptr<unsigned int> ptr_canSwapFragment(canSwapStaticFragmentDataD);
thrust::device_ptr<unsigned int> ptr_canSwapFragmentPrefixSum(canSwapFragmentPrefixSumD);
gpuErrorcheck(hipMalloc(°reeD, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&isActiveD1, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&isStaticActive, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&isOverloadActive, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&valueD, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
gpuErrorcheck(hipMalloc(&overloadLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
gpuErrorcheck(hipMalloc(&activeNodeListD, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMemcpy(degreeD, degree, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemset(isStaticActive, 0, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMemset(isOverloadActive, 0, testNumNodes * sizeof(uint)));
//cacaulate the active node And make active node array
dim3 grid = dim3(56, 1, 1);
dim3 block = dim3(1024, 1, 1);
//setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD);
thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1);
thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive);
thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive);
thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD);
thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(overloadLabelingPrefixD);
uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
int iter = 0;
uint nodeSum = activeNodesNum;
uint overloadEdgeSum = 0;
auto startCpu = std::chrono::steady_clock::now();
auto endReadCpu = std::chrono::steady_clock::now();
long durationReadCpu = 0;
auto startSwap = std::chrono::steady_clock::now();
auto endSwap = std::chrono::steady_clock::now();
long durationSwap = 0;
auto startGpuProcessing = std::chrono::steady_clock::now();
auto endGpuProcessing = std::chrono::steady_clock::now();
long durationGpuProcessing = 0;
auto startOverloadGpuProcessing = std::chrono::steady_clock::now();
auto endOverloadGpuProcessing = std::chrono::steady_clock::now();
long durationOverloadGpuProcessing = 0;
auto startPreGpuProcessing = std::chrono::steady_clock::now();
auto endPreGpuProcessing = std::chrono::steady_clock::now();
long durationPreGpuProcessing = 0;
auto endPreCaculate = std::chrono::steady_clock::now();
long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
endPreCaculate - startPreCaculate).count();
cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl;
hipStream_t steamStatic, streamDynamic;
hipStreamCreate(&steamStatic);
hipStreamCreate(&streamDynamic);
auto startMemoryTraverse = std::chrono::steady_clock::now();
auto endMemoryTraverse = std::chrono::steady_clock::now();
long durationMemoryTraverse = 0;
auto startProcessing = std::chrono::steady_clock::now();
uint cursorStartSwap = isFromTail ? fragmentNum - 1 : staticFragmentNum + 1;
//uint cursorStartSwap = staticFragmentNum + 1;
uint swapValidNodeSum = 0;
uint swapValidEdgeSum = 0;
uint swapNotValidNodeSum = 0;
uint swapNotValidEdgeSum = 0;
uint visitEdgeSum = 0;
uint swapInEdgeSum = 0;
uint partOverloadSum = 0;
while (activeNodesNum > 0) {
startPreGpuProcessing = std::chrono::steady_clock::now();
iter++;
cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl;
hipLaunchKernelGGL(( setStaticAndOverloadLabel), dim3(grid), dim3(block), 0, 0, testNumNodes, isActiveD1, isStaticActive, isOverloadActive,
isInStaticD);
uint staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes);
if (staticNodeNum > 0) {
cout << "iter " << iter << " staticNodeNum " << staticNodeNum << endl;
thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum);
hipLaunchKernelGGL(( setStaticActiveNodeArray), dim3(grid), dim3(block), 0, 0, testNumNodes, activeNodeListD, isStaticActive,
activeNodeLabelingPrefixD);
}
uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes);
uint overloadEdgeNum = 0;
if (overloadNodeNum > 0) {
cout << "iter " << iter << " overloadNodeNum " << overloadNodeNum << endl;
thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes, ptrOverloadPrefixsum);
hipLaunchKernelGGL(( setOverloadNodePointerSwap), dim3(grid), dim3(block), 0, 0, testNumNodes, overloadNodeListD, activeOverloadDegreeD,
isOverloadActive,
overloadLabelingPrefixD, degreeD);
thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, activeOverloadNodePointersD);
overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree,
ptrOverloadDegree + overloadNodeNum, 0);
cout << "iter " << iter << " overloadEdgeNum " << overloadEdgeNum << endl;
overloadEdgeSum += overloadEdgeNum;
}
endPreGpuProcessing = std::chrono::steady_clock::now();
durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endPreGpuProcessing - startPreGpuProcessing).count();
startGpuProcessing = std::chrono::steady_clock::now();
if (staticNodeNum > 0) {
hipLaunchKernelGGL(( setLabelDefaultOpt), dim3(grid), dim3(block), 0, steamStatic, staticNodeNum, activeNodeListD, isActiveD1);
}
if (overloadNodeNum > 0) {
hipLaunchKernelGGL(( setLabelDefaultOpt), dim3(grid), dim3(block), 0, steamStatic, overloadNodeNum, overloadNodeListD, isActiveD1);
}
hipLaunchKernelGGL(( bfs_kernelStaticSwap), dim3(grid), dim3(block), 0, steamStatic, staticNodeNum, activeNodeListD,
staticNodePointerD, degreeD,
staticEdgeListD, valueD, isActiveD1,
staticFragmentVisitRecordsD, fragment_size,
max_partition_size, testNumNodes);
/*mixDynamicPartLabel<<<grid, block, 0, steamStatic>>>(staticNodeNum, 0, activeNodeListD, isActiveD1,
isActiveD2);
bfs_kernelStatic2Label<<<grid, block, 0, steamStatic>>>(staticNodeNum, activeNodeListD,
staticNodePointerD, degreeD,
staticEdgeListD, valueD, isActiveD1, isActiveD2);*/
if (overloadNodeNum > 0) {
startCpu = std::chrono::steady_clock::now();
/*hipMemcpyAsync(staticActiveNodeList, activeNodeListD, activeNodesNum * sizeof(uint), hipMemcpyDeviceToHost,
streamDynamic);*/
hipMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint), hipMemcpyDeviceToHost,
streamDynamic);
hipMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint),
hipMemcpyDeviceToHost, streamDynamic);
int threadNum = 20;
if (overloadNodeNum < 50) {
threadNum = 1;
}
thread runThreads[threadNum];
for (int i = 0; i < threadNum; i++) {
runThreads[i] = thread(fillDynamic,
i,
threadNum,
0,
overloadNodeNum,
degree,
activeOverloadNodePointers,
nodePointersI,
overloadNodeList,
overloadEdgeList,
edgeList);
}
for (unsigned int t = 0; t < threadNum; t++) {
runThreads[t].join();
}
caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr,
overloadNodeNum, partOverloadSize, overloadEdgeNum);
endReadCpu = std::chrono::steady_clock::now();
durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count();
uint canSwapFragmentNum;
hipLaunchKernelGGL(( setFragmentDataOpt), dim3(grid), dim3(block), 0, steamStatic, canSwapStaticFragmentDataD, staticFragmentNum,
staticFragmentVisitRecordsD);
canSwapFragmentNum = thrust::reduce(ptr_canSwapFragment, ptr_canSwapFragment + staticFragmentNum);
if (canSwapFragmentNum > 0) {
thrust::exclusive_scan(ptr_canSwapFragment, ptr_canSwapFragment + staticFragmentNum,
ptr_canSwapFragmentPrefixSum);
hipLaunchKernelGGL(( setStaticFragmentData), dim3(grid), dim3(block), 0, steamStatic, staticFragmentNum, canSwapStaticFragmentDataD,
canSwapFragmentPrefixSumD, staticFragmentDataD);
hipMemcpyAsync(staticFragmentData, staticFragmentDataD, canSwapFragmentNum * sizeof(uint),
hipMemcpyDeviceToHost, steamStatic);
}
hipDeviceSynchronize();
//gpuErrorcheck(hipPeekAtLastError())
endGpuProcessing = std::chrono::steady_clock::now();
durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endGpuProcessing - startGpuProcessing).count();
startMemoryTraverse = std::chrono::steady_clock::now();
partOverloadSum += partEdgeListInfoArr.size();
for (int i = 0; i < partEdgeListInfoArr.size(); i++) {
gpuErrorcheck(hipMemcpy(overloadEdgeListD, overloadEdgeList +
activeOverloadNodePointers[partEdgeListInfoArr[i].partStartIndex],
partEdgeListInfoArr[i].partEdgeNums * sizeof(uint),
hipMemcpyHostToDevice))
startOverloadGpuProcessing = std::chrono::steady_clock::now();
/*mixDynamicPartLabel<<<grid, block, 0, streamDynamic>>>(partEdgeListInfoArr[i].partActiveNodeNums,
partEdgeListInfoArr[i].partStartIndex,
overloadNodeListD, isActiveD1,
isActiveD2);*/
hipLaunchKernelGGL(( bfs_kernelDynamicPart), dim3(grid), dim3(block), 0, streamDynamic,
partEdgeListInfoArr[i].partStartIndex,
partEdgeListInfoArr[i].partActiveNodeNums,
overloadNodeListD, degreeD,
valueD, isActiveD1,
overloadEdgeListD,
activeOverloadNodePointersD);
if (canSwapFragmentNum > 0) {
startSwap = std::chrono::steady_clock::now();
uint canSwapStaticFragmentIndex = 0;
uint swapSum = 0;
//for (uint i = cursorStartSwap; i > 0; i--) {
for (uint i = cursorStartSwap; i < fragmentNum; i++) {
if (hipSuccess == hipStreamQuery(streamDynamic) ||
canSwapStaticFragmentIndex >= canSwapFragmentNum) {
if (i < fragmentNum) {
cursorStartSwap = i + 1;
}
cout << "iter " << iter << " swapSum " << swapSum << " swap to " << i << endl;
swapInEdgeSum += swapSum * fragment_size;
endSwap = std::chrono::steady_clock::now();
durationSwap += std::chrono::duration_cast<std::chrono::milliseconds>(
endSwap - startOverloadGpuProcessing).count();
break;
}
if (hipErrorNotReady == hipStreamQuery(streamDynamic)) {
const FragmentData swapFragmentData = fragmentData[i];
if (!swapFragmentData.isVisit && !swapFragmentData.isIn && swapFragmentData.vertexNum > 0) {
uint swapStaticFragmentIndex = staticFragmentData[canSwapStaticFragmentIndex++];
uint beSwappedFragmentIndex = staticFragmentToNormalMap[swapStaticFragmentIndex];
fragmentData[beSwappedFragmentIndex].isVisit = true;
fragmentData[beSwappedFragmentIndex].isIn = false;
FragmentData beSwappedFragment = fragmentData[beSwappedFragmentIndex];
uint moveFrom = testNumEdge;
uint moveTo = testNumEdge;
uint moveNum = testNumEdge;
if (beSwappedFragment.vertexNum > 0 && beSwappedFragmentIndex > 0 &&
beSwappedFragmentIndex < fragmentNum) {
for (uint j = beSwappedFragment.startVertex - 1;
j < beSwappedFragment.startVertex + beSwappedFragment.vertexNum + 1 &&
j < testNumNodes; j++) {
isInStatic[j] = false;
}
for (uint j = swapFragmentData.startVertex;
j < swapFragmentData.startVertex + swapFragmentData.vertexNum; j++) {
isInStatic[j] = true;
staticNodePointer[j] =
nodePointersI[j] - i * fragment_size +
swapStaticFragmentIndex * fragment_size;
}
moveFrom = nodePointersI[swapFragmentData.startVertex];
moveTo = staticNodePointer[swapFragmentData.startVertex];
moveNum = nodePointersI[swapFragmentData.startVertex + swapFragmentData.vertexNum] -
nodePointersI[swapFragmentData.startVertex];
hipMemcpyAsync(staticEdgeListD + moveTo, edgeList + moveFrom,
moveNum * sizeof(uint),
hipMemcpyHostToDevice, steamStatic);
hipMemcpyAsync(isInStaticD + beSwappedFragment.startVertex - 1,
isInStatic + beSwappedFragment.startVertex - 1,
(beSwappedFragment.vertexNum + 2) * sizeof(bool),
hipMemcpyHostToDevice, steamStatic);
hipMemcpyAsync(isInStaticD + swapFragmentData.startVertex,
isInStatic + swapFragmentData.startVertex,
swapFragmentData.vertexNum * sizeof(bool),
hipMemcpyHostToDevice, steamStatic);
hipMemcpyAsync(staticNodePointerD + swapFragmentData.startVertex,
staticNodePointer + swapFragmentData.startVertex,
swapFragmentData.vertexNum * sizeof(uint), hipMemcpyHostToDevice,
steamStatic);
staticFragmentToNormalMap[swapStaticFragmentIndex] = i;
fragmentData[i].isIn = true;
swapSum++;
}
}
}
}
}
hipDeviceSynchronize();
endOverloadGpuProcessing = std::chrono::steady_clock::now();
durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endOverloadGpuProcessing - startOverloadGpuProcessing).count();
}
endMemoryTraverse = std::chrono::steady_clock::now();
durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>(
endMemoryTraverse - startMemoryTraverse).count();
//gpuErrorcheck(hipPeekAtLastError())
} else {
hipDeviceSynchronize();
endGpuProcessing = std::chrono::steady_clock::now();
durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endGpuProcessing - startGpuProcessing).count();
}
hipDeviceSynchronize();
startPreGpuProcessing = std::chrono::steady_clock::now();
/*mixCommonLabel<<<grid, block, 0, streamDynamic>>>(testNumNodes, isActiveD1, isActiveD2);
hipDeviceSynchronize();*/
activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
nodeSum += activeNodesNum;
endPreGpuProcessing = std::chrono::steady_clock::now();
durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endPreGpuProcessing - startPreGpuProcessing).count();
}
//hipDeviceSynchronize();
auto endRead = std::chrono::steady_clock::now();
durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
cout << "finish time : " << durationRead << " ms" << endl;
cout << "cpu time : " << durationReadCpu << " ms" << endl;
cout << "fact processing time : " << durationGpuProcessing << " ms" << endl;
cout << "durationOverloadGpuProcessing time : " << durationOverloadGpuProcessing << " ms" << endl;
cout << "durationMemoryTraverse time : " << durationMemoryTraverse << " ms" << endl;
cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl;
cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl;
cout << "partOverloadSum : " << partOverloadSum << " " << endl;
cout << "nodeSum: " << nodeSum << endl;
hipFree(staticEdgeListD);
hipFree(degreeD);
hipFree(isActiveD1);
hipFree(valueD);
hipFree(activeNodeListD);
hipFree(activeNodeLabelingPrefixD);
hipFree(activeOverloadNodePointersD);
hipFree(activeOverloadDegreeD);
hipFree(isInStaticD);
hipFree(staticNodePointerD);
hipFree(overloadNodeListD);
hipFree(staticFragmentVisitRecordsD);
hipFree(staticFragmentDataD);
hipFree(canSwapStaticFragmentDataD);
hipFree(canSwapFragmentPrefixSumD);
hipFree(overloadEdgeListD);
hipFree(isStaticActive);
hipFree(isOverloadActive);
hipFree(overloadLabelingPrefixD);
delete[] label;
delete[] degree;
delete[] value;
delete[] activeNodeList;
delete[] activeOverloadNodePointers;
delete[] staticFragmentData;
delete[] isInStatic;
delete[] overloadNodeList;
delete[] staticNodePointer;
delete[] staticFragmentToNormalMap;
delete[] fragmentData;
delete[] overloadFragmentData;
delete[] overloadEdgeList;
partEdgeListInfoArr.clear();
return durationRead;
}
long
bfsCaculateInAsyncNoUVM(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, uint sourceNode,
                        float adviseK) {
    // BFS over a CSR graph without unified virtual memory (UVM).
    //
    // The edge list is split into two parts:
    //   - a "static" prefix (max_partition_size edges) copied once to the GPU
    //     and traversed by bfs_kernelStatic on stream `steamStatic`;
    //   - an "overload" remainder whose edges are gathered on the CPU each
    //     iteration (fillDynamic worker threads) and streamed to the GPU in
    //     chunks (partEdgeListInfoArr) for bfs_kernelDynamicPart on
    //     `streamDynamic`.
    //
    // Parameters:
    //   testNumNodes / testNumEdge - node and edge counts of the CSR graph
    //   nodePointersI              - CSR row pointers (length testNumNodes)
    //   edgeList                   - CSR adjacency array (length testNumEdge)
    //   sourceNode                 - BFS root
    //   adviseK                    - partition-sizing hint forwarded to
    //                                getMaxPartitionSize
    // Returns the measured processing time in milliseconds (0 on allocation
    // failure of the host overload buffer).
    cout << "=========bfsCaculateInAsyncNoUVM========" << endl;
    ulong edgeIterationMax = 0;   // largest overload-edge count seen in one iteration
    auto startPreCaculate = std::chrono::steady_clock::now();
    //CPU
    long durationRead;
    ulong transferSum = 0;        // total uints copied host->device for overload parts
    unsigned long max_partition_size;   // edges resident on the GPU (static part)
    unsigned long total_gpu_size;       // total GPU edge capacity
    uint maxStaticNode = 0;             // highest node id fully inside the static part
    uint *degree;
    uint *value;
    uint *label;
    bool *isInStatic;
    uint *overloadNodeList;
    uint *staticNodePointer;
    uint *activeNodeList;
    uint *activeOverloadNodePointers;
    vector<PartEdgeListInfo> partEdgeListInfoArr;
    /*
     * overloadEdgeList: host staging buffer holding the gathered edges of all
     * overload nodes for the current iteration.
     * */
    uint *overloadEdgeList;
    //GPU
    uint *staticEdgeListD;
    uint *overloadEdgeListD;
    bool *isInStaticD;
    uint *overloadNodeListD;
    uint *staticNodePointerD;
    uint *degreeD;
    // async needs separate activity flags for the static and overload partitions
    uint *isActiveD1;
    uint *isStaticActive;
    uint *isOverloadActive;
    uint *valueD;
    uint *activeNodeListD;
    uint *activeNodeLabelingPrefixD;
    uint *overloadLabelingPrefixD;
    uint *activeOverloadNodePointersD;
    uint *activeOverloadDegreeD;
    degree = new uint[testNumNodes];
    value = new uint[testNumNodes];
    label = new uint[testNumNodes];
    isInStatic = new bool[testNumNodes];
    overloadNodeList = new uint[testNumNodes];
    staticNodePointer = new uint[testNumNodes];
    activeNodeList = new uint[testNumNodes];
    activeOverloadNodePointers = new uint[testNumNodes];
    getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, adviseK, sizeof(uint), testNumEdge, 15);
    //caculate per-node degree from the CSR row pointers
    uint meanDegree = testNumEdge / testNumNodes;
    cout << " meanDegree " << meanDegree << endl;
    for (uint i = 0; i < testNumNodes - 1; i++) {
        if (nodePointersI[i] > testNumEdge) {
            // corrupted CSR input: row pointer past the end of the edge list
            cout << i << " " << nodePointersI[i] << endl;
            break;
        }
        degree[i] = nodePointersI[i + 1] - nodePointersI[i];
    }
    degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1];
    memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint));
    //copy the static prefix of the edge list to the GPU once, up front
    gpuErrorcheck(hipMalloc(&staticEdgeListD, max_partition_size * sizeof(uint)));
    auto startmove = std::chrono::steady_clock::now();
    gpuErrorcheck(
            hipMemcpy(staticEdgeListD, edgeList, max_partition_size * sizeof(uint), hipMemcpyHostToDevice));
    auto endMove = std::chrono::steady_clock::now();
    long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>(
            endMove - startmove).count();
    cout << "move duration " << testDuration << endl;
    gpuErrorcheck(hipMalloc(&isInStaticD, testNumNodes * sizeof(bool)))
    gpuErrorcheck(hipMalloc(&overloadNodeListD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&staticNodePointerD, testNumNodes * sizeof(uint)))
    gpuErrorcheck(hipMemcpy(staticNodePointerD, nodePointersI, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
    for (uint i = 0; i < testNumNodes; i++) {
        label[i] = 0;
        value[i] = UINT_MAX - 1;
        // a node is "static" when its whole adjacency lies inside the resident partition
        if (nodePointersI[i] < max_partition_size && (nodePointersI[i] + degree[i] - 1) < max_partition_size) {
            isInStatic[i] = true;
            if (i > maxStaticNode) maxStaticNode = i;
        } else {
            isInStatic[i] = false;
        }
    }
    label[sourceNode] = 1;
    value[sourceNode] = 1;
    hipMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), hipMemcpyHostToDevice);
    cout << "max_partition_size: " << max_partition_size << " maxStaticNode: " << maxStaticNode << endl;
    // remaining GPU capacity is reused as a streaming window for overload edges
    uint partOverloadSize = total_gpu_size - max_partition_size;
    uint overloadSize = testNumEdge - nodePointersI[maxStaticNode + 1];
    cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl;
    overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint));
    if (overloadEdgeList == NULL) {
        cout << "overloadEdgeList is null" << endl;
        return 0;
    }
    gpuErrorcheck(hipMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&degreeD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&isActiveD1, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&isStaticActive, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&isOverloadActive, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&valueD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
    gpuErrorcheck(hipMalloc(&overloadLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
    gpuErrorcheck(hipMalloc(&activeNodeListD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMemcpy(degreeD, degree, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
    gpuErrorcheck(hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
    gpuErrorcheck(hipMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
    gpuErrorcheck(hipMemset(isStaticActive, 0, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMemset(isOverloadActive, 0, testNumNodes * sizeof(uint)));
    //caculate the active nodes and build the active node arrays
    dim3 grid = dim3(56, 1, 1);
    dim3 block = dim3(1024, 1, 1);
    thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1);
    thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive);
    thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive);
    thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
    thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD);
    thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(overloadLabelingPrefixD);
    uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
    int iter = 0;
    uint nodeSum = activeNodesNum;
    ulong overloadEdgeSum = 0;
    auto startCpu = std::chrono::steady_clock::now();
    auto endReadCpu = std::chrono::steady_clock::now();
    long durationReadCpu = 0;
    auto startGpuProcessing = std::chrono::steady_clock::now();
    auto endGpuProcessing = std::chrono::steady_clock::now();
    long durationGpuProcessing = 0;
    auto startOverloadGpuProcessing = std::chrono::steady_clock::now();
    auto endOverloadGpuProcessing = std::chrono::steady_clock::now();
    long durationOverloadGpuProcessing = 0;
    auto startPreGpuProcessing = std::chrono::steady_clock::now();
    auto endPreGpuProcessing = std::chrono::steady_clock::now();
    long durationPreGpuProcessing = 0;
    auto endPreCaculate = std::chrono::steady_clock::now();
    long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
            endPreCaculate - startPreCaculate).count();
    cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl;
    hipStream_t steamStatic, streamDynamic;
    hipStreamCreate(&steamStatic);
    hipStreamCreate(&streamDynamic);
    auto startMemoryTraverse = std::chrono::steady_clock::now();
    auto endMemoryTraverse = std::chrono::steady_clock::now();
    long durationMemoryTraverse = 0;
    auto startProcessing = std::chrono::steady_clock::now();
    uint partOverloadSum = 0;   // total number of overload chunks transferred
    while (activeNodesNum > 0) {
        startPreGpuProcessing = std::chrono::steady_clock::now();
        iter++;
        // split the frontier into static / overload sub-frontiers
        hipLaunchKernelGGL(( setStaticAndOverloadLabel), dim3(grid), dim3(block), 0, 0, testNumNodes, isActiveD1, isStaticActive, isOverloadActive,
                                                    isInStaticD);
        uint staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes);
        if (staticNodeNum > 0) {
            thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum);
            hipLaunchKernelGGL(( setStaticActiveNodeArray), dim3(grid), dim3(block), 0, 0, testNumNodes, activeNodeListD, isStaticActive,
                                                       activeNodeLabelingPrefixD);
        }
        uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes);
        uint overloadEdgeNum = 0;
        if (overloadNodeNum > 0) {
            // compact overload nodes and build per-node offsets into the
            // gathered overload edge buffer
            thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes, ptrOverloadPrefixsum);
            hipLaunchKernelGGL(( setOverloadNodePointerSwap), dim3(grid), dim3(block), 0, 0, testNumNodes, overloadNodeListD, activeOverloadDegreeD,
                                                         isOverloadActive,
                                                         overloadLabelingPrefixD, degreeD);
            thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, activeOverloadNodePointersD);
            overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree,
                                             ptrOverloadDegree + overloadNodeNum, 0);
            overloadEdgeSum += overloadEdgeNum;
            if (overloadEdgeNum > edgeIterationMax) {
                edgeIterationMax = overloadEdgeNum;
            }
        }
        endPreGpuProcessing = std::chrono::steady_clock::now();
        durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                endPreGpuProcessing - startPreGpuProcessing).count();
        startGpuProcessing = std::chrono::steady_clock::now();
        if (staticNodeNum > 0) {
            hipLaunchKernelGGL(( setLabelDefaultOpt), dim3(grid), dim3(block), 0, steamStatic, staticNodeNum, activeNodeListD, isActiveD1);
        }
        if (overloadNodeNum > 0) {
            hipLaunchKernelGGL(( setLabelDefaultOpt), dim3(grid), dim3(block), 0, steamStatic, overloadNodeNum, overloadNodeListD, isActiveD1);
        }
        hipLaunchKernelGGL(( bfs_kernelStatic), dim3(grid), dim3(block), 0, steamStatic, staticNodeNum, activeNodeListD,
                                                                 staticNodePointerD, degreeD,
                                                                 staticEdgeListD, valueD, isActiveD1);
        if (overloadNodeNum > 0) {
            startCpu = std::chrono::steady_clock::now();
            hipMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint), hipMemcpyDeviceToHost,
                           streamDynamic);
            hipMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint),
                           hipMemcpyDeviceToHost, streamDynamic);
            // the CPU gather threads below read both host buffers, so the
            // async copies must have completed before they start
            hipStreamSynchronize(streamDynamic);
            int threadNum = 20;
            if (overloadNodeNum < 50) {
                threadNum = 1;
            }
            // fillDynamic gathers the adjacency of each overload node from
            // edgeList into the contiguous overloadEdgeList staging buffer
            vector<thread> runThreads(threadNum);
            for (int i = 0; i < threadNum; i++) {
                runThreads[i] = thread(fillDynamic,
                                       i,
                                       threadNum,
                                       0,
                                       overloadNodeNum,
                                       degree,
                                       activeOverloadNodePointers,
                                       nodePointersI,
                                       overloadNodeList,
                                       overloadEdgeList,
                                       edgeList);
            }
            for (int t = 0; t < threadNum; t++) {
                runThreads[t].join();
            }
            // chunk the gathered edges so each chunk fits the GPU window
            caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr,
                                        overloadNodeNum, partOverloadSize, overloadEdgeNum);
            endReadCpu = std::chrono::steady_clock::now();
            durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count();
            hipDeviceSynchronize();
            endGpuProcessing = std::chrono::steady_clock::now();
            durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                    endGpuProcessing - startGpuProcessing).count();
            partOverloadSum += partEdgeListInfoArr.size();
            for (size_t i = 0; i < partEdgeListInfoArr.size(); i++) {
                startMemoryTraverse = std::chrono::steady_clock::now();
                gpuErrorcheck(hipMemcpy(overloadEdgeListD, overloadEdgeList +
                                                           activeOverloadNodePointers[partEdgeListInfoArr[i].partStartIndex],
                                        partEdgeListInfoArr[i].partEdgeNums * sizeof(uint),
                                        hipMemcpyHostToDevice))
                transferSum += partEdgeListInfoArr[i].partEdgeNums;
                endMemoryTraverse = std::chrono::steady_clock::now();
                durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>(
                        endMemoryTraverse - startMemoryTraverse).count();
                startOverloadGpuProcessing = std::chrono::steady_clock::now();
                hipLaunchKernelGGL(( bfs_kernelDynamicPart), dim3(grid), dim3(block), 0, streamDynamic,
                        partEdgeListInfoArr[i].partStartIndex,
                        partEdgeListInfoArr[i].partActiveNodeNums,
                        overloadNodeListD, degreeD,
                        valueD, isActiveD1,
                        overloadEdgeListD,
                        activeOverloadNodePointersD);
                hipDeviceSynchronize();
                endOverloadGpuProcessing = std::chrono::steady_clock::now();
                durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                        endOverloadGpuProcessing - startOverloadGpuProcessing).count();
            }
        } else {
            hipDeviceSynchronize();
            endGpuProcessing = std::chrono::steady_clock::now();
            durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                    endGpuProcessing - startGpuProcessing).count();
        }
        hipDeviceSynchronize();
        startPreGpuProcessing = std::chrono::steady_clock::now();
        activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
        nodeSum += activeNodesNum;
        endPreGpuProcessing = std::chrono::steady_clock::now();
        durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                endPreGpuProcessing - startPreGpuProcessing).count();
    }
    auto endRead = std::chrono::steady_clock::now();
    durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
    transferSum += max_partition_size;
    cout << "iterationSum " << iter << endl;
    double edgeIterationAvg = (double) overloadEdgeSum / (double) testNumEdge / iter;
    double edgeIterationMaxAvg = (double) edgeIterationMax / (double) testNumEdge;
    cout << "edgeIterationAvg " << edgeIterationAvg << " edgeIterationMaxAvg " << edgeIterationMaxAvg << endl;
    cout << "transferSum : " << transferSum * 4 << " byte" << endl;
    cout << "finish time : " << durationRead << " ms" << endl;
    cout << "total time : " << durationRead + testDuration << " ms" << endl;
    cout << "cpu time : " << durationReadCpu << " ms" << endl;
    cout << "fact processing time : " << durationGpuProcessing << " ms" << endl;
    cout << "durationOverloadGpuProcessing time : " << durationOverloadGpuProcessing << " ms" << endl;
    cout << "durationMemoryTraverse time : " << durationMemoryTraverse << " ms" << endl;
    cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl;
    cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl;
    cout << "partOverloadSum : " << partOverloadSum << " " << endl;
    cout << "nodeSum: " << nodeSum << endl;
    hipFree(staticEdgeListD);
    hipFree(degreeD);
    hipFree(isActiveD1);
    hipFree(valueD);
    hipFree(activeNodeListD);
    hipFree(activeNodeLabelingPrefixD);
    hipFree(activeOverloadNodePointersD);
    hipFree(activeOverloadDegreeD);
    hipFree(isInStaticD);
    hipFree(staticNodePointerD);
    hipFree(overloadNodeListD);
    hipFree(overloadEdgeListD);
    hipFree(isStaticActive);
    hipFree(isOverloadActive);
    hipFree(overloadLabelingPrefixD);
    // streams were leaked in the original version
    hipStreamDestroy(steamStatic);
    hipStreamDestroy(streamDynamic);
    delete[] label;
    delete[] degree;
    delete[] value;
    delete[] activeNodeList;
    delete[] activeOverloadNodePointers;
    delete[] isInStatic;
    delete[] overloadNodeList;
    delete[] staticNodePointer;
    // allocated with malloc, so it must be released with free (not delete[])
    free(overloadEdgeList);
    partEdgeListInfoArr.clear();
    return durationRead;
}
long
bfsCaculateInAsyncNoUVMVisitRecord(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList,
uint sourceNode,
float adviseK) {
cout << "=========bfsCaculateInAsyncNoUVM========" << endl;
ulong edgeIterationMax = 0;
auto start = std::chrono::steady_clock::now();
auto startPreCaculate = std::chrono::steady_clock::now();
//CPU
long durationRead;
ulong transferSum = 0;
unsigned long max_partition_size;
unsigned long total_gpu_size;
uint maxStaticNode = 0;
uint *degree;
uint *value;
uint *label;
bool *isInStatic;
uint *overloadNodeList;
uint *staticNodePointer;
uint *activeNodeList;
uint *activeOverloadNodePointers;
vector<PartEdgeListInfo> partEdgeListInfoArr;
/*
* overloadEdgeList overload edge list in every iteration
* */
uint *overloadEdgeList;
bool isFromTail = false;
//GPU
uint *staticEdgeListD;
uint *overloadEdgeListD;
bool *isInStaticD;
uint *overloadNodeListD;
uint *staticNodePointerD;
uint *degreeD;
// async need two labels
uint *isActiveD1;
uint *isStaticActive;
uint *isOverloadActive;
uint *valueD;
uint *activeNodeListD;
uint *activeNodeLabelingPrefixD;
uint *overloadLabelingPrefixD;
uint *activeOverloadNodePointersD;
uint *activeOverloadDegreeD;
degree = new uint[testNumNodes];
value = new uint[testNumNodes];
label = new uint[testNumNodes];
isInStatic = new bool[testNumNodes];
overloadNodeList = new uint[testNumNodes];
staticNodePointer = new uint[testNumNodes];
activeNodeList = new uint[testNumNodes];
activeOverloadNodePointers = new uint[testNumNodes];
uint *vertexVisitRecord;
uint *vertexVisitRecordD;
vertexVisitRecord = new uint[testNumNodes];
hipMalloc(&vertexVisitRecordD, testNumNodes * sizeof(uint));
hipMemset(vertexVisitRecordD, 0, testNumNodes * sizeof(uint));
//getMaxPartitionSize(max_partition_size, testNumNodes);
getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, adviseK, sizeof(uint), testNumEdge, 15);
//caculate degree
uint meanDegree = testNumEdge / testNumNodes;
cout << " meanDegree " << meanDegree << endl;
uint degree0Sum = 0;
for (uint i = 0; i < testNumNodes - 1; i++) {
if (nodePointersI[i] > testNumEdge) {
cout << i << " " << nodePointersI[i] << endl;
break;
}
degree[i] = nodePointersI[i + 1] - nodePointersI[i];
}
degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1];
memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint));
//caculate static staticEdgeListD
gpuErrorcheck(hipMalloc(&staticEdgeListD, max_partition_size * sizeof(uint)));
auto startmove = std::chrono::steady_clock::now();
gpuErrorcheck(
hipMemcpy(staticEdgeListD, edgeList, max_partition_size * sizeof(uint), hipMemcpyHostToDevice));
auto endMove = std::chrono::steady_clock::now();
long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>(
endMove - startmove).count();
cout << "move duration " << testDuration << endl;
gpuErrorcheck(hipMalloc(&isInStaticD, testNumNodes * sizeof(bool)))
gpuErrorcheck(hipMalloc(&overloadNodeListD, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&staticNodePointerD, testNumNodes * sizeof(uint)))
gpuErrorcheck(hipMemcpy(staticNodePointerD, nodePointersI, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
for (uint i = 0; i < testNumNodes; i++) {
label[i] = 0;
value[i] = UINT_MAX - 1;
if (nodePointersI[i] < max_partition_size && (nodePointersI[i] + degree[i] - 1) < max_partition_size) {
isInStatic[i] = true;
if (i > maxStaticNode) maxStaticNode = i;
} else {
isInStatic[i] = false;
}
}
label[sourceNode] = 1;
value[sourceNode] = 1;
hipMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), hipMemcpyHostToDevice);
cout << "max_partition_size: " << max_partition_size << " maxStaticNode: " << maxStaticNode << endl;
//uint partOverloadSize = max_partition_size / 2;
uint partOverloadSize = total_gpu_size - max_partition_size;
uint overloadSize = testNumEdge - nodePointersI[maxStaticNode + 1];
cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl;
overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint));
if (overloadEdgeList == NULL) {
cout << "overloadEdgeList is null" << endl;
return 0;
}
gpuErrorcheck(hipMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint)));
//gpuErrorcheck(hipMallocManaged(&edgeListOverloadManage, overloadSize * sizeof(uint)));
gpuErrorcheck(hipMalloc(°reeD, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&isActiveD1, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&isStaticActive, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&isOverloadActive, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&valueD, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
gpuErrorcheck(hipMalloc(&overloadLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
gpuErrorcheck(hipMalloc(&activeNodeListD, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMemcpy(degreeD, degree, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemset(isStaticActive, 0, testNumNodes * sizeof(uint)));
gpuErrorcheck(hipMemset(isOverloadActive, 0, testNumNodes * sizeof(uint)));
//cacaulate the active node And make active node array
dim3 grid = dim3(56, 1, 1);
dim3 block = dim3(1024, 1, 1);
//setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD);
thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1);
thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive);
thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive);
thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD);
thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(overloadLabelingPrefixD);
uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
int iter = 0;
uint nodeSum = activeNodesNum;
ulong overloadEdgeSum = 0;
auto startCpu = std::chrono::steady_clock::now();
auto endReadCpu = std::chrono::steady_clock::now();
long durationReadCpu = 0;
auto startSwap = std::chrono::steady_clock::now();
auto endSwap = std::chrono::steady_clock::now();
long durationSwap = 0;
auto startGpuProcessing = std::chrono::steady_clock::now();
auto endGpuProcessing = std::chrono::steady_clock::now();
long durationGpuProcessing = 0;
auto startOverloadGpuProcessing = std::chrono::steady_clock::now();
auto endOverloadGpuProcessing = std::chrono::steady_clock::now();
long durationOverloadGpuProcessing = 0;
auto startPreGpuProcessing = std::chrono::steady_clock::now();
auto endPreGpuProcessing = std::chrono::steady_clock::now();
long durationPreGpuProcessing = 0;
auto endPreCaculate = std::chrono::steady_clock::now();
long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
endPreCaculate - startPreCaculate).count();
cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl;
hipStream_t steamStatic, streamDynamic;
hipStreamCreate(&steamStatic);
hipStreamCreate(&streamDynamic);
auto startMemoryTraverse = std::chrono::steady_clock::now();
auto endMemoryTraverse = std::chrono::steady_clock::now();
long durationMemoryTraverse = 0;
auto startProcessing = std::chrono::steady_clock::now();
//uint cursorStartSwap = staticFragmentNum + 1;
uint swapValidNodeSum = 0;
uint swapValidEdgeSum = 0;
uint swapNotValidNodeSum = 0;
uint swapNotValidEdgeSum = 0;
uint visitEdgeSum = 0;
uint swapInEdgeSum = 0;
uint partOverloadSum = 0;
while (activeNodesNum > 0) {
startPreGpuProcessing = std::chrono::steady_clock::now();
iter++;
//cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl;
hipLaunchKernelGGL(( setStaticAndOverloadLabelAndRecord), dim3(grid), dim3(block), 0, 0, testNumNodes, isActiveD1, isStaticActive, isOverloadActive,
isInStaticD, vertexVisitRecordD);
uint staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes);
if (staticNodeNum > 0) {
//cout << "iter " << iter << " staticNodeNum " << staticNodeNum << endl;
thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum);
hipLaunchKernelGGL(( setStaticActiveNodeArray), dim3(grid), dim3(block), 0, 0, testNumNodes, activeNodeListD, isStaticActive,
activeNodeLabelingPrefixD);
}
uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes);
uint overloadEdgeNum = 0;
if (overloadNodeNum > 0) {
//cout << "iter " << iter << " overloadNodeNum " << overloadNodeNum << endl;
thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes, ptrOverloadPrefixsum);
hipLaunchKernelGGL(( setOverloadNodePointerSwap), dim3(grid), dim3(block), 0, 0, testNumNodes, overloadNodeListD, activeOverloadDegreeD,
isOverloadActive,
overloadLabelingPrefixD, degreeD);
thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, activeOverloadNodePointersD);
overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree,
ptrOverloadDegree + overloadNodeNum, 0);
//cout << "iter " << iter << " overloadEdgeNum " << overloadEdgeNum << endl;
overloadEdgeSum += overloadEdgeNum;
if (overloadEdgeNum > edgeIterationMax) {
edgeIterationMax = overloadEdgeNum;
}
}
endPreGpuProcessing = std::chrono::steady_clock::now();
durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endPreGpuProcessing - startPreGpuProcessing).count();
startGpuProcessing = std::chrono::steady_clock::now();
if (staticNodeNum > 0) {
hipLaunchKernelGGL(( setLabelDefaultOpt), dim3(grid), dim3(block), 0, steamStatic, staticNodeNum, activeNodeListD, isActiveD1);
}
if (overloadNodeNum > 0) {
hipLaunchKernelGGL(( setLabelDefaultOpt), dim3(grid), dim3(block), 0, steamStatic, overloadNodeNum, overloadNodeListD, isActiveD1);
}
hipLaunchKernelGGL(( bfs_kernelStatic), dim3(grid), dim3(block), 0, steamStatic, staticNodeNum, activeNodeListD,
staticNodePointerD, degreeD,
staticEdgeListD, valueD, isActiveD1);
hipDeviceSynchronize();
if (overloadNodeNum > 0) {
startCpu = std::chrono::steady_clock::now();
hipMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint), hipMemcpyDeviceToHost,
streamDynamic);
hipMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint),
hipMemcpyDeviceToHost, streamDynamic);
int threadNum = 20;
if (overloadNodeNum < 50) {
threadNum = 1;
}
thread runThreads[threadNum];
for (int i = 0; i < threadNum; i++) {
runThreads[i] = thread(fillDynamic,
i,
threadNum,
0,
overloadNodeNum,
degree,
activeOverloadNodePointers,
nodePointersI,
overloadNodeList,
overloadEdgeList,
edgeList);
}
for (unsigned int t = 0; t < threadNum; t++) {
runThreads[t].join();
}
caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr,
overloadNodeNum, partOverloadSize, overloadEdgeNum);
endReadCpu = std::chrono::steady_clock::now();
durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count();
hipDeviceSynchronize();
//gpuErrorcheck(hipPeekAtLastError())
endGpuProcessing = std::chrono::steady_clock::now();
durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endGpuProcessing - startGpuProcessing).count();
startMemoryTraverse = std::chrono::steady_clock::now();
partOverloadSum += partEdgeListInfoArr.size();
for (int i = 0; i < partEdgeListInfoArr.size(); i++) {
gpuErrorcheck(hipMemcpy(overloadEdgeListD, overloadEdgeList +
activeOverloadNodePointers[partEdgeListInfoArr[i].partStartIndex],
partEdgeListInfoArr[i].partEdgeNums * sizeof(uint),
hipMemcpyHostToDevice))
transferSum += partEdgeListInfoArr[i].partEdgeNums;
startOverloadGpuProcessing = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( bfs_kernelDynamicPart), dim3(grid), dim3(block), 0, streamDynamic,
partEdgeListInfoArr[i].partStartIndex,
partEdgeListInfoArr[i].partActiveNodeNums,
overloadNodeListD, degreeD,
valueD, isActiveD1,
overloadEdgeListD,
activeOverloadNodePointersD);
hipDeviceSynchronize();
endOverloadGpuProcessing = std::chrono::steady_clock::now();
durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endOverloadGpuProcessing - startOverloadGpuProcessing).count();
}
endMemoryTraverse = std::chrono::steady_clock::now();
durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>(
endMemoryTraverse - startMemoryTraverse).count();
//gpuErrorcheck(hipPeekAtLastError())
} else {
hipDeviceSynchronize();
endGpuProcessing = std::chrono::steady_clock::now();
durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endGpuProcessing - startGpuProcessing).count();
}
hipDeviceSynchronize();
startPreGpuProcessing = std::chrono::steady_clock::now();
/*mixCommonLabel<<<grid, block, 0, streamDynamic>>>(testNumNodes, isActiveD1, isActiveD2);
hipDeviceSynchronize();*/
activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
nodeSum += activeNodesNum;
endPreGpuProcessing = std::chrono::steady_clock::now();
durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endPreGpuProcessing - startPreGpuProcessing).count();
}
//hipDeviceSynchronize();
auto endRead = std::chrono::steady_clock::now();
durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
transferSum += max_partition_size;
hipMemcpy(vertexVisitRecord, vertexVisitRecordD, testNumNodes * sizeof(uint), hipMemcpyDeviceToHost);
uint partNum = 50;
uint partSize = testNumEdge / partNum;
vector<uint> partVistRecordList(partNum + 1);
uint partSizeCursor = 0;
for (uint i = 0; i < testNumNodes; i++) {
uint edgeStartIndex = nodePointersI[i];
uint edgeEndIndex = nodePointersI[i] + degree[i];
uint maxPartIndex = partSizeCursor * partSize + partSize;
if (edgeStartIndex < maxPartIndex && edgeEndIndex < maxPartIndex) {
partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * degree[i];
} else if (edgeStartIndex < maxPartIndex && edgeEndIndex >= maxPartIndex) {
partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * (maxPartIndex - edgeStartIndex);
partSizeCursor += 1;
partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * (edgeEndIndex - maxPartIndex);
} else {
partSizeCursor += 1;
partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * degree[i];
}
}
for (uint i = 0; i < partNum + 1; i++) {
cout << "part " << i << " is " << partVistRecordList[i] << endl;
}
for (uint i = 0; i < partNum + 1; i++) {
cout << partVistRecordList[i] << "\t";
}
cout << "iterationSum " << iter << endl;
double edgeIterationAvg = (double) overloadEdgeSum / (double) testNumEdge / iter;
double edgeIterationMaxAvg = (double) edgeIterationMax / (double) testNumEdge;
cout << "edgeIterationAvg " << edgeIterationAvg << " edgeIterationMaxAvg " << edgeIterationMaxAvg << endl;
cout << "transferSum : " << transferSum * 4 << " byte" << endl;
cout << "finish time : " << durationRead << " ms" << endl;
cout << "total time : " << durationRead + testDuration << " ms" << endl;
cout << "cpu time : " << durationReadCpu << " ms" << endl;
cout << "fact processing time : " << durationGpuProcessing << " ms" << endl;
cout << "durationOverloadGpuProcessing time : " << durationOverloadGpuProcessing << " ms" << endl;
cout << "durationMemoryTraverse time : " << durationMemoryTraverse << " mskail" << endl;
cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl;
cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl;
cout << "partOverloadSum : " << partOverloadSum << " " << endl;
cout << "nodeSum: " << nodeSum << endl;
hipFree(staticEdgeListD);
hipFree(degreeD);
hipFree(isActiveD1);
hipFree(valueD);
hipFree(activeNodeListD);
hipFree(activeNodeLabelingPrefixD);
hipFree(activeOverloadNodePointersD);
hipFree(activeOverloadDegreeD);
hipFree(isInStaticD);
hipFree(staticNodePointerD);
hipFree(overloadNodeListD);
hipFree(overloadEdgeListD);
hipFree(isStaticActive);
hipFree(isOverloadActive);
hipFree(overloadLabelingPrefixD);
delete[] label;
delete[] degree;
delete[] value;
delete[] activeNodeList;
delete[] activeOverloadNodePointers;
delete[] isInStatic;
delete[] overloadNodeList;
delete[] staticNodePointer;
delete[] overloadEdgeList;
partEdgeListInfoArr.clear();
return durationRead;
}
/// Reads a binary CSR graph from 'bfsPath' (header: node count, edge count,
/// then node-pointer array, then edge array), places the edge list in HIP
/// managed memory, and runs the traced shared-memory BFS from
/// 'sampleSourceNode'.
/// @param bfsPath          path to the binary graph file
/// @param sampleSourceNode BFS source vertex (overrides the random pick)
void bfsShareTrace(string bfsPath, int sampleSourceNode) {
    uint testNumNodes = 0;
    ulong testNumEdge = 0;
    uint *nodePointersI;
    uint *edgeList;
    auto startReadGraph = std::chrono::steady_clock::now();
    ifstream infile(bfsPath, ios::in | ios::binary);
    infile.read((char *) &testNumNodes, sizeof(uint));
    uint numEdge = 0;
    infile.read((char *) &numEdge, sizeof(uint));
    testNumEdge = numEdge;
    cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl;
    nodePointersI = new uint[testNumNodes];
    infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes);
    gpuErrorcheck(hipMallocManaged(&edgeList, (numEdge) * sizeof(uint)));
    // NOTE(review): memory advice is only valid on managed allocations.  The
    // previous code also advised the pageable host array nodePointersI — and
    // with a size of (testNumNodes + 1) elements, one past its allocation —
    // which the runtime rejects silently.  Only the managed edge list is
    // advised now.
    hipMemAdvise(edgeList, (numEdge) * sizeof(uint), hipMemAdviseSetReadMostly, 0);
    infile.read((char *) edgeList, sizeof(uint) * testNumEdge);
    infile.close();
    //preprocessData(nodePointersI, edgeList, testNumNodes, testNumEdge);
    auto endReadGraph = std::chrono::steady_clock::now();
    long durationReadGraph = std::chrono::duration_cast<std::chrono::milliseconds>(
            endReadGraph - startReadGraph).count();
    cout << "read graph time : " << durationReadGraph << "ms" << endl;
    int testTimes = 1;
    long timeSum = 0;
    for (int i = 0; i < testTimes; i++) {
        uint sourceNode = rand() % testNumNodes;
        sourceNode = sampleSourceNode;
        cout << "sourceNode " << sourceNode << endl;
        timeSum += bfsCaculateInShareTrace(testNumNodes, testNumEdge, nodePointersI, edgeList, sourceNode);
        //timeSum += bfsCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList, 53037907);
        break;
    }
    // Release graph storage: the edge list is a managed allocation, the node
    // pointers are a plain host array.  (Previously both leaked.)
    hipFree(edgeList);
    delete[] nodePointersI;
}
long
bfsCaculateInShareTrace(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, uint sourceNode) {
auto start = std::chrono::steady_clock::now();
uint *degree = new uint[testNumNodes];
uint *value = new uint[testNumNodes];
uint sourceCode = 0;
auto startPreCaculate = std::chrono::steady_clock::now();
for (uint i = 0; i < testNumNodes - 1; i++) {
degree[i] = nodePointersI[i + 1] - nodePointersI[i];
}
degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1];
sourceCode = sourceNode;
bool *label = new bool[testNumNodes];
for (uint i = 0; i < testNumNodes; i++) {
label[i] = false;
value[i] = UINT_MAX;
}
label[sourceCode] = true;
value[sourceCode] = 1;
uint *activeNodeListD;
uint *degreeD;
uint *valueD;
bool *labelD;
uint *nodePointersD;
hipMalloc(&activeNodeListD, testNumNodes * sizeof(uint));
hipMalloc(&nodePointersD, testNumNodes * sizeof(uint));
hipMalloc(°reeD, testNumNodes * sizeof(uint));
hipMalloc(&valueD, testNumNodes * sizeof(uint));
hipMalloc(&labelD, testNumNodes * sizeof(bool));
hipMemcpy(degreeD, degree, testNumNodes * sizeof(uint), hipMemcpyHostToDevice);
hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice);
hipMemcpy(labelD, label, testNumNodes * sizeof(bool), hipMemcpyHostToDevice);
hipMemcpy(nodePointersD, nodePointersI, testNumNodes * sizeof(uint), hipMemcpyHostToDevice);
//cacaulate the active node And make active node array
uint *activeNodeLabelingD;
gpuErrorcheck(hipMalloc(&activeNodeLabelingD, testNumNodes * sizeof(unsigned int)));
uint *activeNodeLabelingPrefixD;
gpuErrorcheck(hipMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
dim3 grid = dim3(56, 1, 1);
dim3 block = dim3(1024, 1, 1);
auto endPreCaculate = std::chrono::steady_clock::now();
long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
endPreCaculate - startPreCaculate).count();
cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl;
hipLaunchKernelGGL(( setLabeling), dim3(grid), dim3(block), 0, 0, testNumNodes, labelD, activeNodeLabelingD);
thrust::device_ptr<unsigned int> ptr_labeling(activeNodeLabelingD);
thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
int iter = 0;
uint nodeSum = activeNodesNum;
auto startProcessing = std::chrono::steady_clock::now();
while (activeNodesNum > 0) {
iter++;
thrust::exclusive_scan(ptr_labeling, ptr_labeling + testNumNodes, ptr_labeling_prefixsum);
hipLaunchKernelGGL(( setActiveNodeArray), dim3(grid), dim3(block), 0, 0, testNumNodes, activeNodeListD, labelD, activeNodeLabelingPrefixD);
hipLaunchKernelGGL(( setLabelDefault), dim3(grid), dim3(block), 0, 0, activeNodesNum, activeNodeListD, labelD);
hipLaunchKernelGGL(( bfs_kernel), dim3(grid), dim3(block), 0, 0, activeNodesNum, activeNodeListD, nodePointersD, degreeD, edgeList, valueD, labelD);
hipDeviceSynchronize();
gpuErrorcheck(hipPeekAtLastError());
long temp = 0;
for (uint j = 0; j < testNumEdge; j++) {
temp += edgeList[j] % 10;
}
cout << "iter " << iter << " " << temp;
hipLaunchKernelGGL(( setLabeling), dim3(grid), dim3(block), 0, 0, testNumNodes, labelD, activeNodeLabelingD);
activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
nodeSum += activeNodesNum;
cout << "iter: " << iter << " activeNodes: " << activeNodesNum << endl;
}
hipDeviceSynchronize();
cout << "nodeSum: " << nodeSum << endl;
auto endRead = std::chrono::steady_clock::now();
long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
cout << "iter sum is " << iter << " finish time : " << durationRead << " ms" << endl;
//cout << "range min " << rangeMin << " range max " << rangeMax << " range sum " << rangeSum << endl;
cout << "source node pointer " << nodePointersI[sourceNode] << endl;
return durationRead;
}
/// BFS driver that keeps a "static" edge partition resident on the GPU and,
/// each iteration, assembles the remaining ("overload") frontier edges on the
/// host with CPU threads and streams them to the device in partition-sized
/// chunks.  Uses two HIP streams: one for the static kernel, one for the
/// dynamic (overload) transfers/kernels.
/// @param adviseK  fraction passed to getMaxPartitionSize to size the static
///                 partition (semantics defined by that helper).
/// @return wall-clock processing time in ms of the last test run.
/// NOTE(review): the run is intentionally cut off after iteration 2 (see the
/// 'if (iter == 2) break;' below) — confirm this is the desired benchmark
/// behavior before reusing this function.
long
bfsCaculateInAsyncNoUVMRandom(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, uint sourceNode,
                              float adviseK) {
    cout << "=========bfsCaculateInAsyncNoUVM========" << endl;
    ulong edgeIterationMax = 0;
    auto start = std::chrono::steady_clock::now();
    auto startPreCaculate = std::chrono::steady_clock::now();
    //CPU
    long durationRead;
    ulong transferSum = 0;
    unsigned long max_partition_size;
    unsigned long total_gpu_size;
    uint maxStaticNode = 0;
    uint *degree;
    uint *value;
    uint *label;
    bool *isInStatic;
    uint *overloadNodeList;
    uint *staticNodePointer;
    uint *activeNodeList;
    uint *activeOverloadNodePointers;
    vector<PartEdgeListInfo> partEdgeListInfoArr;
    /*
     * overloadEdgeList overload edge list in every iteration
     * */
    uint *overloadEdgeList;
    bool isFromTail = false;
    //GPU
    uint *staticEdgeListD;
    uint *overloadEdgeListD;
    bool *isInStaticD;
    uint *overloadNodeListD;
    uint *staticNodePointerD;
    uint *degreeD;
    // async need two labels
    uint *isActiveD1;
    uint *isStaticActive;
    uint *isOverloadActive;
    uint *valueD;
    uint *activeNodeListD;
    uint *activeNodeLabelingPrefixD;
    uint *overloadLabelingPrefixD;
    uint *activeOverloadNodePointersD;
    uint *activeOverloadDegreeD;
    degree = new uint[testNumNodes];
    value = new uint[testNumNodes];
    label = new uint[testNumNodes];
    isInStatic = new bool[testNumNodes];
    overloadNodeList = new uint[testNumNodes];
    staticNodePointer = new uint[testNumNodes];
    activeNodeList = new uint[testNumNodes];
    activeOverloadNodePointers = new uint[testNumNodes];
    //getMaxPartitionSize(max_partition_size, testNumNodes);
    getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, adviseK, sizeof(uint), testNumEdge, 15);
    calculateDegree(testNumNodes, nodePointersI, testNumEdge, degree);
    //memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint));
    uint edgesInStatic = 0;
    float startRate = (1 - (float) max_partition_size / (float) testNumEdge) / 2;
    uint startIndex = (float) testNumNodes * startRate;
    /*uint tempStaticSum = 0;
    for (uint i = testNumNodes - 1; i >= 0; i--) {
        tempStaticSum += degree[i];
        if (tempStaticSum > max_partition_size) {
            startIndex = i;
            break;
        }
    }*/
    // The computed startRate is deliberately overridden: the static partition
    // starts at vertex 0 (fall back to 10% if the window would overrun).
    startIndex = 0;
    if (nodePointersI[startIndex] + max_partition_size > testNumEdge) {
        startIndex = (float) testNumNodes * 0.1f;
    }
    // Mark every vertex whose adjacency list fits entirely inside the static
    // window as "in static"; others are handled through the overload path.
    for (uint i = 0; i < testNumNodes; i++) {
        label[i] = 0;
        value[i] = UINT_MAX - 1;
        if (i >= startIndex && nodePointersI[i] < nodePointersI[startIndex] + max_partition_size - degree[i]) {
            isInStatic[i] = true;
            staticNodePointer[i] = nodePointersI[i] - nodePointersI[startIndex];
            if (i > maxStaticNode) {
                maxStaticNode = i;
            }
            edgesInStatic += degree[i];
        } else {
            isInStatic[i] = false;
        }
    }
    label[sourceNode] = 1;
    value[sourceNode] = 1;
    gpuErrorcheck(hipMalloc(&staticEdgeListD, max_partition_size * sizeof(uint)));
    auto startmove = std::chrono::steady_clock::now();
    gpuErrorcheck(
            hipMemcpy(staticEdgeListD, edgeList + nodePointersI[startIndex], max_partition_size * sizeof(uint),
                      hipMemcpyHostToDevice));
    auto endMove = std::chrono::steady_clock::now();
    long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>(
            endMove - startmove).count();
    cout << "move duration " << testDuration << endl;
    gpuErrorcheck(hipMalloc(&isInStaticD, testNumNodes * sizeof(bool)))
    gpuErrorcheck(hipMalloc(&overloadNodeListD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&staticNodePointerD, testNumNodes * sizeof(uint)))
    gpuErrorcheck(
            hipMemcpy(staticNodePointerD, staticNodePointer, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
    hipMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), hipMemcpyHostToDevice);
    label[sourceNode] = 1;
    value[sourceNode] = 1;
    cout << "max_partition_size: " << max_partition_size << " maxStaticNode: " << maxStaticNode << endl;
    //uint partOverloadSize = max_partition_size / 2;
    // NOTE(review): total_gpu_size/max_partition_size are unsigned long; the
    // difference is narrowed into a uint here — verify it cannot exceed 2^32.
    uint partOverloadSize = total_gpu_size - max_partition_size;
    uint overloadSize = testNumEdge - edgesInStatic;
    cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl;
    overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint));
    if (overloadEdgeList == NULL) {
        cout << "overloadEdgeList is null" << endl;
        return 0;
    }
    gpuErrorcheck(hipMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint)));
    //gpuErrorcheck(hipMallocManaged(&edgeListOverloadManage, overloadSize * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&degreeD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&isActiveD1, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&isStaticActive, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&isOverloadActive, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&valueD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
    gpuErrorcheck(hipMalloc(&overloadLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
    gpuErrorcheck(hipMalloc(&activeNodeListD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMemcpy(degreeD, degree, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
    gpuErrorcheck(hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
    gpuErrorcheck(hipMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
    gpuErrorcheck(hipMemset(isStaticActive, 0, testNumNodes * sizeof(uint)));
    gpuErrorcheck(hipMemset(isOverloadActive, 0, testNumNodes * sizeof(uint)));
    //cacaulate the active node And make active node array
    dim3 grid = dim3(56, 1, 1);
    dim3 block = dim3(1024, 1, 1);
    //setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD);
    thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1);
    thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive);
    thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive);
    thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
    thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD);
    thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(overloadLabelingPrefixD);
    uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
    int iter = 0;
    uint nodeSum = activeNodesNum;
    ulong overloadEdgeSum = 0;
    auto startCpu = std::chrono::steady_clock::now();
    auto endReadCpu = std::chrono::steady_clock::now();
    long durationReadCpu = 0;
    auto startSwap = std::chrono::steady_clock::now();
    auto endSwap = std::chrono::steady_clock::now();
    long durationSwap = 0;
    auto startGpuProcessing = std::chrono::steady_clock::now();
    auto endGpuProcessing = std::chrono::steady_clock::now();
    long durationGpuProcessing = 0;
    auto startOverloadGpuProcessing = std::chrono::steady_clock::now();
    auto endOverloadGpuProcessing = std::chrono::steady_clock::now();
    long durationOverloadGpuProcessing = 0;
    auto startPreGpuProcessing = std::chrono::steady_clock::now();
    auto endPreGpuProcessing = std::chrono::steady_clock::now();
    long durationPreGpuProcessing = 0;
    auto endPreCaculate = std::chrono::steady_clock::now();
    long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
            endPreCaculate - startPreCaculate).count();
    cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl;
    hipStream_t steamStatic, streamDynamic;
    hipStreamCreate(&steamStatic);
    hipStreamCreate(&streamDynamic);
    auto startMemoryTraverse = std::chrono::steady_clock::now();
    auto endMemoryTraverse = std::chrono::steady_clock::now();
    long durationMemoryTraverse = 0;
    //uint cursorStartSwap = staticFragmentNum + 1;
    uint swapValidNodeSum = 0;
    uint swapValidEdgeSum = 0;
    uint swapNotValidNodeSum = 0;
    uint swapNotValidEdgeSum = 0;
    uint visitEdgeSum = 0;
    uint swapInEdgeSum = 0;
    uint partOverloadSum = 0;
    long TIME = 0;
    int testTimes = 1;
    for (int testIndex = 0; testIndex < testTimes; testIndex++) {
        // Reset labels/values and re-seed the source for each timed run.
        for (uint i = 0; i < testNumNodes; i++) {
            label[i] = 0;
            value[i] = UINT_MAX - 1;
        }
        label[sourceNode] = 1;
        value[sourceNode] = 1;
        hipMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), hipMemcpyHostToDevice);
        gpuErrorcheck(hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
        gpuErrorcheck(hipMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), hipMemcpyHostToDevice));
        gpuErrorcheck(hipMemset(isStaticActive, 0, testNumNodes * sizeof(uint)));
        gpuErrorcheck(hipMemset(isOverloadActive, 0, testNumNodes * sizeof(uint)));
        activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
        iter = 0;
        auto startProcessing = std::chrono::steady_clock::now();
        auto startTest = std::chrono::steady_clock::now();
        auto endTest = std::chrono::steady_clock::now();
        long durationTest = 0;
        while (activeNodesNum > 0) {
            startPreGpuProcessing = std::chrono::steady_clock::now();
            iter++;
            cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl;
            // Split the frontier into static-resident and overload vertices.
            hipLaunchKernelGGL(( setStaticAndOverloadLabel), dim3(grid), dim3(block), 0, 0, testNumNodes, isActiveD1, isStaticActive, isOverloadActive,
                                                 isInStaticD);
            uint staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes);
            if (staticNodeNum > 0) {
                cout << "iter " << iter << " staticNodeNum " << staticNodeNum << endl;
                thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum);
                hipLaunchKernelGGL(( setStaticActiveNodeArray), dim3(grid), dim3(block), 0, 0, testNumNodes, activeNodeListD, isStaticActive,
                                                      activeNodeLabelingPrefixD);
            }
            uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes);
            uint overloadEdgeNum = 0;
            if (overloadNodeNum > 0) {
                cout << "iter " << iter << " overloadNodeNum " << overloadNodeNum << endl;
                thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes,
                                       ptrOverloadPrefixsum);
                hipLaunchKernelGGL(( setOverloadNodePointerSwap), dim3(grid), dim3(block), 0, 0, testNumNodes, overloadNodeListD, activeOverloadDegreeD,
                                                        isOverloadActive,
                                                        overloadLabelingPrefixD, degreeD);
                thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum,
                                       activeOverloadNodePointersD);
                // NOTE(review): the int initial value limits this reduction to
                // 32-bit — confirm per-iteration overload edges stay < 2^31.
                overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree,
                                                 ptrOverloadDegree + overloadNodeNum, 0);
                cout << "iter " << iter << " overloadEdgeNum " << overloadEdgeNum << endl;
                overloadEdgeSum += overloadEdgeNum;
                if (overloadEdgeNum > edgeIterationMax) {
                    edgeIterationMax = overloadEdgeNum;
                }
            }
            endPreGpuProcessing = std::chrono::steady_clock::now();
            durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                    endPreGpuProcessing - startPreGpuProcessing).count();
            startGpuProcessing = std::chrono::steady_clock::now();
            if (staticNodeNum > 0) {
                hipLaunchKernelGGL(( setLabelDefaultOpt), dim3(grid), dim3(block), 0, steamStatic, staticNodeNum, activeNodeListD, isActiveD1);
            }
            if (overloadNodeNum > 0) {
                hipLaunchKernelGGL(( setLabelDefaultOpt), dim3(grid), dim3(block), 0, steamStatic, overloadNodeNum, overloadNodeListD, isActiveD1);
            }
            hipLaunchKernelGGL(( bfs_kernelStatic), dim3(grid), dim3(block), 0, steamStatic, staticNodeNum, activeNodeListD,
                                                           staticNodePointerD, degreeD,
                                                           staticEdgeListD, valueD, isActiveD1);
            //hipDeviceSynchronize();
            if (overloadNodeNum > 0) {
                startCpu = std::chrono::steady_clock::now();
                // NOTE(review): these async copies target pageable host arrays,
                // so they effectively serialize with streamDynamic — consider
                // pinned (hipHostMalloc) buffers if overlap matters.
                hipMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint),
                               hipMemcpyDeviceToHost,
                               streamDynamic);
                hipMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint),
                               hipMemcpyDeviceToHost, streamDynamic);
                int threadNum = 20;
                if (overloadNodeNum < 50) {
                    threadNum = 1;
                }
                // Gather overload adjacency lists into the pinned-down host
                // staging buffer with a small CPU thread pool.
                vector<thread> runThreads(threadNum);
                for (int i = 0; i < threadNum; i++) {
                    runThreads[i] = thread(fillDynamic,
                                           i,
                                           threadNum,
                                           0,
                                           overloadNodeNum,
                                           degree,
                                           activeOverloadNodePointers,
                                           nodePointersI,
                                           overloadNodeList,
                                           overloadEdgeList,
                                           edgeList);
                }
                for (int t = 0; t < threadNum; t++) {
                    runThreads[t].join();
                }
                caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr,
                                            overloadNodeNum, partOverloadSize, overloadEdgeNum);
                endReadCpu = std::chrono::steady_clock::now();
                durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count();
                hipDeviceSynchronize();
                //gpuErrorcheck(hipPeekAtLastError())
                endGpuProcessing = std::chrono::steady_clock::now();
                durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                        endGpuProcessing - startGpuProcessing).count();
                partOverloadSum += partEdgeListInfoArr.size();
                // Stream each overload partition to the device and relax it.
                for (size_t i = 0; i < partEdgeListInfoArr.size(); i++) {
                    startMemoryTraverse = std::chrono::steady_clock::now();
                    gpuErrorcheck(hipMemcpy(overloadEdgeListD, overloadEdgeList +
                                                               activeOverloadNodePointers[partEdgeListInfoArr[i].partStartIndex],
                                            partEdgeListInfoArr[i].partEdgeNums * sizeof(uint),
                                            hipMemcpyHostToDevice))
                    transferSum += partEdgeListInfoArr[i].partEdgeNums;
                    endMemoryTraverse = std::chrono::steady_clock::now();
                    durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>(
                            endMemoryTraverse - startMemoryTraverse).count();
                    startOverloadGpuProcessing = std::chrono::steady_clock::now();
                    activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
                    cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl;
                    hipLaunchKernelGGL(( bfs_kernelDynamicPart), dim3(grid), dim3(block), 0, streamDynamic,
                            partEdgeListInfoArr[i].partStartIndex,
                            partEdgeListInfoArr[i].partActiveNodeNums,
                            overloadNodeListD, degreeD,
                            valueD, isActiveD1,
                            overloadEdgeListD,
                            activeOverloadNodePointersD);
                    hipDeviceSynchronize();
                    endOverloadGpuProcessing = std::chrono::steady_clock::now();
                    durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                            endOverloadGpuProcessing - startOverloadGpuProcessing).count();
                }
                //gpuErrorcheck(hipPeekAtLastError())
            } else {
                hipDeviceSynchronize();
                endGpuProcessing = std::chrono::steady_clock::now();
                durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                        endGpuProcessing - startGpuProcessing).count();
            }
            hipDeviceSynchronize();
            startPreGpuProcessing = std::chrono::steady_clock::now();
            /*mixCommonLabel<<<grid, block, 0, streamDynamic>>>(testNumNodes, isActiveD1, isActiveD2);
            hipDeviceSynchronize();*/
            activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
            nodeSum += activeNodesNum;
            endPreGpuProcessing = std::chrono::steady_clock::now();
            durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                    endPreGpuProcessing - startPreGpuProcessing).count();
            cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl;
            // Deliberate early cut-off: only the first two iterations are timed.
            if (iter == 2) {
                break;
            }
        }
        //hipDeviceSynchronize();
        auto endRead = std::chrono::steady_clock::now();
        durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
        transferSum += max_partition_size;
        cout << "iterationSum " << iter << endl;
        double edgeIterationAvg = (double) overloadEdgeSum / (double) testNumEdge / iter;
        double edgeIterationMaxAvg = (double) edgeIterationMax / (double) testNumEdge;
        cout << "edgeIterationAvg " << edgeIterationAvg << " edgeIterationMaxAvg " << edgeIterationMaxAvg << endl;
        cout << "transferSum : " << transferSum * 4 << " byte" << endl;
        cout << "finish time : " << durationRead << " ms" << endl;
        cout << "total time : " << durationRead + testDuration << " ms" << endl;
        cout << "cpu time : " << durationReadCpu << " ms" << endl;
        cout << "fact processing time : " << durationGpuProcessing << " ms" << endl;
        cout << "durationOverloadGpuProcessing time : " << durationOverloadGpuProcessing << " ms" << endl;
        cout << "durationMemoryTraverse time : " << durationMemoryTraverse << " ms" << endl;
        cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl;
        cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl;
        cout << "partOverloadSum : " << partOverloadSum << " " << endl;
        cout << "nodeSum: " << nodeSum << endl;
        TIME += durationRead;
    }
    cout << "TIME " << (float) TIME / (float) testTimes << endl;
    // Destroy the streams created above (previously leaked).
    hipStreamDestroy(steamStatic);
    hipStreamDestroy(streamDynamic);
    hipFree(staticEdgeListD);
    hipFree(degreeD);
    hipFree(isActiveD1);
    hipFree(valueD);
    hipFree(activeNodeListD);
    hipFree(activeNodeLabelingPrefixD);
    hipFree(activeOverloadNodePointersD);
    hipFree(activeOverloadDegreeD);
    hipFree(isInStaticD);
    hipFree(staticNodePointerD);
    hipFree(overloadNodeListD);
    hipFree(overloadEdgeListD);
    hipFree(isStaticActive);
    hipFree(isOverloadActive);
    hipFree(overloadLabelingPrefixD);
    delete[] label;
    delete[] degree;
    delete[] value;
    delete[] activeNodeList;
    delete[] activeOverloadNodePointers;
    delete[] isInStatic;
    delete[] overloadNodeList;
    delete[] staticNodePointer;
    // overloadEdgeList was allocated with malloc(); releasing it with
    // delete[] (as before) is undefined behavior — must use free().
    free(overloadEdgeList);
    partEdgeListInfoArr.clear();
    return durationRead;
}
// Created by gxl on 2020/12/30.
//
#include "bfs.cuh"
void conventionParticipateBFS(string bfsPath, int sampleSourceNode) {
cout << "===============conventionParticipateBFS==============" << endl;
uint testNumNodes = 0;
ulong testNumEdge = 0;
ulong traverseSum = 0;
uint *nodePointersI;
uint *edgeList;
auto startReadGraph = std::chrono::steady_clock::now();
ifstream infile(bfsPath, ios::in | ios::binary);
infile.read((char *) &testNumNodes, sizeof(uint));
uint numEdge = 0;
infile.read((char *) &numEdge, sizeof(uint));
testNumEdge = numEdge;
cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl;
nodePointersI = new uint[testNumNodes];
infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes);
edgeList = new uint[testNumEdge];
infile.read((char *) edgeList, sizeof(uint) * testNumEdge);
infile.close();
unsigned long max_partition_size;
unsigned long total_gpu_size;
getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, 0.9, sizeof(uint), 5);
uint partitionNum;
if (testNumEdge > max_partition_size) {
partitionNum = testNumEdge / max_partition_size + 1;
} else {
partitionNum = 1;
}
uint *degree = new uint[testNumNodes];
uint *value = new uint[testNumNodes];
bool *isActiveNodeList = new bool[testNumNodes];
CommonPartitionInfo *partitionInfoList = new CommonPartitionInfo[partitionNum];
bool *needTransferPartition = new bool[partitionNum];
for (uint i = 0; i < testNumNodes; i++) {
isActiveNodeList[i] = false;
value[i] = UINT_MAX;
if (i + 1 < testNumNodes) {
degree[i] = nodePointersI[i + 1] - nodePointersI[i];
} else {
degree[i] = testNumEdge - nodePointersI[i];
}
if (degree[i] > max_partition_size) {
cout << "node " << i << " degree > maxPartition " << endl;
return;
}
}
for (uint i = 0; i < partitionNum; i++) {
partitionInfoList[i].startVertex = -1;
partitionInfoList[i].endVertex = -1;
partitionInfoList[i].nodePointerOffset = -1;
partitionInfoList[i].partitionEdgeSize = -1;
}
int tempPartitionIndex = 0;
uint tempNodeIndex = 0;
while (tempNodeIndex < testNumNodes) {
if (partitionInfoList[tempPartitionIndex].startVertex == -1) {
partitionInfoList[tempPartitionIndex].startVertex = tempNodeIndex;
partitionInfoList[tempPartitionIndex].endVertex = tempNodeIndex;
partitionInfoList[tempPartitionIndex].nodePointerOffset = nodePointersI[tempNodeIndex];
partitionInfoList[tempPartitionIndex].partitionEdgeSize = degree[tempNodeIndex];
tempNodeIndex++;
} else {
if (partitionInfoList[tempPartitionIndex].partitionEdgeSize + degree[tempNodeIndex] > max_partition_size) {
tempPartitionIndex++;
} else {
partitionInfoList[tempPartitionIndex].endVertex = tempNodeIndex;
partitionInfoList[tempPartitionIndex].partitionEdgeSize += degree[tempNodeIndex];
tempNodeIndex++;
}
}
}
uint *degreeD;
bool *isActiveNodeListD;
bool *nextActiveNodeListD;
uint *nodePointerListD;
uint *partitionEdgeListD;
uint *valueD;
cudaMalloc(°reeD, testNumNodes * sizeof(uint));
cudaMalloc(&valueD, testNumNodes * sizeof(uint));
cudaMalloc(&isActiveNodeListD, testNumNodes * sizeof(bool));
cudaMalloc(&nextActiveNodeListD, testNumNodes * sizeof(bool));
cudaMalloc(&nodePointerListD, testNumNodes * sizeof(uint));
cudaMalloc(&partitionEdgeListD, max_partition_size * sizeof(uint));
cudaMemcpy(degreeD, degree, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice);
cudaMemcpy(nodePointerListD, nodePointersI, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice);
cudaMemset(nextActiveNodeListD, 0, testNumNodes * sizeof(bool));
//cacaulate the active node And make active node array
dim3 grid = dim3(56, 1, 1);
dim3 block = dim3(1024, 1, 1);
int testTimes = 1;
long timeSum = 0;
for (int i = 0; i < testTimes; i++) {
uint sourceNode = rand() % testNumNodes;
sourceNode = sampleSourceNode;
//sourceNode = 25838548;
//sourceNode = 26890152;
//sourceNode = 47235513;
cout << "sourceNode " << sourceNode << endl;
for (int j = 0; j < testNumNodes; j++) {
isActiveNodeList[j] = false;
value[j] = UINT_MAX;
}
isActiveNodeList[sourceNode] = true;
value[sourceNode] = 1;
cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice);
uint activeSum = 0;
int iteration = 0;
auto startProcessing = std::chrono::steady_clock::now();
while (true) {
uint activeNodeNum = 0;
checkNeedTransferPartitionOpt(needTransferPartition, partitionInfoList, isActiveNodeList, partitionNum,
testNumNodes, activeNodeNum);
if (activeNodeNum <= 0) {
break;
} else {
//cout << "iteration " << iteration << " activeNodes " << activeNodeNum << endl;
activeSum += activeNodeNum;
}
cudaMemcpy(isActiveNodeListD, isActiveNodeList, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice);
for (int j = 0; j < partitionNum; j++) {
if (needTransferPartition[j]) {
cudaMemcpy(partitionEdgeListD, edgeList + partitionInfoList[j].nodePointerOffset,
partitionInfoList[j].partitionEdgeSize * sizeof(uint), cudaMemcpyHostToDevice);
traverseSum += partitionInfoList[j].partitionEdgeSize * sizeof(uint);
bfsKernel_CommonPartition<<<grid, block>>>(partitionInfoList[j].startVertex,
partitionInfoList[j].endVertex,
partitionInfoList[j].nodePointerOffset,
isActiveNodeListD, nodePointerListD,
partitionEdgeListD, degreeD, valueD,
nextActiveNodeListD);
cudaDeviceSynchronize();
gpuErrorcheck(cudaPeekAtLastError())
}
}
cudaMemcpy(isActiveNodeList, nextActiveNodeListD, testNumNodes * sizeof(bool), cudaMemcpyDeviceToHost);
cudaMemset(nextActiveNodeListD, 0, testNumNodes * sizeof(bool));
iteration++;
}
cout << " activeSum " << activeSum << endl;
auto endRead = std::chrono::steady_clock::now();
long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
cout << " finish time : " << durationRead << " ms" << endl;
cout << "traverseSum " << traverseSum << endl;
}
free(nodePointersI);
free(edgeList);
free(degree);
free(isActiveNodeList);
cudaFree(isActiveNodeListD);
cudaFree(nextActiveNodeListD);
cudaFree(nodePointerListD);
cudaFree(partitionEdgeListD);
//todo free partitionInfoList needTransferPartition
}
/// BFS over a CSR graph held in unified (managed) memory.
/// Distances are copied into bfsValue[valueIndex]; returns the processing
/// time (ms) of the frontier loop only (graph load/pre-compute excluded).
/// @param testNumNodes  vertex count; nodePointersI has testNumNodes offsets
/// @param testNumEdge   edge count; edgeList has testNumEdge entries
/// @param sourceNode    BFS root
/// @param bfsValue      output table; row valueIndex receives the distances
long
bfsCaculateInShareReturnValue(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, uint sourceNode,
                              uint **bfsValue, int valueIndex) {
    auto start = std::chrono::steady_clock::now();
    uint *degree;
    uint *value;
    uint sourceCode = 0;
    // FIX: restored "&degree" — the source contained the mojibake "°ree",
    // which does not compile.
    gpuErrorcheck(cudaMallocManaged(&degree, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMallocManaged(&value, testNumNodes * sizeof(uint)));
    auto startPreCaculate = std::chrono::steady_clock::now();
    // Derive per-vertex out-degree from consecutive CSR offsets.
    for (uint i = 0; i < testNumNodes - 1; i++) {
        if (nodePointersI[i] > testNumEdge) {
            // Corrupted offset: report it and stop scanning (same policy as
            // the other BFS variants in this file).
            cout << i << " " << nodePointersI[i] << endl;
            break;
        }
        degree[i] = nodePointersI[i + 1] - nodePointersI[i];
    }
    degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1];
    sourceCode = sourceNode;
    cout << "sourceNode " << sourceNode << " degree " << degree[sourceNode] << endl;
    bool *label;
    gpuErrorcheck(cudaMallocManaged(&label, testNumNodes * sizeof(bool)));
    for (uint i = 0; i < testNumNodes; i++) {
        label[i] = false;
        value[i] = UINT_MAX;
    }
    auto endPreCaculate = std::chrono::steady_clock::now();
    long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
            endPreCaculate - startPreCaculate).count();
    label[sourceCode] = true;
    // NOTE(review): this variant seeds the source distance with 0 while
    // bfsCaculateInShare seeds with 1 — confirm which convention callers
    // of bfsValue expect.
    value[sourceCode] = 0;
    uint *activeNodeList;
    // FIX: this was the only unchecked allocation in the function; wrap it
    // with gpuErrorcheck like the others.
    gpuErrorcheck(cudaMallocManaged(&activeNodeList, testNumNodes * sizeof(uint)));
    //cacaulate the active node And make active node array
    uint *activeNodeLabelingD;
    gpuErrorcheck(cudaMallocManaged(&activeNodeLabelingD, testNumNodes * sizeof(unsigned int)));
    uint *activeNodeLabelingPrefixD;
    gpuErrorcheck(cudaMallocManaged(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
    dim3 grid = dim3(56, 1, 1);
    dim3 block = dim3(1024, 1, 1);
    setLabeling<<<grid, block>>>(testNumNodes, label, activeNodeLabelingD);
    thrust::device_ptr<unsigned int> ptr_labeling(activeNodeLabelingD);
    thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
    uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
    int iter = 0;
    uint nodeSum = activeNodesNum;
    auto startProcessing = std::chrono::steady_clock::now();
    while (activeNodesNum > 0) {
        iter++;
        // Compact the frontier: prefix-sum the active labels, gather the
        // active vertex ids, clear the labels, then relax their edges.
        thrust::exclusive_scan(ptr_labeling, ptr_labeling + testNumNodes, ptr_labeling_prefixsum);
        setActiveNodeArray<<<grid, block>>>(testNumNodes, activeNodeList, label, activeNodeLabelingPrefixD);
        setLabelDefault<<<grid, block>>>(activeNodesNum, activeNodeList, label);
        bfs_kernel<<<grid, block>>>(activeNodesNum, activeNodeList, nodePointersI, degree, edgeList, value, label);
        cudaDeviceSynchronize();
        gpuErrorcheck(cudaPeekAtLastError());
        setLabeling<<<grid, block>>>(testNumNodes, label, activeNodeLabelingD);
        activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
        nodeSum += activeNodesNum;
    }
    cudaDeviceSynchronize();
    // FIX: unsigned index to match testNumNodes (was a signed/unsigned
    // comparison).
    for (uint i = 0; i < testNumNodes; i++) {
        bfsValue[valueIndex][i] = value[i];
    }
    auto endRead = std::chrono::steady_clock::now();
    long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
    cudaFree(degree);
    cudaFree(label);
    cudaFree(value);
    cudaFree(activeNodeList);
    cudaFree(activeNodeLabelingD);
    cudaFree(activeNodeLabelingPrefixD);
    return durationRead;
}
/// Loads a CSR graph from 'bfsPath' into unified (managed) memory, marks it
/// read-mostly, and runs one shared-memory BFS from 'sampleSourceNode'.
/// The file layout is: uint nodeCount, uint edgeCount, nodeCount offsets,
/// edgeCount adjacency entries.
void bfsShare(string bfsPath, int sampleSourceNode) {
    uint testNumNodes = 0;
    ulong testNumEdge = 0;
    uint *nodePointersI;
    uint *edgeList;
    auto startReadGraph = std::chrono::steady_clock::now();
    ifstream infile(bfsPath, ios::in | ios::binary);
    infile.read((char *) &testNumNodes, sizeof(uint));
    uint numEdge = 0;
    infile.read((char *) &numEdge, sizeof(uint));
    testNumEdge = numEdge;
    cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl;
    gpuErrorcheck(cudaMallocManaged(&nodePointersI, (testNumNodes + 1) * sizeof(uint)));
    infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes);
    gpuErrorcheck(cudaMallocManaged(&edgeList, (numEdge) * sizeof(uint)));
    // The graph is immutable during BFS, so advise read-mostly to let the
    // driver replicate pages instead of migrating them.
    cudaMemAdvise(nodePointersI, (testNumNodes + 1) * sizeof(uint), cudaMemAdviseSetReadMostly, 0);
    cudaMemAdvise(edgeList, (numEdge) * sizeof(uint), cudaMemAdviseSetReadMostly, 0);
    infile.read((char *) edgeList, sizeof(uint) * testNumEdge);
    infile.close();
    //preprocessData(nodePointersI, edgeList, testNumNodes, testNumEdge);
    auto endReadGraph = std::chrono::steady_clock::now();
    long durationReadGraph = std::chrono::duration_cast<std::chrono::milliseconds>(
            endReadGraph - startReadGraph).count();
    cout << "read graph time : " << durationReadGraph << "ms" << endl;
    int testTimes = 1;
    long timeSum = 0;
    for (int i = 0; i < testTimes; i++) {
        uint sourceNode = rand() % testNumNodes;
        sourceNode = sampleSourceNode;
        cout << "sourceNode " << sourceNode << endl;
        timeSum += bfsCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList, sourceNode);
        //timeSum += bfsCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList, 53037907);
        break;
    }
    // FIX: the managed graph buffers were leaked; release them before
    // returning (mirrors the cleanup done in testBFS's managed branch).
    cudaFree(nodePointersI);
    cudaFree(edgeList);
}
/// Loads a CSR graph from 'bfsPath' into plain host arrays and runs the
/// no-UVM async BFS variant from 'sampleSourceNode'.
/// @param adviseK  tuning ratio forwarded to bfsCaculateInAsyncNoUVMRandom
void bfsOpt(string bfsPath, int sampleSourceNode, float adviseK) {
    uint testNumNodes = 0;
    ulong testNumEdge = 0;
    uint *nodePointersI;
    uint *edgeList;
    auto startReadGraph = std::chrono::steady_clock::now();
    ifstream infile(bfsPath, ios::in | ios::binary);
    infile.read((char *) &testNumNodes, sizeof(uint));
    uint numEdge = 0;
    infile.read((char *) &numEdge, sizeof(uint));
    testNumEdge = numEdge;
    cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl;
    nodePointersI = new uint[testNumNodes + 1];
    infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes);
    edgeList = new uint[testNumEdge + 1];
    infile.read((char *) edgeList, sizeof(uint) * testNumEdge);
    infile.close();
    auto endReadGraph = std::chrono::steady_clock::now();
    long durationReadGraph = std::chrono::duration_cast<std::chrono::milliseconds>(
            endReadGraph - startReadGraph).count();
    cout << "read graph time : " << durationReadGraph << "ms" << endl;
    int testTimes = 1;
    long timeSum = 0;
    for (int i = 0; i < testTimes; i++) {
        uint sourceNode = rand() % testNumNodes;
        // FIX: honor the caller-supplied source vertex. The original
        // overwrote it with leftover hard-coded debug vertices
        // (47235513 / 25838548), silently ignoring sampleSourceNode —
        // inconsistent with bfsShare, which does use the parameter.
        sourceNode = sampleSourceNode;
        cout << "sourceNode " << sourceNode << endl;
        timeSum += bfsCaculateInAsyncNoUVMRandom(testNumNodes, testNumEdge, nodePointersI, edgeList, sourceNode,
                                                 adviseK);
        cout << i << "========================================" << endl;
    }
    // FIX: these buffers came from new[]; calling cudaFree() on them is
    // undefined behavior. Use delete[] as in testBFS's host branch.
    delete[] nodePointersI;
    delete[] edgeList;
}
/// Smoke test: load the graph at testGraphPath and run one BFS round,
/// either on unified (managed) memory or on plain host arrays, depending
/// on the useManaged toggle. Buffers are released per the branch that
/// allocated them.
void testBFS() {
    uint vertexCount = 0;
    ulong edgeCount = 0;
    uint *offsets;
    uint *adjacency;
    bool useManaged = true;
    auto loadBegin = std::chrono::steady_clock::now();
    ifstream graphFile(testGraphPath, ios::in | ios::binary);
    graphFile.read((char *) &vertexCount, sizeof(uint));
    uint rawEdgeCount = 0;
    graphFile.read((char *) &rawEdgeCount, sizeof(uint));
    edgeCount = rawEdgeCount;
    cout << "vertex num: " << vertexCount << " edge num: " << edgeCount << endl;
    if (useManaged) {
        // Managed path: allocate in unified memory and advise read-mostly,
        // since the graph is never written after loading.
        gpuErrorcheck(cudaMallocManaged(&offsets, (vertexCount + 1) * sizeof(uint)));
        graphFile.read((char *) offsets, sizeof(uint) * vertexCount);
        gpuErrorcheck(cudaMallocManaged(&adjacency, (rawEdgeCount) * sizeof(uint)));
        cudaMemAdvise(offsets, (vertexCount + 1) * sizeof(uint), cudaMemAdviseSetReadMostly, 0);
        cudaMemAdvise(adjacency, (rawEdgeCount) * sizeof(uint), cudaMemAdviseSetReadMostly, 0);
        graphFile.read((char *) adjacency, sizeof(uint) * edgeCount);
        graphFile.close();
        //preprocessData(offsets, adjacency, vertexCount, edgeCount);
    } else {
        // Host path: plain new[] arrays, later handed to the no-UVM variant.
        offsets = new uint[vertexCount + 1];
        graphFile.read((char *) offsets, sizeof(uint) * vertexCount);
        adjacency = new uint[edgeCount + 1];
        graphFile.read((char *) adjacency, sizeof(uint) * edgeCount);
        graphFile.close();
    }
    auto loadEnd = std::chrono::steady_clock::now();
    long loadMillis = std::chrono::duration_cast<std::chrono::milliseconds>(
            loadEnd - loadBegin).count();
    cout << "read graph time : " << loadMillis << "ms" << endl;
    int rounds = 1;
    long totalMillis = 0;
    for (int round = 0; round < rounds; round++) {
        // rand() is still consumed to keep RNG state identical; the drawn
        // vertex is only logged — the BFS calls use fixed sources.
        uint sourceNode = rand() % vertexCount;
        cout << "sourceNode " << sourceNode << endl;
        if (useManaged) {
            totalMillis += bfsCaculateInShare(vertexCount, edgeCount, offsets, adjacency, 53037907);
            break;
        } else {
            totalMillis += bfsCaculateInAsyncNoUVMSwap(vertexCount, edgeCount, offsets, adjacency, 25838548);
        }
        cout << round << "========================================" << endl;
    }
    if (useManaged) {
        cudaFree(offsets);
        cudaFree(adjacency);
    } else {
        delete[] offsets;
        delete[] adjacency;
    }
}
/// BFS over a CSR graph whose arrays already live in unified (managed)
/// memory. Prints per-iteration frontier sizes and returns the processing
/// time (ms) of the frontier loop only.
/// @param testNumNodes  vertex count; nodePointersI has testNumNodes offsets
/// @param testNumEdge   edge count; edgeList has testNumEdge entries
/// @param sourceNode    BFS root (its distance is seeded with 1)
long bfsCaculateInShare(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, uint sourceNode) {
    auto start = std::chrono::steady_clock::now();
    uint *degree;
    uint *value;
    uint sourceCode = 0;
    // FIX: restored "&degree" — the source contained the mojibake "°ree",
    // which does not compile.
    gpuErrorcheck(cudaMallocManaged(&degree, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMallocManaged(&value, testNumNodes * sizeof(uint)));
    auto startPreCaculate = std::chrono::steady_clock::now();
    // Derive per-vertex out-degree from consecutive CSR offsets.
    for (uint i = 0; i < testNumNodes - 1; i++) {
        if (nodePointersI[i] > testNumEdge) {
            // Corrupted offset: report it and stop scanning.
            cout << i << " " << nodePointersI[i] << endl;
            break;
        }
        degree[i] = nodePointersI[i + 1] - nodePointersI[i];
    }
    degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1];
    sourceCode = sourceNode;
    cout << "sourceNode " << sourceNode << " degree " << degree[sourceNode] << endl;
    bool *label;
    gpuErrorcheck(cudaMallocManaged(&label, testNumNodes * sizeof(bool)));
    for (uint i = 0; i < testNumNodes; i++) {
        label[i] = false;
        value[i] = UINT_MAX;
    }
    auto endPreCaculate = std::chrono::steady_clock::now();
    long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
            endPreCaculate - startPreCaculate).count();
    cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl;
    label[sourceCode] = true;
    // NOTE(review): this variant seeds the source distance with 1 while
    // bfsCaculateInShareReturnValue seeds with 0 — confirm the intended
    // convention.
    value[sourceCode] = 1;
    uint *activeNodeList;
    // FIX: this allocation was unchecked; wrap it with gpuErrorcheck like
    // every other allocation in this function.
    gpuErrorcheck(cudaMallocManaged(&activeNodeList, testNumNodes * sizeof(uint)));
    //cacaulate the active node And make active node array
    uint *activeNodeLabelingD;
    gpuErrorcheck(cudaMallocManaged(&activeNodeLabelingD, testNumNodes * sizeof(unsigned int)));
    uint *activeNodeLabelingPrefixD;
    gpuErrorcheck(cudaMallocManaged(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
    dim3 grid = dim3(56, 1, 1);
    dim3 block = dim3(1024, 1, 1);
    setLabeling<<<grid, block>>>(testNumNodes, label, activeNodeLabelingD);
    thrust::device_ptr<unsigned int> ptr_labeling(activeNodeLabelingD);
    thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
    uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
    int iter = 0;
    uint nodeSum = activeNodesNum;
    auto startProcessing = std::chrono::steady_clock::now();
    //vector<vector<uint>> visitRecordByIteration;
    while (activeNodesNum > 0) {
        iter++;
        // Compact the frontier: prefix-sum the active labels, gather the
        // active vertex ids, clear the labels, then relax their edges.
        thrust::exclusive_scan(ptr_labeling, ptr_labeling + testNumNodes, ptr_labeling_prefixsum);
        setActiveNodeArray<<<grid, block>>>(testNumNodes, activeNodeList, label, activeNodeLabelingPrefixD);
        setLabelDefault<<<grid, block>>>(activeNodesNum, activeNodeList, label);
        bfs_kernel<<<grid, block>>>(activeNodesNum, activeNodeList, nodePointersI, degree, edgeList, value, label);
        cudaDeviceSynchronize();
        gpuErrorcheck(cudaPeekAtLastError());
        //visitRecordByIteration.push_back(countDataByIteration(testNumEdge, testNumNodes, nodePointersI, degree, activeNodeLabelingD));
        setLabeling<<<grid, block>>>(testNumNodes, label, activeNodeLabelingD);
        activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
        nodeSum += activeNodesNum;
        cout << "iter: " << iter << " activeNodes: " << activeNodesNum << endl;
    }
    cudaDeviceSynchronize();
    //writeTrunkVistInIteration(visitRecordByIteration, "./CountByIterationbfs.txt");
    cout << "nodeSum: " << nodeSum << endl;
    auto endRead = std::chrono::steady_clock::now();
    long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
    cout << "iter sum is " << iter << " finish time : " << durationRead << " ms" << endl;
    //cout << "range min " << rangeMin << " range max " << rangeMax << " range sum " << rangeSum << endl;
    cout << "source node pointer " << nodePointersI[sourceNode] << endl;
    cudaFree(degree);
    cudaFree(label);
    cudaFree(value);
    cudaFree(activeNodeList);
    cudaFree(activeNodeLabelingD);
    cudaFree(activeNodeLabelingPrefixD);
    return durationRead;
}
long
bfsCaculateInAsyncNoUVMSwap(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, uint sourceNode) {
cout << "=========bfsCaculateInAsyncNoUVM========" << endl;
auto start = std::chrono::steady_clock::now();
auto startPreCaculate = std::chrono::steady_clock::now();
//CPU
long durationRead;
uint fragmentNum = testNumEdge / fragment_size;
unsigned long max_partition_size;
unsigned long total_gpu_size;
uint staticFragmentNum;
uint maxStaticNode = 0;
uint *degree;
uint *value;
uint *label;
uint *staticFragmentToNormalMap;
bool *isInStatic;
uint *overloadNodeList;
uint *staticNodePointer;
uint *staticFragmentData;
uint *overloadFragmentData;
uint *activeNodeList;
uint *activeOverloadNodePointers;
vector<PartEdgeListInfo> partEdgeListInfoArr;
/*
* overloadEdgeList overload edge list in every iteration
* */
uint *overloadEdgeList;
FragmentData *fragmentData;
bool isFromTail = false;
//GPU
uint *staticEdgeListD;
uint *overloadEdgeListD;
bool *isInStaticD;
uint *overloadNodeListD;
uint *staticNodePointerD;
uint *staticFragmentVisitRecordsD;
uint *staticFragmentDataD;
uint *canSwapStaticFragmentDataD;
uint *canSwapFragmentPrefixSumD;
uint *degreeD;
// async need two labels
uint *isActiveD1;
uint *isStaticActive;
uint *isOverloadActive;
uint *valueD;
uint *activeNodeListD;
uint *activeNodeLabelingPrefixD;
uint *overloadLabelingPrefixD;
uint *activeOverloadNodePointersD;
uint *activeOverloadDegreeD;
degree = new uint[testNumNodes];
value = new uint[testNumNodes];
label = new uint[testNumNodes];
isInStatic = new bool[testNumNodes];
overloadNodeList = new uint[testNumNodes];
staticNodePointer = new uint[testNumNodes];
activeNodeList = new uint[testNumNodes];
activeOverloadNodePointers = new uint[testNumNodes];
fragmentData = new FragmentData[fragmentNum];
//getMaxPartitionSize(max_partition_size, testNumNodes);
getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, 0.97, sizeof(uint));
staticFragmentNum = max_partition_size / fragment_size;
staticFragmentToNormalMap = new uint[staticFragmentNum];
staticFragmentData = new uint[staticFragmentNum];
overloadFragmentData = new uint[fragmentNum];
//caculate degree
uint meanDegree = testNumEdge / testNumNodes;
cout << " meanDegree " << meanDegree << endl;
uint degree0Sum = 0;
for (uint i = 0; i < testNumNodes - 1; i++) {
if (nodePointersI[i] > testNumEdge) {
cout << i << " " << nodePointersI[i] << endl;
break;
}
degree[i] = nodePointersI[i + 1] - nodePointersI[i];
}
degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1];
memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint));
//caculate static staticEdgeListD
gpuErrorcheck(cudaMalloc(&staticEdgeListD, max_partition_size * sizeof(uint)));
auto startmove = std::chrono::steady_clock::now();
gpuErrorcheck(
cudaMemcpy(staticEdgeListD, edgeList, max_partition_size * sizeof(uint), cudaMemcpyHostToDevice));
auto endMove = std::chrono::steady_clock::now();
long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>(
endMove - startmove).count();
gpuErrorcheck(cudaMalloc(&isInStaticD, testNumNodes * sizeof(bool)))
gpuErrorcheck(cudaMalloc(&overloadNodeListD, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&staticNodePointerD, testNumNodes * sizeof(uint)))
gpuErrorcheck(cudaMemcpy(staticNodePointerD, nodePointersI, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
for (uint i = 0; i < testNumNodes; i++) {
label[i] = 0;
value[i] = UINT_MAX - 1;
uint pointStartFragmentIndex = nodePointersI[i] / fragment_size;
uint pointEndFragmentIndex =
degree[i] == 0 ? pointStartFragmentIndex : (nodePointersI[i] + degree[i] - 1) / fragment_size;
if (pointStartFragmentIndex == pointEndFragmentIndex && pointStartFragmentIndex >= 0 &&
pointStartFragmentIndex < fragmentNum) {
if (fragmentData[pointStartFragmentIndex].vertexNum == 0) {
fragmentData[pointStartFragmentIndex].startVertex = i;
} else if (fragmentData[pointStartFragmentIndex].startVertex > i) {
fragmentData[pointStartFragmentIndex].startVertex = i;
}
fragmentData[pointStartFragmentIndex].vertexNum++;
}
if (nodePointersI[i] < max_partition_size && (nodePointersI[i] + degree[i] - 1) < max_partition_size) {
isInStatic[i] = true;
if (i > maxStaticNode) maxStaticNode = i;
} else {
isInStatic[i] = false;
}
}
label[sourceNode] = 1;
value[sourceNode] = 1;
cudaMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice);
cout << "max_partition_size: " << max_partition_size << " maxStaticNode: " << maxStaticNode << endl;
cout << "fragmentNum " << fragmentNum << " staticFragmentNum " << staticFragmentNum << endl;
for (int i = 0; i < staticFragmentNum; i++) {
fragmentData[i].isIn = true;
}
for (uint i = 0; i < staticFragmentNum; i++) {
staticFragmentToNormalMap[i] = i;
}
//uint partOverloadSize = max_partition_size / 2;
uint partOverloadSize = total_gpu_size - max_partition_size;
uint overloadSize = testNumEdge - nodePointersI[maxStaticNode + 1];
cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl;
overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint));
if (overloadEdgeList == NULL) {
cout << "overloadEdgeList is null" << endl;
return 0;
}
gpuErrorcheck(cudaMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint)));
//gpuErrorcheck(cudaMallocManaged(&edgeListOverloadManage, overloadSize * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&staticFragmentDataD, staticFragmentNum * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&staticFragmentVisitRecordsD, staticFragmentNum * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&canSwapStaticFragmentDataD, staticFragmentNum * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&canSwapFragmentPrefixSumD, staticFragmentNum * sizeof(uint)));
thrust::device_ptr<unsigned int> ptr_canSwapFragment(canSwapStaticFragmentDataD);
thrust::device_ptr<unsigned int> ptr_canSwapFragmentPrefixSum(canSwapFragmentPrefixSumD);
gpuErrorcheck(cudaMalloc(°reeD, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&isActiveD1, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&isStaticActive, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&isOverloadActive, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&valueD, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
gpuErrorcheck(cudaMalloc(&overloadLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
gpuErrorcheck(cudaMalloc(&activeNodeListD, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMemcpy(degreeD, degree, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemset(isStaticActive, 0, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMemset(isOverloadActive, 0, testNumNodes * sizeof(uint)));
//cacaulate the active node And make active node array
dim3 grid = dim3(56, 1, 1);
dim3 block = dim3(1024, 1, 1);
//setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD);
thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1);
thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive);
thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive);
thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD);
thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(overloadLabelingPrefixD);
uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
int iter = 0;
uint nodeSum = activeNodesNum;
uint overloadEdgeSum = 0;
auto startCpu = std::chrono::steady_clock::now();
auto endReadCpu = std::chrono::steady_clock::now();
long durationReadCpu = 0;
auto startSwap = std::chrono::steady_clock::now();
auto endSwap = std::chrono::steady_clock::now();
long durationSwap = 0;
auto startGpuProcessing = std::chrono::steady_clock::now();
auto endGpuProcessing = std::chrono::steady_clock::now();
long durationGpuProcessing = 0;
auto startOverloadGpuProcessing = std::chrono::steady_clock::now();
auto endOverloadGpuProcessing = std::chrono::steady_clock::now();
long durationOverloadGpuProcessing = 0;
auto startPreGpuProcessing = std::chrono::steady_clock::now();
auto endPreGpuProcessing = std::chrono::steady_clock::now();
long durationPreGpuProcessing = 0;
auto endPreCaculate = std::chrono::steady_clock::now();
long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
endPreCaculate - startPreCaculate).count();
cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl;
cudaStream_t steamStatic, streamDynamic;
cudaStreamCreate(&steamStatic);
cudaStreamCreate(&streamDynamic);
auto startMemoryTraverse = std::chrono::steady_clock::now();
auto endMemoryTraverse = std::chrono::steady_clock::now();
long durationMemoryTraverse = 0;
auto startProcessing = std::chrono::steady_clock::now();
uint cursorStartSwap = isFromTail ? fragmentNum - 1 : staticFragmentNum + 1;
//uint cursorStartSwap = staticFragmentNum + 1;
uint swapValidNodeSum = 0;
uint swapValidEdgeSum = 0;
uint swapNotValidNodeSum = 0;
uint swapNotValidEdgeSum = 0;
uint visitEdgeSum = 0;
uint swapInEdgeSum = 0;
uint partOverloadSum = 0;
while (activeNodesNum > 0) {
startPreGpuProcessing = std::chrono::steady_clock::now();
iter++;
cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl;
setStaticAndOverloadLabel<<<grid, block>>>(testNumNodes, isActiveD1, isStaticActive, isOverloadActive,
isInStaticD);
uint staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes);
if (staticNodeNum > 0) {
cout << "iter " << iter << " staticNodeNum " << staticNodeNum << endl;
thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum);
setStaticActiveNodeArray<<<grid, block>>>(testNumNodes, activeNodeListD, isStaticActive,
activeNodeLabelingPrefixD);
}
uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes);
uint overloadEdgeNum = 0;
if (overloadNodeNum > 0) {
cout << "iter " << iter << " overloadNodeNum " << overloadNodeNum << endl;
thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes, ptrOverloadPrefixsum);
setOverloadNodePointerSwap<<<grid, block>>>(testNumNodes, overloadNodeListD, activeOverloadDegreeD,
isOverloadActive,
overloadLabelingPrefixD, degreeD);
thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, activeOverloadNodePointersD);
overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree,
ptrOverloadDegree + overloadNodeNum, 0);
cout << "iter " << iter << " overloadEdgeNum " << overloadEdgeNum << endl;
overloadEdgeSum += overloadEdgeNum;
}
endPreGpuProcessing = std::chrono::steady_clock::now();
durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endPreGpuProcessing - startPreGpuProcessing).count();
startGpuProcessing = std::chrono::steady_clock::now();
if (staticNodeNum > 0) {
setLabelDefaultOpt<<<grid, block, 0, steamStatic>>>(staticNodeNum, activeNodeListD, isActiveD1);
}
if (overloadNodeNum > 0) {
setLabelDefaultOpt<<<grid, block, 0, steamStatic>>>(overloadNodeNum, overloadNodeListD, isActiveD1);
}
bfs_kernelStaticSwap<<<grid, block, 0, steamStatic>>>(staticNodeNum, activeNodeListD,
staticNodePointerD, degreeD,
staticEdgeListD, valueD, isActiveD1,
staticFragmentVisitRecordsD, fragment_size,
max_partition_size, testNumNodes);
/*mixDynamicPartLabel<<<grid, block, 0, steamStatic>>>(staticNodeNum, 0, activeNodeListD, isActiveD1,
isActiveD2);
bfs_kernelStatic2Label<<<grid, block, 0, steamStatic>>>(staticNodeNum, activeNodeListD,
staticNodePointerD, degreeD,
staticEdgeListD, valueD, isActiveD1, isActiveD2);*/
if (overloadNodeNum > 0) {
startCpu = std::chrono::steady_clock::now();
/*cudaMemcpyAsync(staticActiveNodeList, activeNodeListD, activeNodesNum * sizeof(uint), cudaMemcpyDeviceToHost,
streamDynamic);*/
cudaMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint), cudaMemcpyDeviceToHost,
streamDynamic);
cudaMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint),
cudaMemcpyDeviceToHost, streamDynamic);
int threadNum = 20;
if (overloadNodeNum < 50) {
threadNum = 1;
}
thread runThreads[threadNum];
for (int i = 0; i < threadNum; i++) {
runThreads[i] = thread(fillDynamic,
i,
threadNum,
0,
overloadNodeNum,
degree,
activeOverloadNodePointers,
nodePointersI,
overloadNodeList,
overloadEdgeList,
edgeList);
}
for (unsigned int t = 0; t < threadNum; t++) {
runThreads[t].join();
}
caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr,
overloadNodeNum, partOverloadSize, overloadEdgeNum);
endReadCpu = std::chrono::steady_clock::now();
durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count();
uint canSwapFragmentNum;
setFragmentDataOpt<<<grid, block, 0, steamStatic>>>(canSwapStaticFragmentDataD, staticFragmentNum,
staticFragmentVisitRecordsD);
canSwapFragmentNum = thrust::reduce(ptr_canSwapFragment, ptr_canSwapFragment + staticFragmentNum);
if (canSwapFragmentNum > 0) {
thrust::exclusive_scan(ptr_canSwapFragment, ptr_canSwapFragment + staticFragmentNum,
ptr_canSwapFragmentPrefixSum);
setStaticFragmentData<<<grid, block, 0, steamStatic>>>(staticFragmentNum, canSwapStaticFragmentDataD,
canSwapFragmentPrefixSumD, staticFragmentDataD);
cudaMemcpyAsync(staticFragmentData, staticFragmentDataD, canSwapFragmentNum * sizeof(uint),
cudaMemcpyDeviceToHost, steamStatic);
}
cudaDeviceSynchronize();
//gpuErrorcheck(cudaPeekAtLastError())
endGpuProcessing = std::chrono::steady_clock::now();
durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endGpuProcessing - startGpuProcessing).count();
startMemoryTraverse = std::chrono::steady_clock::now();
partOverloadSum += partEdgeListInfoArr.size();
for (int i = 0; i < partEdgeListInfoArr.size(); i++) {
gpuErrorcheck(cudaMemcpy(overloadEdgeListD, overloadEdgeList +
activeOverloadNodePointers[partEdgeListInfoArr[i].partStartIndex],
partEdgeListInfoArr[i].partEdgeNums * sizeof(uint),
cudaMemcpyHostToDevice))
startOverloadGpuProcessing = std::chrono::steady_clock::now();
/*mixDynamicPartLabel<<<grid, block, 0, streamDynamic>>>(partEdgeListInfoArr[i].partActiveNodeNums,
partEdgeListInfoArr[i].partStartIndex,
overloadNodeListD, isActiveD1,
isActiveD2);*/
bfs_kernelDynamicPart<<<grid, block, 0, streamDynamic>>>(
partEdgeListInfoArr[i].partStartIndex,
partEdgeListInfoArr[i].partActiveNodeNums,
overloadNodeListD, degreeD,
valueD, isActiveD1,
overloadEdgeListD,
activeOverloadNodePointersD);
if (canSwapFragmentNum > 0) {
startSwap = std::chrono::steady_clock::now();
uint canSwapStaticFragmentIndex = 0;
uint swapSum = 0;
//for (uint i = cursorStartSwap; i > 0; i--) {
for (uint i = cursorStartSwap; i < fragmentNum; i++) {
if (cudaSuccess == cudaStreamQuery(streamDynamic) ||
canSwapStaticFragmentIndex >= canSwapFragmentNum) {
if (i < fragmentNum) {
cursorStartSwap = i + 1;
}
cout << "iter " << iter << " swapSum " << swapSum << " swap to " << i << endl;
swapInEdgeSum += swapSum * fragment_size;
endSwap = std::chrono::steady_clock::now();
durationSwap += std::chrono::duration_cast<std::chrono::milliseconds>(
endSwap - startOverloadGpuProcessing).count();
break;
}
if (cudaErrorNotReady == cudaStreamQuery(streamDynamic)) {
const FragmentData swapFragmentData = fragmentData[i];
if (!swapFragmentData.isVisit && !swapFragmentData.isIn && swapFragmentData.vertexNum > 0) {
uint swapStaticFragmentIndex = staticFragmentData[canSwapStaticFragmentIndex++];
uint beSwappedFragmentIndex = staticFragmentToNormalMap[swapStaticFragmentIndex];
fragmentData[beSwappedFragmentIndex].isVisit = true;
fragmentData[beSwappedFragmentIndex].isIn = false;
FragmentData beSwappedFragment = fragmentData[beSwappedFragmentIndex];
uint moveFrom = testNumEdge;
uint moveTo = testNumEdge;
uint moveNum = testNumEdge;
if (beSwappedFragment.vertexNum > 0 && beSwappedFragmentIndex > 0 &&
beSwappedFragmentIndex < fragmentNum) {
for (uint j = beSwappedFragment.startVertex - 1;
j < beSwappedFragment.startVertex + beSwappedFragment.vertexNum + 1 &&
j < testNumNodes; j++) {
isInStatic[j] = false;
}
for (uint j = swapFragmentData.startVertex;
j < swapFragmentData.startVertex + swapFragmentData.vertexNum; j++) {
isInStatic[j] = true;
staticNodePointer[j] =
nodePointersI[j] - i * fragment_size +
swapStaticFragmentIndex * fragment_size;
}
moveFrom = nodePointersI[swapFragmentData.startVertex];
moveTo = staticNodePointer[swapFragmentData.startVertex];
moveNum = nodePointersI[swapFragmentData.startVertex + swapFragmentData.vertexNum] -
nodePointersI[swapFragmentData.startVertex];
cudaMemcpyAsync(staticEdgeListD + moveTo, edgeList + moveFrom,
moveNum * sizeof(uint),
cudaMemcpyHostToDevice, steamStatic);
cudaMemcpyAsync(isInStaticD + beSwappedFragment.startVertex - 1,
isInStatic + beSwappedFragment.startVertex - 1,
(beSwappedFragment.vertexNum + 2) * sizeof(bool),
cudaMemcpyHostToDevice, steamStatic);
cudaMemcpyAsync(isInStaticD + swapFragmentData.startVertex,
isInStatic + swapFragmentData.startVertex,
swapFragmentData.vertexNum * sizeof(bool),
cudaMemcpyHostToDevice, steamStatic);
cudaMemcpyAsync(staticNodePointerD + swapFragmentData.startVertex,
staticNodePointer + swapFragmentData.startVertex,
swapFragmentData.vertexNum * sizeof(uint), cudaMemcpyHostToDevice,
steamStatic);
staticFragmentToNormalMap[swapStaticFragmentIndex] = i;
fragmentData[i].isIn = true;
swapSum++;
}
}
}
}
}
cudaDeviceSynchronize();
endOverloadGpuProcessing = std::chrono::steady_clock::now();
durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endOverloadGpuProcessing - startOverloadGpuProcessing).count();
}
endMemoryTraverse = std::chrono::steady_clock::now();
durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>(
endMemoryTraverse - startMemoryTraverse).count();
//gpuErrorcheck(cudaPeekAtLastError())
} else {
cudaDeviceSynchronize();
endGpuProcessing = std::chrono::steady_clock::now();
durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endGpuProcessing - startGpuProcessing).count();
}
cudaDeviceSynchronize();
startPreGpuProcessing = std::chrono::steady_clock::now();
/*mixCommonLabel<<<grid, block, 0, streamDynamic>>>(testNumNodes, isActiveD1, isActiveD2);
cudaDeviceSynchronize();*/
activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
nodeSum += activeNodesNum;
endPreGpuProcessing = std::chrono::steady_clock::now();
durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endPreGpuProcessing - startPreGpuProcessing).count();
}
//cudaDeviceSynchronize();
auto endRead = std::chrono::steady_clock::now();
durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
cout << "finish time : " << durationRead << " ms" << endl;
cout << "cpu time : " << durationReadCpu << " ms" << endl;
cout << "fact processing time : " << durationGpuProcessing << " ms" << endl;
cout << "durationOverloadGpuProcessing time : " << durationOverloadGpuProcessing << " ms" << endl;
cout << "durationMemoryTraverse time : " << durationMemoryTraverse << " ms" << endl;
cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl;
cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl;
cout << "partOverloadSum : " << partOverloadSum << " " << endl;
cout << "nodeSum: " << nodeSum << endl;
cudaFree(staticEdgeListD);
cudaFree(degreeD);
cudaFree(isActiveD1);
cudaFree(valueD);
cudaFree(activeNodeListD);
cudaFree(activeNodeLabelingPrefixD);
cudaFree(activeOverloadNodePointersD);
cudaFree(activeOverloadDegreeD);
cudaFree(isInStaticD);
cudaFree(staticNodePointerD);
cudaFree(overloadNodeListD);
cudaFree(staticFragmentVisitRecordsD);
cudaFree(staticFragmentDataD);
cudaFree(canSwapStaticFragmentDataD);
cudaFree(canSwapFragmentPrefixSumD);
cudaFree(overloadEdgeListD);
cudaFree(isStaticActive);
cudaFree(isOverloadActive);
cudaFree(overloadLabelingPrefixD);
delete[] label;
delete[] degree;
delete[] value;
delete[] activeNodeList;
delete[] activeOverloadNodePointers;
delete[] staticFragmentData;
delete[] isInStatic;
delete[] overloadNodeList;
delete[] staticNodePointer;
delete[] staticFragmentToNormalMap;
delete[] fragmentData;
delete[] overloadFragmentData;
delete[] overloadEdgeList;
partEdgeListInfoArr.clear();
return durationRead;
}
/// BFS over a CSR graph whose edge list does not fully fit in device memory,
/// without unified memory. A "static" prefix of the edge list stays resident
/// on the GPU; edges of active vertices that fall outside it ("overload"
/// edges) are gathered on the host each iteration by a pool of CPU threads
/// and streamed to the device in parts sized to the remaining GPU capacity.
///
/// @param testNumNodes  number of vertices in the CSR graph
/// @param testNumEdge   number of edges in the CSR graph
/// @param nodePointersI host CSR row-offset array (length testNumNodes)
/// @param edgeList      host CSR adjacency array (length testNumEdge)
/// @param sourceNode    BFS root vertex
/// @param adviseK       memory-split hint forwarded to getMaxPartitionSize
/// @return elapsed processing time in milliseconds, or 0 if the host-side
///         overload staging buffer could not be allocated
long
bfsCaculateInAsyncNoUVM(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, uint sourceNode,
                        float adviseK) {
    cout << "=========bfsCaculateInAsyncNoUVM========" << endl;
    ulong edgeIterationMax = 0;       // largest overload edge batch seen in any iteration
    auto startPreCaculate = std::chrono::steady_clock::now();
    //CPU-side buffers
    long durationRead;
    ulong transferSum = 0;            // total uints copied host->device for edge data
    unsigned long max_partition_size; // number of edges kept resident on the GPU
    unsigned long total_gpu_size;     // number of edges the GPU can hold in total
    uint maxStaticNode = 0;           // highest vertex whose edges are fully in the static partition
    uint *degree;
    uint *value;
    uint *label;
    bool *isInStatic;
    uint *overloadNodeList;
    uint *staticNodePointer;
    uint *activeNodeList;
    uint *activeOverloadNodePointers;
    vector<PartEdgeListInfo> partEdgeListInfoArr;
    /*
     * overloadEdgeList: host staging area for the edges of overload-active
     * vertices, refilled every iteration
     * */
    uint *overloadEdgeList;
    //GPU-side buffers
    uint *staticEdgeListD;
    uint *overloadEdgeListD;
    bool *isInStaticD;
    uint *overloadNodeListD;
    uint *staticNodePointerD;
    uint *degreeD;
    // async processing needs separate static/overload activity labels
    uint *isActiveD1;
    uint *isStaticActive;
    uint *isOverloadActive;
    uint *valueD;
    uint *activeNodeListD;
    uint *activeNodeLabelingPrefixD;
    uint *overloadLabelingPrefixD;
    uint *activeOverloadNodePointersD;
    uint *activeOverloadDegreeD;
    degree = new uint[testNumNodes];
    value = new uint[testNumNodes];
    label = new uint[testNumNodes];
    isInStatic = new bool[testNumNodes];
    overloadNodeList = new uint[testNumNodes];
    staticNodePointer = new uint[testNumNodes];
    activeNodeList = new uint[testNumNodes];
    activeOverloadNodePointers = new uint[testNumNodes];
    getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, adviseK, sizeof(uint), testNumEdge, 15);
    //derive per-vertex degree from consecutive CSR offsets
    uint meanDegree = testNumEdge / testNumNodes;
    cout << " meanDegree " << meanDegree << endl;
    for (uint i = 0; i < testNumNodes - 1; i++) {
        if (nodePointersI[i] > testNumEdge) {
            // sanity check: a row offset past the edge count means corrupt input
            cout << i << " " << nodePointersI[i] << endl;
            break;
        }
        degree[i] = nodePointersI[i + 1] - nodePointersI[i];
    }
    degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1];
    memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint));
    //copy the static edge partition to the device once, up front
    gpuErrorcheck(cudaMalloc(&staticEdgeListD, max_partition_size * sizeof(uint)));
    auto startmove = std::chrono::steady_clock::now();
    gpuErrorcheck(
            cudaMemcpy(staticEdgeListD, edgeList, max_partition_size * sizeof(uint), cudaMemcpyHostToDevice));
    auto endMove = std::chrono::steady_clock::now();
    long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>(
            endMove - startmove).count();
    cout << "move duration " << testDuration << endl;
    gpuErrorcheck(cudaMalloc(&isInStaticD, testNumNodes * sizeof(bool)))
    gpuErrorcheck(cudaMalloc(&overloadNodeListD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&staticNodePointerD, testNumNodes * sizeof(uint)))
    gpuErrorcheck(cudaMemcpy(staticNodePointerD, nodePointersI, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
    for (uint i = 0; i < testNumNodes; i++) {
        label[i] = 0;
        value[i] = UINT_MAX - 1;
        // a vertex is "static" iff its whole adjacency lies inside the resident partition
        if (nodePointersI[i] < max_partition_size && (nodePointersI[i] + degree[i] - 1) < max_partition_size) {
            isInStatic[i] = true;
            if (i > maxStaticNode) maxStaticNode = i;
        } else {
            isInStatic[i] = false;
        }
    }
    label[sourceNode] = 1;
    value[sourceNode] = 1;
    cudaMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice);
    cout << "max_partition_size: " << max_partition_size << " maxStaticNode: " << maxStaticNode << endl;
    // remaining GPU capacity holds one overload part at a time
    uint partOverloadSize = total_gpu_size - max_partition_size;
    // NOTE(review): assumes maxStaticNode + 1 < testNumNodes; if the whole
    // graph fits in the static partition this reads one past the offset array
    uint overloadSize = testNumEdge - nodePointersI[maxStaticNode + 1];
    cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl;
    overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint));
    if (overloadEdgeList == NULL) {
        cout << "overloadEdgeList is null" << endl;
        return 0;
    }
    gpuErrorcheck(cudaMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&degreeD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&isActiveD1, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&isStaticActive, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&isOverloadActive, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&valueD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
    gpuErrorcheck(cudaMalloc(&overloadLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
    gpuErrorcheck(cudaMalloc(&activeNodeListD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMemcpy(degreeD, degree, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
    gpuErrorcheck(cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
    gpuErrorcheck(cudaMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
    gpuErrorcheck(cudaMemset(isStaticActive, 0, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMemset(isOverloadActive, 0, testNumNodes * sizeof(uint)));
    //launch configuration shared by all kernels below
    dim3 grid = dim3(56, 1, 1);
    dim3 block = dim3(1024, 1, 1);
    //thrust views over the device label/prefix arrays
    thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1);
    thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive);
    thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive);
    thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
    thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD);
    thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(overloadLabelingPrefixD);
    uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
    int iter = 0;
    uint nodeSum = activeNodesNum;
    ulong overloadEdgeSum = 0;
    //timing bookkeeping
    auto startCpu = std::chrono::steady_clock::now();
    auto endReadCpu = std::chrono::steady_clock::now();
    long durationReadCpu = 0;
    auto startGpuProcessing = std::chrono::steady_clock::now();
    auto endGpuProcessing = std::chrono::steady_clock::now();
    long durationGpuProcessing = 0;
    auto startOverloadGpuProcessing = std::chrono::steady_clock::now();
    auto endOverloadGpuProcessing = std::chrono::steady_clock::now();
    long durationOverloadGpuProcessing = 0;
    auto startPreGpuProcessing = std::chrono::steady_clock::now();
    auto endPreGpuProcessing = std::chrono::steady_clock::now();
    long durationPreGpuProcessing = 0;
    auto endPreCaculate = std::chrono::steady_clock::now();
    long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
            endPreCaculate - startPreCaculate).count();
    cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl;
    cudaStream_t steamStatic, streamDynamic;
    cudaStreamCreate(&steamStatic);
    cudaStreamCreate(&streamDynamic);
    auto startMemoryTraverse = std::chrono::steady_clock::now();
    auto endMemoryTraverse = std::chrono::steady_clock::now();
    long durationMemoryTraverse = 0;
    auto startProcessing = std::chrono::steady_clock::now();
    uint partOverloadSum = 0;
    while (activeNodesNum > 0) {
        startPreGpuProcessing = std::chrono::steady_clock::now();
        iter++;
        //split the active frontier into static-resident and overload vertices
        setStaticAndOverloadLabel<<<grid, block>>>(testNumNodes, isActiveD1, isStaticActive, isOverloadActive,
                                                   isInStaticD);
        uint staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes);
        if (staticNodeNum > 0) {
            //compact static-active vertices into activeNodeListD
            thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum);
            setStaticActiveNodeArray<<<grid, block>>>(testNumNodes, activeNodeListD, isStaticActive,
                                                      activeNodeLabelingPrefixD);
        }
        uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes);
        uint overloadEdgeNum = 0;
        if (overloadNodeNum > 0) {
            //compact overload-active vertices and build their edge-offset table
            thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes, ptrOverloadPrefixsum);
            setOverloadNodePointerSwap<<<grid, block>>>(testNumNodes, overloadNodeListD, activeOverloadDegreeD,
                                                        isOverloadActive,
                                                        overloadLabelingPrefixD, degreeD);
            thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, activeOverloadNodePointersD);
            // 0u init keeps the accumulation unsigned (the init value fixes
            // thrust::reduce's accumulator type; a plain 0 would sum in int)
            overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree,
                                             ptrOverloadDegree + overloadNodeNum, 0u);
            overloadEdgeSum += overloadEdgeNum;
            if (overloadEdgeNum > edgeIterationMax) {
                edgeIterationMax = overloadEdgeNum;
            }
        }
        endPreGpuProcessing = std::chrono::steady_clock::now();
        durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                endPreGpuProcessing - startPreGpuProcessing).count();
        startGpuProcessing = std::chrono::steady_clock::now();
        //clear labels of the vertices processed this iteration
        if (staticNodeNum > 0) {
            setLabelDefaultOpt<<<grid, block, 0, steamStatic>>>(staticNodeNum, activeNodeListD, isActiveD1);
        }
        if (overloadNodeNum > 0) {
            setLabelDefaultOpt<<<grid, block, 0, steamStatic>>>(overloadNodeNum, overloadNodeListD, isActiveD1);
        }
        //process the static partition on its own stream
        bfs_kernelStatic<<<grid, block, 0, steamStatic>>>(staticNodeNum, activeNodeListD,
                                                          staticNodePointerD, degreeD,
                                                          staticEdgeListD, valueD, isActiveD1);
        if (overloadNodeNum > 0) {
            startCpu = std::chrono::steady_clock::now();
            cudaMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint), cudaMemcpyDeviceToHost,
                            streamDynamic);
            cudaMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint),
                            cudaMemcpyDeviceToHost, streamDynamic);
            // ensure both D2H copies have landed before host threads read
            // overloadNodeList / activeOverloadNodePointers (pageable D2H is
            // synchronous per the CUDA docs; this makes the ordering explicit)
            cudaStreamSynchronize(streamDynamic);
            int threadNum = 20;
            if (overloadNodeNum < 50) {
                threadNum = 1;
            }
            //gather overload edges into the host staging buffer in parallel
            vector<thread> runThreads;
            runThreads.reserve(threadNum);
            for (int i = 0; i < threadNum; i++) {
                runThreads.emplace_back(fillDynamic,
                                        i,
                                        threadNum,
                                        0,
                                        overloadNodeNum,
                                        degree,
                                        activeOverloadNodePointers,
                                        nodePointersI,
                                        overloadNodeList,
                                        overloadEdgeList,
                                        edgeList);
            }
            for (auto &t : runThreads) {
                t.join();
            }
            //split the gathered edges into parts that fit partOverloadSize
            caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr,
                                        overloadNodeNum, partOverloadSize, overloadEdgeNum);
            endReadCpu = std::chrono::steady_clock::now();
            durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count();
            cudaDeviceSynchronize();
            endGpuProcessing = std::chrono::steady_clock::now();
            durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                    endGpuProcessing - startGpuProcessing).count();
            partOverloadSum += partEdgeListInfoArr.size();
            for (int i = 0; i < partEdgeListInfoArr.size(); i++) {
                startMemoryTraverse = std::chrono::steady_clock::now();
                gpuErrorcheck(cudaMemcpy(overloadEdgeListD, overloadEdgeList +
                                                            activeOverloadNodePointers[partEdgeListInfoArr[i].partStartIndex],
                                         partEdgeListInfoArr[i].partEdgeNums * sizeof(uint),
                                         cudaMemcpyHostToDevice))
                transferSum += partEdgeListInfoArr[i].partEdgeNums;
                endMemoryTraverse = std::chrono::steady_clock::now();
                durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>(
                        endMemoryTraverse - startMemoryTraverse).count();
                startOverloadGpuProcessing = std::chrono::steady_clock::now();
                bfs_kernelDynamicPart<<<grid, block, 0, streamDynamic>>>(
                        partEdgeListInfoArr[i].partStartIndex,
                        partEdgeListInfoArr[i].partActiveNodeNums,
                        overloadNodeListD, degreeD,
                        valueD, isActiveD1,
                        overloadEdgeListD,
                        activeOverloadNodePointersD);
                cudaDeviceSynchronize();
                endOverloadGpuProcessing = std::chrono::steady_clock::now();
                durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                        endOverloadGpuProcessing - startOverloadGpuProcessing).count();
            }
        } else {
            cudaDeviceSynchronize();
            endGpuProcessing = std::chrono::steady_clock::now();
            durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                    endGpuProcessing - startGpuProcessing).count();
        }
        cudaDeviceSynchronize();
        startPreGpuProcessing = std::chrono::steady_clock::now();
        activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
        nodeSum += activeNodesNum;
        endPreGpuProcessing = std::chrono::steady_clock::now();
        durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                endPreGpuProcessing - startPreGpuProcessing).count();
    }
    auto endRead = std::chrono::steady_clock::now();
    durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
    transferSum += max_partition_size;
    cout << "iterationSum " << iter << endl;
    double edgeIterationAvg = (double) overloadEdgeSum / (double) testNumEdge / iter;
    double edgeIterationMaxAvg = (double) edgeIterationMax / (double) testNumEdge;
    cout << "edgeIterationAvg " << edgeIterationAvg << " edgeIterationMaxAvg " << edgeIterationMaxAvg << endl;
    cout << "transferSum : " << transferSum * 4 << " byte" << endl;
    cout << "finish time : " << durationRead << " ms" << endl;
    cout << "total time : " << durationRead + testDuration << " ms" << endl;
    cout << "cpu time : " << durationReadCpu << " ms" << endl;
    cout << "fact processing time : " << durationGpuProcessing << " ms" << endl;
    cout << "durationOverloadGpuProcessing time : " << durationOverloadGpuProcessing << " ms" << endl;
    cout << "durationMemoryTraverse time : " << durationMemoryTraverse << " ms" << endl;
    cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl;
    cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl;
    cout << "partOverloadSum : " << partOverloadSum << " " << endl;
    cout << "nodeSum: " << nodeSum << endl;
    cudaFree(staticEdgeListD);
    cudaFree(degreeD);
    cudaFree(isActiveD1);
    cudaFree(valueD);
    cudaFree(activeNodeListD);
    cudaFree(activeNodeLabelingPrefixD);
    cudaFree(activeOverloadNodePointersD);
    cudaFree(activeOverloadDegreeD);
    cudaFree(isInStaticD);
    cudaFree(staticNodePointerD);
    cudaFree(overloadNodeListD);
    cudaFree(overloadEdgeListD);
    cudaFree(isStaticActive);
    cudaFree(isOverloadActive);
    cudaFree(overloadLabelingPrefixD);
    //streams were created above; destroy them to avoid leaking driver resources
    cudaStreamDestroy(steamStatic);
    cudaStreamDestroy(streamDynamic);
    delete[] label;
    delete[] degree;
    delete[] value;
    delete[] activeNodeList;
    delete[] activeOverloadNodePointers;
    delete[] isInStatic;
    delete[] overloadNodeList;
    delete[] staticNodePointer;
    // allocated with malloc above, so release with free (delete[] here is UB)
    free(overloadEdgeList);
    partEdgeListInfoArr.clear();
    return durationRead;
}
long
bfsCaculateInAsyncNoUVMVisitRecord(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList,
uint sourceNode,
float adviseK) {
cout << "=========bfsCaculateInAsyncNoUVM========" << endl;
ulong edgeIterationMax = 0;
auto start = std::chrono::steady_clock::now();
auto startPreCaculate = std::chrono::steady_clock::now();
//CPU
long durationRead;
ulong transferSum = 0;
unsigned long max_partition_size;
unsigned long total_gpu_size;
uint maxStaticNode = 0;
uint *degree;
uint *value;
uint *label;
bool *isInStatic;
uint *overloadNodeList;
uint *staticNodePointer;
uint *activeNodeList;
uint *activeOverloadNodePointers;
vector<PartEdgeListInfo> partEdgeListInfoArr;
/*
* overloadEdgeList overload edge list in every iteration
* */
uint *overloadEdgeList;
bool isFromTail = false;
//GPU
uint *staticEdgeListD;
uint *overloadEdgeListD;
bool *isInStaticD;
uint *overloadNodeListD;
uint *staticNodePointerD;
uint *degreeD;
// async need two labels
uint *isActiveD1;
uint *isStaticActive;
uint *isOverloadActive;
uint *valueD;
uint *activeNodeListD;
uint *activeNodeLabelingPrefixD;
uint *overloadLabelingPrefixD;
uint *activeOverloadNodePointersD;
uint *activeOverloadDegreeD;
degree = new uint[testNumNodes];
value = new uint[testNumNodes];
label = new uint[testNumNodes];
isInStatic = new bool[testNumNodes];
overloadNodeList = new uint[testNumNodes];
staticNodePointer = new uint[testNumNodes];
activeNodeList = new uint[testNumNodes];
activeOverloadNodePointers = new uint[testNumNodes];
uint *vertexVisitRecord;
uint *vertexVisitRecordD;
vertexVisitRecord = new uint[testNumNodes];
cudaMalloc(&vertexVisitRecordD, testNumNodes * sizeof(uint));
cudaMemset(vertexVisitRecordD, 0, testNumNodes * sizeof(uint));
//getMaxPartitionSize(max_partition_size, testNumNodes);
getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, adviseK, sizeof(uint), testNumEdge, 15);
//caculate degree
uint meanDegree = testNumEdge / testNumNodes;
cout << " meanDegree " << meanDegree << endl;
uint degree0Sum = 0;
for (uint i = 0; i < testNumNodes - 1; i++) {
if (nodePointersI[i] > testNumEdge) {
cout << i << " " << nodePointersI[i] << endl;
break;
}
degree[i] = nodePointersI[i + 1] - nodePointersI[i];
}
degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1];
memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint));
//caculate static staticEdgeListD
gpuErrorcheck(cudaMalloc(&staticEdgeListD, max_partition_size * sizeof(uint)));
auto startmove = std::chrono::steady_clock::now();
gpuErrorcheck(
cudaMemcpy(staticEdgeListD, edgeList, max_partition_size * sizeof(uint), cudaMemcpyHostToDevice));
auto endMove = std::chrono::steady_clock::now();
long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>(
endMove - startmove).count();
cout << "move duration " << testDuration << endl;
gpuErrorcheck(cudaMalloc(&isInStaticD, testNumNodes * sizeof(bool)))
gpuErrorcheck(cudaMalloc(&overloadNodeListD, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&staticNodePointerD, testNumNodes * sizeof(uint)))
gpuErrorcheck(cudaMemcpy(staticNodePointerD, nodePointersI, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
for (uint i = 0; i < testNumNodes; i++) {
label[i] = 0;
value[i] = UINT_MAX - 1;
if (nodePointersI[i] < max_partition_size && (nodePointersI[i] + degree[i] - 1) < max_partition_size) {
isInStatic[i] = true;
if (i > maxStaticNode) maxStaticNode = i;
} else {
isInStatic[i] = false;
}
}
label[sourceNode] = 1;
value[sourceNode] = 1;
cudaMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice);
cout << "max_partition_size: " << max_partition_size << " maxStaticNode: " << maxStaticNode << endl;
//uint partOverloadSize = max_partition_size / 2;
uint partOverloadSize = total_gpu_size - max_partition_size;
uint overloadSize = testNumEdge - nodePointersI[maxStaticNode + 1];
cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl;
overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint));
if (overloadEdgeList == NULL) {
cout << "overloadEdgeList is null" << endl;
return 0;
}
gpuErrorcheck(cudaMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint)));
//gpuErrorcheck(cudaMallocManaged(&edgeListOverloadManage, overloadSize * sizeof(uint)));
gpuErrorcheck(cudaMalloc(°reeD, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&isActiveD1, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&isStaticActive, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&isOverloadActive, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&valueD, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
gpuErrorcheck(cudaMalloc(&overloadLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
gpuErrorcheck(cudaMalloc(&activeNodeListD, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMemcpy(degreeD, degree, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemset(isStaticActive, 0, testNumNodes * sizeof(uint)));
gpuErrorcheck(cudaMemset(isOverloadActive, 0, testNumNodes * sizeof(uint)));
//cacaulate the active node And make active node array
dim3 grid = dim3(56, 1, 1);
dim3 block = dim3(1024, 1, 1);
//setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD);
thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1);
thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive);
thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive);
thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD);
thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(overloadLabelingPrefixD);
uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
int iter = 0;
uint nodeSum = activeNodesNum;
ulong overloadEdgeSum = 0;
auto startCpu = std::chrono::steady_clock::now();
auto endReadCpu = std::chrono::steady_clock::now();
long durationReadCpu = 0;
auto startSwap = std::chrono::steady_clock::now();
auto endSwap = std::chrono::steady_clock::now();
long durationSwap = 0;
auto startGpuProcessing = std::chrono::steady_clock::now();
auto endGpuProcessing = std::chrono::steady_clock::now();
long durationGpuProcessing = 0;
auto startOverloadGpuProcessing = std::chrono::steady_clock::now();
auto endOverloadGpuProcessing = std::chrono::steady_clock::now();
long durationOverloadGpuProcessing = 0;
auto startPreGpuProcessing = std::chrono::steady_clock::now();
auto endPreGpuProcessing = std::chrono::steady_clock::now();
long durationPreGpuProcessing = 0;
auto endPreCaculate = std::chrono::steady_clock::now();
long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
endPreCaculate - startPreCaculate).count();
cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl;
cudaStream_t steamStatic, streamDynamic;
cudaStreamCreate(&steamStatic);
cudaStreamCreate(&streamDynamic);
auto startMemoryTraverse = std::chrono::steady_clock::now();
auto endMemoryTraverse = std::chrono::steady_clock::now();
long durationMemoryTraverse = 0;
auto startProcessing = std::chrono::steady_clock::now();
//uint cursorStartSwap = staticFragmentNum + 1;
uint swapValidNodeSum = 0;
uint swapValidEdgeSum = 0;
uint swapNotValidNodeSum = 0;
uint swapNotValidEdgeSum = 0;
uint visitEdgeSum = 0;
uint swapInEdgeSum = 0;
uint partOverloadSum = 0;
while (activeNodesNum > 0) {
startPreGpuProcessing = std::chrono::steady_clock::now();
iter++;
//cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl;
setStaticAndOverloadLabelAndRecord<<<grid, block>>>(testNumNodes, isActiveD1, isStaticActive, isOverloadActive,
isInStaticD, vertexVisitRecordD);
uint staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes);
if (staticNodeNum > 0) {
//cout << "iter " << iter << " staticNodeNum " << staticNodeNum << endl;
thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum);
setStaticActiveNodeArray<<<grid, block>>>(testNumNodes, activeNodeListD, isStaticActive,
activeNodeLabelingPrefixD);
}
uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes);
uint overloadEdgeNum = 0;
if (overloadNodeNum > 0) {
//cout << "iter " << iter << " overloadNodeNum " << overloadNodeNum << endl;
thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes, ptrOverloadPrefixsum);
setOverloadNodePointerSwap<<<grid, block>>>(testNumNodes, overloadNodeListD, activeOverloadDegreeD,
isOverloadActive,
overloadLabelingPrefixD, degreeD);
thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, activeOverloadNodePointersD);
overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree,
ptrOverloadDegree + overloadNodeNum, 0);
//cout << "iter " << iter << " overloadEdgeNum " << overloadEdgeNum << endl;
overloadEdgeSum += overloadEdgeNum;
if (overloadEdgeNum > edgeIterationMax) {
edgeIterationMax = overloadEdgeNum;
}
}
endPreGpuProcessing = std::chrono::steady_clock::now();
durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endPreGpuProcessing - startPreGpuProcessing).count();
startGpuProcessing = std::chrono::steady_clock::now();
if (staticNodeNum > 0) {
setLabelDefaultOpt<<<grid, block, 0, steamStatic>>>(staticNodeNum, activeNodeListD, isActiveD1);
}
if (overloadNodeNum > 0) {
setLabelDefaultOpt<<<grid, block, 0, steamStatic>>>(overloadNodeNum, overloadNodeListD, isActiveD1);
}
bfs_kernelStatic<<<grid, block, 0, steamStatic>>>(staticNodeNum, activeNodeListD,
staticNodePointerD, degreeD,
staticEdgeListD, valueD, isActiveD1);
cudaDeviceSynchronize();
if (overloadNodeNum > 0) {
startCpu = std::chrono::steady_clock::now();
cudaMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint), cudaMemcpyDeviceToHost,
streamDynamic);
cudaMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint),
cudaMemcpyDeviceToHost, streamDynamic);
int threadNum = 20;
if (overloadNodeNum < 50) {
threadNum = 1;
}
thread runThreads[threadNum];
for (int i = 0; i < threadNum; i++) {
runThreads[i] = thread(fillDynamic,
i,
threadNum,
0,
overloadNodeNum,
degree,
activeOverloadNodePointers,
nodePointersI,
overloadNodeList,
overloadEdgeList,
edgeList);
}
for (unsigned int t = 0; t < threadNum; t++) {
runThreads[t].join();
}
caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr,
overloadNodeNum, partOverloadSize, overloadEdgeNum);
endReadCpu = std::chrono::steady_clock::now();
durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count();
cudaDeviceSynchronize();
//gpuErrorcheck(cudaPeekAtLastError())
endGpuProcessing = std::chrono::steady_clock::now();
durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endGpuProcessing - startGpuProcessing).count();
startMemoryTraverse = std::chrono::steady_clock::now();
partOverloadSum += partEdgeListInfoArr.size();
for (int i = 0; i < partEdgeListInfoArr.size(); i++) {
gpuErrorcheck(cudaMemcpy(overloadEdgeListD, overloadEdgeList +
activeOverloadNodePointers[partEdgeListInfoArr[i].partStartIndex],
partEdgeListInfoArr[i].partEdgeNums * sizeof(uint),
cudaMemcpyHostToDevice))
transferSum += partEdgeListInfoArr[i].partEdgeNums;
startOverloadGpuProcessing = std::chrono::steady_clock::now();
bfs_kernelDynamicPart<<<grid, block, 0, streamDynamic>>>(
partEdgeListInfoArr[i].partStartIndex,
partEdgeListInfoArr[i].partActiveNodeNums,
overloadNodeListD, degreeD,
valueD, isActiveD1,
overloadEdgeListD,
activeOverloadNodePointersD);
cudaDeviceSynchronize();
endOverloadGpuProcessing = std::chrono::steady_clock::now();
durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endOverloadGpuProcessing - startOverloadGpuProcessing).count();
}
endMemoryTraverse = std::chrono::steady_clock::now();
durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>(
endMemoryTraverse - startMemoryTraverse).count();
//gpuErrorcheck(cudaPeekAtLastError())
} else {
cudaDeviceSynchronize();
endGpuProcessing = std::chrono::steady_clock::now();
durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endGpuProcessing - startGpuProcessing).count();
}
cudaDeviceSynchronize();
startPreGpuProcessing = std::chrono::steady_clock::now();
/*mixCommonLabel<<<grid, block, 0, streamDynamic>>>(testNumNodes, isActiveD1, isActiveD2);
cudaDeviceSynchronize();*/
activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
nodeSum += activeNodesNum;
endPreGpuProcessing = std::chrono::steady_clock::now();
durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
endPreGpuProcessing - startPreGpuProcessing).count();
}
//cudaDeviceSynchronize();
auto endRead = std::chrono::steady_clock::now();
durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
transferSum += max_partition_size;
cudaMemcpy(vertexVisitRecord, vertexVisitRecordD, testNumNodes * sizeof(uint), cudaMemcpyDeviceToHost);
uint partNum = 50;
uint partSize = testNumEdge / partNum;
vector<uint> partVistRecordList(partNum + 1);
uint partSizeCursor = 0;
for (uint i = 0; i < testNumNodes; i++) {
uint edgeStartIndex = nodePointersI[i];
uint edgeEndIndex = nodePointersI[i] + degree[i];
uint maxPartIndex = partSizeCursor * partSize + partSize;
if (edgeStartIndex < maxPartIndex && edgeEndIndex < maxPartIndex) {
partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * degree[i];
} else if (edgeStartIndex < maxPartIndex && edgeEndIndex >= maxPartIndex) {
partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * (maxPartIndex - edgeStartIndex);
partSizeCursor += 1;
partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * (edgeEndIndex - maxPartIndex);
} else {
partSizeCursor += 1;
partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * degree[i];
}
}
for (uint i = 0; i < partNum + 1; i++) {
cout << "part " << i << " is " << partVistRecordList[i] << endl;
}
for (uint i = 0; i < partNum + 1; i++) {
cout << partVistRecordList[i] << "\t";
}
cout << "iterationSum " << iter << endl;
double edgeIterationAvg = (double) overloadEdgeSum / (double) testNumEdge / iter;
double edgeIterationMaxAvg = (double) edgeIterationMax / (double) testNumEdge;
cout << "edgeIterationAvg " << edgeIterationAvg << " edgeIterationMaxAvg " << edgeIterationMaxAvg << endl;
cout << "transferSum : " << transferSum * 4 << " byte" << endl;
cout << "finish time : " << durationRead << " ms" << endl;
cout << "total time : " << durationRead + testDuration << " ms" << endl;
cout << "cpu time : " << durationReadCpu << " ms" << endl;
cout << "fact processing time : " << durationGpuProcessing << " ms" << endl;
cout << "durationOverloadGpuProcessing time : " << durationOverloadGpuProcessing << " ms" << endl;
cout << "durationMemoryTraverse time : " << durationMemoryTraverse << " mskail" << endl;
cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl;
cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl;
cout << "partOverloadSum : " << partOverloadSum << " " << endl;
cout << "nodeSum: " << nodeSum << endl;
cudaFree(staticEdgeListD);
cudaFree(degreeD);
cudaFree(isActiveD1);
cudaFree(valueD);
cudaFree(activeNodeListD);
cudaFree(activeNodeLabelingPrefixD);
cudaFree(activeOverloadNodePointersD);
cudaFree(activeOverloadDegreeD);
cudaFree(isInStaticD);
cudaFree(staticNodePointerD);
cudaFree(overloadNodeListD);
cudaFree(overloadEdgeListD);
cudaFree(isStaticActive);
cudaFree(isOverloadActive);
cudaFree(overloadLabelingPrefixD);
delete[] label;
delete[] degree;
delete[] value;
delete[] activeNodeList;
delete[] activeOverloadNodePointers;
delete[] isInStatic;
delete[] overloadNodeList;
delete[] staticNodePointer;
delete[] overloadEdgeList;
partEdgeListInfoArr.clear();
return durationRead;
}
// Reads a binary CSR graph from 'bfsPath' (layout: uint numNodes, uint numEdges,
// uint nodePointers[numNodes], uint edges[numEdges]) into host + managed memory,
// then runs the traced shared-memory BFS from 'sampleSourceNode'.
// NOTE(review): the loop below breaks after the first iteration, so exactly one
// BFS run is timed regardless of testTimes — confirm this is intended.
void bfsShareTrace(string bfsPath, int sampleSourceNode) {
    uint testNumNodes = 0;
    ulong testNumEdge = 0;
    uint *nodePointersI;
    uint *edgeList;
    bool isUseShare = true;
    auto startReadGraph = std::chrono::steady_clock::now();
    ifstream infile(bfsPath, ios::in | ios::binary);
    if (!infile.is_open()) {
        // Fail fast instead of reading garbage from a failed stream.
        cout << "can not open file " << bfsPath << endl;
        return;
    }
    infile.read((char *) &testNumNodes, sizeof(uint));
    uint numEdge = 0;
    infile.read((char *) &numEdge, sizeof(uint));
    testNumEdge = numEdge;
    cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl;
    nodePointersI = new uint[testNumNodes];
    infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes);
    gpuErrorcheck(cudaMallocManaged(&edgeList, (numEdge) * sizeof(uint)));
    // NOTE(review): nodePointersI is plain host memory, not managed; this
    // cudaMemAdvise (and its +1 element size) is at best a no-op — verify.
    cudaMemAdvise(nodePointersI, (testNumNodes + 1) * sizeof(uint), cudaMemAdviseSetReadMostly, 0);
    cudaMemAdvise(edgeList, (numEdge) * sizeof(uint), cudaMemAdviseSetReadMostly, 0);
    infile.read((char *) edgeList, sizeof(uint) * testNumEdge);
    infile.close();
    //preprocessData(nodePointersI, edgeList, testNumNodes, testNumEdge);
    auto endReadGraph = std::chrono::steady_clock::now();
    long durationReadGraph = std::chrono::duration_cast<std::chrono::milliseconds>(
            endReadGraph - startReadGraph).count();
    cout << "read graph time : " << durationReadGraph << "ms" << endl;
    int testTimes = 1;
    long timeSum = 0;
    for (int i = 0; i < testTimes; i++) {
        uint sourceNode = rand() % testNumNodes;
        sourceNode = sampleSourceNode;
        cout << "sourceNode " << sourceNode << endl;
        timeSum += bfsCaculateInShareTrace(testNumNodes, testNumEdge, nodePointersI, edgeList, sourceNode);
        //timeSum += bfsCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList, 53037907);
        break;
    }
    // Release the graph buffers (both were leaked in the original version).
    cudaFree(edgeList);
    delete[] nodePointersI;
}
long
bfsCaculateInShareTrace(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, uint sourceNode) {
auto start = std::chrono::steady_clock::now();
uint *degree = new uint[testNumNodes];
uint *value = new uint[testNumNodes];
uint sourceCode = 0;
auto startPreCaculate = std::chrono::steady_clock::now();
for (uint i = 0; i < testNumNodes - 1; i++) {
degree[i] = nodePointersI[i + 1] - nodePointersI[i];
}
degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1];
sourceCode = sourceNode;
bool *label = new bool[testNumNodes];
for (uint i = 0; i < testNumNodes; i++) {
label[i] = false;
value[i] = UINT_MAX;
}
label[sourceCode] = true;
value[sourceCode] = 1;
uint *activeNodeListD;
uint *degreeD;
uint *valueD;
bool *labelD;
uint *nodePointersD;
cudaMalloc(&activeNodeListD, testNumNodes * sizeof(uint));
cudaMalloc(&nodePointersD, testNumNodes * sizeof(uint));
cudaMalloc(°reeD, testNumNodes * sizeof(uint));
cudaMalloc(&valueD, testNumNodes * sizeof(uint));
cudaMalloc(&labelD, testNumNodes * sizeof(bool));
cudaMemcpy(degreeD, degree, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice);
cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice);
cudaMemcpy(labelD, label, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice);
cudaMemcpy(nodePointersD, nodePointersI, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice);
//cacaulate the active node And make active node array
uint *activeNodeLabelingD;
gpuErrorcheck(cudaMalloc(&activeNodeLabelingD, testNumNodes * sizeof(unsigned int)));
uint *activeNodeLabelingPrefixD;
gpuErrorcheck(cudaMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
dim3 grid = dim3(56, 1, 1);
dim3 block = dim3(1024, 1, 1);
auto endPreCaculate = std::chrono::steady_clock::now();
long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
endPreCaculate - startPreCaculate).count();
cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl;
setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD);
thrust::device_ptr<unsigned int> ptr_labeling(activeNodeLabelingD);
thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
int iter = 0;
uint nodeSum = activeNodesNum;
auto startProcessing = std::chrono::steady_clock::now();
while (activeNodesNum > 0) {
iter++;
thrust::exclusive_scan(ptr_labeling, ptr_labeling + testNumNodes, ptr_labeling_prefixsum);
setActiveNodeArray<<<grid, block>>>(testNumNodes, activeNodeListD, labelD, activeNodeLabelingPrefixD);
setLabelDefault<<<grid, block>>>(activeNodesNum, activeNodeListD, labelD);
bfs_kernel<<<grid, block>>>(activeNodesNum, activeNodeListD, nodePointersD, degreeD, edgeList, valueD, labelD);
cudaDeviceSynchronize();
gpuErrorcheck(cudaPeekAtLastError());
long temp = 0;
for (uint j = 0; j < testNumEdge; j++) {
temp += edgeList[j] % 10;
}
cout << "iter " << iter << " " << temp;
setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD);
activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
nodeSum += activeNodesNum;
cout << "iter: " << iter << " activeNodes: " << activeNodesNum << endl;
}
cudaDeviceSynchronize();
cout << "nodeSum: " << nodeSum << endl;
auto endRead = std::chrono::steady_clock::now();
long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
cout << "iter sum is " << iter << " finish time : " << durationRead << " ms" << endl;
//cout << "range min " << rangeMin << " range max " << rangeMax << " range sum " << rangeSum << endl;
cout << "source node pointer " << nodePointersI[sourceNode] << endl;
return durationRead;
}
// BFS with a static GPU-resident edge partition plus per-iteration streaming of
// "overload" edges (edges of active vertices outside the static partition) from
// host memory on a second stream.
// nodePointersI/edgeList hold the CSR graph on the host; sourceNode is the BFS
// root; adviseK tunes how much GPU memory getMaxPartitionSize reserves for the
// static partition. Returns the measured processing time in milliseconds.
// NOTE(review): the `if (iter == 2) break;` in the main loop caps each run at
// two iterations — looks like tracing/debug code; confirm before reuse.
long
bfsCaculateInAsyncNoUVMRandom(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, uint sourceNode,
                              float adviseK) {
    cout << "=========bfsCaculateInAsyncNoUVM========" << endl;
    ulong edgeIterationMax = 0;
    auto start = std::chrono::steady_clock::now();
    auto startPreCaculate = std::chrono::steady_clock::now();
    //CPU
    long durationRead;
    ulong transferSum = 0;
    unsigned long max_partition_size;
    unsigned long total_gpu_size;
    uint maxStaticNode = 0;
    uint *degree;
    uint *value;
    uint *label;
    bool *isInStatic;
    uint *overloadNodeList;
    uint *staticNodePointer;
    uint *activeNodeList;
    uint *activeOverloadNodePointers;
    vector<PartEdgeListInfo> partEdgeListInfoArr;
    /*
     * overloadEdgeList: host staging buffer for the overload edges gathered
     * in every iteration.
     * */
    uint *overloadEdgeList;
    bool isFromTail = false;
    //GPU
    uint *staticEdgeListD;
    uint *overloadEdgeListD;
    bool *isInStaticD;
    uint *overloadNodeListD;
    uint *staticNodePointerD;
    uint *degreeD;
    // async need two labels
    uint *isActiveD1;
    uint *isStaticActive;
    uint *isOverloadActive;
    uint *valueD;
    uint *activeNodeListD;
    uint *activeNodeLabelingPrefixD;
    uint *overloadLabelingPrefixD;
    uint *activeOverloadNodePointersD;
    uint *activeOverloadDegreeD;
    degree = new uint[testNumNodes];
    value = new uint[testNumNodes];
    label = new uint[testNumNodes];
    isInStatic = new bool[testNumNodes];
    overloadNodeList = new uint[testNumNodes];
    staticNodePointer = new uint[testNumNodes];
    activeNodeList = new uint[testNumNodes];
    activeOverloadNodePointers = new uint[testNumNodes];
    //getMaxPartitionSize(max_partition_size, testNumNodes);
    getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, adviseK, sizeof(uint), testNumEdge, 15);
    calculateDegree(testNumNodes, nodePointersI, testNumEdge, degree);
    //memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint));
    uint edgesInStatic = 0;
    float startRate = (1 - (float) max_partition_size / (float) testNumEdge) / 2;
    uint startIndex = (float) testNumNodes * startRate;
    /*uint tempStaticSum = 0;
    for (uint i = testNumNodes - 1; i >= 0; i--) {
        tempStaticSum += degree[i];
        if (tempStaticSum > max_partition_size) {
            startIndex = i;
            break;
        }
    }*/
    startIndex = 0;
    if (nodePointersI[startIndex] + max_partition_size > testNumEdge) {
        startIndex = (float) testNumNodes * 0.1f;
    }
    // Mark every vertex whose adjacency fits fully inside the static window
    // starting at nodePointersI[startIndex] with length max_partition_size.
    for (uint i = 0; i < testNumNodes; i++) {
        label[i] = 0;
        value[i] = UINT_MAX - 1;
        if (i >= startIndex && nodePointersI[i] < nodePointersI[startIndex] + max_partition_size - degree[i]) {
            isInStatic[i] = true;
            staticNodePointer[i] = nodePointersI[i] - nodePointersI[startIndex];
            if (i > maxStaticNode) {
                maxStaticNode = i;
            }
            edgesInStatic += degree[i];
        } else {
            isInStatic[i] = false;
        }
    }
    label[sourceNode] = 1;
    value[sourceNode] = 1;
    gpuErrorcheck(cudaMalloc(&staticEdgeListD, max_partition_size * sizeof(uint)));
    auto startmove = std::chrono::steady_clock::now();
    gpuErrorcheck(
            cudaMemcpy(staticEdgeListD, edgeList + nodePointersI[startIndex], max_partition_size * sizeof(uint),
                       cudaMemcpyHostToDevice));
    auto endMove = std::chrono::steady_clock::now();
    long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>(
            endMove - startmove).count();
    cout << "move duration " << testDuration << endl;
    gpuErrorcheck(cudaMalloc(&isInStaticD, testNumNodes * sizeof(bool)))
    gpuErrorcheck(cudaMalloc(&overloadNodeListD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&staticNodePointerD, testNumNodes * sizeof(uint)))
    gpuErrorcheck(
            cudaMemcpy(staticNodePointerD, staticNodePointer, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
    cudaMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice);
    label[sourceNode] = 1;
    value[sourceNode] = 1;
    cout << "max_partition_size: " << max_partition_size << " maxStaticNode: " << maxStaticNode << endl;
    //uint partOverloadSize = max_partition_size / 2;
    uint partOverloadSize = total_gpu_size - max_partition_size;
    uint overloadSize = testNumEdge - edgesInStatic;
    cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl;
    overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint));
    if (overloadEdgeList == NULL) {
        cout << "overloadEdgeList is null" << endl;
        return 0;
    }
    gpuErrorcheck(cudaMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint)));
    //gpuErrorcheck(cudaMallocManaged(&edgeListOverloadManage, overloadSize * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&degreeD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&isActiveD1, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&isStaticActive, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&isOverloadActive, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&valueD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
    gpuErrorcheck(cudaMalloc(&overloadLabelingPrefixD, testNumNodes * sizeof(unsigned int)));
    gpuErrorcheck(cudaMalloc(&activeNodeListD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMemcpy(degreeD, degree, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
    gpuErrorcheck(cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
    gpuErrorcheck(cudaMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
    gpuErrorcheck(cudaMemset(isStaticActive, 0, testNumNodes * sizeof(uint)));
    gpuErrorcheck(cudaMemset(isOverloadActive, 0, testNumNodes * sizeof(uint)));
    //cacaulate the active node And make active node array
    dim3 grid = dim3(56, 1, 1);
    dim3 block = dim3(1024, 1, 1);
    //setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD);
    thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1);
    thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive);
    thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive);
    thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD);
    thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD);
    thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(overloadLabelingPrefixD);
    uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
    int iter = 0;
    uint nodeSum = activeNodesNum;
    ulong overloadEdgeSum = 0;
    auto startCpu = std::chrono::steady_clock::now();
    auto endReadCpu = std::chrono::steady_clock::now();
    long durationReadCpu = 0;
    auto startSwap = std::chrono::steady_clock::now();
    auto endSwap = std::chrono::steady_clock::now();
    long durationSwap = 0;
    auto startGpuProcessing = std::chrono::steady_clock::now();
    auto endGpuProcessing = std::chrono::steady_clock::now();
    long durationGpuProcessing = 0;
    auto startOverloadGpuProcessing = std::chrono::steady_clock::now();
    auto endOverloadGpuProcessing = std::chrono::steady_clock::now();
    long durationOverloadGpuProcessing = 0;
    auto startPreGpuProcessing = std::chrono::steady_clock::now();
    auto endPreGpuProcessing = std::chrono::steady_clock::now();
    long durationPreGpuProcessing = 0;
    auto endPreCaculate = std::chrono::steady_clock::now();
    long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>(
            endPreCaculate - startPreCaculate).count();
    cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl;
    cudaStream_t steamStatic, streamDynamic;
    cudaStreamCreate(&steamStatic);
    cudaStreamCreate(&streamDynamic);
    auto startMemoryTraverse = std::chrono::steady_clock::now();
    auto endMemoryTraverse = std::chrono::steady_clock::now();
    long durationMemoryTraverse = 0;
    //uint cursorStartSwap = staticFragmentNum + 1;
    uint swapValidNodeSum = 0;
    uint swapValidEdgeSum = 0;
    uint swapNotValidNodeSum = 0;
    uint swapNotValidEdgeSum = 0;
    uint visitEdgeSum = 0;
    uint swapInEdgeSum = 0;
    uint partOverloadSum = 0;
    long TIME = 0;
    int testTimes = 1;
    for (int testIndex = 0; testIndex < testTimes; testIndex++) {
        // Reset labels/values so every timed run starts from the same state.
        for (uint i = 0; i < testNumNodes; i++) {
            label[i] = 0;
            value[i] = UINT_MAX - 1;
        }
        label[sourceNode] = 1;
        value[sourceNode] = 1;
        cudaMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice);
        gpuErrorcheck(cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
        gpuErrorcheck(cudaMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice));
        gpuErrorcheck(cudaMemset(isStaticActive, 0, testNumNodes * sizeof(uint)));
        gpuErrorcheck(cudaMemset(isOverloadActive, 0, testNumNodes * sizeof(uint)));
        activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
        iter = 0;
        auto startProcessing = std::chrono::steady_clock::now();
        auto startTest = std::chrono::steady_clock::now();
        auto endTest = std::chrono::steady_clock::now();
        long durationTest = 0;
        while (activeNodesNum > 0) {
            startPreGpuProcessing = std::chrono::steady_clock::now();
            iter++;
            cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl;
            // Split the frontier into static-resident and overload vertices.
            setStaticAndOverloadLabel<<<grid, block>>>(testNumNodes, isActiveD1, isStaticActive, isOverloadActive,
                                                       isInStaticD);
            uint staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes);
            if (staticNodeNum > 0) {
                cout << "iter " << iter << " staticNodeNum " << staticNodeNum << endl;
                thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum);
                setStaticActiveNodeArray<<<grid, block>>>(testNumNodes, activeNodeListD, isStaticActive,
                                                          activeNodeLabelingPrefixD);
            }
            uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes);
            uint overloadEdgeNum = 0;
            if (overloadNodeNum > 0) {
                cout << "iter " << iter << " overloadNodeNum " << overloadNodeNum << endl;
                thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes,
                                       ptrOverloadPrefixsum);
                setOverloadNodePointerSwap<<<grid, block>>>(testNumNodes, overloadNodeListD, activeOverloadDegreeD,
                                                            isOverloadActive,
                                                            overloadLabelingPrefixD, degreeD);
                thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum,
                                       activeOverloadNodePointersD);
                overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree,
                                                 ptrOverloadDegree + overloadNodeNum, 0);
                cout << "iter " << iter << " overloadEdgeNum " << overloadEdgeNum << endl;
                overloadEdgeSum += overloadEdgeNum;
                if (overloadEdgeNum > edgeIterationMax) {
                    edgeIterationMax = overloadEdgeNum;
                }
            }
            endPreGpuProcessing = std::chrono::steady_clock::now();
            durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                    endPreGpuProcessing - startPreGpuProcessing).count();
            startGpuProcessing = std::chrono::steady_clock::now();
            if (staticNodeNum > 0) {
                setLabelDefaultOpt<<<grid, block, 0, steamStatic>>>(staticNodeNum, activeNodeListD, isActiveD1);
            }
            if (overloadNodeNum > 0) {
                setLabelDefaultOpt<<<grid, block, 0, steamStatic>>>(overloadNodeNum, overloadNodeListD, isActiveD1);
            }
            // Static partition is processed on steamStatic, overlapping with the
            // host-side gathering of overload edges below.
            bfs_kernelStatic<<<grid, block, 0, steamStatic>>>(staticNodeNum, activeNodeListD,
                                                              staticNodePointerD, degreeD,
                                                              staticEdgeListD, valueD, isActiveD1);
            //cudaDeviceSynchronize();
            if (overloadNodeNum > 0) {
                startCpu = std::chrono::steady_clock::now();
                cudaMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint),
                                cudaMemcpyDeviceToHost,
                                streamDynamic);
                cudaMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint),
                                cudaMemcpyDeviceToHost, streamDynamic);
                int threadNum = 20;
                if (overloadNodeNum < 50) {
                    threadNum = 1;
                }
                // std::vector replaces the non-standard VLA `thread runThreads[threadNum]`.
                std::vector<thread> runThreads(threadNum);
                for (int i = 0; i < threadNum; i++) {
                    runThreads[i] = thread(fillDynamic,
                                           i,
                                           threadNum,
                                           0,
                                           overloadNodeNum,
                                           degree,
                                           activeOverloadNodePointers,
                                           nodePointersI,
                                           overloadNodeList,
                                           overloadEdgeList,
                                           edgeList);
                }
                for (auto &runThread : runThreads) {
                    runThread.join();
                }
                caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr,
                                            overloadNodeNum, partOverloadSize, overloadEdgeNum);
                endReadCpu = std::chrono::steady_clock::now();
                durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count();
                cudaDeviceSynchronize();
                //gpuErrorcheck(cudaPeekAtLastError())
                endGpuProcessing = std::chrono::steady_clock::now();
                durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                        endGpuProcessing - startGpuProcessing).count();
                partOverloadSum += partEdgeListInfoArr.size();
                for (int i = 0; i < partEdgeListInfoArr.size(); i++) {
                    startMemoryTraverse = std::chrono::steady_clock::now();
                    gpuErrorcheck(cudaMemcpy(overloadEdgeListD, overloadEdgeList +
                                                                activeOverloadNodePointers[partEdgeListInfoArr[i].partStartIndex],
                                             partEdgeListInfoArr[i].partEdgeNums * sizeof(uint),
                                             cudaMemcpyHostToDevice))
                    transferSum += partEdgeListInfoArr[i].partEdgeNums;
                    endMemoryTraverse = std::chrono::steady_clock::now();
                    durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>(
                            endMemoryTraverse - startMemoryTraverse).count();
                    startOverloadGpuProcessing = std::chrono::steady_clock::now();
                    activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
                    cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl;
                    bfs_kernelDynamicPart<<<grid, block, 0, streamDynamic>>>(
                            partEdgeListInfoArr[i].partStartIndex,
                            partEdgeListInfoArr[i].partActiveNodeNums,
                            overloadNodeListD, degreeD,
                            valueD, isActiveD1,
                            overloadEdgeListD,
                            activeOverloadNodePointersD);
                    cudaDeviceSynchronize();
                    endOverloadGpuProcessing = std::chrono::steady_clock::now();
                    durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                            endOverloadGpuProcessing - startOverloadGpuProcessing).count();
                }
                //gpuErrorcheck(cudaPeekAtLastError())
            } else {
                cudaDeviceSynchronize();
                endGpuProcessing = std::chrono::steady_clock::now();
                durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                        endGpuProcessing - startGpuProcessing).count();
            }
            cudaDeviceSynchronize();
            startPreGpuProcessing = std::chrono::steady_clock::now();
            /*mixCommonLabel<<<grid, block, 0, streamDynamic>>>(testNumNodes, isActiveD1, isActiveD2);
            cudaDeviceSynchronize();*/
            activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes);
            nodeSum += activeNodesNum;
            endPreGpuProcessing = std::chrono::steady_clock::now();
            durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>(
                    endPreGpuProcessing - startPreGpuProcessing).count();
            cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl;
            if (iter == 2) {
                break;
            }
        }
        //cudaDeviceSynchronize();
        auto endRead = std::chrono::steady_clock::now();
        durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count();
        transferSum += max_partition_size;
        cout << "iterationSum " << iter << endl;
        double edgeIterationAvg = (double) overloadEdgeSum / (double) testNumEdge / iter;
        double edgeIterationMaxAvg = (double) edgeIterationMax / (double) testNumEdge;
        cout << "edgeIterationAvg " << edgeIterationAvg << " edgeIterationMaxAvg " << edgeIterationMaxAvg << endl;
        cout << "transferSum : " << transferSum * 4 << " byte" << endl;
        cout << "finish time : " << durationRead << " ms" << endl;
        cout << "total time : " << durationRead + testDuration << " ms" << endl;
        cout << "cpu time : " << durationReadCpu << " ms" << endl;
        cout << "fact processing time : " << durationGpuProcessing << " ms" << endl;
        cout << "durationOverloadGpuProcessing time : " << durationOverloadGpuProcessing << " ms" << endl;
        cout << "durationMemoryTraverse time : " << durationMemoryTraverse << " ms" << endl;
        cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl;
        cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl;
        cout << "partOverloadSum : " << partOverloadSum << " " << endl;
        cout << "nodeSum: " << nodeSum << endl;
        TIME += durationRead;
    }
    cout << "TIME " << (float) TIME / (float) testTimes << endl;
    // Tear down the streams before freeing the buffers they referenced
    // (they were never destroyed in the original version).
    cudaStreamDestroy(steamStatic);
    cudaStreamDestroy(streamDynamic);
    cudaFree(staticEdgeListD);
    cudaFree(degreeD);
    cudaFree(isActiveD1);
    cudaFree(valueD);
    cudaFree(activeNodeListD);
    cudaFree(activeNodeLabelingPrefixD);
    cudaFree(activeOverloadNodePointersD);
    cudaFree(activeOverloadDegreeD);
    cudaFree(isInStaticD);
    cudaFree(staticNodePointerD);
    cudaFree(overloadNodeListD);
    cudaFree(overloadEdgeListD);
    cudaFree(isStaticActive);
    cudaFree(isOverloadActive);
    cudaFree(overloadLabelingPrefixD);
    delete[] label;
    delete[] degree;
    delete[] value;
    delete[] activeNodeList;
    delete[] activeOverloadNodePointers;
    delete[] isInStatic;
    delete[] overloadNodeList;
    delete[] staticNodePointer;
    // overloadEdgeList came from malloc(); the original's delete[] was UB.
    free(overloadEdgeList);
    partEdgeListInfoArr.clear();
    return durationRead;
}
86d0f373d2efc7190b0ef025568e31cad9931746.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
namespace {

// Logistic sigmoid: 1 / (1 + e^-z).
// NOTE(review): the literals are double, so for scalar_t = float the math is
// promoted to double on the device — confirm this precision cost is intended.
template <typename scalar_t>
__device__ __forceinline__ scalar_t sigmoid(scalar_t z) {
  return 1.0 / (1.0 + exp(-z));
}

// Derivative of the sigmoid expressed through its value: s * (1 - s).
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) {
  const auto s = sigmoid(z);
  return (1.0 - s) * s;
}

// Derivative of tanh: 1 - tanh(z)^2.
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_tanh(scalar_t z) {
  const auto t = tanh(z);
  return 1 - (t * t);
}

// ELU activation: z for z > 0, alpha * (e^z - 1) for z <= 0.
// NOTE(review): fmaxf/fminf are the single-precision intrinsics, so for
// scalar_t = double the clamped terms are truncated to float — verify.
template <typename scalar_t>
__device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) {
  return fmaxf(0.0, z) + fminf(0.0, alpha * (exp(z) - 1.0));
}

// Derivative of ELU: 1 for z >= 0, alpha * e^z for z < 0 (written as the sum
// of a ReLU-derivative term and a masked exponential term).
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) {
  const auto e = exp(z);
  const auto d_relu = z < 0.0 ? 0.0 : 1.0;
  return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0);
}

// LLTM forward cell kernel. One thread per (batch, state) element:
// blockIdx.y indexes the batch row, blockIdx.x * blockDim.x + threadIdx.x
// tiles the state dimension (guarded against the tail below).
// gates is (batch, 3, state): [0]=input gate, [1]=output gate, [2]=candidate.
template <typename scalar_t>
__global__ void lltm_cuda_forward_kernel(
    const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gates,
    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> old_cell,
    torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_h,
    torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
    torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
    torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
    torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell) {
  //batch index
  const int n = blockIdx.y;
  // column index
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  if (c < gates.size(2)){
    input_gate[n][c] = sigmoid(gates[n][0][c]);
    output_gate[n][c] = sigmoid(gates[n][1][c]);
    candidate_cell[n][c] = elu(gates[n][2][c]);
    new_cell[n][c] =
        old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c];
    new_h[n][c] = tanh(new_cell[n][c]) * output_gate[n][c];
  }
}

// LLTM backward cell kernel. Same thread layout as the forward kernel.
// Consumes grad_h/grad_cell plus the activations saved by the forward pass
// and writes d_old_cell and the per-gate pre-activation gradients d_gates.
template <typename scalar_t>
__global__ void lltm_cuda_backward_kernel(
    torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> d_old_cell,
    torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> d_gates,
    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_h,
    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_cell,
    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell,
    const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gate_weights) {
  //batch index
  const int n = blockIdx.y;
  // column index
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  if (c < d_gates.size(2)){
    const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c];
    const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c];
    const auto d_new_cell =
        d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c];
    d_old_cell[n][c] = d_new_cell;
    const auto d_candidate_cell = input_gate[n][c] * d_new_cell;
    const auto d_input_gate = candidate_cell[n][c] * d_new_cell;
    d_gates[n][0][c] =
        d_input_gate * d_sigmoid(gate_weights[n][0][c]);
    d_gates[n][1][c] =
        d_output_gate * d_sigmoid(gate_weights[n][1][c]);
    d_gates[n][2][c] =
        d_candidate_cell * d_elu(gate_weights[n][2][c]);
  }
}
} // namespace
// Runs the fused LLTM forward pass on the GPU.
// input: (batch, in_features); weights: (3*state, in_features + state);
// bias: (3*state); old_h/old_cell: (batch, state).
// Returns {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates}
// — the extra tensors are saved for the backward pass.
std::vector<torch::Tensor> lltm_cuda_forward(
    torch::Tensor input,
    torch::Tensor weights,
    torch::Tensor bias,
    torch::Tensor old_h,
    torch::Tensor old_cell) {
  // Concatenate previous hidden state with the new input along features.
  auto X = torch::cat({old_h, input}, /*dim=*/1);
  // One fused GEMM computes all three gate pre-activations at once.
  auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1));
  const auto batch_size = old_cell.size(0);
  const auto state_size = old_cell.size(1);
  auto gates = gate_weights.reshape({batch_size, 3, state_size});
  auto new_h = torch::zeros_like(old_cell);
  auto new_cell = torch::zeros_like(old_cell);
  auto input_gate = torch::zeros_like(old_cell);
  auto output_gate = torch::zeros_like(old_cell);
  auto candidate_cell = torch::zeros_like(old_cell);
  const int threads = 1024;
  // One block row per batch element; x-dim tiles the state dimension.
  const dim3 blocks((state_size + threads - 1) / threads, batch_size);
  // scalar_type() replaces the deprecated Tensor::type() dispatch key.
  AT_DISPATCH_FLOATING_TYPES(gates.scalar_type(), "lltm_forward_cuda", ([&] {
    hipLaunchKernelGGL(( lltm_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
        gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
        old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        new_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>());
  }));
  return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates};
}
std::vector<torch::Tensor> lltm_cuda_backward(
torch::Tensor grad_h,
torch::Tensor grad_cell,
torch::Tensor new_cell,
torch::Tensor input_gate,
torch::Tensor output_gate,
torch::Tensor candidate_cell,
torch::Tensor X,
torch::Tensor gates,
torch::Tensor weights) {
auto d_old_cell = torch::zeros_like(new_cell);
auto d_gates = torch::zeros_like(gates);
const auto batch_size = new_cell.size(0);
const auto state_size = new_cell.size(1);
const int threads = 1024;
const dim3 blocks((state_size + threads - 1) / threads, batch_size);
AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] {
hipLaunchKernelGGL(( lltm_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
d_old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
d_gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
grad_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
grad_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>());
}));
auto d_gate_weights = d_gates.flatten(1, 2);
auto d_weights = d_gate_weights.t().mm(X);
auto d_bias = d_gate_weights.sum(/*dim=*/0, /*keepdim=*/true);
auto d_X = d_gate_weights.mm(weights);
auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size);
auto d_input = d_X.slice(/*dim=*/1, state_size);
return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates};
} | 86d0f373d2efc7190b0ef025568e31cad9931746.cu | #include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
namespace {
template <typename scalar_t>
__device__ __forceinline__ scalar_t sigmoid(scalar_t z) {
return 1.0 / (1.0 + exp(-z));
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) {
const auto s = sigmoid(z);
return (1.0 - s) * s;
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_tanh(scalar_t z) {
const auto t = tanh(z);
return 1 - (t * t);
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) {
return fmaxf(0.0, z) + fminf(0.0, alpha * (exp(z) - 1.0));
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) {
const auto e = exp(z);
const auto d_relu = z < 0.0 ? 0.0 : 1.0;
return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0);
}
template <typename scalar_t>
__global__ void lltm_cuda_forward_kernel(
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gates,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> old_cell,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_h,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell) {
//batch index
const int n = blockIdx.y;
// column index
const int c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < gates.size(2)){
input_gate[n][c] = sigmoid(gates[n][0][c]);
output_gate[n][c] = sigmoid(gates[n][1][c]);
candidate_cell[n][c] = elu(gates[n][2][c]);
new_cell[n][c] =
old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c];
new_h[n][c] = tanh(new_cell[n][c]) * output_gate[n][c];
}
}
template <typename scalar_t>
__global__ void lltm_cuda_backward_kernel(
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> d_old_cell,
torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> d_gates,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_h,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_cell,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell,
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gate_weights) {
//batch index
const int n = blockIdx.y;
// column index
const int c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < d_gates.size(2)){
const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c];
const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c];
const auto d_new_cell =
d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c];
d_old_cell[n][c] = d_new_cell;
const auto d_candidate_cell = input_gate[n][c] * d_new_cell;
const auto d_input_gate = candidate_cell[n][c] * d_new_cell;
d_gates[n][0][c] =
d_input_gate * d_sigmoid(gate_weights[n][0][c]);
d_gates[n][1][c] =
d_output_gate * d_sigmoid(gate_weights[n][1][c]);
d_gates[n][2][c] =
d_candidate_cell * d_elu(gate_weights[n][2][c]);
}
}
} // namespace
std::vector<torch::Tensor> lltm_cuda_forward(
torch::Tensor input,
torch::Tensor weights,
torch::Tensor bias,
torch::Tensor old_h,
torch::Tensor old_cell) {
auto X = torch::cat({old_h, input}, /*dim=*/1);
auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1));
const auto batch_size = old_cell.size(0);
const auto state_size = old_cell.size(1);
auto gates = gate_weights.reshape({batch_size, 3, state_size});
auto new_h = torch::zeros_like(old_cell);
auto new_cell = torch::zeros_like(old_cell);
auto input_gate = torch::zeros_like(old_cell);
auto output_gate = torch::zeros_like(old_cell);
auto candidate_cell = torch::zeros_like(old_cell);
const int threads = 1024;
const dim3 blocks((state_size + threads - 1) / threads, batch_size);
AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] {
lltm_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
new_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>());
}));
return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates};
}
std::vector<torch::Tensor> lltm_cuda_backward(
torch::Tensor grad_h,
torch::Tensor grad_cell,
torch::Tensor new_cell,
torch::Tensor input_gate,
torch::Tensor output_gate,
torch::Tensor candidate_cell,
torch::Tensor X,
torch::Tensor gates,
torch::Tensor weights) {
auto d_old_cell = torch::zeros_like(new_cell);
auto d_gates = torch::zeros_like(gates);
const auto batch_size = new_cell.size(0);
const auto state_size = new_cell.size(1);
const int threads = 1024;
const dim3 blocks((state_size + threads - 1) / threads, batch_size);
AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] {
lltm_cuda_backward_kernel<scalar_t><<<blocks, threads>>>(
d_old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
d_gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
grad_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
grad_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>());
}));
auto d_gate_weights = d_gates.flatten(1, 2);
auto d_weights = d_gate_weights.t().mm(X);
auto d_bias = d_gate_weights.sum(/*dim=*/0, /*keepdim=*/true);
auto d_X = d_gate_weights.mm(weights);
auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size);
auto d_input = d_X.slice(/*dim=*/1, state_size);
return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates};
} |
615080df7cb69bbb20b18ad36c3c0a61bd7915b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
namespace scc4k{
//if BLOCK_KERNEL -> SAFE = 0
//if SYNC -> DYNAMIC PARALLELLISM = 0
#define HASHTABLE_BLOCK_POS 0
#define END_OF_HASHTABLE (4096 * 8) // 8: long long int size
#define F2Size_POS END_OF_HASHTABLE
#define TEMP_POS (F2Size_POS + 4)
#define END_TEMP_POS (TEMP_POS + 32 * 4)
#define FRONTIER_SIZE (((49152 - END_TEMP_POS) / 2) - 2) //-2 align
#define F1_BLOCK_POS (END_TEMP_POS)
#define F2_BLOCK_POS (F1_BLOCK_POS + FRONTIER_SIZE)
template<int BlockDim, int WARP_SZ, int DUP_REM>
__device__ __forceinline__ void BFS_BlockKernelB ( int* __restrict__ devNode,
int* __restrict__ devEdge,
dist_t* __restrict__ devDistance,
int* __restrict__ F1,
int* __restrict__ F2,
int* __restrict__ devF2,
int* __restrict__ F2SizePtr,
int FrontierSize, int level,
volatile long long int* HashTable) {
int Queue[REG_QUEUE];
int founds = 0;
for (int t = Tid >> _Log2<WARP_SZ>::VALUE; t < FrontierSize; t += BlockDim / WARP_SZ) {
const int index = F1[t];
const int start = devNode[index];
int end = devNode[index + 1];
EdgeVisit<BlockDim, WARP_SZ, DUP_REM * 2>(devEdge, devDistance, NULL, NULL, index, start, end, Queue, founds, level, HashTable);
}
/*const int indexT1 = Tid >> _Log2<WARP_SZ>::VALUE;
#pragma unroll
for (int t = 0; t < DIV(BLOCK_FRONTIER_LIMIT, BlockDim); t++) {
const int indexT = indexT1 + t * (BlockDim / WARP_SZ);
if (indexT < FrontierSize) {
const int index = ThreadLoadV<int, LOAD_MODE>(F1, indexT);
const int start = ThreadLoadV<int, LOAD_MODE>(devNode, index);
int end = ThreadLoadV<int, LOAD_MODE>(devNode, index + 1);
EdgeVisit<BlockDim, WARP_SZ, DUP_REM>(devEdge, devDistance, devF2, F2SizePtr, index, start, end, Queue, founds, level, HashTable);
}
}*/
int WarpPos, n, total;
singleblockQueueAdd(founds, F2SizePtr, WarpPos, n, total, level, (int*) &SMem[TEMP_POS]);
if (WarpPos + total >= BLOCK_FRONTIER_LIMIT) {
if (WarpPos < BLOCK_FRONTIER_LIMIT)
SMem[0] = WarpPos;
writeOPT<SIMPLE, STORE_DEFAULT>(devF2, Queue, founds, WarpPos, n, total);
} else {
writeOPT<SIMPLE, STORE_DEFAULT>(F2, Queue, founds, WarpPos, n, total);
}
}
#define fun(a) BFS_BlockKernelB<1024, (a), DUP_REM>\
(devNodes, devEdges, devDistance, SMemF1, SMemF2, devF2, F2SizePtr, FrontierSize, level, HashTable);
template<int DUP_REM>
__global__ void BFS_BlockKernel ( int* __restrict__ devNodes,
int* __restrict__ devEdges,
dist_t* __restrict__ devDistance,
int* __restrict__ devF1,
int* __restrict__ devF2,
const int devF1Size) {
volatile long long int* HashTable;
if (DUP_REM)
HashTable = (volatile long long int*) SMem;
int level = devLevel;
int FrontierSize = devF1Size;
// if (Tid == 0)
// printf("END_OF_HASH %d END_TEMP_POS %d FRONTIER_SIZE %d F1 %d F2 %d\n", END_OF_HASHTABLE, END_TEMP_POS, FRONTIER_SIZE, F1_BLOCK_POS, F2_BLOCK_POS);
int* SMemF1 = devF1;
int* SMemF2 = (int*) &SMem[F2_BLOCK_POS];
int* F2SizePtr = (int*) &SMem[F2Size_POS];
int size = logValueDevice<1024, MIN_VW, MAX_VW>(FrontierSize);
def_SWITCH(size);
SMemF1 = SMemF2;
SMemF2 = (int*) &SMem[F1_BLOCK_POS];
level++;
__syncthreads();
FrontierSize = F2SizePtr[0];
while (FrontierSize && FrontierSize < BLOCK_FRONTIER_LIMIT) {
int size = logValueDevice<1024, MIN_VW, MAX_VW>(FrontierSize);
def_SWITCH(size);
swapDev(SMemF1, SMemF2);
level++;
__syncthreads();
FrontierSize = F2SizePtr[0];
}
// ----------------------- ENDING PHASE-------------------------------
__syncthreads();
if (Tid == 0)
devF2Size[level & 3] = FrontierSize;
if ( FrontierSize == 0 )
return;
if (Tid == 32)
devLevel = level;
const int totan_on_SMem = SMem[0];
#pragma unroll
for (int i = 0; i < DIV(BLOCK_FRONTIER_LIMIT, 1024); i++) {
const int index = Tid + i * 1024;
if (index < totan_on_SMem)
devF2[index] = SMem[index];
}
//if (Tid == 0)
// printf("SIZE: %d\n", FrontierSize);
}
#undef fun
}
| 615080df7cb69bbb20b18ad36c3c0a61bd7915b7.cu |
namespace scc4k{
//if BLOCK_KERNEL -> SAFE = 0
//if SYNC -> DYNAMIC PARALLELLISM = 0
#define HASHTABLE_BLOCK_POS 0
#define END_OF_HASHTABLE (4096 * 8) // 8: long long int size
#define F2Size_POS END_OF_HASHTABLE
#define TEMP_POS (F2Size_POS + 4)
#define END_TEMP_POS (TEMP_POS + 32 * 4)
#define FRONTIER_SIZE (((49152 - END_TEMP_POS) / 2) - 2) //-2 align
#define F1_BLOCK_POS (END_TEMP_POS)
#define F2_BLOCK_POS (F1_BLOCK_POS + FRONTIER_SIZE)
template<int BlockDim, int WARP_SZ, int DUP_REM>
__device__ __forceinline__ void BFS_BlockKernelB ( int* __restrict__ devNode,
int* __restrict__ devEdge,
dist_t* __restrict__ devDistance,
int* __restrict__ F1,
int* __restrict__ F2,
int* __restrict__ devF2,
int* __restrict__ F2SizePtr,
int FrontierSize, int level,
volatile long long int* HashTable) {
int Queue[REG_QUEUE];
int founds = 0;
for (int t = Tid >> _Log2<WARP_SZ>::VALUE; t < FrontierSize; t += BlockDim / WARP_SZ) {
const int index = F1[t];
const int start = devNode[index];
int end = devNode[index + 1];
EdgeVisit<BlockDim, WARP_SZ, DUP_REM * 2>(devEdge, devDistance, NULL, NULL, index, start, end, Queue, founds, level, HashTable);
}
/*const int indexT1 = Tid >> _Log2<WARP_SZ>::VALUE;
#pragma unroll
for (int t = 0; t < DIV(BLOCK_FRONTIER_LIMIT, BlockDim); t++) {
const int indexT = indexT1 + t * (BlockDim / WARP_SZ);
if (indexT < FrontierSize) {
const int index = ThreadLoadV<int, LOAD_MODE>(F1, indexT);
const int start = ThreadLoadV<int, LOAD_MODE>(devNode, index);
int end = ThreadLoadV<int, LOAD_MODE>(devNode, index + 1);
EdgeVisit<BlockDim, WARP_SZ, DUP_REM>(devEdge, devDistance, devF2, F2SizePtr, index, start, end, Queue, founds, level, HashTable);
}
}*/
int WarpPos, n, total;
singleblockQueueAdd(founds, F2SizePtr, WarpPos, n, total, level, (int*) &SMem[TEMP_POS]);
if (WarpPos + total >= BLOCK_FRONTIER_LIMIT) {
if (WarpPos < BLOCK_FRONTIER_LIMIT)
SMem[0] = WarpPos;
writeOPT<SIMPLE, STORE_DEFAULT>(devF2, Queue, founds, WarpPos, n, total);
} else {
writeOPT<SIMPLE, STORE_DEFAULT>(F2, Queue, founds, WarpPos, n, total);
}
}
#define fun(a) BFS_BlockKernelB<1024, (a), DUP_REM>\
(devNodes, devEdges, devDistance, SMemF1, SMemF2, devF2, F2SizePtr, FrontierSize, level, HashTable);
template<int DUP_REM>
__global__ void BFS_BlockKernel ( int* __restrict__ devNodes,
int* __restrict__ devEdges,
dist_t* __restrict__ devDistance,
int* __restrict__ devF1,
int* __restrict__ devF2,
const int devF1Size) {
volatile long long int* HashTable;
if (DUP_REM)
HashTable = (volatile long long int*) SMem;
int level = devLevel;
int FrontierSize = devF1Size;
// if (Tid == 0)
// printf("END_OF_HASH %d END_TEMP_POS %d FRONTIER_SIZE %d F1 %d F2 %d\n", END_OF_HASHTABLE, END_TEMP_POS, FRONTIER_SIZE, F1_BLOCK_POS, F2_BLOCK_POS);
int* SMemF1 = devF1;
int* SMemF2 = (int*) &SMem[F2_BLOCK_POS];
int* F2SizePtr = (int*) &SMem[F2Size_POS];
int size = logValueDevice<1024, MIN_VW, MAX_VW>(FrontierSize);
def_SWITCH(size);
SMemF1 = SMemF2;
SMemF2 = (int*) &SMem[F1_BLOCK_POS];
level++;
__syncthreads();
FrontierSize = F2SizePtr[0];
while (FrontierSize && FrontierSize < BLOCK_FRONTIER_LIMIT) {
int size = logValueDevice<1024, MIN_VW, MAX_VW>(FrontierSize);
def_SWITCH(size);
swapDev(SMemF1, SMemF2);
level++;
__syncthreads();
FrontierSize = F2SizePtr[0];
}
// ----------------------- ENDING PHASE-------------------------------
__syncthreads();
if (Tid == 0)
devF2Size[level & 3] = FrontierSize;
if ( FrontierSize == 0 )
return;
if (Tid == 32)
devLevel = level;
const int totan_on_SMem = SMem[0];
#pragma unroll
for (int i = 0; i < DIV(BLOCK_FRONTIER_LIMIT, 1024); i++) {
const int index = Tid + i * 1024;
if (index < totan_on_SMem)
devF2[index] = SMem[index];
}
//if (Tid == 0)
// printf("SIZE: %d\n", FrontierSize);
}
#undef fun
}
|
3cc39782a3480d942e639504f3c547ff393fe625.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision$
// $Date$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* radixsort_app.cu
*
* @brief CUDPP application-level radix sorting routines
*/
/** @addtogroup cudpp_app
* @{
*/
/** @name RadixSort Functions
* @{
*/
#include "cudpp.h"
#include "cudpp_util.h"
#include "cudpp_radixsort.h"
#include "cudpp_scan.h"
#include "kernel/radixsort_kernel.cu"
#include "cudpp_maximal_launch.h"
#include <cutil.h>
#include <cstdlib>
#include <cstdio>
#include <assert.h>
typedef unsigned int uint;
/** @brief Perform one step of the radix sort. Sorts by nbits key bits per step,
* starting at startbit.
*
* Uses cudppScanDispatch() for the prefix sum of radix counters.
*
* @param[in,out] keys Keys to be sorted.
* @param[in,out] values Associated values to be sorted (through keys).
* @param[in] plan Configuration information for RadixSort.
* @param[in] numElements Number of elements in the sort.
**/
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStep(uint *keys,
uint *values,
const CUDPPRadixSortPlan *plan,
uint numElements)
{
const uint eltsPerBlock = SORT_CTA_SIZE * 4;
const uint eltsPerBlock2 = SORT_CTA_SIZE * 2;
bool fullBlocks = ((numElements % eltsPerBlock) == 0);
uint numBlocks = (fullBlocks) ?
(numElements / eltsPerBlock) :
(numElements / eltsPerBlock + 1);
uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
(numElements / eltsPerBlock2) :
(numElements / eltsPerBlock2 + 1);
bool loop = numBlocks > 65535;
uint blocks = loop ? 65535 : numBlocks;
uint blocksFind = loop ? 65535 : numBlocks2;
uint blocksReorder = loop ? 65535 : numBlocks2;
uint threshold = fullBlocks ? plan->m_persistentCTAThresholdFullBlocks[0] : plan->m_persistentCTAThreshold[0];
if (numElements >= threshold)
{
loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536);
blocks = numBlocks;
blocksFind = numBlocks2;
blocksReorder = numBlocks2;
// Run an empty kernel -- this seems to reset some of the CTA scheduling hardware
// on GT200, resulting in better scheduling and lower run times
if (startbit > 0)
{
hipLaunchKernelGGL(( emptyKernel), dim3(plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_EK]), dim3(SORT_CTA_SIZE), 0, 0, );
}
}
if (fullBlocks)
{
if (loop)
{
if (numElements >= threshold)
{
blocks = flip? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_T_T_T] :
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_T_F_T];
}
hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, true, flip, true>)
, dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0,
(uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
else
{
hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, true, flip, false>)
, dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0,
(uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
}
else
{
if (loop)
{
if (numElements >= threshold)
{
blocks = flip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_F_T_T] :
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_F_F_T];
}
hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, false, flip, true>)
, dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0,
(uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
else
{
hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, false, flip, false>)
, dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0,
(uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
}
}
CUT_CHECK_ERROR("radixSortBlocks");
if (fullBlocks)
{
if (loop)
{
if (numElements >= threshold)
{
blocksFind = plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_T_T];
}
hipLaunchKernelGGL(( findRadixOffsets<startbit, true, true>)
, dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0,
(uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( findRadixOffsets<startbit, true, false>)
, dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0,
(uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (numElements >= threshold)
{
blocksFind = plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_F_T];
}
hipLaunchKernelGGL(( findRadixOffsets<startbit, false, true>)
, dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0,
(uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( findRadixOffsets<startbit, false, false>)
, dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0,
(uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
}
}
CUT_CHECK_ERROR("findRadixOffsets");
cudppScanDispatch(plan->m_countersSum, plan->m_counters, 16*numBlocks2, 1, plan->m_scanPlan);
if (fullBlocks)
{
if (plan->m_bManualCoalesce)
{
if (loop)
{
if (numElements >= threshold)
{
blocksReorder = unflip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_T_T_T] :
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_T_F_T];
}
hipLaunchKernelGGL(( reorderData<startbit, true, true, unflip, true>)
, dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( reorderData<startbit, true, true, unflip, false>)
, dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (numElements >= threshold)
{
blocksReorder = unflip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_F_T_T] :
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_F_F_T];
}
hipLaunchKernelGGL(( reorderData<startbit, true, false, unflip, true>)
, dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( reorderData<startbit, true, false, unflip, false>)
, dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
}
}
}
else
{
if (plan->m_bManualCoalesce)
{
if (loop)
{
if (numElements >= threshold)
{
blocksReorder = unflip ?
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_T_T_T] :
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_T_F_T];
}
hipLaunchKernelGGL(( reorderData<startbit, false, true, unflip, true>)
, dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( reorderData<startbit, false, true, unflip, false>)
, dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
}
}
else
{
if (loop)
{
if (numElements >= threshold)
{
blocksReorder = unflip ?
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_F_T_T] :
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_F_F_T];
}
hipLaunchKernelGGL(( reorderData<startbit, false, false, unflip, true>)
, dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
}
else
{
hipLaunchKernelGGL(( reorderData<startbit, false, false, unflip, false>)
, dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
}
}
}
CUT_CHECK_ERROR("radixSortStep");
}
/**
* @brief Single-block optimization for sorts of fewer than 4 * CTA_SIZE elements
*
* @param[in,out] keys Keys to be sorted.
* @param[in,out] values Associated values to be sorted (through keys).
* @param numElements Number of elements in the sort.
**/
template <bool flip>
void radixSortSingleBlock(uint *keys,
uint *values,
uint numElements)
{
bool fullBlocks = (numElements % (SORT_CTA_SIZE * 4) == 0);
if (fullBlocks)
{
hipLaunchKernelGGL(( radixSortBlocks<32, 0, true, flip, false>)
, dim3(1), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0,
(uint4*)keys, (uint4*)values,
(uint4*)keys, (uint4*)values,
numElements, 0);
}
else
{
hipLaunchKernelGGL(( radixSortBlocks<32, 0, false, flip, false>)
, dim3(1), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0,
(uint4*)keys, (uint4*)values,
(uint4*)keys, (uint4*)values,
numElements, 0);
}
if (flip)hipLaunchKernelGGL(( unflipFloats), dim3(1), dim3(SORT_CTA_SIZE), 0, 0, keys, numElements);
CUT_CHECK_ERROR("radixSortSingleBlock");
}
/**
* @brief Main radix sort function
*
* Main radix sort function. Sorts in place in the keys and values arrays,
* but uses the other device arrays as temporary storage. All pointer
* parameters are device pointers. Uses cudppScan() for the prefix sum of
* radix counters.
*
* @param[in,out] keys Keys to be sorted.
* @param[in,out] values Associated values to be sorted (through keys).
* @param[in] plan Configuration information for RadixSort.
* @param[in] numElements Number of elements in the sort.
* @param[in] flipBits Is set true if key datatype is a float
* (neg. numbers) for special float sorting operations.
* @param[in] keyBits Number of interesting bits in the key
**/
void radixSort(uint *keys,
uint* values,
const CUDPPRadixSortPlan *plan,
size_t numElements,
bool flipBits,
int keyBits)
{
if(numElements <= WARP_SIZE)
{
if (flipBits)
hipLaunchKernelGGL(( radixSortSingleWarp<true>), dim3(1), dim3(numElements), 0, 0,
keys, values, numElements);
else
hipLaunchKernelGGL(( radixSortSingleWarp<false>), dim3(1), dim3(numElements), 0, 0,
keys, values, numElements);
CUT_CHECK_ERROR("radixSortSingleWarp");
return;
}
#ifdef __DEVICE_EMULATION__
printf("bits: %d\n", keyBits);
#endif
if(numElements <= SORT_CTA_SIZE * 4)
{
if (flipBits)
radixSortSingleBlock<true>(keys, values, numElements);
else
radixSortSingleBlock<false>(keys, values, numElements);
return;
}
// flip float bits on the first pass, unflip on the last pass
if (flipBits)
{
radixSortStep<4, 0, true, false>
(keys, values, plan, numElements);
}
else
{
radixSortStep<4, 0, false, false>
(keys, values, plan, numElements);
}
if (keyBits > 4)
{
radixSortStep<4, 4, false, false>
(keys, values, plan, numElements);
}
if (keyBits > 8)
{
radixSortStep<4, 8, false, false>
(keys, values, plan, numElements);
}
if (keyBits > 12)
{
radixSortStep<4, 12, false, false>
(keys, values, plan, numElements);
}
if (keyBits > 16)
{
radixSortStep<4, 16, false, false>
(keys, values, plan, numElements);
}
if (keyBits > 20)
{
radixSortStep<4, 20, false, false>
(keys, values, plan, numElements);
}
if (keyBits > 24)
{
radixSortStep<4, 24, false, false>
(keys, values, plan, numElements);
}
if (keyBits > 28)
{
if (flipBits) // last pass
{
radixSortStep<4, 28, false, true>
(keys, values, plan, numElements);
}
else
{
radixSortStep<4, 28, false, false>
(keys, values, plan, numElements);
}
}
}
/**
* @brief Wrapper to call main radix sort function. For float configuration.
*
* Calls the main radix sort function. For float configuration.
*
* @param[in,out] keys Keys to be sorted.
* @param[in,out] values Associated values to be sorted (through keys).
* @param[in] plan Configuration information for RadixSort.
* @param[in] numElements Number of elements in the sort.
* @param[in] negativeKeys Is set true if key datatype has neg. numbers.
* @param[in] keyBits Number of interesting bits in the key
**/
extern "C"
void radixSortFloatKeys(float* keys,
uint* values,
const CUDPPRadixSortPlan *plan,
size_t numElements,
bool negativeKeys,
int keyBits)
{
radixSort((uint*)keys, (uint*)values, plan,
numElements, negativeKeys, keyBits);
}
/** @brief Perform one step of the radix sort. Sorts by nbits key bits per step,
* starting at startbit.
*
* @param[in,out] keys Keys to be sorted.
* @param[in] plan Configuration information for RadixSort.
* @param[in] numElements Number of elements in the sort.
**/
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStepKeysOnly(uint *keys,
                           const CUDPPRadixSortPlan *plan,
                           uint numElements)
{
    // Elements handled per CTA by the block-sort kernel and by the
    // offset/reorder kernels, respectively.
    const uint eltsPerBlock = SORT_CTA_SIZE * 4;
    const uint eltsPerBlock2 = SORT_CTA_SIZE * 2;
    bool fullBlocks = ((numElements % eltsPerBlock) == 0);
    uint numBlocks = (fullBlocks) ?
        (numElements / eltsPerBlock) :
        (numElements / eltsPerBlock + 1);
    uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
        (numElements / eltsPerBlock2) :
        (numElements / eltsPerBlock2 + 1);
    // Grids are capped at 65535 blocks in one dimension; larger sorts use the
    // "loop" kernel variants, where each CTA iterates over several blocks of
    // work.
    bool loop = numBlocks > 65535;
    uint blocks = loop ? 65535 : numBlocks;
    uint blocksFind = loop ? 65535 : numBlocks2;
    uint blocksReorder = loop ? 65535 : numBlocks2;
    // Index [1] of the threshold arrays selects the keys-only configuration.
    uint threshold = fullBlocks ? plan->m_persistentCTAThresholdFullBlocks[1] : plan->m_persistentCTAThreshold[1];
    if (numElements >= threshold)
    {
        loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536);
        blocks = numBlocks;
        blocksFind = numBlocks2;
        blocksReorder = numBlocks2;
    }

    // Phase 1: locally sort each block of keys by the current nbits-wide
    // digit into plan->m_tempKeys.
    if (fullBlocks)
    {
        if (loop)
        {
            if (numElements >= threshold)
            {
                // Persistent-CTA mode: launch exactly enough CTAs to fill the
                // device and let each one loop over the work.
                blocks = flip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_T_T_T] :
                    plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_T_F_T];
            }
            hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, true, flip, true>)
                , dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0,
                (uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks);
        }
        else
            hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, true, flip, false>)
                , dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0,
                (uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks);
    }
    else
    {
        if (loop)
        {
            if (numElements >= threshold)
            {
                blocks = flip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_F_T_T] :
                    plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_F_F_T];
            }
            hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, false, flip, true>)
                , dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0,
                (uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks);
        }
        else
            hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, false, flip, false>)
                , dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0,
                (uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks);
    }
    // Check launch errors per phase so a failure is attributed to the right
    // kernel (consistent with radixSortStep()).
    CUT_CHECK_ERROR("radixSortBlocksKeysOnly");

    // Phase 2: compute per-block digit histograms (m_counters) and the offset
    // of each digit run within its block (m_blockOffsets).
    if (fullBlocks)
    {
        if (loop)
        {
            if (numElements >= threshold)
            {
                blocksFind = plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_T_T];
            }
            hipLaunchKernelGGL(( findRadixOffsets<startbit, true, true>)
                , dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0,
                (uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
        }
        else
            hipLaunchKernelGGL(( findRadixOffsets<startbit, true, false>)
                , dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0,
                (uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
    }
    else
    {
        if (loop)
        {
            if (numElements >= threshold)
            {
                blocksFind = plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_F_T];
            }
            hipLaunchKernelGGL(( findRadixOffsets<startbit, false, true>)
                , dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0,
                (uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
        }
        else
            hipLaunchKernelGGL(( findRadixOffsets<startbit, false, false>)
                , dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0,
                (uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
    }
    CUT_CHECK_ERROR("findRadixOffsets");

    // Prefix-sum the 16 digit counters across all blocks to obtain the global
    // scatter offsets for this pass.
    cudppScanDispatch(plan->m_countersSum, plan->m_counters, 16*numBlocks2, 1, plan->m_scanPlan);

    // Phase 3: scatter keys from m_tempKeys back into 'keys' at their
    // globally sorted positions for this digit. Kernel variant is chosen by
    // fullBlocks / manual-coalescing / loop mode.
    if (fullBlocks)
    {
        if (plan->m_bManualCoalesce)
        {
            if (loop)
            {
                if (numElements >= threshold)
                {
                    blocksReorder = unflip ?
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_T_T_T] :
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_T_F_T];
                }
                hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, true, unflip, true>)
                    , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
                    keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
                    numElements, numBlocks2);
            }
            else
                hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, true, unflip, false>)
                    , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
                    keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
                    numElements, numBlocks2);
        }
        else
        {
            if (loop)
            {
                if (numElements >= threshold)
                {
                    blocksReorder = unflip ?
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_F_T_T] :
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_F_F_T];
                }
                hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, false, unflip, true>)
                    , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
                    keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
                    numElements, numBlocks2);
            }
            else
                hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, false, unflip, false>)
                    , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
                    keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
                    numElements, numBlocks2);
        }
    }
    else
    {
        if (plan->m_bManualCoalesce)
        {
            if (loop)
            {
                if (numElements >= threshold)
                {
                    blocksReorder = unflip ?
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_T_T_T] :
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_T_F_T];
                }
                hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, true, unflip, true>)
                    , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
                    keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
                    numElements, numBlocks2);
            }
            else
                hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, true, unflip, false>)
                    , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
                    keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
                    numElements, numBlocks2);
        }
        else
        {
            if (loop)
            {
                if (numElements >= threshold)
                {
                    blocksReorder = unflip ?
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_F_T_T] :
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_F_F_T];
                }
                hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, false, unflip, true>)
                    , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
                    keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
                    numElements, numBlocks2);
            }
            else
                hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, false, unflip, false>)
                    , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0,
                    keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
                    numElements, numBlocks2);
        }
    }
    CUT_CHECK_ERROR("radixSortStepKeysOnly");
}
/**
* @brief Optimization for sorts of fewer than 4 * CTA_SIZE elements (keys only).
*
* @param[in,out] keys Keys to be sorted.
* @param numElements Number of elements in the sort.
**/
template <bool flip>
void radixSortSingleBlockKeysOnly(uint *keys,
                                  uint numElements)
{
    // A single CTA sorts the whole (small) array in place over all 32 key
    // bits; only the full-blocks template flag differs between branches.
    const uint sharedBytes = 4 * SORT_CTA_SIZE * sizeof(uint);
    const bool partialTail = (numElements % (SORT_CTA_SIZE * 4)) != 0;
    if (partialTail)
    {
        hipLaunchKernelGGL(( radixSortBlocksKeysOnly<32, 0, false, flip, false>)
            , dim3(1), dim3(SORT_CTA_SIZE), sharedBytes, 0,
            (uint4*)keys, (uint4*)keys, numElements, 1 );
    }
    else
    {
        hipLaunchKernelGGL(( radixSortBlocksKeysOnly<32, 0, true, flip, false>)
            , dim3(1), dim3(SORT_CTA_SIZE), sharedBytes, 0,
            (uint4*)keys, (uint4*)keys, numElements, 1 );
    }
    // Float keys had their sign bits flipped for sorting; restore them.
    if (flip)
        hipLaunchKernelGGL(( unflipFloats), dim3(1), dim3(SORT_CTA_SIZE), 0, 0, keys, numElements);
    CUT_CHECK_ERROR("radixSortSingleBlock");
}
/**
* @brief Main radix sort function. For keys only configuration.
*
* Main radix sort function. Sorts in place in the keys array,
* but uses the other device arrays as temporary storage. All pointer
* parameters are device pointers. Uses scan for the prefix sum of
* radix counters.
*
* @param[in,out] keys Keys to be sorted.
* @param[in] plan Configuration information for RadixSort.
* @param[in] flipBits Is set true if key datatype is a float (neg. numbers)
* for special float sorting operations.
* @param[in] numElements Number of elements in the sort.
* @param[in] keyBits Number of interesting bits in the key
**/
extern "C"
void radixSortKeysOnly(uint *keys,
                       const CUDPPRadixSortPlan *plan,
                       size_t numElements,
                       bool flipBits,
                       int keyBits)
{
    // Tiny arrays: a single warp sorts in place.
    if (numElements <= WARP_SIZE)
    {
        if (flipBits)
            hipLaunchKernelGGL(( radixSortSingleWarpKeysOnly<true>), dim3(1), dim3(numElements), 0, 0, keys, numElements);
        else
            hipLaunchKernelGGL(( radixSortSingleWarpKeysOnly<false>), dim3(1), dim3(numElements), 0, 0, keys, numElements);
        return;
    }
    // Small arrays: one CTA handles everything.
    if (numElements <= SORT_CTA_SIZE * 4)
    {
        if (flipBits)
            radixSortSingleBlockKeysOnly<true>(keys, numElements);
        else
            radixSortSingleBlockKeysOnly<false>(keys, numElements);
        return;
    }
    // General case: 4-bit LSB-first passes, one template instantiation per
    // digit position. Float keys get their bits flipped on the first pass
    // and unflipped on the last.
    if (flipBits)
        radixSortStepKeysOnly<4, 0, true, false>(keys, plan, numElements);
    else
        radixSortStepKeysOnly<4, 0, false, false>(keys, plan, numElements);
    if (keyBits > 4)
        radixSortStepKeysOnly<4, 4, false, false>(keys, plan, numElements);
    if (keyBits > 8)
        radixSortStepKeysOnly<4, 8, false, false>(keys, plan, numElements);
    if (keyBits > 12)
        radixSortStepKeysOnly<4, 12, false, false>(keys, plan, numElements);
    if (keyBits > 16)
        radixSortStepKeysOnly<4, 16, false, false>(keys, plan, numElements);
    if (keyBits > 20)
        radixSortStepKeysOnly<4, 20, false, false>(keys, plan, numElements);
    if (keyBits > 24)
        radixSortStepKeysOnly<4, 24, false, false>(keys, plan, numElements);
    if (keyBits > 28)
    {
        if (flipBits) // last pass: restore float bit patterns
            radixSortStepKeysOnly<4, 28, false, true>(keys, plan, numElements);
        else
            radixSortStepKeysOnly<4, 28, false, false>(keys, plan, numElements);
    }
}
/**
* @brief Wrapper to call main radix sort function. For floats and keys only.
*
* Calls the radixSortKeysOnly function setting parameters for floats.
*
* @param[in,out] keys Keys to be sorted.
* @param[in] plan Configuration information for RadixSort.
* @param[in] negativeKeys Set to true if keys may be negative; forwarded as
* the flipBits argument of radixSortKeysOnly().
* @param[in] numElements Number of elements in the sort.
* @param[in] keyBits Number of interesting bits in the key
**/
extern "C"
void radixSortFloatKeysOnly(float *keys,
                            const CUDPPRadixSortPlan *plan,
                            size_t numElements,
                            bool negativeKeys,
                            int keyBits)
{
    // Reinterpret the float keys as unsigned ints; negativeKeys becomes the
    // flipBits flag so radixSortKeysOnly() can order negative floats.
    uint* keyBitsView = (uint*)keys;
    radixSortKeysOnly(keyBitsView, plan, numElements, negativeKeys, keyBits);
}
// Queries the current device and fills in plan fields that depend on the
// hardware: the manual-coalescing flag, the persistent-CTA thresholds, and
// the per-kernel maximum-resident-CTA counts used by persistent-CTA launches.
// If no device can be queried, the plan is left untouched.
extern "C"
void initDeviceParameters(CUDPPRadixSortPlan *plan)
{
    int deviceID = -1;
    if (hipSuccess == hipGetDevice(&deviceID))
    {
        hipDeviceProp_t devprop;
        hipGetDeviceProperties(&devprop, deviceID);
        // sm_12 and later devices don't need help with coalesce in reorderData kernel
        // (the condition is true only for compute capability 1.0 / 1.1)
        plan->m_bManualCoalesce = (devprop.major < 2 && devprop.minor < 2);
        // Empirically we have found that for some (usually larger) sort
        // sizes it is better to use exactly as many "persistent" CTAs
        // as can fill the GPU, which loop over the "blocks" of work. For smaller
        // arrays it is better to use the typical CUDA approach of launching one CTA
        // per block of work.
        // 0-element of these two-element arrays is for key-value sorts
        // 1-element is for key-only sorts
        plan->m_persistentCTAThreshold[0] = plan->m_bManualCoalesce ? 16777216 : 524288;
        plan->m_persistentCTAThresholdFullBlocks[0] = plan->m_bManualCoalesce ? 2097152: 524288;
        plan->m_persistentCTAThreshold[1] = plan->m_bManualCoalesce ? 16777216 : 8388608;
        plan->m_persistentCTAThresholdFullBlocks[1] = plan->m_bManualCoalesce ? 2097152: 0;
        // create a map of function pointers to register counts for more accurate occupancy calculation
        // Must pass in the dynamic shared memory used by each kernel, since the runtime doesn't know it
        // Note we only insert the "loop" version of the kernels (the one with the last template param = true)
        // Because those are the only ones that require persistent CTAs that maximally fill the device.
        // Key-value block-sort kernels.
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_F_F_T] = maxBlocks(radixSortBlocks<4, 0, false, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_F_T_T] = maxBlocks(radixSortBlocks<4, 0, false, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_T_F_T] = maxBlocks(radixSortBlocks<4, 0, true, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_T_T_T] = maxBlocks(radixSortBlocks<4, 0, true, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
        // Keys-only block-sort kernels.
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_F_F_T] = maxBlocks(radixSortBlocksKeysOnly<4, 0, false, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_F_T_T] = maxBlocks(radixSortBlocksKeysOnly<4, 0, false, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_T_F_T] = maxBlocks(radixSortBlocksKeysOnly<4, 0, true, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_T_T_T] = maxBlocks(radixSortBlocksKeysOnly<4, 0, true, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
        // Digit-histogram kernels.
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_F_T] = maxBlocks(findRadixOffsets<0, false, true>, 3 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_T_T] = maxBlocks(findRadixOffsets<0, true, true>, 3 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
        // Key-value scatter kernels (no dynamic shared memory).
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_F_F_T] = maxBlocks(reorderData<0, false, false, false, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_F_T_T] = maxBlocks(reorderData<0, false, false, true, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_T_F_T] = maxBlocks(reorderData<0, false, true, false, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_T_T_T] = maxBlocks(reorderData<0, false, true, true, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_F_F_T] = maxBlocks(reorderData<0, true, false, false, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_F_T_T] = maxBlocks(reorderData<0, true, false, true, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_T_F_T] = maxBlocks(reorderData<0, true, true, false, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_T_T_T] = maxBlocks(reorderData<0, true, true, true, true>, 0, SORT_CTA_SIZE);
        // Keys-only scatter kernels.
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_F_F_T] = maxBlocks(reorderDataKeysOnly<0, false, false, false, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_F_T_T] = maxBlocks(reorderDataKeysOnly<0, false, false, true, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_T_F_T] = maxBlocks(reorderDataKeysOnly<0, false, true, false, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_T_T_T] = maxBlocks(reorderDataKeysOnly<0, false, true, true, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_F_F_T] = maxBlocks(reorderDataKeysOnly<0, true, false, false, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_F_T_T] = maxBlocks(reorderDataKeysOnly<0, true, false, true, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_T_F_T] = maxBlocks(reorderDataKeysOnly<0, true, true, false, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_T_T_T] = maxBlocks(reorderDataKeysOnly<0, true, true, true, true>, 0, SORT_CTA_SIZE);
        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_EK] = maxBlocks(emptyKernel, 0, SORT_CTA_SIZE);
    }
}
/**
* @brief From the programmer-specified sort configuration,
* creates internal memory for performing the sort.
*
* @param[in] plan Pointer to CUDPPRadixSortPlan object
**/
extern "C"
void allocRadixSortStorage(CUDPPRadixSortPlan *plan)
{
    unsigned int numElements = plan->m_numElements;
    // Ceil-divide: one sort CTA per SORT_CTA_SIZE * 4 elements.
    unsigned int numBlocks =
        ((numElements % (SORT_CTA_SIZE * 4)) == 0) ?
            (numElements / (SORT_CTA_SIZE * 4)) :
            (numElements / (SORT_CTA_SIZE * 4) + 1);
    switch(plan->m_config.datatype)
    {
    case CUDPP_UINT:
    case CUDPP_FLOAT:
        // sizeof(unsigned int) == sizeof(float) == 4 on these targets, so
        // both datatypes share one allocation path.
        CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_tempKeys,
                                 numElements * sizeof(unsigned int)));
        if (!plan->m_bKeysOnly)
            CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_tempValues,
                                     numElements * sizeof(unsigned int)));
        // Per-pass counter, counter-prefix-sum, and block-offset arrays,
        // WARP_SIZE entries per sort CTA.
        CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_counters,
                                 WARP_SIZE * numBlocks * sizeof(unsigned int)));
        CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_countersSum,
                                 WARP_SIZE * numBlocks * sizeof(unsigned int)));
        CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_blockOffsets,
                                 WARP_SIZE * numBlocks * sizeof(unsigned int)));
        break;
    }
    initDeviceParameters(plan);
}
/** @brief Deallocates intermediate memory from allocRadixSortStorage.
*
*
* @param[in] plan Pointer to CUDPPRadixSortPlan object
**/
extern "C"
void freeRadixSortStorage(CUDPPRadixSortPlan* plan)
{
CUDA_SAFE_CALL( hipFree(plan->m_tempKeys));
CUDA_SAFE_CALL( hipFree(plan->m_tempValues));
CUDA_SAFE_CALL( hipFree(plan->m_counters));
CUDA_SAFE_CALL( hipFree(plan->m_countersSum));
CUDA_SAFE_CALL( hipFree(plan->m_blockOffsets));
}
/** @brief Dispatch function to perform a sort on an array with
* a specified configuration.
*
* This is the dispatch routine which calls radixSort...() with
* appropriate template parameters and arguments as specified by
* the plan.
* @param[in,out] keys Keys to be sorted.
* @param[in,out] values Associated values to be sorted (through keys).
* @param[in] numElements Number of elements in the sort.
* @param[in] keyBits Number of interesting bits in the key*
* @param[in] plan Configuration information for RadixSort.
**/
extern "C"
void cudppRadixSortDispatch(void *keys,
                            void *values,
                            size_t numElements,
                            int keyBits,
                            const CUDPPRadixSortPlan *plan)
{
    // Dispatch on datatype first, then on keys-only vs. key-value; floats
    // are routed through the flip-bits wrappers.
    const bool keysOnly = plan->m_bKeysOnly;
    switch (plan->m_config.datatype)
    {
    case CUDPP_UINT:
        if (keysOnly)
            radixSortKeysOnly((uint*)keys, plan, numElements, false, keyBits);
        else
            radixSort((uint*)keys, (uint*)values, plan, numElements, false, keyBits);
        break;
    case CUDPP_FLOAT:
        if (keysOnly)
            radixSortFloatKeysOnly((float*)keys, plan, numElements, true, keyBits);
        else
            radixSortFloatKeys((float*)keys, (uint*)values, plan, numElements, true, keyBits);
        break;
    }
}
/** @} */ // end radixsort functions
/** @} */ // end cudpp_app
| 3cc39782a3480d942e639504f3c547ff393fe625.cu | // -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision$
// $Date$
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* radixsort_app.cu
*
* @brief CUDPP application-level radix sorting routines
*/
/** @addtogroup cudpp_app
* @{
*/
/** @name RadixSort Functions
* @{
*/
#include "cudpp.h"
#include "cudpp_util.h"
#include "cudpp_radixsort.h"
#include "cudpp_scan.h"
#include "kernel/radixsort_kernel.cu"
#include "cudpp_maximal_launch.h"
#include <cutil.h>
#include <cstdlib>
#include <cstdio>
#include <assert.h>
typedef unsigned int uint;
/** @brief Perform one step of the radix sort. Sorts by nbits key bits per step,
* starting at startbit.
*
* Uses cudppScanDispatch() for the prefix sum of radix counters.
*
* @param[in,out] keys Keys to be sorted.
* @param[in,out] values Associated values to be sorted (through keys).
* @param[in] plan Configuration information for RadixSort.
* @param[in] numElements Number of elements in the sort.
**/
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStep(uint *keys,
                   uint *values,
                   const CUDPPRadixSortPlan *plan,
                   uint numElements)
{
    // Elements handled per CTA by the block-sort kernel and by the
    // offset/reorder kernels, respectively.
    const uint eltsPerBlock = SORT_CTA_SIZE * 4;
    const uint eltsPerBlock2 = SORT_CTA_SIZE * 2;
    bool fullBlocks = ((numElements % eltsPerBlock) == 0);
    uint numBlocks = (fullBlocks) ?
        (numElements / eltsPerBlock) :
        (numElements / eltsPerBlock + 1);
    uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
        (numElements / eltsPerBlock2) :
        (numElements / eltsPerBlock2 + 1);
    // Grids are capped at 65535 blocks per dimension; larger sorts use the
    // "loop" kernel variants where each CTA iterates over multiple blocks.
    bool loop = numBlocks > 65535;
    uint blocks = loop ? 65535 : numBlocks;
    uint blocksFind = loop ? 65535 : numBlocks2;
    uint blocksReorder = loop ? 65535 : numBlocks2;
    // Index [0] of the threshold arrays selects the key-value configuration.
    uint threshold = fullBlocks ? plan->m_persistentCTAThresholdFullBlocks[0] : plan->m_persistentCTAThreshold[0];
    if (numElements >= threshold)
    {
        loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536);
        blocks = numBlocks;
        blocksFind = numBlocks2;
        blocksReorder = numBlocks2;
        // Run an empty kernel -- this seems to reset some of the CTA scheduling hardware
        // on GT200, resulting in better scheduling and lower run times
        if (startbit > 0)
        {
            emptyKernel<<<plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_EK], SORT_CTA_SIZE>>>();
        }
    }
    // Phase 1: locally sort each block of key/value pairs by the current
    // nbits-wide digit into the temp arrays.
    if (fullBlocks)
    {
        if (loop)
        {
            if (numElements >= threshold)
            {
                // Persistent-CTA mode: launch exactly enough CTAs to fill
                // the device and let each one loop over the work.
                blocks = flip? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_T_T_T] :
                    plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_T_F_T];
            }
            radixSortBlocks<nbits, startbit, true, flip, true>
                <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>>
                ((uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
        }
        else
        {
            radixSortBlocks<nbits, startbit, true, flip, false>
                <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>>
                ((uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
        }
    }
    else
    {
        if (loop)
        {
            if (numElements >= threshold)
            {
                blocks = flip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_F_T_T] :
                    plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_F_F_T];
            }
            radixSortBlocks<nbits, startbit, false, flip, true>
                <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>>
                ((uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
        }
        else
        {
            radixSortBlocks<nbits, startbit, false, flip, false>
                <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>>
                ((uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks);
        }
    }
    CUT_CHECK_ERROR("radixSortBlocks");
    // Phase 2: compute per-block digit histograms (m_counters) and the
    // offset of each digit run within its block (m_blockOffsets).
    if (fullBlocks)
    {
        if (loop)
        {
            if (numElements >= threshold)
            {
                blocksFind = plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_T_T];
            }
            findRadixOffsets<startbit, true, true>
                <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>>
                ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
        }
        else
        {
            findRadixOffsets<startbit, true, false>
                <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>>
                ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
        }
    }
    else
    {
        if (loop)
        {
            if (numElements >= threshold)
            {
                blocksFind = plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_F_T];
            }
            findRadixOffsets<startbit, false, true>
                <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>>
                ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
        }
        else
        {
            findRadixOffsets<startbit, false, false>
                <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>>
                ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
        }
    }
    CUT_CHECK_ERROR("findRadixOffsets");
    // Prefix-sum the 16 digit counters over all blocks to obtain the global
    // scatter offsets for this pass.
    cudppScanDispatch(plan->m_countersSum, plan->m_counters, 16*numBlocks2, 1, plan->m_scanPlan);
    // Phase 3: scatter keys and values from the temp arrays back into
    // keys/values at their globally sorted positions for this digit. Kernel
    // variant is chosen by fullBlocks / manual-coalescing / loop mode.
    if (fullBlocks)
    {
        if (plan->m_bManualCoalesce)
        {
            if (loop)
            {
                if (numElements >= threshold)
                {
                    blocksReorder = unflip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_T_T_T] :
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_T_F_T];
                }
                reorderData<startbit, true, true, unflip, true>
                    <<<blocksReorder, SORT_CTA_SIZE>>>
                    (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
                     plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
            }
            else
            {
                reorderData<startbit, true, true, unflip, false>
                    <<<blocksReorder, SORT_CTA_SIZE>>>
                    (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
                     plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
            }
        }
        else
        {
            if (loop)
            {
                if (numElements >= threshold)
                {
                    blocksReorder = unflip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_F_T_T] :
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_F_F_T];
                }
                reorderData<startbit, true, false, unflip, true>
                    <<<blocksReorder, SORT_CTA_SIZE>>>
                    (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
                     plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
            }
            else
            {
                reorderData<startbit, true, false, unflip, false>
                    <<<blocksReorder, SORT_CTA_SIZE>>>
                    (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
                     plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
            }
        }
    }
    else
    {
        if (plan->m_bManualCoalesce)
        {
            if (loop)
            {
                if (numElements >= threshold)
                {
                    blocksReorder = unflip ?
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_T_T_T] :
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_T_F_T];
                }
                reorderData<startbit, false, true, unflip, true>
                    <<<blocksReorder, SORT_CTA_SIZE>>>
                    (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
                     plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
            }
            else
            {
                reorderData<startbit, false, true, unflip, false>
                    <<<blocksReorder, SORT_CTA_SIZE>>>
                    (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
                     plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
            }
        }
        else
        {
            if (loop)
            {
                if (numElements >= threshold)
                {
                    blocksReorder = unflip ?
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_F_T_T] :
                        plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_F_F_T];
                }
                reorderData<startbit, false, false, unflip, true>
                    <<<blocksReorder, SORT_CTA_SIZE>>>
                    (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
                     plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
            }
            else
            {
                reorderData<startbit, false, false, unflip, false>
                    <<<blocksReorder, SORT_CTA_SIZE>>>
                    (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues,
                     plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2);
            }
        }
    }
    CUT_CHECK_ERROR("radixSortStep");
}
/**
* @brief Single-block optimization for sorts of fewer than 4 * CTA_SIZE elements
*
* @param[in,out] keys Keys to be sorted.
* @param[in,out] values Associated values to be sorted (through keys).
* @param numElements Number of elements in the sort.
**/
template <bool flip>
void radixSortSingleBlock(uint *keys,
                          uint *values,
                          uint numElements)
{
    // A single CTA sorts the whole (small) key/value array in place over all
    // 32 key bits; only the full-blocks template flag differs per branch.
    const bool partialTail = (numElements % (SORT_CTA_SIZE * 4)) != 0;
    if (partialTail)
    {
        radixSortBlocks<32, 0, false, flip, false>
            <<<1, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>>
            ((uint4*)keys, (uint4*)values,
             (uint4*)keys, (uint4*)values,
             numElements, 0);
    }
    else
    {
        radixSortBlocks<32, 0, true, flip, false>
            <<<1, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>>
            ((uint4*)keys, (uint4*)values,
             (uint4*)keys, (uint4*)values,
             numElements, 0);
    }
    // Float keys had their sign bits flipped for sorting; restore them.
    if (flip) unflipFloats<<<1, SORT_CTA_SIZE>>>(keys, numElements);
    CUT_CHECK_ERROR("radixSortSingleBlock");
}
/**
* @brief Main radix sort function
*
* Main radix sort function. Sorts in place in the keys and values arrays,
* but uses the other device arrays as temporary storage. All pointer
* parameters are device pointers. Uses cudppScan() for the prefix sum of
* radix counters.
*
* @param[in,out] keys Keys to be sorted.
* @param[in,out] values Associated values to be sorted (through keys).
* @param[in] plan Configuration information for RadixSort.
* @param[in] numElements Number of elements in the sort.
* @param[in] flipBits Is set true if key datatype is a float
* (neg. numbers) for special float sorting operations.
* @param[in] keyBits Number of interesting bits in the key
**/
void radixSort(uint *keys,
               uint* values,
               const CUDPPRadixSortPlan *plan,
               size_t numElements,
               bool flipBits,
               int keyBits)
{
    // Tiny arrays: a single warp sorts in place.
    if (numElements <= WARP_SIZE)
    {
        if (flipBits)
            radixSortSingleWarp<true><<<1, numElements>>>(keys, values, numElements);
        else
            radixSortSingleWarp<false><<<1, numElements>>>(keys, values, numElements);
        CUT_CHECK_ERROR("radixSortSingleWarp");
        return;
    }
#ifdef __DEVICE_EMULATION__
    printf("bits: %d\n", keyBits);
#endif
    // Small arrays: one CTA handles everything.
    if (numElements <= SORT_CTA_SIZE * 4)
    {
        if (flipBits)
            radixSortSingleBlock<true>(keys, values, numElements);
        else
            radixSortSingleBlock<false>(keys, values, numElements);
        return;
    }
    // General case: 4-bit LSB-first passes, one template instantiation per
    // digit position. Float keys get their bits flipped on the first pass
    // and unflipped on the last.
    if (flipBits)
        radixSortStep<4, 0, true, false>(keys, values, plan, numElements);
    else
        radixSortStep<4, 0, false, false>(keys, values, plan, numElements);
    if (keyBits > 4)
        radixSortStep<4, 4, false, false>(keys, values, plan, numElements);
    if (keyBits > 8)
        radixSortStep<4, 8, false, false>(keys, values, plan, numElements);
    if (keyBits > 12)
        radixSortStep<4, 12, false, false>(keys, values, plan, numElements);
    if (keyBits > 16)
        radixSortStep<4, 16, false, false>(keys, values, plan, numElements);
    if (keyBits > 20)
        radixSortStep<4, 20, false, false>(keys, values, plan, numElements);
    if (keyBits > 24)
        radixSortStep<4, 24, false, false>(keys, values, plan, numElements);
    if (keyBits > 28)
    {
        if (flipBits) // last pass: restore float bit patterns
            radixSortStep<4, 28, false, true>(keys, values, plan, numElements);
        else
            radixSortStep<4, 28, false, false>(keys, values, plan, numElements);
    }
}
/**
* @brief Wrapper to call main radix sort function. For float configuration.
*
* Calls the main radix sort function. For float configuration.
*
* @param[in,out] keys Keys to be sorted.
* @param[in,out] values Associated values to be sorted (through keys).
* @param[in] plan Configuration information for RadixSort.
* @param[in] numElements Number of elements in the sort.
* @param[in] negativeKeys Is set true if key datatype has neg. numbers.
* @param[in] keyBits Number of interesting bits in the key
**/
extern "C"
void radixSortFloatKeys(float* keys,
                        uint* values,
                        const CUDPPRadixSortPlan *plan,
                        size_t numElements,
                        bool negativeKeys,
                        int keyBits)
{
    // Float keys are sorted by reinterpreting their bit patterns as unsigned
    // ints; radixSort() flips/unflips the sign bits when negativeKeys is true
    // so that negative floats order correctly.
    uint* keyBitsView = (uint*)keys;
    radixSort(keyBitsView, values, plan, numElements, negativeKeys, keyBits);
}
/** @brief Perform one step of the radix sort. Sorts by nbits key bits per step,
* starting at startbit.
*
* @param[in,out] keys Keys to be sorted.
* @param[in] plan Configuration information for RadixSort.
* @param[in] numElements Number of elements in the sort.
**/
// Executes the four kernels of one least-significant-digit radix pass over
// 'nbits' key bits starting at 'startbit':
//   1) radixSortBlocksKeysOnly : sort each block's tile into plan->m_tempKeys
//   2) findRadixOffsets        : per-block digit counts and block-local offsets
//   3) cudppScanDispatch       : prefix-sum the digit counters
//   4) reorderDataKeysOnly     : scatter m_tempKeys back into 'keys'
// 'flip'/'unflip' apply/undo the float-to-uint bit transform in this pass.
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStepKeysOnly(uint *keys,
const CUDPPRadixSortPlan *plan,
uint numElements)
{
// The block sort consumes 4 elements per thread; find/reorder consume 2.
const uint eltsPerBlock = SORT_CTA_SIZE * 4;
const uint eltsPerBlock2 = SORT_CTA_SIZE * 2;
// fullBlocks selects kernel variants without tail bounds checks.
bool fullBlocks = ((numElements % eltsPerBlock) == 0);
uint numBlocks = (fullBlocks) ?
(numElements / eltsPerBlock) :
(numElements / eltsPerBlock + 1);
uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
(numElements / eltsPerBlock2) :
(numElements / eltsPerBlock2 + 1);
// Grids are capped at 65535 blocks; beyond that, "loop" kernel variants
// (last template parameter = true) stride each CTA over multiple tiles.
bool loop = numBlocks > 65535;
uint blocks = loop ? 65535 : numBlocks;
uint blocksFind = loop ? 65535 : numBlocks2;
uint blocksReorder = loop ? 65535 : numBlocks2;
// Above this threshold switch to "persistent CTA" mode (index [1] is the
// keys-only threshold): launch exactly plan->m_numCTAs[...] CTAs, set per
// kernel below, and let them loop over the work.
uint threshold = fullBlocks ? plan->m_persistentCTAThresholdFullBlocks[1] : plan->m_persistentCTAThreshold[1];
if (numElements >= threshold)
{
loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536);
blocks = numBlocks;
blocksFind = numBlocks2;
blocksReorder = numBlocks2;
}
// --- Phase 1: block-local sort of each tile into plan->m_tempKeys ---
if (fullBlocks)
{
if (loop)
{
if (numElements >= threshold)
{
blocks = flip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_T_T_T] :
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_T_F_T];
}
radixSortBlocksKeysOnly<nbits, startbit, true, flip, true>
<<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>>
((uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
radixSortBlocksKeysOnly<nbits, startbit, true, flip, false>
<<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>>
((uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
{
if (loop)
{
if (numElements >= threshold)
{
blocks = flip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_F_T_T] :
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_F_F_T];
}
radixSortBlocksKeysOnly<nbits, startbit, false, flip, true>
<<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>>
((uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks);
}
else
radixSortBlocksKeysOnly<nbits, startbit, false, flip, false>
<<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>>
((uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks);
}
// --- Phase 2: per-block digit histograms and block-local offsets ---
if (fullBlocks)
{
if (loop)
{
if (numElements >= threshold)
{
blocksFind = plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_T_T];
}
findRadixOffsets<startbit, true, true>
<<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>>
((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
}
else
findRadixOffsets<startbit, true, false>
<<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>>
((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
}
else
{
if (loop)
{
if (numElements >= threshold)
{
blocksFind = plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_F_T];
}
findRadixOffsets<startbit, false, true>
<<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>>
((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
}
else
findRadixOffsets<startbit, false, false>
<<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>>
((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2);
}
// --- Phase 3: scan the 16 per-block digit counters (4-bit radix) ---
cudppScanDispatch(plan->m_countersSum, plan->m_counters, 16*numBlocks2, 1, plan->m_scanPlan);
// --- Phase 4: scatter keys to their sorted positions, back into 'keys'.
// m_bManualCoalesce selects variants that manually coalesce global writes
// (needed on pre-sm_12 GPUs; see initDeviceParameters). ---
if (fullBlocks)
{
if (plan->m_bManualCoalesce)
{
if (loop)
{
if (numElements >= threshold)
{
blocksReorder = unflip ?
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_T_T_T] :
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_T_F_T];
}
reorderDataKeysOnly<startbit, true, true, unflip, true>
<<<blocksReorder, SORT_CTA_SIZE>>>
(keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
numElements, numBlocks2);
}
else
reorderDataKeysOnly<startbit, true, true, unflip, false>
<<<blocksReorder, SORT_CTA_SIZE>>>
(keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
numElements, numBlocks2);
}
else
{
if (loop)
{
if (numElements >= threshold)
{
blocksReorder = unflip ?
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_F_T_T] :
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_F_F_T];
}
reorderDataKeysOnly<startbit, true, false, unflip, true>
<<<blocksReorder, SORT_CTA_SIZE>>>
(keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
numElements, numBlocks2);
}
else
reorderDataKeysOnly<startbit, true, false, unflip, false>
<<<blocksReorder, SORT_CTA_SIZE>>>
(keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
numElements, numBlocks2);
}
}
else
{
if (plan->m_bManualCoalesce)
{
if (loop)
{
if (numElements >= threshold)
{
blocksReorder = unflip ?
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_T_T_T] :
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_T_F_T];
}
reorderDataKeysOnly<startbit, false, true, unflip, true>
<<<blocksReorder, SORT_CTA_SIZE>>>
(keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
numElements, numBlocks2);
}
else
reorderDataKeysOnly<startbit, false, true, unflip, false>
<<<blocksReorder, SORT_CTA_SIZE>>>
(keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
numElements, numBlocks2);
}
else
{
if (loop)
{
if (numElements >= threshold)
{
blocksReorder = unflip ?
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_F_T_T] :
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_F_F_T];
}
reorderDataKeysOnly<startbit, false, false, unflip, true>
<<<blocksReorder, SORT_CTA_SIZE>>>
(keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
numElements, numBlocks2);
}
else
reorderDataKeysOnly<startbit, false, false, unflip, false>
<<<blocksReorder, SORT_CTA_SIZE>>>
(keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters,
numElements, numBlocks2);
}
}
CUT_CHECK_ERROR("radixSortStepKeysOnly");
}
/**
* @brief Optimization for sorts of fewer than 4 * CTA_SIZE elements (keys only).
*
* @param[in,out] keys Keys to be sorted.
* @param numElements Number of elements in the sort.
**/
template <bool flip>
void radixSortSingleBlockKeysOnly(uint *keys,
uint numElements)
{
// The bounds-check-free kernel variant can be used when the element count
// exactly fills the CTA's 4-elements-per-thread tile.
bool fullBlocks = (numElements % (SORT_CTA_SIZE * 4) == 0);
if (fullBlocks)
{
// nbits=32, startbit=0: a single launch sorts the full key, in place.
radixSortBlocksKeysOnly<32, 0, true, flip, false>
<<<1, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>>
((uint4*)keys, (uint4*)keys, numElements, 1 );
}
else
{
radixSortBlocksKeysOnly<32, 0, false, flip, false>
<<<1, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>>
((uint4*)keys, (uint4*)keys, numElements, 1 );
}
// For float keys (flip == true) undo the bit transform after sorting.
if (flip)
unflipFloats<<<1, SORT_CTA_SIZE>>>(keys, numElements);
CUT_CHECK_ERROR("radixSortSingleBlock");
}
/**
* @brief Main radix sort function. For keys only configuration.
*
* Main radix sort function. Sorts in place in the keys array,
* but uses the other device arrays as temporary storage. All pointer
* parameters are device pointers. Uses scan for the prefix sum of
* radix counters.
*
* @param[in,out] keys Keys to be sorted.
* @param[in] plan Configuration information for RadixSort.
* @param[in] flipBits Is set true if key datatype is a float (neg. numbers)
* for special float sorting operations.
* @param[in] numElements Number of elements in the sort.
* @param[in] keyBits Number of interesting bits in the key
**/
extern "C"
void radixSortKeysOnly(uint *keys,
const CUDPPRadixSortPlan *plan,
size_t numElements,
bool flipBits,
int keyBits)
{
// Tiny arrays: sort entirely within a single warp.
if(numElements <= WARP_SIZE)
{
if (flipBits)
radixSortSingleWarpKeysOnly<true><<<1, numElements>>>(keys, numElements);
else
radixSortSingleWarpKeysOnly<false><<<1, numElements>>>(keys, numElements);
return;
}
// Small arrays: one CTA sorts all 32 key bits in a single launch.
if(numElements <= SORT_CTA_SIZE * 4)
{
if (flipBits)
radixSortSingleBlockKeysOnly<true>(keys, numElements);
else
radixSortSingleBlockKeysOnly<false>(keys, numElements);
return;
}
// General case: up to eight 4-bit LSD passes, skipping passes above keyBits.
// flip float bits on the first pass, unflip on the last pass
// NOTE(review): if flipBits is true but keyBits <= 28, the unflip pass below
// never runs and keys are left bit-transformed; callers appear to use
// keyBits == 32 for floats -- verify.
if (flipBits)
{
radixSortStepKeysOnly<4, 0, true, false>(keys, plan, numElements);
}
else
{
radixSortStepKeysOnly<4, 0, false, false>(keys, plan, numElements);
}
if (keyBits > 4)
{
radixSortStepKeysOnly<4, 4, false, false>(keys, plan, numElements);
}
if (keyBits > 8)
{
radixSortStepKeysOnly<4, 8, false, false>(keys, plan, numElements);
}
if (keyBits > 12)
{
radixSortStepKeysOnly<4, 12, false, false>(keys, plan, numElements);
}
if (keyBits > 16)
{
radixSortStepKeysOnly<4, 16, false, false>(keys, plan, numElements);
}
if (keyBits > 20)
{
radixSortStepKeysOnly<4, 20, false, false>(keys, plan, numElements);
}
if (keyBits > 24)
{
radixSortStepKeysOnly<4, 24, false, false>(keys, plan, numElements);
}
if (keyBits > 28)
{
if (flipBits) // last pass
{
radixSortStepKeysOnly<4, 28, false, true>(keys, plan, numElements);
}
else
{
radixSortStepKeysOnly<4, 28, false, false>(keys, plan, numElements);
}
}
}
/**
* @brief Wrapper to call main radix sort function. For floats and keys only.
*
* Calls the radixSortKeysOnly function setting parameters for floats.
*
* @param[in,out] keys Keys to be sorted.
* @param[in] plan Configuration information for RadixSort.
* @param[in] negativeKeys Is set true if key flipBits is to be true in
* radixSortKeysOnly().
* @param[in] numElements Number of elements in the sort.
* @param[in] keyBits Number of interesting bits in the key
**/
extern "C"
void radixSortFloatKeysOnly(float *keys,
                            const CUDPPRadixSortPlan *plan,
                            size_t numElements,
                            bool negativeKeys,
                            int keyBits)
{
    // Float keys sort correctly as uints once their bits are transformed,
    // so reuse the unsigned keys-only path; negativeKeys requests the flip.
    radixSortKeysOnly(reinterpret_cast<uint*>(keys), plan, numElements,
                      negativeKeys, keyBits);
}
extern "C"
void initDeviceParameters(CUDPPRadixSortPlan *plan)
{
// Queries the current device and caches launch-tuning parameters in the plan:
// whether manual write coalescing is needed, the persistent-CTA thresholds,
// and the max resident CTA count for each "loop" kernel variant.
int deviceID = -1;
if (cudaSuccess == cudaGetDevice(&deviceID))
{
cudaDeviceProp devprop;
cudaGetDeviceProperties(&devprop, deviceID);
// sm_12 and later devices don't need help with coalesce in reorderData kernel
// (the condition below is true only for sm_10 / sm_11 parts)
plan->m_bManualCoalesce = (devprop.major < 2 && devprop.minor < 2);
// Empirically we have found that for some (usually larger) sort
// sizes it is better to use exactly as many "persistent" CTAs
// as can fill the GPU, which loop over the "blocks" of work. For smaller
// arrays it is better to use the typical CUDA approach of launching one CTA
// per block of work.
// 0-element of these two-element arrays is for key-value sorts
// 1-element is for key-only sorts
plan->m_persistentCTAThreshold[0] = plan->m_bManualCoalesce ? 16777216 : 524288;
plan->m_persistentCTAThresholdFullBlocks[0] = plan->m_bManualCoalesce ? 2097152: 524288;
plan->m_persistentCTAThreshold[1] = plan->m_bManualCoalesce ? 16777216 : 8388608;
plan->m_persistentCTAThresholdFullBlocks[1] = plan->m_bManualCoalesce ? 2097152: 0;
// create a map of function pointers to register counts for more accurate occupancy calculation
// Must pass in the dynamic shared memory used by each kernel, since the runtime doesn't know it
// Note we only insert the "loop" version of the kernels (the one with the last template param = true)
// Because those are the only ones that require persistent CTAs that maximally fill the device.
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_F_F_T] = maxBlocks(radixSortBlocks<4, 0, false, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_F_T_T] = maxBlocks(radixSortBlocks<4, 0, false, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_T_F_T] = maxBlocks(radixSortBlocks<4, 0, true, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_T_T_T] = maxBlocks(radixSortBlocks<4, 0, true, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_F_F_T] = maxBlocks(radixSortBlocksKeysOnly<4, 0, false, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_F_T_T] = maxBlocks(radixSortBlocksKeysOnly<4, 0, false, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_T_F_T] = maxBlocks(radixSortBlocksKeysOnly<4, 0, true, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_T_T_T] = maxBlocks(radixSortBlocksKeysOnly<4, 0, true, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_F_T] = maxBlocks(findRadixOffsets<0, false, true>, 3 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_T_T] = maxBlocks(findRadixOffsets<0, true, true>, 3 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_F_F_T] = maxBlocks(reorderData<0, false, false, false, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_F_T_T] = maxBlocks(reorderData<0, false, false, true, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_T_F_T] = maxBlocks(reorderData<0, false, true, false, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_T_T_T] = maxBlocks(reorderData<0, false, true, true, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_F_F_T] = maxBlocks(reorderData<0, true, false, false, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_F_T_T] = maxBlocks(reorderData<0, true, false, true, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_T_F_T] = maxBlocks(reorderData<0, true, true, false, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_T_T_T] = maxBlocks(reorderData<0, true, true, true, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_F_F_T] = maxBlocks(reorderDataKeysOnly<0, false, false, false, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_F_T_T] = maxBlocks(reorderDataKeysOnly<0, false, false, true, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_T_F_T] = maxBlocks(reorderDataKeysOnly<0, false, true, false, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_T_T_T] = maxBlocks(reorderDataKeysOnly<0, false, true, true, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_F_F_T] = maxBlocks(reorderDataKeysOnly<0, true, false, false, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_F_T_T] = maxBlocks(reorderDataKeysOnly<0, true, false, true, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_T_F_T] = maxBlocks(reorderDataKeysOnly<0, true, true, false, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_T_T_T] = maxBlocks(reorderDataKeysOnly<0, true, true, true, true>, 0, SORT_CTA_SIZE);
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_EK] = maxBlocks(emptyKernel, 0, SORT_CTA_SIZE);
}
}
/**
* @brief From the programmer-specified sort configuration,
* creates internal memory for performing the sort.
*
* @param[in] plan Pointer to CUDPPRadixSortPlan object
**/
extern "C"
void allocRadixSortStorage(CUDPPRadixSortPlan *plan)
{
unsigned int numElements = plan->m_numElements;
// One sort CTA handles SORT_CTA_SIZE * 4 elements; round up for the tail.
unsigned int numBlocks =
((numElements % (SORT_CTA_SIZE * 4)) == 0) ?
(numElements / (SORT_CTA_SIZE * 4)) :
(numElements / (SORT_CTA_SIZE * 4) + 1);
switch(plan->m_config.datatype)
{
case CUDPP_UINT:
// m_tempKeys/m_tempValues hold the block-sorted data between the
// block-sort and reorder kernels; per-block counters, their scanned
// sums, and block offsets drive the scatter phase.
CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_tempKeys,
numElements * sizeof(unsigned int)));
if (!plan->m_bKeysOnly)
CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_tempValues,
numElements * sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_counters,
WARP_SIZE * numBlocks * sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_countersSum,
WARP_SIZE * numBlocks * sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_blockOffsets,
WARP_SIZE * numBlocks * sizeof(unsigned int)));
break;
case CUDPP_FLOAT:
// Same byte counts as the UINT case (sizeof(float) == sizeof(unsigned
// int)); floats are sorted as bit-transformed uints elsewhere.
CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_tempKeys,
numElements * sizeof(float)));
if (!plan->m_bKeysOnly)
CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_tempValues,
numElements * sizeof(float)));
CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_counters,
WARP_SIZE * numBlocks * sizeof(float)));
CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_countersSum,
WARP_SIZE * numBlocks * sizeof(float)));
CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_blockOffsets,
WARP_SIZE * numBlocks * sizeof(float)));
break;
}
initDeviceParameters(plan);
}
/** @brief Deallocates intermediate memory from allocRadixSortStorage.
*
*
* @param[in] plan Pointer to CUDPPRadixSortPlan object
**/
extern "C"
void freeRadixSortStorage(CUDPPRadixSortPlan* plan)
{
    // Release every scratch buffer created by allocRadixSortStorage(), in
    // the same order as before. cudaFree(NULL) succeeds, so pointers that
    // were never allocated (e.g. m_tempValues for keys-only plans, if
    // zero-initialized) are harmless.
    void* buffers[] = {
        plan->m_tempKeys,
        plan->m_tempValues,
        plan->m_counters,
        plan->m_countersSum,
        plan->m_blockOffsets
    };
    for (size_t i = 0; i < sizeof(buffers) / sizeof(buffers[0]); ++i)
    {
        CUDA_SAFE_CALL( cudaFree(buffers[i]));
    }
}
/** @brief Dispatch function to perform a sort on an array with
* a specified configuration.
*
* This is the dispatch routine which calls radixSort...() with
* appropriate template parameters and arguments as specified by
* the plan.
* @param[in,out] keys Keys to be sorted.
* @param[in,out] values Associated values to be sorted (through keys).
* @param[in] numElements Number of elements in the sort.
* @param[in] keyBits Number of interesting bits in the key*
* @param[in] plan Configuration information for RadixSort.
**/
extern "C"
void cudppRadixSortDispatch(void *keys,
                            void *values,
                            size_t numElements,
                            int keyBits,
                            const CUDPPRadixSortPlan *plan)
{
    // Selects the keys-only vs. key-value entry point and the uint vs. float
    // flavor from the plan's configuration. Float sorts pass true for the
    // bit-flip flag so negative values order correctly.
    if(plan->m_bKeysOnly)
    {
        switch(plan->m_config.datatype)
        {
        case CUDPP_UINT:
            radixSortKeysOnly((uint*)keys, plan,
                              numElements, false, keyBits);
            break;
        case CUDPP_FLOAT:
            radixSortFloatKeysOnly((float*)keys, plan,
                                   numElements, true, keyBits);
            break;  // was an implicit fall-off-the-end; explicit for safety
        default:
            break;  // other datatypes are silently ignored, as before
        }
    }
    else
    {
        switch(plan->m_config.datatype)
        {
        case CUDPP_UINT:
            radixSort((uint*)keys, (uint*) values, plan,
                      numElements, false, keyBits);
            break;
        case CUDPP_FLOAT:
            radixSortFloatKeys((float*)keys, (uint*) values, plan,
                               numElements, true, keyBits);
            break;  // was an implicit fall-off-the-end; explicit for safety
        default:
            break;  // other datatypes are silently ignored, as before
        }
    }
}
/** @} */ // end radixsort functions
/** @} */ // end cudpp_app
|
ce35537645b15c36cd91a231782321b67aa5f17b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Fill a vector of 100 ints on the GPU with consecutive values.
*/
#include <iostream>
#include <vector>
__global__ void fill( int * v, std::size_t size )
{
    // Global thread id; with the demo's single-block launch this reduces
    // to threadIdx.x (0 -> 99).
    auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: the original ignored `size`, so a launch with more
    // threads than elements would have written out of bounds. Each
    // in-range thread fills a single element of the array.
    if ( tid < size )
    {
        v[ tid ] = tid;
    }
}
int main()
{
// Host-side buffer that will receive the results (expected: 0..99).
std::vector< int > v( 100 );
int * v_d = nullptr;
// Allocate an array on the device.
// NOTE(review): no error checking on the HIP calls below; failures
// would go unnoticed. Acceptable for a demo, not for production.
hipMalloc( &v_d, v.size() * sizeof( int ) );
// Launch one block of 100 threads on the device.
// In this block, threads are numbered from 0 to 99.
hipLaunchKernelGGL(( fill), dim3(1), dim3(100) , 0, 0, v_d, v.size() );
// Copy data from the device memory to the host memory.
// (The blocking copy also synchronizes with the kernel launch above.)
hipMemcpy( v.data(), v_d, v.size() * sizeof( int ), hipMemcpyDeviceToHost );
// Print one value per line.
for( auto x: v )
{
std::cout << x << std::endl;
}
hipFree( v_d );
return 0;
} | ce35537645b15c36cd91a231782321b67aa5f17b.cu | /**
* Fill a vector of 100 ints on the GPU with consecutive values.
*/
#include <iostream>
#include <vector>
__global__ void fill( int * v, std::size_t size )
{
    // Global thread id; with the demo's single-block launch this reduces
    // to threadIdx.x (0 -> 99).
    auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: the original ignored `size`, so a launch with more
    // threads than elements would have written out of bounds. Each
    // in-range thread fills a single element of the array.
    if ( tid < size )
    {
        v[ tid ] = tid;
    }
}
int main()
{
// Host-side buffer that will receive the results (expected: 0..99).
std::vector< int > v( 100 );
int * v_d = nullptr;
// Allocate an array on the device.
// NOTE(review): no error checking on the CUDA calls below; failures
// would go unnoticed. Acceptable for a demo, not for production.
cudaMalloc( &v_d, v.size() * sizeof( int ) );
// Launch one block of 100 threads on the device.
// In this block, threads are numbered from 0 to 99.
fill<<< 1, 100 >>>( v_d, v.size() );
// Copy data from the device memory to the host memory.
// (The blocking copy also synchronizes with the kernel launch above.)
cudaMemcpy( v.data(), v_d, v.size() * sizeof( int ), cudaMemcpyDeviceToHost );
// Print one value per line.
for( auto x: v )
{
std::cout << x << std::endl;
}
cudaFree( v_d );
return 0;
} |
269781c3ea4bde44cfc99368ade7d9aedddc5ade.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include "execute.h"
/* This variable is used to check if there should be next generation */
__device__ int diff = 0;
// Runs the Game of Life: builds the board, iterates up to `loops`
// generations on the GPU (ping-ponging between two device buffers), and
// stops early once a generation produces no newly-alive cells.
void execute(int dimension, int loops, char *input_file, int prints_enabled) {
int local_diff;
char *grid, *gpu_grid_1, *gpu_grid_2;
struct timeval time_1, time_2;
gettimeofday(&time_1, 0);
createGrid(&grid, dimension);
// Either load an initial board from a file or generate one.
if (input_file != NULL) {
readGrid(grid, input_file, dimension);
} else {
initGrid(grid, dimension);
}
if (prints_enabled == 1) {
// Snapshots go into ./outputs; an already-existing directory is fine.
int dir_stat = mkdir("outputs", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
if (dir_stat != 0 && errno != EEXIST) {
printf("mkdir error %s\n", strerror(errno));
exit(EXIT_FAILURE);
}
printGrid(grid, dimension);
}
hipMalloc((void **) &gpu_grid_1, dimension * dimension * sizeof(char));
hipMalloc((void **) &gpu_grid_2, dimension * dimension * sizeof(char));
hipMemcpy(gpu_grid_1, grid, dimension * dimension * sizeof(char), hipMemcpyHostToDevice);
/* Kernel invocation: one thread per cell, grid rounded up to 16x16 tiles */
dim3 dimBlock(16, 16);
dim3 dimGrid;
dimGrid.x = (dimension + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (dimension + dimBlock.y - 1) / dimBlock.y;
int generation = 1;
while (generation <= loops) {
if (prints_enabled == 1) {
printf("Generation: %d\n", generation);
}
// Reset the device-side change counter before each step.
local_diff = 0;
hipMemcpyToSymbol(diff, &local_diff,sizeof(int), 0, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_grid_1, gpu_grid_2, dimension);
if (hipGetLastError() != hipSuccess) {
printf("kernel launch failed\n");
}
hipDeviceSynchronize();
// TODO: optimize this -- copying the whole board back every
// generation just for printing is expensive.
if (prints_enabled == 1) {
hipMemcpy(grid, gpu_grid_2, dimension * dimension * sizeof(char), hipMemcpyDeviceToHost);
printGrid(grid, dimension);
}
hipMemcpyFromSymbol(&local_diff, diff, sizeof(int), 0, hipMemcpyDeviceToHost);
// printf("local_diff %d\n", local_diff);
/* If there are no differences between two generations
* OR if the next generation is 0 */
if(local_diff == 0) {
break;
}
// Ping-pong the buffers: this generation's output is the next input.
char *temp = gpu_grid_1;
gpu_grid_1 = gpu_grid_2;
gpu_grid_2 = temp;
generation++;
}
gettimeofday(&time_2, 0);
double time = (1000000.0 * (time_2.tv_sec - time_1.tv_sec) + time_2.tv_usec - time_1.tv_usec) / 1000000;
printf("time elapsed: %lf\n", time);
hipFree(gpu_grid_1);
hipFree(gpu_grid_2);
freeGrid(&grid);
}
__global__ void kernel(char *grid_1, char *grid_2, int dimension) {
    /* One thread per cell. The launch rounds the board up to whole 16x16
     * blocks, so guard out-of-range threads instead of letting them wrap
     * around (as the old modulo indexing did) and redundantly recompute
     * cells / double-count diff. */
    int j = blockIdx.x * blockDim.x + threadIdx.x; /* column */
    int i = blockIdx.y * blockDim.y + threadIdx.y; /* row    */
    if (i >= dimension || j >= dimension) {
        return;
    }
    int idx = i * dimension + j;
    /* Toroidal (wrap-around) neighbourhood offsets. */
    int top_offset = ((i + dimension - 1) % dimension) * dimension;
    int bot_offset = ((i + 1) % dimension) * dimension;
    int right_offset = (j + 1) % dimension;
    int left_offset = (j - 1 + dimension) % dimension;
    /* Count the 8 neighbours; cells are stored as 0 (dead) or 1 (alive). */
    int alive_neighbors = 0;
    alive_neighbors += grid_1[top_offset + left_offset];
    alive_neighbors += grid_1[top_offset + j];
    alive_neighbors += grid_1[top_offset + right_offset];
    alive_neighbors += grid_1[i * dimension + right_offset];
    alive_neighbors += grid_1[bot_offset + right_offset];
    alive_neighbors += grid_1[bot_offset + j];
    alive_neighbors += grid_1[bot_offset + left_offset];
    alive_neighbors += grid_1[i * dimension + left_offset];
    int status = grid_1[idx];
    /* Conway's rules: a dead cell with exactly 3 neighbours is born; a live
     * cell survives with 2 or 3 neighbours, otherwise it dies. */
    if (status == 0) {
        grid_2[idx] = (alive_neighbors == 3) ? 1 : 0;
    } else {
        grid_2[idx] = (alive_neighbors < 2 || alive_neighbors > 3) ? 0 : 1;
    }
    /* Signal that the board is still evolving. The unsynchronised increment
     * is deliberate (races only lose counts): the host merely tests
     * diff != 0, and any writer leaves diff >= 1. */
    if (grid_1[idx] != grid_2[idx] && grid_2[idx] != 0) {
        diff += 1;
    }
}
| 269781c3ea4bde44cfc99368ade7d9aedddc5ade.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include "execute.h"
/* This variable is used to check if there should be next generation */
__device__ int diff = 0;
/* Runs the Game of Life: builds the board, iterates up to `loops`
 * generations on the GPU (ping-ponging between two device buffers), and
 * stops early once a generation produces no newly-alive cells. */
void execute(int dimension, int loops, char *input_file, int prints_enabled) {
    int local_diff;
    char *grid, *gpu_grid_1, *gpu_grid_2;
    struct timeval time_1, time_2;
    gettimeofday(&time_1, 0);
    createGrid(&grid, dimension);
    /* Either load an initial board from a file or generate one. */
    if (input_file != NULL) {
        readGrid(grid, input_file, dimension);
    } else {
        initGrid(grid, dimension);
    }
    if (prints_enabled == 1) {
        /* Snapshots go into ./outputs; an already-existing directory is fine. */
        int dir_stat = mkdir("outputs", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
        if (dir_stat != 0 && errno != EEXIST) {
            printf("mkdir error %s\n", strerror(errno));
            exit(EXIT_FAILURE);
        }
        printGrid(grid, dimension);
    }
    cudaMalloc((void **) &gpu_grid_1, dimension * dimension * sizeof(char));
    cudaMalloc((void **) &gpu_grid_2, dimension * dimension * sizeof(char));
    cudaMemcpy(gpu_grid_1, grid, dimension * dimension * sizeof(char), cudaMemcpyHostToDevice);
    /* Kernel invocation: one thread per cell, grid rounded up to 16x16 tiles */
    dim3 dimBlock(16, 16);
    dim3 dimGrid;
    dimGrid.x = (dimension + dimBlock.x - 1) / dimBlock.x;
    dimGrid.y = (dimension + dimBlock.y - 1) / dimBlock.y;
    int generation = 1;
    while (generation <= loops) {
        if (prints_enabled == 1) {
            printf("Generation: %d\n", generation);
        }
        /* Reset the device-side change counter before each step. */
        local_diff = 0;
        cudaMemcpyToSymbol(diff, &local_diff, sizeof(int), 0, cudaMemcpyHostToDevice);
        kernel<<<dimGrid, dimBlock>>>(gpu_grid_1, gpu_grid_2, dimension);
        if (cudaGetLastError() != cudaSuccess) {
            printf("kernel launch failed\n");
        }
        /* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
         * the documented drop-in replacement with identical semantics. */
        cudaDeviceSynchronize();
        /* TODO: optimize this -- copying the whole board back every
         * generation just for printing is expensive. */
        if (prints_enabled == 1) {
            cudaMemcpy(grid, gpu_grid_2, dimension * dimension * sizeof(char), cudaMemcpyDeviceToHost);
            printGrid(grid, dimension);
        }
        cudaMemcpyFromSymbol(&local_diff, diff, sizeof(int), 0, cudaMemcpyDeviceToHost);
        /* Stop when no cell came alive this generation (board is static
         * or has died out). */
        if (local_diff == 0) {
            break;
        }
        /* Ping-pong the buffers: this generation's output is the next input. */
        char *temp = gpu_grid_1;
        gpu_grid_1 = gpu_grid_2;
        gpu_grid_2 = temp;
        generation++;
    }
    gettimeofday(&time_2, 0);
    double time = (1000000.0 * (time_2.tv_sec - time_1.tv_sec) + time_2.tv_usec - time_1.tv_usec) / 1000000;
    printf("time elapsed: %lf\n", time);
    cudaFree(gpu_grid_1);
    cudaFree(gpu_grid_2);
    freeGrid(&grid);
}
__global__ void kernel(char *grid_1, char *grid_2, int dimension) {
    /* One thread per cell. The launch rounds the board up to whole 16x16
     * blocks, so guard out-of-range threads instead of letting them wrap
     * around (as the old modulo indexing did) and redundantly recompute
     * cells / double-count diff. */
    int j = blockIdx.x * blockDim.x + threadIdx.x; /* column */
    int i = blockIdx.y * blockDim.y + threadIdx.y; /* row    */
    if (i >= dimension || j >= dimension) {
        return;
    }
    int idx = i * dimension + j;
    /* Toroidal (wrap-around) neighbourhood offsets. */
    int top_offset = ((i + dimension - 1) % dimension) * dimension;
    int bot_offset = ((i + 1) % dimension) * dimension;
    int right_offset = (j + 1) % dimension;
    int left_offset = (j - 1 + dimension) % dimension;
    /* Count the 8 neighbours; cells are stored as 0 (dead) or 1 (alive). */
    int alive_neighbors = 0;
    alive_neighbors += grid_1[top_offset + left_offset];
    alive_neighbors += grid_1[top_offset + j];
    alive_neighbors += grid_1[top_offset + right_offset];
    alive_neighbors += grid_1[i * dimension + right_offset];
    alive_neighbors += grid_1[bot_offset + right_offset];
    alive_neighbors += grid_1[bot_offset + j];
    alive_neighbors += grid_1[bot_offset + left_offset];
    alive_neighbors += grid_1[i * dimension + left_offset];
    int status = grid_1[idx];
    /* Conway's rules: a dead cell with exactly 3 neighbours is born; a live
     * cell survives with 2 or 3 neighbours, otherwise it dies. */
    if (status == 0) {
        grid_2[idx] = (alive_neighbors == 3) ? 1 : 0;
    } else {
        grid_2[idx] = (alive_neighbors < 2 || alive_neighbors > 3) ? 0 : 1;
    }
    /* Signal that the board is still evolving. The unsynchronised increment
     * is deliberate (races only lose counts): the host merely tests
     * diff != 0, and any writer leaves diff >= 1. */
    if (grid_1[idx] != grid_2[idx] && grid_2[idx] != 0) {
        diff += 1;
    }
}
|
15fe11774de1f63f81cbcfd9ee05456dac9f4156.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/************************************************************************************
* This file is part of cudatest. *
* *
* MIT License *
* *
* Copyright (c) 2018 Ivo Filot <i.a.w.filot@tue.nl> *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all *
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
* *
************************************************************************************/
#include <iostream>
#include "add.h"
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y) {
    // y[i] <- x[i] + y[i] for every i in [0, n). Grid-stride loop: each
    // thread handles every `step`-th element, so any launch geometry
    // covers the whole array.
    const int step = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += step) {
        y[idx] += x[idx];
    }
}
// Demo driver: adds two 1M-element unified-memory vectors on the GPU via the
// grid-stride `add` kernel and prints the maximum deviation from the expected
// result (3.0f). Always returns 0, even if the HIP calls report errors.
int run_cuda() {
int N = 1<<20; // 1M elements
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize; // ceil(N / blockSize)
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// errSync reports launch-configuration errors; errAsync (returned by the
// blocking synchronize) reports errors raised while the kernel executed.
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess) {
printf("Sync kernel error: %s\n", hipGetErrorString(errSync));
}
if (errAsync != hipSuccess) {
printf("Async kernel error: %s\n", hipGetErrorString(errAsync));
}
// Wait for GPU to finish before accessing on host
// (redundant after the synchronize above, but harmless)
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++){
maxError = fmax(maxError, fabs(y[i]-3.0f));
}
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| 15fe11774de1f63f81cbcfd9ee05456dac9f4156.cu | /************************************************************************************
* This file is part of cudatest. *
* *
* MIT License *
* *
* Copyright (c) 2018 Ivo Filot <i.a.w.filot@tue.nl> *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all *
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
* *
************************************************************************************/
#include <iostream>
#include "add.h"
// Kernel function to add the elements of two arrays
// Element-wise vector addition on the GPU: y[i] = x[i] + y[i] for i in [0, n).
// Uses a grid-stride loop, so any <<<blocks, threads>>> configuration covers
// all n elements regardless of how the grid divides the data.
__global__
void add(int n, float *x, float *y) {
    const int start = blockIdx.x * blockDim.x + threadIdx.x;  // this thread's first element
    const int step  = gridDim.x * blockDim.x;                 // total threads in the grid
    for (int i = start; i < n; i += step)
        y[i] = x[i] + y[i];
}
// Driver for the grid-stride "add" kernel.
// Allocates two managed arrays of 2^20 floats, initializes x=1.0f and y=2.0f on
// the host, launches add() so that y becomes x+y, then verifies on the host
// that every element of y equals 3.0f, printing the maximum deviation.
// Returns 0 on success; returns 1 if a managed allocation fails (previously the
// failure was unchecked and the host wrote through an invalid pointer).
int run_cuda() {
    int N = 1 << 20;      // 1M elements
    float *x = nullptr;
    float *y = nullptr;

    // Allocate Unified Memory - accessible from both CPU and GPU.
    // Check each allocation: cudaMallocManaged can fail (e.g. out of memory).
    cudaError_t errAlloc = cudaMallocManaged(&x, N * sizeof(float));
    if (errAlloc != cudaSuccess) {
        printf("cudaMallocManaged(x) error: %s\n", cudaGetErrorString(errAlloc));
        return 1;
    }
    errAlloc = cudaMallocManaged(&y, N * sizeof(float));
    if (errAlloc != cudaSuccess) {
        printf("cudaMallocManaged(y) error: %s\n", cudaGetErrorString(errAlloc));
        cudaFree(x);
        return 1;
    }

    // Initialize x and y arrays on the host (managed memory is host-visible).
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Run kernel on 1M elements on the GPU: enough 256-thread blocks to cover
    // N (ceil-division); the kernel's grid-stride loop tolerates any config.
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    add<<<numBlocks, blockSize>>>(N, x, y);

    // cudaGetLastError catches launch-configuration errors; the single
    // cudaDeviceSynchronize both surfaces asynchronous execution errors and
    // guarantees the GPU has finished before the host reads y below.
    // (The original code synchronized a second time redundantly.)
    cudaError_t errSync = cudaGetLastError();
    cudaError_t errAsync = cudaDeviceSynchronize();
    if (errSync != cudaSuccess) {
        printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
    }
    if (errAsync != cudaSuccess) {
        printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
    }

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++) {
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    }
    std::cout << "Max error: " << maxError << std::endl;

    // Free managed memory.
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
769a38cf37d13d61a9de7cab856e0467b5afc0bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Macro.h"
#include "CUFLU.h"
#if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == RTVD )
// check before compiling anything else
#if ( NCOMP_PASSIVE != 0 )
# error : RTVD scheme does NOT support passive scalars !!
#endif
#include "CUFLU_Shared_FluUtility.cu"
#include "CUDA_ConstMemory.h"
#define to1D1(z,y,x) ( __umul24(z, FLU_NXT*FLU_NXT) + __umul24(y, FLU_NXT) + x )
#define to1D2(z,y,x) ( __umul24(z-FLU_GHOST_SIZE, PS2*PS2) + __umul24(y-FLU_GHOST_SIZE, PS2) + x-FLU_GHOST_SIZE )
static __device__ void CUFLU_Advance( real g_Fluid_In [][5][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][5][ CUBE(PS2) ],
real g_Flux[][9][5][ SQR(PS2) ],
const real dt, const real _dh, const bool StoreFlux,
const int j_gap, const int k_gap, real s_cu[][5][FLU_NXT],
real s_cw[][5][FLU_NXT], real s_flux[][5][FLU_NXT], real s_RLflux[][5][FLU_NXT],
const bool FinalOut, const int XYZ,
const real MinDens, const real MinPres, const real MinEint,
const EoS_t *EoS );
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_FluidSolver_RTVD
// Description : GPU fluid solver based on the relaxing TVD (RTVD) scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The three-dimensional evolution is achieved by using the dimensional-split method
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output fluxes
// g_Corner : Global memory array storing the physical corner coordinates of each patch group (USELESS CURRENTLY)
// g_Pot_USG : Global memory array storing the input potential for UNSPLIT_GRAVITY (NOT SUPPORTED in RTVD)
// dt : Time interval to advance solution
// _dh : 1 / grid size
// StoreFlux : true --> store the coarse-fine fluxes
// XYZ : true : x->y->z ( forward sweep)
// false : z->y->x (backward sweep)
// MinDens : Density floor
// MinPres : Pressure floor
// MinEint : Internal energy floor
// EoS : EoS object
//-------------------------------------------------------------------------------------------------------
// Kernel entry point: performs one full RTVD update by dimensional splitting,
// invoking CUFLU_Advance once per direction (x/y/z). Note that g_Corner and
// g_Pot_USG are accepted for interface compatibility but are not used here.
__global__ void CUFLU_FluidSolver_RTVD(
real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ],
const double g_Corner[][3],
const real g_Pot_USG[][ CUBE(USG_NXT_F) ],
const real dt, const real _dh, const bool StoreFlux,
const bool XYZ, const real MinDens, const real MinPres, const real MinEint,
const EoS_t EoS )
{
// per-block shared-memory workspaces, reused by all three directional sweeps
__shared__ real s_cu [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
__shared__ real s_cw [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
__shared__ real s_flux [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
__shared__ real s_RLflux[FLU_BLOCK_SIZE_Y][5][FLU_NXT];
if ( XYZ )
{
// forward sweep order: x (XYZ=0) -> y (XYZ=3) -> z (XYZ=6);
// the (j_gap, k_gap) arguments widen by FLU_GHOST_SIZE after each sweep so
// later sweeps skip boundary columns, and only the last sweep (FinalOut=true)
// writes the result to g_Fluid_Out
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, 0,
s_cu, s_cw, s_flux, s_RLflux, false, 0, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, 0,
s_cu, s_cw, s_flux, s_RLflux, false, 3, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
s_cu, s_cw, s_flux, s_RLflux, true, 6, MinDens, MinPres, MinEint, &EoS );
}
else
{
// backward sweep order: z (XYZ=6) -> y (XYZ=3) -> x (XYZ=0)
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, 0,
s_cu, s_cw, s_flux, s_RLflux, false, 6, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, FLU_GHOST_SIZE,
s_cu, s_cw, s_flux, s_RLflux, false, 3, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
s_cu, s_cw, s_flux, s_RLflux, true, 0, MinDens, MinPres, MinEint, &EoS );
}
} // FUNCTION : CUFLU_FluidSolver_RTVD
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_Advance
// Description : GPU device function, which performs a one-dimensional sweep based on the TVD scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The direction of the one dimensional sweep is determined by the input parameter "XYZ"
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output fluxes
// dt : Time interval to advance solution
// _dh : 1 / grid size
// StoreFlux : true --> store the coarse-fine fluxes
// j_gap : Number of useless grids in each side in the j direction (j may not be equal to y)
// k_gap : Number of useless grids in each side in the k direction (k may not be equal to z)
// s_cu : Shared memory array storing the normal flux
// s_cw : Shared memory array storing the auxiliary flux
// s_flux : Shared memory array storing the final flux used to update the fluid variables
// s_RLflux : Shared memory array storing the left/right-moving flux
// XYZ : 0 : Update the solution in the x direction
// 3 : Update the solution in the y direction
// 6 : Update the solution in the z direction
// --> This parameter is also used to determine the place to store the output fluxes
// MinDens : Density floor
// MinPres : Pressure floor
// MinEint : Internal energy floor
// EoS : EoS object
//-------------------------------------------------------------------------------------------------------
__device__ void CUFLU_Advance( real g_Fluid_In [][5][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][5][ CUBE(PS2) ],
real g_Flux[][9][5][ SQR(PS2) ],
const real dt, const real _dh, const bool StoreFlux,
const int j_gap, const int k_gap, real s_cu[][5][FLU_NXT],
real s_cw[][5][FLU_NXT], real s_flux[][5][FLU_NXT], real s_RLflux[][5][FLU_NXT],
const bool FinalOut, const int XYZ,
const real MinDens, const real MinPres, const real MinEint,
const EoS_t *EoS )
{
// thread indexing: tx (= i) runs along the 1D sweep direction within one
// column of the patch; ty selects which (j,k) column this thread row updates
const uint bx = blockIdx.x;
const uint tx = threadIdx.x;
const uint ty = threadIdx.y;
const uint dj = blockDim.y;
const uint size_j = FLU_NXT - (j_gap<<1);
const uint size_k = FLU_NXT - (k_gap<<1);
const uint NColumn = __umul24( size_j, size_k );
const uint i = tx; // (i,j) the element in shared memory under evaluation
const uint ip = i+1;
const uint im = i-1;
uint j = j_gap + ty%size_j;
uint k = k_gap + ty/size_j;
uint Column0 = 0; // the total number of columns that have been updated
const uint j_end = FLU_NXT - j_gap;
const uint k_end = FLU_NXT - k_gap;
const real dt_half = (real)0.5*dt;
const real *Passive = NULL; // RTVD does not support passive scalars
bool RuleOut = false;
const bool CheckMinPres_Yes = true;
real _rho, vx, p, c, Temp, Fluid[5], Fluid_half[5];
int ID1, ID2, ID3, Comp[5], delta_k;
// set the order of component for update in different directions
// (the momentum component along the sweep direction is swapped into slot 1)
switch ( XYZ )
{
case 0: Comp[0] = 0; Comp[1] = 1; Comp[2] = 2; Comp[3] = 3; Comp[4] = 4; break;
case 3: Comp[0] = 0; Comp[1] = 2; Comp[2] = 1; Comp[3] = 3; Comp[4] = 4; break;
case 6: Comp[0] = 0; Comp[1] = 3; Comp[2] = 2; Comp[3] = 1; Comp[4] = 4; break;
}
// start the TVD scheme: each iteration of this loop updates one column per
// thread row; all threads must reach every __syncthreads() below together
do
{
// determine the array indices for updating in different directions
switch ( XYZ )
{
case 0: ID1 = to1D1( k, j, i ); break;
case 3: ID1 = to1D1( k, i, j ); break;
case 6: ID1 = to1D1( i, k, j ); break;
}
// load data into per-thread registers
for (int v=0; v<5; v++) Fluid[v] = g_Fluid_In[bx][ Comp[v] ][ID1];
// a. Evaluate the half-step values of fluid variables
//-----------------------------------------------------------------------------
// (a1). set variables defined in the center of cell
_rho = (real)1.0 / Fluid[0];
vx = _rho * Fluid[1];
p = Hydro_Con2Pres( Fluid[0], Fluid[1], Fluid[2], Fluid[3], Fluid[4], Passive,
CheckMinPres_Yes, MinPres, NULL_REAL, EoS->DensEint2Pres_FuncPtr,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, NULL );
# ifdef CHECK_UNPHYSICAL_IN_FLUID
Hydro_CheckUnphysical( UNPHY_MODE_SING, &p , "pressure", ERROR_INFO, UNPHY_VERBOSE );
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid[0], "density" , ERROR_INFO, UNPHY_VERBOSE );
# endif
// c = |vx| + sound speed : the "freezing speed" of the relaxing TVD scheme
c = FABS( vx ) + SQRT( EoS->DensPres2CSqr_FuncPtr( Fluid[0], p, Passive, EoS->AuxArrayDevPtr_Flt,
EoS->AuxArrayDevPtr_Int, EoS->Table ) );
s_cw[ty][0][i] = Fluid[1];
s_cw[ty][1][i] = Fluid[1]*vx + p;
s_cw[ty][2][i] = Fluid[2]*vx;
s_cw[ty][3][i] = Fluid[3]*vx;
s_cw[ty][4][i] = ( Fluid[4]+p )*vx;
s_cu[ty][0][i] = c*Fluid[0];
s_cu[ty][1][i] = c*Fluid[1];
s_cu[ty][2][i] = c*Fluid[2];
s_cu[ty][3][i] = c*Fluid[3];
s_cu[ty][4][i] = c*Fluid[4];
__syncthreads();
// (a2). set flux defined in the right-hand surface of cell by the upwind scheme
if ( i < FLU_NXT-1 )
{
for (int v=0; v<5; v++)
s_flux[ty][v][i] = (real)0.5*( ( s_cu[ty][v][i ]+s_cw[ty][v][i ] ) -
( s_cu[ty][v][ip]-s_cw[ty][v][ip] ) );
}
__syncthreads();
// (a3). evaluate the intermediate values (u_half)
// if ( i > 0 )
if ( i > 0 && i < FLU_NXT-1 )
{
for (int v=0; v<5; v++) Fluid_half[v] = Fluid[v] - _dh*dt_half*( s_flux[ty][v][i] - s_flux[ty][v][im] ) ;
// apply density and internal energy floors
Fluid_half[0] = FMAX( Fluid_half[0], MinDens );
Fluid_half[4] = Hydro_CheckMinEintInEngy( Fluid_half[0], Fluid_half[1], Fluid_half[2], Fluid_half[3], Fluid_half[4],
MinEint, NULL_REAL );
}
// Evaluate the full-step values of fluid variables
//-----------------------------------------------------------------------------
// (b1). reset variables defined in the center of cell at the intermediate state
if ( i > 0 && i < FLU_NXT-1 )
{
_rho = (real)1.0 / Fluid_half[0];
vx = _rho * Fluid_half[1];
p = Hydro_Con2Pres( Fluid_half[0], Fluid_half[1], Fluid_half[2], Fluid_half[3], Fluid_half[4], Passive,
CheckMinPres_Yes, MinPres, NULL_REAL, EoS->DensEint2Pres_FuncPtr,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, NULL );
# ifdef CHECK_UNPHYSICAL_IN_FLUID
Hydro_CheckUnphysical( UNPHY_MODE_SING, &p , "pressure", ERROR_INFO, UNPHY_VERBOSE );
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid_half[0], "density" , ERROR_INFO, UNPHY_VERBOSE );
# endif
c = FABS( vx ) + SQRT( EoS->DensPres2CSqr_FuncPtr( Fluid_half[0], p, Passive, EoS->AuxArrayDevPtr_Flt,
EoS->AuxArrayDevPtr_Int, EoS->Table ) );
s_cw[ty][0][i] = Fluid_half[1];
s_cw[ty][1][i] = Fluid_half[1]*vx + p;
s_cw[ty][2][i] = Fluid_half[2]*vx;
s_cw[ty][3][i] = Fluid_half[3]*vx;
s_cw[ty][4][i] = ( Fluid_half[4]+p )*vx;
s_cu[ty][0][i] = c*Fluid_half[0];
s_cu[ty][1][i] = c*Fluid_half[1];
s_cu[ty][2][i] = c*Fluid_half[2];
s_cu[ty][3][i] = c*Fluid_half[3];
s_cu[ty][4][i] = c*Fluid_half[4];
} // if ( i > 0 && i < FLU_NXT-1 )
// (b2). set the right-moving flux defined in the right-hand surface by the TVD scheme
if ( i > 0 && i < FLU_NXT-2 )
{
for (int v=0; v<5; v++) s_RLflux[ty][v][i] = (real)0.5*( s_cu[ty][v][i] + s_cw[ty][v][i] );
}
__syncthreads();
if ( i > 1 && i < FLU_NXT-3 )
{
for (int v=0; v<5; v++)
{
s_flux[ty][v][i] = s_RLflux[ty][v][i];
// flux limiter: add the second-order correction only when the two
// neighboring differences have the same sign (monotonicity preserved)
Temp = ( s_RLflux[ty][v][ip]-s_RLflux[ty][v][i ] )
* ( s_RLflux[ty][v][i ]-s_RLflux[ty][v][im] );
if ( Temp > (real)0.0 )
s_flux[ty][v][i] += Temp / ( s_RLflux[ty][v][ip]-s_RLflux[ty][v][im] );
}
}
__syncthreads();
// (b3). set the left-moving flux defined in the left-hand surface by the TVD scheme, get the total flux
// if ( i < FLU_NXT-2 )
if ( i > 0 && i < FLU_NXT-2 )
{
for (int v=0; v<5; v++) s_RLflux[ty][v][i] = (real)0.5*( s_cu[ty][v][ip] - s_cw[ty][v][ip] );
}
__syncthreads();
if ( i > 1 && i < FLU_NXT-3 )
{
for (int v=0; v<5; v++)
{
s_flux[ty][v][i] -= s_RLflux[ty][v][i];
Temp = ( s_RLflux[ty][v][im]-s_RLflux[ty][v][i ] )
* ( s_RLflux[ty][v][i ]-s_RLflux[ty][v][ip] );
if ( Temp > (real)0.0 )
s_flux[ty][v][i] -= Temp / ( s_RLflux[ty][v][im]-s_RLflux[ty][v][ip] );
}
}
__syncthreads();
// (b4). advance fluid by one full time-step
// if ( i > 2 )
// if ( i > 2 && i < FLU_NXT-3 )
if ( i > 2 && i < FLU_NXT-3 && RuleOut == false )
{
for (int v=0; v<5; v++) Fluid[v] -= _dh*dt*( s_flux[ty][v][i] - s_flux[ty][v][im] ) ;
// apply density and internal energy floors
Fluid[0] = FMAX( Fluid[0], MinDens );
Fluid[4] = Hydro_CheckMinEintInEngy( Fluid[0], Fluid[1], Fluid[2], Fluid[3], Fluid[4],
MinEint, NULL_REAL );
// check negative density and energy
# ifdef CHECK_UNPHYSICAL_IN_FLUID
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid[4], "energy" , ERROR_INFO, UNPHY_VERBOSE );
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid[0], "density", ERROR_INFO, UNPHY_VERBOSE );
# endif
// store the updated data back to the global memory
// (FinalOut selects the output array: the last sweep writes g_Fluid_Out,
// earlier sweeps write back in place to g_Fluid_In)
if ( FinalOut )
{
switch ( XYZ )
{
case 0: ID2 = to1D2( k, j, i ); break;
case 3: ID2 = to1D2( k, i, j ); break;
case 6: ID2 = to1D2( i, k, j ); break;
}
for (int v=0; v<5; v++) g_Fluid_Out[bx][ Comp[v] ][ID2] = Fluid[v];
}
else
for (int v=0; v<5; v++) g_Fluid_In [bx][ Comp[v] ][ID1] = Fluid[v];
}
// (b5). save the flux required by the flux-correction operation
if ( StoreFlux )
if ( k >= FLU_GHOST_SIZE && k < FLU_NXT-FLU_GHOST_SIZE )
if ( j >= FLU_GHOST_SIZE && j < FLU_NXT-FLU_GHOST_SIZE )
if ( i == 0 )
{
ID3 = __umul24( k-FLU_GHOST_SIZE, PS2 ) + (j-FLU_GHOST_SIZE);
for (int v=0; v<5; v++)
{
g_Flux[bx][XYZ+0][v][ID3] = s_flux[ty][ Comp[v] ][ 2];
g_Flux[bx][XYZ+1][v][ID3] = s_flux[ty][ Comp[v] ][FLU_NXT/2-1];
g_Flux[bx][XYZ+2][v][ID3] = s_flux[ty][ Comp[v] ][FLU_NXT - 4];
}
}
// reset the target array indices
j += dj;
if ( j >= j_end )
{
delta_k = ( j - j_end )/size_j + 1;
k += delta_k;
j -= __umul24( size_j, delta_k );
}
Column0 += dj;
// if the index k exceeds the maximum allowed value --> reset (j,k) to harmless values and wait for other
// threads (all threads must exit the while loop "at the same time", otherwise __syncthreads will fail !!)
if ( k >= k_end )
{
j = 0;
k = 0;
RuleOut = true;
}
__syncthreads();
}
while ( Column0 < NColumn );
} // FUNCTION : CUFLU_Advance
#endif // #if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == RTVD )
| 769a38cf37d13d61a9de7cab856e0467b5afc0bc.cu | #include "Macro.h"
#include "CUFLU.h"
#if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == RTVD )
// check before compiling anything else
#if ( NCOMP_PASSIVE != 0 )
# error : RTVD scheme does NOT support passive scalars !!
#endif
#include "CUFLU_Shared_FluUtility.cu"
#include "CUDA_ConstMemory.h"
#define to1D1(z,y,x) ( __umul24(z, FLU_NXT*FLU_NXT) + __umul24(y, FLU_NXT) + x )
#define to1D2(z,y,x) ( __umul24(z-FLU_GHOST_SIZE, PS2*PS2) + __umul24(y-FLU_GHOST_SIZE, PS2) + x-FLU_GHOST_SIZE )
static __device__ void CUFLU_Advance( real g_Fluid_In [][5][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][5][ CUBE(PS2) ],
real g_Flux[][9][5][ SQR(PS2) ],
const real dt, const real _dh, const bool StoreFlux,
const int j_gap, const int k_gap, real s_cu[][5][FLU_NXT],
real s_cw[][5][FLU_NXT], real s_flux[][5][FLU_NXT], real s_RLflux[][5][FLU_NXT],
const bool FinalOut, const int XYZ,
const real MinDens, const real MinPres, const real MinEint,
const EoS_t *EoS );
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_FluidSolver_RTVD
// Description : GPU fluid solver based on the relaxing TVD (RTVD) scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The three-dimensional evolution is achieved by using the dimensional-split method
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output fluxes
// g_Corner : Global memory array storing the physical corner coordinates of each patch group (USELESS CURRENTLY)
// g_Pot_USG : Global memory array storing the input potential for UNSPLIT_GRAVITY (NOT SUPPORTED in RTVD)
// dt : Time interval to advance solution
// _dh : 1 / grid size
// StoreFlux : true --> store the coarse-fine fluxes
// XYZ : true : x->y->z ( forward sweep)
// false : z->y->x (backward sweep)
// MinDens : Density floor
// MinPres : Pressure floor
// MinEint : Internal energy floor
// EoS : EoS object
//-------------------------------------------------------------------------------------------------------
// Kernel entry point: performs one full RTVD update by dimensional splitting,
// invoking CUFLU_Advance once per direction (x/y/z). Note that g_Corner and
// g_Pot_USG are accepted for interface compatibility but are not used here.
__global__ void CUFLU_FluidSolver_RTVD(
real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ],
const double g_Corner[][3],
const real g_Pot_USG[][ CUBE(USG_NXT_F) ],
const real dt, const real _dh, const bool StoreFlux,
const bool XYZ, const real MinDens, const real MinPres, const real MinEint,
const EoS_t EoS )
{
// per-block shared-memory workspaces, reused by all three directional sweeps
__shared__ real s_cu [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
__shared__ real s_cw [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
__shared__ real s_flux [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
__shared__ real s_RLflux[FLU_BLOCK_SIZE_Y][5][FLU_NXT];
if ( XYZ )
{
// forward sweep order: x (XYZ=0) -> y (XYZ=3) -> z (XYZ=6);
// the (j_gap, k_gap) arguments widen by FLU_GHOST_SIZE after each sweep so
// later sweeps skip boundary columns, and only the last sweep (FinalOut=true)
// writes the result to g_Fluid_Out
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, 0,
s_cu, s_cw, s_flux, s_RLflux, false, 0, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, 0,
s_cu, s_cw, s_flux, s_RLflux, false, 3, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
s_cu, s_cw, s_flux, s_RLflux, true, 6, MinDens, MinPres, MinEint, &EoS );
}
else
{
// backward sweep order: z (XYZ=6) -> y (XYZ=3) -> x (XYZ=0)
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, 0,
s_cu, s_cw, s_flux, s_RLflux, false, 6, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, FLU_GHOST_SIZE,
s_cu, s_cw, s_flux, s_RLflux, false, 3, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
s_cu, s_cw, s_flux, s_RLflux, true, 0, MinDens, MinPres, MinEint, &EoS );
}
} // FUNCTION : CUFLU_FluidSolver_RTVD
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_Advance
// Description : GPU device function, which performs a one-dimensional sweep based on the TVD scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The direction of the one dimensional sweep is determined by the input parameter "XYZ"
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output fluxes
// dt : Time interval to advance solution
// _dh : 1 / grid size
// StoreFlux : true --> store the coarse-fine fluxes
// j_gap : Number of useless grids in each side in the j direction (j may not be equal to y)
// k_gap : Number of useless grids in each side in the k direction (k may not be equal to z)
// s_cu : Shared memory array storing the normal flux
// s_cw : Shared memory array storing the auxiliary flux
// s_flux : Shared memory array storing the final flux used to update the fluid variables
// s_RLflux : Shared memory array storing the left/right-moving flux
// XYZ : 0 : Update the solution in the x direction
// 3 : Update the solution in the y direction
// 6 : Update the solution in the z direction
// --> This parameter is also used to determine the place to store the output fluxes
// MinDens : Density floor
// MinPres : Pressure floor
// MinEint : Internal energy floor
// EoS : EoS object
//-------------------------------------------------------------------------------------------------------
__device__ void CUFLU_Advance( real g_Fluid_In [][5][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][5][ CUBE(PS2) ],
real g_Flux[][9][5][ SQR(PS2) ],
const real dt, const real _dh, const bool StoreFlux,
const int j_gap, const int k_gap, real s_cu[][5][FLU_NXT],
real s_cw[][5][FLU_NXT], real s_flux[][5][FLU_NXT], real s_RLflux[][5][FLU_NXT],
const bool FinalOut, const int XYZ,
const real MinDens, const real MinPres, const real MinEint,
const EoS_t *EoS )
{
// thread indexing: tx (= i) runs along the 1D sweep direction within one
// column of the patch; ty selects which (j,k) column this thread row updates
const uint bx = blockIdx.x;
const uint tx = threadIdx.x;
const uint ty = threadIdx.y;
const uint dj = blockDim.y;
const uint size_j = FLU_NXT - (j_gap<<1);
const uint size_k = FLU_NXT - (k_gap<<1);
const uint NColumn = __umul24( size_j, size_k );
const uint i = tx; // (i,j) the element in shared memory under evaluation
const uint ip = i+1;
const uint im = i-1;
uint j = j_gap + ty%size_j;
uint k = k_gap + ty/size_j;
uint Column0 = 0; // the total number of columns that have been updated
const uint j_end = FLU_NXT - j_gap;
const uint k_end = FLU_NXT - k_gap;
const real dt_half = (real)0.5*dt;
const real *Passive = NULL; // RTVD does not support passive scalars
bool RuleOut = false;
const bool CheckMinPres_Yes = true;
real _rho, vx, p, c, Temp, Fluid[5], Fluid_half[5];
int ID1, ID2, ID3, Comp[5], delta_k;
// set the order of component for update in different directions
// (the momentum component along the sweep direction is swapped into slot 1)
switch ( XYZ )
{
case 0: Comp[0] = 0; Comp[1] = 1; Comp[2] = 2; Comp[3] = 3; Comp[4] = 4; break;
case 3: Comp[0] = 0; Comp[1] = 2; Comp[2] = 1; Comp[3] = 3; Comp[4] = 4; break;
case 6: Comp[0] = 0; Comp[1] = 3; Comp[2] = 2; Comp[3] = 1; Comp[4] = 4; break;
}
// start the TVD scheme: each iteration of this loop updates one column per
// thread row; all threads must reach every __syncthreads() below together
do
{
// determine the array indices for updating in different directions
switch ( XYZ )
{
case 0: ID1 = to1D1( k, j, i ); break;
case 3: ID1 = to1D1( k, i, j ); break;
case 6: ID1 = to1D1( i, k, j ); break;
}
// load data into per-thread registers
for (int v=0; v<5; v++) Fluid[v] = g_Fluid_In[bx][ Comp[v] ][ID1];
// a. Evaluate the half-step values of fluid variables
//-----------------------------------------------------------------------------
// (a1). set variables defined in the center of cell
_rho = (real)1.0 / Fluid[0];
vx = _rho * Fluid[1];
p = Hydro_Con2Pres( Fluid[0], Fluid[1], Fluid[2], Fluid[3], Fluid[4], Passive,
CheckMinPres_Yes, MinPres, NULL_REAL, EoS->DensEint2Pres_FuncPtr,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, NULL );
# ifdef CHECK_UNPHYSICAL_IN_FLUID
Hydro_CheckUnphysical( UNPHY_MODE_SING, &p , "pressure", ERROR_INFO, UNPHY_VERBOSE );
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid[0], "density" , ERROR_INFO, UNPHY_VERBOSE );
# endif
// c = |vx| + sound speed : the "freezing speed" of the relaxing TVD scheme
c = FABS( vx ) + SQRT( EoS->DensPres2CSqr_FuncPtr( Fluid[0], p, Passive, EoS->AuxArrayDevPtr_Flt,
EoS->AuxArrayDevPtr_Int, EoS->Table ) );
s_cw[ty][0][i] = Fluid[1];
s_cw[ty][1][i] = Fluid[1]*vx + p;
s_cw[ty][2][i] = Fluid[2]*vx;
s_cw[ty][3][i] = Fluid[3]*vx;
s_cw[ty][4][i] = ( Fluid[4]+p )*vx;
s_cu[ty][0][i] = c*Fluid[0];
s_cu[ty][1][i] = c*Fluid[1];
s_cu[ty][2][i] = c*Fluid[2];
s_cu[ty][3][i] = c*Fluid[3];
s_cu[ty][4][i] = c*Fluid[4];
__syncthreads();
// (a2). set flux defined in the right-hand surface of cell by the upwind scheme
if ( i < FLU_NXT-1 )
{
for (int v=0; v<5; v++)
s_flux[ty][v][i] = (real)0.5*( ( s_cu[ty][v][i ]+s_cw[ty][v][i ] ) -
( s_cu[ty][v][ip]-s_cw[ty][v][ip] ) );
}
__syncthreads();
// (a3). evaluate the intermediate values (u_half)
// if ( i > 0 )
if ( i > 0 && i < FLU_NXT-1 )
{
for (int v=0; v<5; v++) Fluid_half[v] = Fluid[v] - _dh*dt_half*( s_flux[ty][v][i] - s_flux[ty][v][im] ) ;
// apply density and internal energy floors
Fluid_half[0] = FMAX( Fluid_half[0], MinDens );
Fluid_half[4] = Hydro_CheckMinEintInEngy( Fluid_half[0], Fluid_half[1], Fluid_half[2], Fluid_half[3], Fluid_half[4],
MinEint, NULL_REAL );
}
// Evaluate the full-step values of fluid variables
//-----------------------------------------------------------------------------
// (b1). reset variables defined in the center of cell at the intermediate state
if ( i > 0 && i < FLU_NXT-1 )
{
_rho = (real)1.0 / Fluid_half[0];
vx = _rho * Fluid_half[1];
p = Hydro_Con2Pres( Fluid_half[0], Fluid_half[1], Fluid_half[2], Fluid_half[3], Fluid_half[4], Passive,
CheckMinPres_Yes, MinPres, NULL_REAL, EoS->DensEint2Pres_FuncPtr,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, NULL );
# ifdef CHECK_UNPHYSICAL_IN_FLUID
Hydro_CheckUnphysical( UNPHY_MODE_SING, &p , "pressure", ERROR_INFO, UNPHY_VERBOSE );
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid_half[0], "density" , ERROR_INFO, UNPHY_VERBOSE );
# endif
c = FABS( vx ) + SQRT( EoS->DensPres2CSqr_FuncPtr( Fluid_half[0], p, Passive, EoS->AuxArrayDevPtr_Flt,
EoS->AuxArrayDevPtr_Int, EoS->Table ) );
s_cw[ty][0][i] = Fluid_half[1];
s_cw[ty][1][i] = Fluid_half[1]*vx + p;
s_cw[ty][2][i] = Fluid_half[2]*vx;
s_cw[ty][3][i] = Fluid_half[3]*vx;
s_cw[ty][4][i] = ( Fluid_half[4]+p )*vx;
s_cu[ty][0][i] = c*Fluid_half[0];
s_cu[ty][1][i] = c*Fluid_half[1];
s_cu[ty][2][i] = c*Fluid_half[2];
s_cu[ty][3][i] = c*Fluid_half[3];
s_cu[ty][4][i] = c*Fluid_half[4];
} // if ( i > 0 && i < FLU_NXT-1 )
// (b2). set the right-moving flux defined in the right-hand surface by the TVD scheme
if ( i > 0 && i < FLU_NXT-2 )
{
for (int v=0; v<5; v++) s_RLflux[ty][v][i] = (real)0.5*( s_cu[ty][v][i] + s_cw[ty][v][i] );
}
__syncthreads();
if ( i > 1 && i < FLU_NXT-3 )
{
for (int v=0; v<5; v++)
{
s_flux[ty][v][i] = s_RLflux[ty][v][i];
// flux limiter: add the second-order correction only when the two
// neighboring differences have the same sign (monotonicity preserved)
Temp = ( s_RLflux[ty][v][ip]-s_RLflux[ty][v][i ] )
* ( s_RLflux[ty][v][i ]-s_RLflux[ty][v][im] );
if ( Temp > (real)0.0 )
s_flux[ty][v][i] += Temp / ( s_RLflux[ty][v][ip]-s_RLflux[ty][v][im] );
}
}
__syncthreads();
// (b3). set the left-moving flux defined in the left-hand surface by the TVD scheme, get the total flux
// if ( i < FLU_NXT-2 )
if ( i > 0 && i < FLU_NXT-2 )
{
for (int v=0; v<5; v++) s_RLflux[ty][v][i] = (real)0.5*( s_cu[ty][v][ip] - s_cw[ty][v][ip] );
}
__syncthreads();
if ( i > 1 && i < FLU_NXT-3 )
{
for (int v=0; v<5; v++)
{
s_flux[ty][v][i] -= s_RLflux[ty][v][i];
Temp = ( s_RLflux[ty][v][im]-s_RLflux[ty][v][i ] )
* ( s_RLflux[ty][v][i ]-s_RLflux[ty][v][ip] );
if ( Temp > (real)0.0 )
s_flux[ty][v][i] -= Temp / ( s_RLflux[ty][v][im]-s_RLflux[ty][v][ip] );
}
}
__syncthreads();
// (b4). advance fluid by one full time-step
// if ( i > 2 )
// if ( i > 2 && i < FLU_NXT-3 )
if ( i > 2 && i < FLU_NXT-3 && RuleOut == false )
{
for (int v=0; v<5; v++) Fluid[v] -= _dh*dt*( s_flux[ty][v][i] - s_flux[ty][v][im] ) ;
// apply density and internal energy floors
Fluid[0] = FMAX( Fluid[0], MinDens );
Fluid[4] = Hydro_CheckMinEintInEngy( Fluid[0], Fluid[1], Fluid[2], Fluid[3], Fluid[4],
MinEint, NULL_REAL );
// check negative density and energy
# ifdef CHECK_UNPHYSICAL_IN_FLUID
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid[4], "energy" , ERROR_INFO, UNPHY_VERBOSE );
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid[0], "density", ERROR_INFO, UNPHY_VERBOSE );
# endif
// store the updated data back to the global memory
// (FinalOut selects the output array: the last sweep writes g_Fluid_Out,
// earlier sweeps write back in place to g_Fluid_In)
if ( FinalOut )
{
switch ( XYZ )
{
case 0: ID2 = to1D2( k, j, i ); break;
case 3: ID2 = to1D2( k, i, j ); break;
case 6: ID2 = to1D2( i, k, j ); break;
}
for (int v=0; v<5; v++) g_Fluid_Out[bx][ Comp[v] ][ID2] = Fluid[v];
}
else
for (int v=0; v<5; v++) g_Fluid_In [bx][ Comp[v] ][ID1] = Fluid[v];
}
// (b5). save the flux required by the flux-correction operation
if ( StoreFlux )
if ( k >= FLU_GHOST_SIZE && k < FLU_NXT-FLU_GHOST_SIZE )
if ( j >= FLU_GHOST_SIZE && j < FLU_NXT-FLU_GHOST_SIZE )
if ( i == 0 )
{
ID3 = __umul24( k-FLU_GHOST_SIZE, PS2 ) + (j-FLU_GHOST_SIZE);
for (int v=0; v<5; v++)
{
g_Flux[bx][XYZ+0][v][ID3] = s_flux[ty][ Comp[v] ][ 2];
g_Flux[bx][XYZ+1][v][ID3] = s_flux[ty][ Comp[v] ][FLU_NXT/2-1];
g_Flux[bx][XYZ+2][v][ID3] = s_flux[ty][ Comp[v] ][FLU_NXT - 4];
}
}
// reset the target array indices
j += dj;
if ( j >= j_end )
{
delta_k = ( j - j_end )/size_j + 1;
k += delta_k;
j -= __umul24( size_j, delta_k );
}
Column0 += dj;
// if the index k exceeds the maximum allowed value --> reset (j,k) to harmless values and wait for other
// threads (all threads must exit the while loop "at the same time", otherwise __syncthreads will fail !!)
if ( k >= k_end )
{
j = 0;
k = 0;
RuleOut = true;
}
__syncthreads();
}
while ( Column0 < NColumn );
} // FUNCTION : CUFLU_Advance
#endif // #if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == RTVD )
|
c9e5b438ce2b87a5a0d59bbcc0669ad25be4d595.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2011-2016, Robert Wang, email: robertwgh (at) gmail.com
All rights reserved. https://github.com/robertwgh/cuLDPC
Implementation of LDPC decoding algorithm.
The details of implementation can be found from the following papers:
1. Wang, G., Wu, M., Sun, Y., & Cavallaro, J. R. (2011, June). A massively parallel implementation of QC-LDPC decoder on GPU. In Application Specific Processors (SASP), 2011 IEEE 9th Symposium on (pp. 82-85). IEEE.
2. Wang, G., Wu, M., Yin, B., & Cavallaro, J. R. (2013, December). High throughput low latency LDPC decoding on GPU for SDR systems. In Global Conference on Signal and Information Processing (GlobalSIP), 2013 IEEE (pp. 1258-1261). IEEE.
The current release is close to the GlobalSIP2013 paper.
*/
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "LDPC.h"
#include "matrix.h"
#include "kernel.hip"
float sigma ;
int *info_bin ;
int main()
{
printf("GPU LDPC Decoder\r\nComputing...\r\n");
// For cnp kernel
#if MODE == WIMAX
const char h_element_count1[BLK_ROW] = {6, 7, 7, 6, 6, 7, 6, 6, 7, 6, 6, 6};
const char h_element_count2[BLK_COL] = {3, 3, 6, 3, 3, 6, 3, 6, 3, 6, 3, 6, \
3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};
#else
const char h_element_count1[BLK_ROW] = {7, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 8};
const char h_element_count2[BLK_COL] = {11,4, 3, 3,11, 3, 3, 3,11, 3, 3, 3, \
3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};
#endif
h_element h_compact1 [H_COMPACT1_COL][H_COMPACT1_ROW]; // for update dt, R
h_element h_element_temp;
// init the compact matrix
for(int i = 0; i < H_COMPACT1_COL; i++)
{
for(int j = 0; j < H_COMPACT1_ROW; j ++)
{
h_element_temp.x = 0;
h_element_temp.y = 0;
h_element_temp.value = -1;
h_element_temp.valid = 0;
h_compact1[i][j] = h_element_temp; // h[i][0-11], the same column
}
}
// scan the h matrix, and gengerate compact mode of h
for(int i = 0; i < BLK_ROW; i++)
{
int k = 0;
for(int j = 0; j < BLK_COL; j ++)
{
if(h_base[i][j] != -1)
{
h_element_temp.x = i;
h_element_temp.y = j;
h_element_temp.value = h_base[i][j];
h_element_temp.valid = 1;
h_compact1[k][i] = h_element_temp;
k++;
}
}
// printf("row %d, #element=%d\n", i, k);
}
// h_compact2
h_element h_compact2 [H_COMPACT2_ROW][H_COMPACT2_COL]; // for update llr
// init the compact matrix
for(int i = 0; i < H_COMPACT2_ROW; i++)
{
for(int j = 0; j < H_COMPACT2_COL; j ++)
{
h_element_temp.x = 0;
h_element_temp.y = 0;
h_element_temp.value = -1;
h_element_temp.valid = 0;
h_compact2[i][j] = h_element_temp;
}
}
for(int j = 0; j < BLK_COL; j++)
{
int k = 0;
for(int i = 0; i < BLK_ROW; i ++)
{
if(h_base[i][j] != -1)
{
// although h is transposed, the (x,y) is still (iBlkRow, iBlkCol)
h_element_temp.x = i;
h_element_temp.y = j;
h_element_temp.value = h_base[i][j];
h_element_temp.valid = 1;
h_compact2[k][j] = h_element_temp;
k++;
}
}
}
//int memorySize_h_base = BLK_ROW * BLK_COL * sizeof(int);
int memorySize_h_compact1 = H_COMPACT1_ROW * H_COMPACT1_COL * sizeof(h_element);
int memorySize_h_compact2 = H_COMPACT2_ROW * H_COMPACT2_COL * sizeof(h_element);
int memorySize_infobits = INFO_LEN * sizeof(int);
int memorySize_codeword = CODEWORD_LEN * sizeof(int);
int memorySize_llr = CODEWORD_LEN * sizeof(float);
info_bin = (int *) malloc(memorySize_infobits) ;
int *codeword = (int *) malloc(memorySize_codeword) ;
float *trans = (float *) malloc(memorySize_llr) ;
float *recv = (float *) malloc(memorySize_llr) ;
float *llr = (float *) malloc(memorySize_llr) ;
float rate = (float)0.5f;
//////////////////////////////////////////////////////////////////////////////////
// all the variables Starting with _gpu is used in host code and for cuda computation
int memorySize_infobits_gpu = MCW * CW * memorySize_infobits ;
int memorySize_llr_gpu = MCW * CW * CODEWORD_LEN * sizeof(float);
int memorySize_dt_gpu = MCW * CW * ROW * BLK_COL * sizeof(float);
int memorySize_R_gpu = MCW * CW * ROW * BLK_COL * sizeof(float);
int memorySize_hard_decision_gpu = MCW * CW * CODEWORD_LEN * sizeof(int);
int *info_bin_gpu;
float *llr_gpu;
int * hard_decision_gpu;
info_bin_gpu = (int *) malloc(memorySize_infobits_gpu);
hard_decision_gpu = (int *) malloc(memorySize_hard_decision_gpu);
llr_gpu = (float *) malloc(memorySize_llr_gpu);
error_result this_error;
int total_frame_error = 0;
int total_bit_error = 0;
int total_codeword = 0;
// create device memory
float * dev_llr;
float * dev_dt;
float * dev_R;
int * dev_hard_decision;
h_element * dev_h_compact1;
h_element * dev_h_compact2;
char * dev_h_element_count1;
char * dev_h_element_count2;
hipMalloc((void **)&dev_llr, memorySize_llr_gpu);
hipMalloc((void **)&dev_dt, memorySize_dt_gpu);
hipMalloc((void **)&dev_R, memorySize_R_gpu);
hipMalloc((void **)&dev_hard_decision, memorySize_hard_decision_gpu);
hipMalloc((void **)&dev_h_compact1, memorySize_h_compact1);
hipMalloc((void **)&dev_h_compact2, memorySize_h_compact2);
hipMalloc((void **)&dev_h_element_count1, BLK_ROW);
hipMalloc((void **)&dev_h_element_count2, BLK_COL);
hipMemcpy(dev_h_element_count1, h_element_count1, BLK_ROW, hipMemcpyHostToDevice);
hipMemcpy(dev_h_element_count2, h_element_count2, BLK_COL, hipMemcpyHostToDevice);
hipMemcpy(dev_h_compact1, h_compact1, memorySize_h_compact1, hipMemcpyHostToDevice);
hipMemcpy(dev_h_compact2, h_compact2, memorySize_h_compact2, hipMemcpyHostToDevice);
srand(69012);
for(int snri = 0; snri < NUM_SNR; snri++)
{
float snr = snr_array[snri];
sigma = 1.0f/sqrt(2.0f*rate*pow(10.0f,(snr/10.0f)));
total_codeword = 0;
total_frame_error = 0;
total_bit_error = 0;
// Adjust MIN_CODWORD in LDPC.h to reduce simulation time
while ( (total_frame_error <= MIN_FER) && (total_codeword <= MIN_CODEWORD))
{
total_codeword += CW * MCW;
for(int i = 0; i < CW * MCW; i++)
{
// generate random data
info_gen (info_bin, rand());
// encode the data
structure_encode (info_bin, codeword, h_base);
// BPSK modulation
modulation (codeword, trans);
// additive white Gaussian noise
awgn (trans, recv, rand());
// LLR init
llr_init (llr, recv);
// copy the info_bin and llr to the total memory
memcpy(info_bin_gpu + i * INFO_LEN, info_bin, memorySize_infobits);
memcpy(llr_gpu + i * CODEWORD_LEN, llr, memorySize_llr);
}
// Define kernel dimension
dim3 dimGridKernel1(BLK_ROW, MCW, 1); // dim of the thread blocks
dim3 dimBlockKernel1(BLOCK_SIZE_X, CW, 1);
int sharedRCacheSize = THREADS_PER_BLOCK * NON_EMPTY_ELMENT * sizeof(float);
dim3 dimGridKernel2(BLK_COL, MCW, 1);
dim3 dimBlockKernel2(BLOCK_SIZE_X, CW, 1);
//int sharedDtCacheSize = THREADS_PER_BLOCK * NON_EMPTY_ELMENT_VNP * sizeof(float);
// run the kernel
float total_time = 0.f;
for(int j = 0; j < MAX_SIM; j++)
{
// Transfer LLR data into device.
hipMemcpy(dev_llr, llr_gpu, memorySize_llr_gpu, hipMemcpyHostToDevice);
// kernel launch
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for(int ii = 0; ii < MAX_ITERATION; ii++)
{
// run check-node processing kernel
// TODO: run a special kernel the first iteration?
if(ii == 0) {
hipLaunchKernelGGL(ldpc_cnp_kernel_1st_iter, dimGridKernel1, dimBlockKernel1, 0, 0, dev_llr,
dev_dt,
dev_R,
dev_h_element_count1,
dev_h_compact1);
} else {
hipLaunchKernelGGL(ldpc_cnp_kernel, dimGridKernel1, dimBlockKernel1, sharedRCacheSize, 0, dev_llr,
dev_dt,
dev_R,
dev_h_element_count1,
dev_h_compact1);
}
// run variable-node processing kernel
// for the last iteration we run a special
// kernel. this is because we can make a hard
// decision instead of writing back the belief
// for the value of each bit.
if(ii < MAX_ITERATION - 1) {
hipLaunchKernelGGL(ldpc_vnp_kernel_normal, dimGridKernel2, dimBlockKernel2, 0, 0, dev_llr,
dev_dt,
dev_h_element_count2,
dev_h_compact2);
} else {
hipLaunchKernelGGL(ldpc_vnp_kernel_last_iter, dimGridKernel2, dimBlockKernel2, 0, 0, dev_llr,
dev_dt,
dev_hard_decision,
dev_h_element_count2,
dev_h_compact2);
}
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
total_time += time;
// copy the decoded data from device to host
hipMemcpy(hard_decision_gpu,
dev_hard_decision,
memorySize_hard_decision_gpu,
hipMemcpyDeviceToHost);
this_error = cuda_error_check(info_bin_gpu, hard_decision_gpu);
total_bit_error += this_error.bit_error;
total_frame_error += this_error.frame_error;
} // end of MAX-SIM
printf ("\n");
printf ("Total kernel execution time: %f (s)\n", total_time * 1e-9f);
printf ("# codewords = %d, CW=%d, MCW=%d\n",total_codeword, CW, MCW);
printf ("total bit error = %d\n", total_bit_error);
printf ("total frame error = %d\n", total_frame_error);
printf ("BER = %1.2e, FER = %1.2e\n",
(float) total_bit_error/total_codeword/INFO_LEN,
(float) total_frame_error/total_codeword);
} // end of the MAX frame error.
}// end of the snr loop
hipFree(dev_llr);
hipFree(dev_dt);
hipFree(dev_R);
hipFree(dev_hard_decision);
hipFree(dev_h_compact1);
hipFree(dev_h_compact2);
hipFree(dev_h_element_count1);
hipFree(dev_h_element_count2);
free(info_bin);
free(codeword);
free(trans);
free(recv);
free(llr);
free(llr_gpu);
free(hard_decision_gpu);
free(info_bin_gpu);
return 0;
}
| c9e5b438ce2b87a5a0d59bbcc0669ad25be4d595.cu | /* Copyright (c) 2011-2016, Robert Wang, email: robertwgh (at) gmail.com
All rights reserved. https://github.com/robertwgh/cuLDPC
Implementation of LDPC decoding algorithm.
The details of implementation can be found from the following papers:
1. Wang, G., Wu, M., Sun, Y., & Cavallaro, J. R. (2011, June). A massively parallel implementation of QC-LDPC decoder on GPU. In Application Specific Processors (SASP), 2011 IEEE 9th Symposium on (pp. 82-85). IEEE.
2. Wang, G., Wu, M., Yin, B., & Cavallaro, J. R. (2013, December). High throughput low latency LDPC decoding on GPU for SDR systems. In Global Conference on Signal and Information Processing (GlobalSIP), 2013 IEEE (pp. 1258-1261). IEEE.
The current release is close to the GlobalSIP2013 paper.
*/
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "LDPC.h"
#include "matrix.h"
#include "kernel.cu"
float sigma ;
int *info_bin ;
int main()
{
printf("GPU LDPC Decoder\r\nComputing...\r\n");
// For cnp kernel
#if MODE == WIMAX
const char h_element_count1[BLK_ROW] = {6, 7, 7, 6, 6, 7, 6, 6, 7, 6, 6, 6};
const char h_element_count2[BLK_COL] = {3, 3, 6, 3, 3, 6, 3, 6, 3, 6, 3, 6, \
3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};
#else
const char h_element_count1[BLK_ROW] = {7, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 8};
const char h_element_count2[BLK_COL] = {11,4, 3, 3,11, 3, 3, 3,11, 3, 3, 3, \
3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};
#endif
h_element h_compact1 [H_COMPACT1_COL][H_COMPACT1_ROW]; // for update dt, R
h_element h_element_temp;
// init the compact matrix
for(int i = 0; i < H_COMPACT1_COL; i++)
{
for(int j = 0; j < H_COMPACT1_ROW; j ++)
{
h_element_temp.x = 0;
h_element_temp.y = 0;
h_element_temp.value = -1;
h_element_temp.valid = 0;
h_compact1[i][j] = h_element_temp; // h[i][0-11], the same column
}
}
// scan the h matrix, and gengerate compact mode of h
for(int i = 0; i < BLK_ROW; i++)
{
int k = 0;
for(int j = 0; j < BLK_COL; j ++)
{
if(h_base[i][j] != -1)
{
h_element_temp.x = i;
h_element_temp.y = j;
h_element_temp.value = h_base[i][j];
h_element_temp.valid = 1;
h_compact1[k][i] = h_element_temp;
k++;
}
}
// printf("row %d, #element=%d\n", i, k);
}
// h_compact2
h_element h_compact2 [H_COMPACT2_ROW][H_COMPACT2_COL]; // for update llr
// init the compact matrix
for(int i = 0; i < H_COMPACT2_ROW; i++)
{
for(int j = 0; j < H_COMPACT2_COL; j ++)
{
h_element_temp.x = 0;
h_element_temp.y = 0;
h_element_temp.value = -1;
h_element_temp.valid = 0;
h_compact2[i][j] = h_element_temp;
}
}
for(int j = 0; j < BLK_COL; j++)
{
int k = 0;
for(int i = 0; i < BLK_ROW; i ++)
{
if(h_base[i][j] != -1)
{
// although h is transposed, the (x,y) is still (iBlkRow, iBlkCol)
h_element_temp.x = i;
h_element_temp.y = j;
h_element_temp.value = h_base[i][j];
h_element_temp.valid = 1;
h_compact2[k][j] = h_element_temp;
k++;
}
}
}
//int memorySize_h_base = BLK_ROW * BLK_COL * sizeof(int);
int memorySize_h_compact1 = H_COMPACT1_ROW * H_COMPACT1_COL * sizeof(h_element);
int memorySize_h_compact2 = H_COMPACT2_ROW * H_COMPACT2_COL * sizeof(h_element);
int memorySize_infobits = INFO_LEN * sizeof(int);
int memorySize_codeword = CODEWORD_LEN * sizeof(int);
int memorySize_llr = CODEWORD_LEN * sizeof(float);
info_bin = (int *) malloc(memorySize_infobits) ;
int *codeword = (int *) malloc(memorySize_codeword) ;
float *trans = (float *) malloc(memorySize_llr) ;
float *recv = (float *) malloc(memorySize_llr) ;
float *llr = (float *) malloc(memorySize_llr) ;
float rate = (float)0.5f;
//////////////////////////////////////////////////////////////////////////////////
// all the variables Starting with _gpu is used in host code and for cuda computation
int memorySize_infobits_gpu = MCW * CW * memorySize_infobits ;
int memorySize_llr_gpu = MCW * CW * CODEWORD_LEN * sizeof(float);
int memorySize_dt_gpu = MCW * CW * ROW * BLK_COL * sizeof(float);
int memorySize_R_gpu = MCW * CW * ROW * BLK_COL * sizeof(float);
int memorySize_hard_decision_gpu = MCW * CW * CODEWORD_LEN * sizeof(int);
int *info_bin_gpu;
float *llr_gpu;
int * hard_decision_gpu;
info_bin_gpu = (int *) malloc(memorySize_infobits_gpu);
hard_decision_gpu = (int *) malloc(memorySize_hard_decision_gpu);
llr_gpu = (float *) malloc(memorySize_llr_gpu);
error_result this_error;
int total_frame_error = 0;
int total_bit_error = 0;
int total_codeword = 0;
// create device memory
float * dev_llr;
float * dev_dt;
float * dev_R;
int * dev_hard_decision;
h_element * dev_h_compact1;
h_element * dev_h_compact2;
char * dev_h_element_count1;
char * dev_h_element_count2;
hipMalloc((void **)&dev_llr, memorySize_llr_gpu);
hipMalloc((void **)&dev_dt, memorySize_dt_gpu);
hipMalloc((void **)&dev_R, memorySize_R_gpu);
hipMalloc((void **)&dev_hard_decision, memorySize_hard_decision_gpu);
hipMalloc((void **)&dev_h_compact1, memorySize_h_compact1);
hipMalloc((void **)&dev_h_compact2, memorySize_h_compact2);
hipMalloc((void **)&dev_h_element_count1, BLK_ROW);
hipMalloc((void **)&dev_h_element_count2, BLK_COL);
hipMemcpy(dev_h_element_count1, h_element_count1, BLK_ROW, hipMemcpyHostToDevice);
hipMemcpy(dev_h_element_count2, h_element_count2, BLK_COL, hipMemcpyHostToDevice);
hipMemcpy(dev_h_compact1, h_compact1, memorySize_h_compact1, hipMemcpyHostToDevice);
hipMemcpy(dev_h_compact2, h_compact2, memorySize_h_compact2, hipMemcpyHostToDevice);
srand(69012);
for(int snri = 0; snri < NUM_SNR; snri++)
{
float snr = snr_array[snri];
sigma = 1.0f/sqrt(2.0f*rate*pow(10.0f,(snr/10.0f)));
total_codeword = 0;
total_frame_error = 0;
total_bit_error = 0;
// Adjust MIN_CODWORD in LDPC.h to reduce simulation time
while ( (total_frame_error <= MIN_FER) && (total_codeword <= MIN_CODEWORD))
{
total_codeword += CW * MCW;
for(int i = 0; i < CW * MCW; i++)
{
// generate random data
info_gen (info_bin, rand());
// encode the data
structure_encode (info_bin, codeword, h_base);
// BPSK modulation
modulation (codeword, trans);
// additive white Gaussian noise
awgn (trans, recv, rand());
// LLR init
llr_init (llr, recv);
// copy the info_bin and llr to the total memory
memcpy(info_bin_gpu + i * INFO_LEN, info_bin, memorySize_infobits);
memcpy(llr_gpu + i * CODEWORD_LEN, llr, memorySize_llr);
}
// Define kernel dimension
dim3 dimGridKernel1(BLK_ROW, MCW, 1); // dim of the thread blocks
dim3 dimBlockKernel1(BLOCK_SIZE_X, CW, 1);
int sharedRCacheSize = THREADS_PER_BLOCK * NON_EMPTY_ELMENT * sizeof(float);
dim3 dimGridKernel2(BLK_COL, MCW, 1);
dim3 dimBlockKernel2(BLOCK_SIZE_X, CW, 1);
//int sharedDtCacheSize = THREADS_PER_BLOCK * NON_EMPTY_ELMENT_VNP * sizeof(float);
// run the kernel
float total_time = 0.f;
for(int j = 0; j < MAX_SIM; j++)
{
// Transfer LLR data into device.
hipMemcpy(dev_llr, llr_gpu, memorySize_llr_gpu, hipMemcpyHostToDevice);
// kernel launch
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for(int ii = 0; ii < MAX_ITERATION; ii++)
{
// run check-node processing kernel
// TODO: run a special kernel the first iteration?
if(ii == 0) {
hipLaunchKernelGGL(ldpc_cnp_kernel_1st_iter, dimGridKernel1, dimBlockKernel1, 0, 0, dev_llr,
dev_dt,
dev_R,
dev_h_element_count1,
dev_h_compact1);
} else {
hipLaunchKernelGGL(ldpc_cnp_kernel, dimGridKernel1, dimBlockKernel1, sharedRCacheSize, 0, dev_llr,
dev_dt,
dev_R,
dev_h_element_count1,
dev_h_compact1);
}
// run variable-node processing kernel
// for the last iteration we run a special
// kernel. this is because we can make a hard
// decision instead of writing back the belief
// for the value of each bit.
if(ii < MAX_ITERATION - 1) {
hipLaunchKernelGGL(ldpc_vnp_kernel_normal, dimGridKernel2, dimBlockKernel2, 0, 0, dev_llr,
dev_dt,
dev_h_element_count2,
dev_h_compact2);
} else {
hipLaunchKernelGGL(ldpc_vnp_kernel_last_iter, dimGridKernel2, dimBlockKernel2, 0, 0, dev_llr,
dev_dt,
dev_hard_decision,
dev_h_element_count2,
dev_h_compact2);
}
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
total_time += time;
// copy the decoded data from device to host
hipMemcpy(hard_decision_gpu,
dev_hard_decision,
memorySize_hard_decision_gpu,
hipMemcpyDeviceToHost);
this_error = cuda_error_check(info_bin_gpu, hard_decision_gpu);
total_bit_error += this_error.bit_error;
total_frame_error += this_error.frame_error;
} // end of MAX-SIM
printf ("\n");
printf ("Total kernel execution time: %f (s)\n", total_time * 1e-9f);
printf ("# codewords = %d, CW=%d, MCW=%d\n",total_codeword, CW, MCW);
printf ("total bit error = %d\n", total_bit_error);
printf ("total frame error = %d\n", total_frame_error);
printf ("BER = %1.2e, FER = %1.2e\n",
(float) total_bit_error/total_codeword/INFO_LEN,
(float) total_frame_error/total_codeword);
} // end of the MAX frame error.
}// end of the snr loop
hipFree(dev_llr);
hipFree(dev_dt);
hipFree(dev_R);
hipFree(dev_hard_decision);
hipFree(dev_h_compact1);
hipFree(dev_h_compact2);
hipFree(dev_h_element_count1);
hipFree(dev_h_element_count2);
free(info_bin);
free(codeword);
free(trans);
free(recv);
free(llr);
free(llr_gpu);
free(hard_decision_gpu);
free(info_bin_gpu);
return 0;
}
|
ef9a1a8700fba5f019a68c1e93fb79bfac26eff4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_copyPredictorTo32.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int32_t *in = NULL;
hipMalloc(&in, XSIZE*YSIZE);
int32_t *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
uint32_t stride = 2;
uint32_t *numSamples = NULL;
hipMalloc(&numSamples, XSIZE*YSIZE);
int32_t theOutputPacketBytes = 1;
uint32_t frameLength = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
gpu_copyPredictorTo32), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,stride,numSamples,theOutputPacketBytes,frameLength);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
gpu_copyPredictorTo32), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,stride,numSamples,theOutputPacketBytes,frameLength);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
gpu_copyPredictorTo32), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,stride,numSamples,theOutputPacketBytes,frameLength);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ef9a1a8700fba5f019a68c1e93fb79bfac26eff4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_copyPredictorTo32.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int32_t *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE);
int32_t *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
uint32_t stride = 2;
uint32_t *numSamples = NULL;
cudaMalloc(&numSamples, XSIZE*YSIZE);
int32_t theOutputPacketBytes = 1;
uint32_t frameLength = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_copyPredictorTo32<<<gridBlock,threadBlock>>>(in,out,stride,numSamples,theOutputPacketBytes,frameLength);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_copyPredictorTo32<<<gridBlock,threadBlock>>>(in,out,stride,numSamples,theOutputPacketBytes,frameLength);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_copyPredictorTo32<<<gridBlock,threadBlock>>>(in,out,stride,numSamples,theOutputPacketBytes,frameLength);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2e6530e9da0220ea7221632d4dc870e5ed37d077.hip | // !!! This is a file automatically generated by hipify!!!
#include "Reduction.h"
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
__global__ void timedReductionKernel(const float *input, float *output, clock_t *timer) {
// __shared__ float shared[2 * blockDim.x];
extern __shared__ float shared[];
const int tid = threadIdx.x;
const int bid = blockIdx.x;
if (tid == 0) timer[bid] = clock();
// Copy input.
shared[tid] = input[tid];
shared[tid + blockDim.x] = input[tid + blockDim.x];
// Perform reduction to find minimum.
for (int d = blockDim.x; d > 0; d /= 2) {
__syncthreads();
if (tid < d) {
float f0 = shared[tid];
float f1 = shared[tid + d];
if (f1 < f0) {
shared[tid] = f1;
}
}
}
// Write result.
if (tid == 0) output[bid] = shared[0];
__syncthreads();
if (tid == 0) timer[bid + gridDim.x] = clock();
}
void timedReduction(const float *input, float *output, clock_t *timer) {
timedReductionKernel << < NUM_BLOCKS, NUM_THREADS, sizeof(float) * 2 * NUM_THREADS >> > (input, output, timer);
}
| 2e6530e9da0220ea7221632d4dc870e5ed37d077.cu | #include "Reduction.h"
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
__global__ void timedReductionKernel(const float *input, float *output, clock_t *timer) {
// __shared__ float shared[2 * blockDim.x];
extern __shared__ float shared[];
const int tid = threadIdx.x;
const int bid = blockIdx.x;
if (tid == 0) timer[bid] = clock();
// Copy input.
shared[tid] = input[tid];
shared[tid + blockDim.x] = input[tid + blockDim.x];
// Perform reduction to find minimum.
for (int d = blockDim.x; d > 0; d /= 2) {
__syncthreads();
if (tid < d) {
float f0 = shared[tid];
float f1 = shared[tid + d];
if (f1 < f0) {
shared[tid] = f1;
}
}
}
// Write result.
if (tid == 0) output[bid] = shared[0];
__syncthreads();
if (tid == 0) timer[bid + gridDim.x] = clock();
}
void timedReduction(const float *input, float *output, clock_t *timer) {
timedReductionKernel << < NUM_BLOCKS, NUM_THREADS, sizeof(float) * 2 * NUM_THREADS >> > (input, output, timer);
}
|
8e932f73e0a8d55e1ea9b8b4ab1b3e5617973a22.hip | // !!! This is a file automatically generated by hipify!!!
// includes, project
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include "config.h"
extern "C" {
void dumpTemplate(FILE* fp, char* fstr);
void printResults(FILE* fpLog);
void showMembranePotential(unsigned int* spikeAddr, int spikeCnt);
}
// Function dumps the template parameters into a file
// pointed by fp. The file can be executed in matlab
void dumpTemplate(FILE* fp, char* fstr)
{
if(fp==NULL)
return;
printf( "Dumping %d templates to %s\n", num_object, fstr);
fflush(stdout); // for jaer to print this
for(int i = 0; i < num_object; i++){
fprintf( fp, " template%d = [ ", i);
for(int j=0; j < MAX_TEMPLATE_SIZE; j++) {
for(int k=0; k < MAX_TEMPLATE_SIZE; k++) {
fprintf( fp, " %f ", conv_template[i][j][k]);
}
fprintf(fp, "; \n");
}
fprintf(fp , " ];\nfigure;imagesc(template%d);\n\n",i);
}
fflush(fp);
}
int countMem = 0;
// only for debug, writes the membrane potentials to a file
void showMembranePotential(unsigned int* spikeAddr=NULL, int spikeCnt=0)
{
if((countMem >= RECORD_START && countMem <= RECORD_END))
{
if(runCuda)
hipMemcpyFromSymbol(membranePotential, "gpu_membranePotential", sizeof(membranePotential), 0, hipMemcpyDeviceToHost);
char fname[100];
sprintf(fname, "mem_pot%d.m", countMem);
FILE* fpDumpPot;
fpDumpPot = fopen(fname, "w");
for(int k = 0; k < num_object; k++){
fprintf( fpDumpPot, " memPot[%d] = [ ", k);
for(int i=0; i < MAX_Y; i++) {
for(int j=0; j < MAX_X; j++) {
fprintf( fpDumpPot, " %f ", membranePotential[k][i][j]);
}
fprintf(fpDumpPot, "; \n");
}
fprintf( fpDumpPot, "];\n\n");
}
fclose(fpDumpPot);
if(spikeAddr != NULL) {
char fname[100];
sprintf(fname, "inpSpike%d.m", countMem);
FILE* fpDumpPot;
fpDumpPot = fopen(fname, "w");
fprintf( fpDumpPot, " inpSpike = [ " );
for(int j=0; j < spikeCnt; j++) {
fprintf( fpDumpPot, " %u ", spikeAddr[j]);
}
fprintf(fpDumpPot , " ]; " );
fclose(fpDumpPot);
}
}
countMem++;
}
void printResults(FILE* fpLog)
{
int tot_fired = 0;
if(!runCuda) {
extern int cpu_totFiring;
extern int cpu_totFiringMO[MAX_NUM_TEMPLATE];
printf(" Number of fired neurons is %d\n", cpu_totFiring);
printf(" Template size is %dx%d\n", MAX_TEMPLATE_SIZE, MAX_TEMPLATE_SIZE);
fprintf(fpLog, " Template size is %dx%d\n", MAX_TEMPLATE_SIZE, MAX_TEMPLATE_SIZE);
fprintf(fpLog, " Number of fired neurons is %d\n", tot_fired);
for(int i=0; i < num_object; i++) {
printf(" Total firing in Array %d => %d\n", i, cpu_totFiringMO[i]);
fprintf(fpLog, " Total firing in Array %d => %d\n", i, cpu_totFiringMO[i]);
}
}
else {
printf("Kernel 1 called %d times\n", callCount);
printf(" Total number of spikes computed : %d\n", tot_filteredSpikes);
for(int i = 0; i < num_object; i++)
tot_fired = tot_fired + tot_fired_MO[i];
printf(" Number of fired neurons is %d\n", tot_fired);
printf(" Template size is %dx%d\n", MAX_TEMPLATE_SIZE, MAX_TEMPLATE_SIZE);
fprintf(fpLog, "Kernel 1 called %d times\n", callCount);
fprintf(fpLog, " Total number of spikes computed : %d\n", tot_filteredSpikes);
fprintf(fpLog, " Number of fired neurons is %d\n", tot_fired);
fprintf(fpLog, " Template size is %dx%d\n", MAX_TEMPLATE_SIZE, MAX_TEMPLATE_SIZE);
for(int i=0; i < num_object; i++) {
printf(" Total firing in Array %d => %d\n", i, tot_fired_MO[i]);
fprintf(fpLog, " Total firing in Array %d => %d\n", i, tot_fired_MO[i]);
}
}
int tot=0;
int minLen=1000;
int mini=0;
int maxLen=-1;
int maxi=0;
int cnt=(callCount>PARAM_LEN_SIZE)?PARAM_LEN_SIZE:callCount;
for(int i=0; i < cnt; i++) {
tot+= paramLenArr[i];
if (paramLenArr[i] < minLen) {minLen =paramLenArr[i]; mini=i; }
if (paramLenArr[i] > maxLen) {maxLen = paramLenArr[i]; maxi=i; }
}
if(runCuda) {
printf(" Spike Distribution Per Kernel Call: \nmean(%f), min (i=%d, val=%d), max(i=%d,val=%d)\n", tot*1.0/cnt, mini, minLen, maxi,maxLen);
fprintf(fpLog, " Spike Distribution Per Kernel Call: \nmean(%f), min (i=%d, val=%d), max(i=%d,val=%d)\n", tot*1.0/cnt, mini, minLen, maxi,maxLen);
}
if(runCuda) {
printf( " Total Object scanned : %d\n", num_object);
printf( " Total firing from Inhibition Neuron : %d\n", inhFireCnt);
printf( " Total firing is equal to %d\n", tot_fired);
printf( " Average firing is equal to %f\n", tot_fired*1.0/callCount);
printf( "\n\nAvg. GPU Processing time per spike: %f (ms)\n", accTimer/(tot_filteredSpikes));
printf( "\n\nTotal GPU Processing time : %f (ms)\n", accTimer);
fprintf( fpLog, " Total Object scanned : %d\n", num_object);
fprintf( fpLog, " Total firing from Inhibition Neuron : %d\n", inhFireCnt);
fprintf( fpLog, " Total firing is equal to %d\n", tot_fired);
fprintf( fpLog, "\n\nAvg. GPU Processing time per spike: %f (ms)\n", accTimer/(tot_filteredSpikes));
fprintf( fpLog, "\n\nTotal GPU Processing time : %f (ms)\n", accTimer);
}
else {
extern int cpu_totFiring;
extern int iNeuronFiringCnt;
extern int iNeuronCallingCnt;
printf( " Total Object scanned : %d\n", num_object);
printf(" INeuron Grouping Impact, Calls = %d, Firing = %d\n", iNeuronCallingCnt, iNeuronFiringCnt);
printf( " Total firing is equal to %d\n", cpu_totFiring);
printf( "\n\nCPU Processing time per spike: %f (ms)\n", accTimer/(tot_filteredSpikes));
printf( "\n\nTotal CPU Processing time : %f (ms)\n", accTimer);
fprintf( fpLog, " Total Object scanned : %d\n", num_object);
fprintf( fpLog, " Total firing from Inhibition Neuron : %d\n", iNeuronFiringCnt);
fprintf( fpLog, " Total firing is equal to %d\n", cpu_totFiring);
fprintf( fpLog, "\n\nCPU Processing time per spike: %f (ms)\n", accTimer/(tot_filteredSpikes));
fprintf( fpLog, "\n\nTotal CPU Processing time : %f (ms)\n", accTimer);
}
fflush(stdout); // so jaer gets it
}
| 8e932f73e0a8d55e1ea9b8b4ab1b3e5617973a22.cu | // includes, project
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include "config.h"
extern "C" {
void dumpTemplate(FILE* fp, char* fstr);
void printResults(FILE* fpLog);
void showMembranePotential(unsigned int* spikeAddr, int spikeCnt);
}
// Function dumps the template parameters into a file
// pointed by fp. The file can be executed in matlab
void dumpTemplate(FILE* fp, char* fstr)
{
if(fp==NULL)
return;
printf( "Dumping %d templates to %s\n", num_object, fstr);
fflush(stdout); // for jaer to print this
for(int i = 0; i < num_object; i++){
fprintf( fp, " template%d = [ ", i);
for(int j=0; j < MAX_TEMPLATE_SIZE; j++) {
for(int k=0; k < MAX_TEMPLATE_SIZE; k++) {
fprintf( fp, " %f ", conv_template[i][j][k]);
}
fprintf(fp, "; \n");
}
fprintf(fp , " ];\nfigure;imagesc(template%d);\n\n",i);
}
fflush(fp);
}
int countMem = 0;
// only for debug, writes the membrane potentials to a file
void showMembranePotential(unsigned int* spikeAddr=NULL, int spikeCnt=0)
{
if((countMem >= RECORD_START && countMem <= RECORD_END))
{
if(runCuda)
cudaMemcpyFromSymbol(membranePotential, "gpu_membranePotential", sizeof(membranePotential), 0, cudaMemcpyDeviceToHost);
char fname[100];
sprintf(fname, "mem_pot%d.m", countMem);
FILE* fpDumpPot;
fpDumpPot = fopen(fname, "w");
for(int k = 0; k < num_object; k++){
fprintf( fpDumpPot, " memPot[%d] = [ ", k);
for(int i=0; i < MAX_Y; i++) {
for(int j=0; j < MAX_X; j++) {
fprintf( fpDumpPot, " %f ", membranePotential[k][i][j]);
}
fprintf(fpDumpPot, "; \n");
}
fprintf( fpDumpPot, "];\n\n");
}
fclose(fpDumpPot);
if(spikeAddr != NULL) {
char fname[100];
sprintf(fname, "inpSpike%d.m", countMem);
FILE* fpDumpPot;
fpDumpPot = fopen(fname, "w");
fprintf( fpDumpPot, " inpSpike = [ " );
for(int j=0; j < spikeCnt; j++) {
fprintf( fpDumpPot, " %u ", spikeAddr[j]);
}
fprintf(fpDumpPot , " ]; " );
fclose(fpDumpPot);
}
}
countMem++;
}
/*
 * Print run statistics (firing counts, spike distribution, timings) to both
 * stdout and the log file 'fpLog'. Reads the CPU counters (cpu_totFiring,
 * cpu_totFiringMO, ...) when runCuda is false, otherwise the GPU counters
 * (callCount, tot_fired_MO, tot_filteredSpikes, accTimer, ...).
 */
void printResults(FILE* fpLog)
{
    int tot_fired = 0;
    if(!runCuda) {
        extern int cpu_totFiring;
        extern int cpu_totFiringMO[MAX_NUM_TEMPLATE];
        printf(" Number of fired neurons is %d\n", cpu_totFiring);
        printf(" Template size is %dx%d\n", MAX_TEMPLATE_SIZE, MAX_TEMPLATE_SIZE);
        fprintf(fpLog, " Template size is %dx%d\n", MAX_TEMPLATE_SIZE, MAX_TEMPLATE_SIZE);
        /* Fixed: this line previously logged tot_fired, which is still 0 at
         * this point in the CPU branch; log the same counter printed to
         * stdout above. */
        fprintf(fpLog, " Number of fired neurons is %d\n", cpu_totFiring);
        for(int i=0; i < num_object; i++) {
            printf(" Total firing in Array %d => %d\n", i, cpu_totFiringMO[i]);
            fprintf(fpLog, " Total firing in Array %d => %d\n", i, cpu_totFiringMO[i]);
        }
    }
    else {
        printf("Kernel 1 called %d times\n", callCount);
        printf(" Total number of spikes computed : %d\n", tot_filteredSpikes);
        /* Sum the per-object firing counts into the run total. */
        for(int i = 0; i < num_object; i++)
            tot_fired = tot_fired + tot_fired_MO[i];
        printf(" Number of fired neurons is %d\n", tot_fired);
        printf(" Template size is %dx%d\n", MAX_TEMPLATE_SIZE, MAX_TEMPLATE_SIZE);
        fprintf(fpLog, "Kernel 1 called %d times\n", callCount);
        fprintf(fpLog, " Total number of spikes computed : %d\n", tot_filteredSpikes);
        fprintf(fpLog, " Number of fired neurons is %d\n", tot_fired);
        fprintf(fpLog, " Template size is %dx%d\n", MAX_TEMPLATE_SIZE, MAX_TEMPLATE_SIZE);
        for(int i=0; i < num_object; i++) {
            printf(" Total firing in Array %d => %d\n", i, tot_fired_MO[i]);
            fprintf(fpLog, " Total firing in Array %d => %d\n", i, tot_fired_MO[i]);
        }
    }
    /* Spike-count distribution over the recorded kernel calls; paramLenArr
     * keeps at most PARAM_LEN_SIZE samples. */
    int tot=0;
    int minLen=1000;
    int mini=0;
    int maxLen=-1;
    int maxi=0;
    int cnt=(callCount>PARAM_LEN_SIZE)?PARAM_LEN_SIZE:callCount;
    for(int i=0; i < cnt; i++) {
        tot+= paramLenArr[i];
        if (paramLenArr[i] < minLen) {minLen =paramLenArr[i]; mini=i; }
        if (paramLenArr[i] > maxLen) {maxLen = paramLenArr[i]; maxi=i; }
    }
    /* Guard cnt > 0 so we never divide by zero when no kernel call was
     * recorded. */
    if(runCuda && cnt > 0) {
        printf(" Spike Distribution Per Kernel Call: \nmean(%f), min (i=%d, val=%d), max(i=%d,val=%d)\n", tot*1.0/cnt, mini, minLen, maxi,maxLen);
        fprintf(fpLog, " Spike Distribution Per Kernel Call: \nmean(%f), min (i=%d, val=%d), max(i=%d,val=%d)\n", tot*1.0/cnt, mini, minLen, maxi,maxLen);
    }
    if(runCuda) {
        printf( " Total Object scanned : %d\n", num_object);
        printf( " Total firing from Inhibition Neuron : %d\n", inhFireCnt);
        printf( " Total firing is equal to %d\n", tot_fired);
        printf( " Average firing is equal to %f\n", tot_fired*1.0/callCount);
        printf( "\n\nAvg. GPU Processing time per spike: %f (ms)\n", accTimer/(tot_filteredSpikes));
        printf( "\n\nTotal GPU Processing time : %f (ms)\n", accTimer);
        fprintf( fpLog, " Total Object scanned : %d\n", num_object);
        fprintf( fpLog, " Total firing from Inhibition Neuron : %d\n", inhFireCnt);
        fprintf( fpLog, " Total firing is equal to %d\n", tot_fired);
        fprintf( fpLog, "\n\nAvg. GPU Processing time per spike: %f (ms)\n", accTimer/(tot_filteredSpikes));
        fprintf( fpLog, "\n\nTotal GPU Processing time : %f (ms)\n", accTimer);
    }
    else {
        extern int cpu_totFiring;
        extern int iNeuronFiringCnt;
        extern int iNeuronCallingCnt;
        printf( " Total Object scanned : %d\n", num_object);
        printf(" INeuron Grouping Impact, Calls = %d, Firing = %d\n", iNeuronCallingCnt, iNeuronFiringCnt);
        printf( " Total firing is equal to %d\n", cpu_totFiring);
        printf( "\n\nCPU Processing time per spike: %f (ms)\n", accTimer/(tot_filteredSpikes));
        printf( "\n\nTotal CPU Processing time : %f (ms)\n", accTimer);
        fprintf( fpLog, " Total Object scanned : %d\n", num_object);
        fprintf( fpLog, " Total firing from Inhibition Neuron : %d\n", iNeuronFiringCnt);
        fprintf( fpLog, " Total firing is equal to %d\n", cpu_totFiring);
        fprintf( fpLog, "\n\nCPU Processing time per spike: %f (ms)\n", accTimer/(tot_filteredSpikes));
        fprintf( fpLog, "\n\nTotal CPU Processing time : %f (ms)\n", accTimer);
    }
    fflush(stdout); // so jaer gets it
}
|
7d8ef95fca26bf5061d9b5183492bb3ea02f32a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "opticalflow_cuda.hpp"
#include <ftl/cuda/fixed.hpp>
#define T_PER_BLOCK 8
using ftl::cuda::TextureObject;
// Tints each colour pixel by the clamped optical-flow magnitude at that
// position. Flow is sampled at quarter resolution (granularity 4) and is
// stored in fixed point with 5 fractional bits (fixed2float<5>).
__global__ void show_optflow_kernel(
        cv::cuda::PtrStepSz<short2> optflow,
        TextureObject<uchar4> colour, float scale) {
    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x < colour.width() && y < colour.height()) {
        short2 flow = optflow(y/4, x/4); // 4 is granularity
        // Normalised flow components, clamped to [-1, 1].
        float fx = max(-1.0f, min(1.0f, fixed2float<5>(flow.x) / scale));
        float fy = max(-1.0f, min(1.0f, fixed2float<5>(flow.y) / scale));
        float f = sqrt(fx*fx+fy*fy); // flow magnitude in [0, sqrt(2)]
        // Add the magnitude to the .x and .z channels and clamp to 255.
        float4 c = colour.tex2D(float(x)+0.5f, float(y)+0.5f);
        c += make_float4(f*255.0f, 0.0f, f*255.0f, 0.0f);
        colour(x,y) = make_uchar4(min(255.0f, c.x), min(255.0f, c.y), min(255.0f, c.z), 0.0f);
    }
}

// Host wrapper: overlays the flow visualisation on 'colour' with 8x8 thread
// blocks on the given stream. Only launch-configuration errors are checked;
// the kernel itself runs asynchronously.
void ftl::cuda::show_optical_flow(const cv::cuda::GpuMat &optflow, const TextureObject<uchar4> &colour, float scale, hipStream_t stream) {
    const dim3 gridSize((colour.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
    const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
    hipLaunchKernelGGL(( show_optflow_kernel), dim3(gridSize), dim3(blockSize), 0, stream, optflow, colour, scale);
    cudaSafeCall( hipGetLastError() );
}

// ==== Flow difference ========================================================

// Derives a disparity map from horizontal flow: disparity = -flow1.x,
// cross-checked against the opposite-direction flow (optflow2); matches that
// disagree by more than one pixel are zeroed. Output is fixed point with 4
// fractional bits (float2fixed<4>).
__global__ void gen_disparity_kernel(
        cv::cuda::PtrStepSz<short2> optflow1,
        cv::cuda::PtrStepSz<short2> optflow2,
        TextureObject<short> disparity) {
    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x < disparity.width() && y < disparity.height()) {
        short2 flow1 = optflow1(y/4, x/4); // 4 is granularity
        float disp = -fixed2float<5>(flow1.x);
        // Do a consistency check
        if (disp > 0.0f && x-disp-0.5f > 0) { //< colour.width()) {
            short2 flow2 = optflow2(y/4, (x-round(disp))/4); // 4 is granularity
            if (fabsf(disp - fixed2float<5>(flow2.x)) > 1.0f) disp = 0.0f;
        }
        disparity(x,y) = float2fixed<4>(disp);
    }
}

// Host wrapper: builds a disparity map from a left/right flow pair with 8x8
// thread blocks on the given stream (asynchronous launch).
void ftl::cuda::disparity_from_flow(const cv::cuda::GpuMat &optflow1, const cv::cuda::GpuMat &optflow2, const TextureObject<short> &disparity, hipStream_t stream) {
    const dim3 gridSize((disparity.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (disparity.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
    const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
    hipLaunchKernelGGL(( gen_disparity_kernel), dim3(gridSize), dim3(blockSize), 0, stream, optflow1, optflow2, disparity);
    cudaSafeCall( hipGetLastError() );
}

// Colours pixels by the relative disagreement between the two flow fields:
// dx = (flow1.x + flow2.x) / disp, split into a positive (fR) and negative
// (fB) component; fG flags values below full scale. Pixels with no valid
// positive disparity are left untouched.
__global__ void show_optflow_diff_kernel(
        cv::cuda::PtrStepSz<short2> optflow1,
        cv::cuda::PtrStepSz<short2> optflow2,
        TextureObject<uchar4> colour, float scale,
        ftl::rgbd::Camera cam) {
    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x < colour.width() && y < colour.height()) {
        short2 flow1 = optflow1(y/4, x/4); // 4 is granularity
        float disp = fixed2float<5>(flow1.x);
        if (disp > 0.0f && x-disp-0.5f > 0) { //< colour.width()) {
            short2 flow2 = optflow2(y/4, (x-round(disp))/4); // 4 is granularity
            float dx = (fixed2float<5>(flow1.x) + fixed2float<5>(flow2.x)) / disp;
            float fR = max(0.0f, min(1.0f, dx / scale));
            float fB = -max(-1.0f, min(0.0f, dx / scale));
            float fG = (fR == 1.0f || fB == 1.0f) ? 0.0f : 1.0f;
            float4 c = colour.tex2D(float(x)+0.5f, float(y)+0.5f);
            c += make_float4(fG*fB*255.0f, (1.0f-fG)*255.0f, fG*fR*255.0f, 0.0f);
            colour(x,y) = make_uchar4(min(255.0f, c.x), min(255.0f, c.y), min(255.0f, c.z), 0.0f);
        }
    }
}

// Host wrapper for the flow-difference visualisation; 8x8 thread blocks on
// the given stream (asynchronous launch). 'cam' is passed through unused by
// the kernel body shown here.
void ftl::cuda::show_optical_flow_diff(const cv::cuda::GpuMat &optflow1, const cv::cuda::GpuMat &optflow2, const TextureObject<uchar4> &colour, float scale, const ftl::rgbd::Camera &cam, hipStream_t stream) {
    const dim3 gridSize((colour.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
    const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
    hipLaunchKernelGGL(( show_optflow_diff_kernel), dim3(gridSize), dim3(blockSize), 0, stream, optflow1, optflow2, colour, scale, cam);
    cudaSafeCall( hipGetLastError() );
}
| 7d8ef95fca26bf5061d9b5183492bb3ea02f32a2.cu | #include "opticalflow_cuda.hpp"
#include <ftl/cuda/fixed.hpp>
#define T_PER_BLOCK 8
using ftl::cuda::TextureObject;
// Tints each colour pixel by the clamped optical-flow magnitude at that
// position. Flow is sampled at quarter resolution (granularity 4) and is
// stored in fixed point with 5 fractional bits (fixed2float<5>).
__global__ void show_optflow_kernel(
        cv::cuda::PtrStepSz<short2> optflow,
        TextureObject<uchar4> colour, float scale) {
    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x < colour.width() && y < colour.height()) {
        short2 flow = optflow(y/4, x/4); // 4 is granularity
        // Normalised flow components, clamped to [-1, 1].
        float fx = max(-1.0f, min(1.0f, fixed2float<5>(flow.x) / scale));
        float fy = max(-1.0f, min(1.0f, fixed2float<5>(flow.y) / scale));
        float f = sqrt(fx*fx+fy*fy); // flow magnitude in [0, sqrt(2)]
        // Add the magnitude to the .x and .z channels and clamp to 255.
        float4 c = colour.tex2D(float(x)+0.5f, float(y)+0.5f);
        c += make_float4(f*255.0f, 0.0f, f*255.0f, 0.0f);
        colour(x,y) = make_uchar4(min(255.0f, c.x), min(255.0f, c.y), min(255.0f, c.z), 0.0f);
    }
}

// Host wrapper: overlays the flow visualisation on 'colour' with 8x8 thread
// blocks on the given stream. Only launch-configuration errors are checked;
// the kernel itself runs asynchronously.
void ftl::cuda::show_optical_flow(const cv::cuda::GpuMat &optflow, const TextureObject<uchar4> &colour, float scale, cudaStream_t stream) {
    const dim3 gridSize((colour.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
    const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
    show_optflow_kernel<<<gridSize, blockSize, 0, stream>>>(optflow, colour, scale);
    cudaSafeCall( cudaGetLastError() );
}

// ==== Flow difference ========================================================

// Derives a disparity map from horizontal flow: disparity = -flow1.x,
// cross-checked against the opposite-direction flow (optflow2); matches that
// disagree by more than one pixel are zeroed. Output is fixed point with 4
// fractional bits (float2fixed<4>).
__global__ void gen_disparity_kernel(
        cv::cuda::PtrStepSz<short2> optflow1,
        cv::cuda::PtrStepSz<short2> optflow2,
        TextureObject<short> disparity) {
    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x < disparity.width() && y < disparity.height()) {
        short2 flow1 = optflow1(y/4, x/4); // 4 is granularity
        float disp = -fixed2float<5>(flow1.x);
        // Do a consistency check
        if (disp > 0.0f && x-disp-0.5f > 0) { //< colour.width()) {
            short2 flow2 = optflow2(y/4, (x-round(disp))/4); // 4 is granularity
            if (fabsf(disp - fixed2float<5>(flow2.x)) > 1.0f) disp = 0.0f;
        }
        disparity(x,y) = float2fixed<4>(disp);
    }
}

// Host wrapper: builds a disparity map from a left/right flow pair with 8x8
// thread blocks on the given stream (asynchronous launch).
void ftl::cuda::disparity_from_flow(const cv::cuda::GpuMat &optflow1, const cv::cuda::GpuMat &optflow2, const TextureObject<short> &disparity, cudaStream_t stream) {
    const dim3 gridSize((disparity.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (disparity.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
    const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
    gen_disparity_kernel<<<gridSize, blockSize, 0, stream>>>(optflow1, optflow2, disparity);
    cudaSafeCall( cudaGetLastError() );
}

// Colours pixels by the relative disagreement between the two flow fields:
// dx = (flow1.x + flow2.x) / disp, split into a positive (fR) and negative
// (fB) component; fG flags values below full scale. Pixels with no valid
// positive disparity are left untouched.
__global__ void show_optflow_diff_kernel(
        cv::cuda::PtrStepSz<short2> optflow1,
        cv::cuda::PtrStepSz<short2> optflow2,
        TextureObject<uchar4> colour, float scale,
        ftl::rgbd::Camera cam) {
    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x < colour.width() && y < colour.height()) {
        short2 flow1 = optflow1(y/4, x/4); // 4 is granularity
        float disp = fixed2float<5>(flow1.x);
        if (disp > 0.0f && x-disp-0.5f > 0) { //< colour.width()) {
            short2 flow2 = optflow2(y/4, (x-round(disp))/4); // 4 is granularity
            float dx = (fixed2float<5>(flow1.x) + fixed2float<5>(flow2.x)) / disp;
            float fR = max(0.0f, min(1.0f, dx / scale));
            float fB = -max(-1.0f, min(0.0f, dx / scale));
            float fG = (fR == 1.0f || fB == 1.0f) ? 0.0f : 1.0f;
            float4 c = colour.tex2D(float(x)+0.5f, float(y)+0.5f);
            c += make_float4(fG*fB*255.0f, (1.0f-fG)*255.0f, fG*fR*255.0f, 0.0f);
            colour(x,y) = make_uchar4(min(255.0f, c.x), min(255.0f, c.y), min(255.0f, c.z), 0.0f);
        }
    }
}

// Host wrapper for the flow-difference visualisation; 8x8 thread blocks on
// the given stream (asynchronous launch). 'cam' is passed through unused by
// the kernel body shown here.
void ftl::cuda::show_optical_flow_diff(const cv::cuda::GpuMat &optflow1, const cv::cuda::GpuMat &optflow2, const TextureObject<uchar4> &colour, float scale, const ftl::rgbd::Camera &cam, cudaStream_t stream) {
    const dim3 gridSize((colour.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
    const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
    show_optflow_diff_kernel<<<gridSize, blockSize, 0, stream>>>(optflow1, optflow2, colour, scale, cam);
    cudaSafeCall( cudaGetLastError() );
}
|
a0b5269004a31915bb1cad7082af41e7233a8204.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
#include "helper_cuda.h"
const int N = 1024; // matrix size is N x N
const int K = 32; // tile size is K x K
// CPU reference: writes the transpose of the N x N row-major matrix 'in'
// into 'out'; used to produce the expected result for the GPU kernels.
void transpose_cpu(int *in, int *out)
{
    for (int r = 0; r < N; ++r) {
        const int rowBase = r * N;
        for (int c = 0; c < N; ++c)
            out[c * N + r] = in[rowBase + c];
    }
}
// Baseline GPU kernel: a single thread transposes the entire N x N matrix
// (main() launches this with one block of one thread).
__global__ void transpose_serial(int *in, int *out)
{
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            out[col * N + row] = in[row * N + col];
        }
    }
}

// One thread per row: thread threadIdx.x transposes row 'row'
// (main() launches this as a single block of N threads).
__global__ void transpose_parallel_per_row(int *in, int *out)
{
    int row = threadIdx.x;
    for (int col = 0; col < N; col++) {
        out[col * N + row] = in[row * N + col];
    }
}

// One thread per element on a 2-D grid. The read of 'in' is contiguous
// across a warp (coalesced); the write to 'out' strides by N.
__global__ void transpose_parallel_per_element(int *in, int *out)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    out[col * N + row] = in[row * N + col];
}
// Tiled transpose: each K x K thread block stages a tile of 'in' in shared
// memory, then writes it back transposed so that both the global read and
// the global write are coalesced. Requires blockDim == (K, K).
__global__ void transpose_parallel_per_element_tiled(int *in, int *out)
{
    // Pad the inner dimension by one element so the column-wise read
    // s_data[x][y] below does not hit the same shared-memory bank for every
    // lane of a warp (classic +1 padding to avoid bank conflicts).
    __shared__ int s_data[K][K + 1];
    int x = threadIdx.x, y = threadIdx.y;
    int in_corner_x = blockIdx.x * K, in_corner_y = blockIdx.y * K;
    int out_corner_x = in_corner_y, out_corner_y = in_corner_x;
    // write in[y][x] to s_data[y][x]
    s_data[y][x] = in[(in_corner_y + y) * N + (in_corner_x + x)];
    __syncthreads();
    // write s_data[x][y] to out[y][x]
    out[(out_corner_y + y) * N + (out_corner_x + x)] = s_data[x][y];
}
// Tiled transpose with a fixed 16 x 16 tile (for comparing tile sizes in
// main()). Requires blockDim == (16, 16).
__global__ void transpose_parallel_per_element_tiled16(int *in, int *out)
{
    // +1 padding on the inner dimension avoids shared-memory bank conflicts
    // on the transposed read s_data[x][y].
    __shared__ int s_data[16][16 + 1];
    int x = threadIdx.x, y = threadIdx.y;
    int in_corner_x = blockIdx.x * 16, in_corner_y = blockIdx.y * 16;
    int out_corner_x = in_corner_y, out_corner_y = in_corner_x;
    // write in[y][x] to s_data[y][x]
    s_data[y][x] = in[(in_corner_y + y) * N + (in_corner_x + x)];
    __syncthreads();
    // write s_data[x][y] to out[y][x]
    out[(out_corner_y + y) * N + (out_corner_x + x)] = s_data[x][y];
}
// Debug helper: prints the N x N matrix row by row (not called from main()).
void print_matrix(int *in)
{
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            printf("%d ", in[row * N + col]);
        }
        printf("\n");
    }
}

// Fills the N x N matrix with pseudo-random digits in [0, 9].
// NOTE(review): rand() is not seeded in this file, so the data is identical
// on every run — deterministic, which suits the verification step.
void fill_matrix(int *in)
{
    int size = N * N;
    for (int i = 0; i < size; i++) {
        in[i] = rand() % 10;
    }
}
// return (matrix a == matrix b)
// Returns true iff the two N x N matrices are element-wise identical.
bool same_matrices(int *a, int *b)
{
    const int total = N * N;
    for (int i = 0; i < total; ++i)
        if (a[i] != b[i])
            return false;
    return true;
}
/*
 * Benchmarks several GPU transpose implementations against a CPU reference:
 * serial, one-thread-per-row, one-thread-per-element, and shared-memory
 * tiled variants (K x K and 16 x 16). For each kernel it reports the run
 * time, the achieved fraction of peak memory bandwidth, and whether the
 * result matches the CPU transpose.
 *
 * Fixed: the report format strings used "\%" to print a literal percent
 * sign; that collapses to a bare '%' followed by '\n', an invalid printf
 * conversion (undefined behaviour). "%%" is the correct spelling.
 */
int main(int argc, char **argv)
{
    hipEvent_t start, stop;
    struct timeval start_time, stop_time;
    float elapsed_time;
    int num_bytes = N * N * sizeof(int);
    int *h_in = (int*)malloc(num_bytes);
    int *h_out = (int*)malloc(num_bytes);
    int *expected_out = (int*)malloc(num_bytes);
    int *d_in, *d_out;
    dim3 blocks(N/K, N/K);          // blocks per grid
    dim3 threads(K, K);             // threads per block
    dim3 blocks16x16(N/16, N/16);   // blocks per grid
    dim3 threads16x16(16, 16);      // threads per block
    hipDeviceProp_t prop;           // CUDA device properties
    int device = 0;                 // ID of device for GPU execution
    double peakMemBwGbps;           // GPU peak memory bandwidth in GB/s
    double memUtil;                 // GPU memory bandwidth utilization
    // not enough host memory (free(NULL) at 'out' is well defined)
    if (!h_in || !h_out || !expected_out) {
        goto out;
    }
    // initialize matrix with random numbers
    fill_matrix(h_in);
    // transpose the matrix on the CPU to obtain the expected result
    gettimeofday(&start_time, NULL);
    transpose_cpu(h_in, expected_out);
    gettimeofday(&stop_time, NULL);
    elapsed_time = (stop_time.tv_sec - start_time.tv_sec) * 1000 + (stop_time.tv_usec - start_time.tv_usec) / 1000.0;
    printf("CPU time: %f ms\n", elapsed_time);
    // Set device to be used for GPU executions
    checkCudaErrors(hipSetDevice(device));
    // Get device properties (now error-checked like the other API calls)
    checkCudaErrors(hipGetDeviceProperties(&prop, device));
    // Calculate peak memory bandwidth (GB/s) of GPU
    peakMemBwGbps = 2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6;
    printf("Peak memory bandwidth of GPU %d is %f GB/s\n", device, peakMemBwGbps);
    printf("====================================================\n");
    // allocate GPU memory
    checkCudaErrors(hipMalloc(&d_in, num_bytes));
    checkCudaErrors(hipMalloc(&d_out, num_bytes));
    // copy input from host memory to GPU memory
    checkCudaErrors(hipMemcpy(d_in, h_in, num_bytes, hipMemcpyHostToDevice));
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // launch serial kernel
    hipEventRecord(start);
    hipLaunchKernelGGL(( transpose_serial), dim3(1), dim3(1), 0, 0, d_in, d_out);
    hipEventRecord(stop);
    // copy output from GPU memory to host memory
    checkCudaErrors(hipMemcpy(h_out, d_out, num_bytes, hipMemcpyDeviceToHost));
    // calculate elapsed time in ms and memory utilization
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed_time, start, stop);
    memUtil = (2 * N * N * sizeof(int)) / (elapsed_time / 1.0e3) / (peakMemBwGbps * 1.0e9);
    printf("transpose_serial\nTime: %f ms\nMemory utilization %f%%\n%s results\n",
        elapsed_time,
        memUtil * 100,
        same_matrices(h_out, expected_out) ? "Correct" : "Wrong");
    printf("====================================================\n");
    // launch parallel per row kernel
    hipEventRecord(start);
    hipLaunchKernelGGL(( transpose_parallel_per_row), dim3(1), dim3(N), 0, 0, d_in, d_out);
    hipEventRecord(stop);
    // copy output from GPU memory to host memory
    checkCudaErrors(hipMemcpy(h_out, d_out, num_bytes, hipMemcpyDeviceToHost));
    // calculate elapsed time in ms and memory utilization
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed_time, start, stop);
    memUtil = (2 * N * N * sizeof(int)) / (elapsed_time / 1.0e3) / (peakMemBwGbps * 1.0e9);
    printf("transpose_parallel_per_row\nTime: %f ms\nMemory utilization %f%%\n%s results\n",
        elapsed_time,
        memUtil * 100,
        same_matrices(h_out, expected_out) ? "Correct" : "Wrong");
    printf("====================================================\n");
    // launch parallel per element kernel
    hipEventRecord(start);
    hipLaunchKernelGGL(( transpose_parallel_per_element), dim3(blocks), dim3(threads), 0, 0, d_in, d_out);
    hipEventRecord(stop);
    // copy output from GPU memory to host memory
    checkCudaErrors(hipMemcpy(h_out, d_out, num_bytes, hipMemcpyDeviceToHost));
    // calculate elapsed time in ms and check results
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed_time, start, stop);
    memUtil = (2 * N * N * sizeof(int)) / (elapsed_time / 1.0e3) / (peakMemBwGbps * 1.0e9);
    printf("transpose_parallel_per_element\nTime: %f ms\nMemory utilization %f%%\n%s results\n",
        elapsed_time,
        memUtil * 100,
        same_matrices(h_out, expected_out) ? "Correct" : "Wrong");
    printf("====================================================\n");
    // launch parallel per element tiled kernel
    hipEventRecord(start);
    hipLaunchKernelGGL(( transpose_parallel_per_element_tiled), dim3(blocks), dim3(threads), 0, 0, d_in, d_out);
    hipEventRecord(stop);
    // copy output from GPU memory to host memory
    checkCudaErrors(hipMemcpy(h_out, d_out, num_bytes, hipMemcpyDeviceToHost));
    // calculate elapsed time in ms and check results
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed_time, start, stop);
    memUtil = (2 * N * N * sizeof(int)) / (elapsed_time / 1.0e3) / (peakMemBwGbps * 1.0e9);
    printf("transpose_parallel_per_element_tiled (block: %d x %d)\nTime: %f ms\nMemory utilization %f%%\n%s results\n",
        K,
        K,
        elapsed_time,
        memUtil * 100,
        same_matrices(h_out, expected_out) ? "Correct" : "Wrong");
    printf("====================================================\n");
    // launch parallel per element tiled kernel with different block size (16x16)
    hipEventRecord(start);
    hipLaunchKernelGGL(( transpose_parallel_per_element_tiled16), dim3(blocks16x16), dim3(threads16x16), 0, 0, d_in, d_out);
    hipEventRecord(stop);
    // copy output from GPU memory to host memory
    checkCudaErrors(hipMemcpy(h_out, d_out, num_bytes, hipMemcpyDeviceToHost));
    // calculate elapsed time in ms and check results
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed_time, start, stop);
    memUtil = (2 * N * N * sizeof(int)) / (elapsed_time / 1.0e3) / (peakMemBwGbps * 1.0e9);
    printf("transpose_parallel_per_element_tiled (block: 16 x 16)\nTime: %f ms\nMemory utilization %f%%\n%s results\n",
        elapsed_time,
        memUtil * 100,
        same_matrices(h_out, expected_out) ? "Correct" : "Wrong");
    printf("====================================================\n");
    // release GPU resources (events were previously leaked)
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_in);
    hipFree(d_out);
out:
    free(h_in);
    free(h_out);
    free(expected_out);
    return 0;
}
#include <sys/time.h>
#include "helper_cuda.h"
const int N = 1024; // matrix size is N x N
const int K = 32; // tile size is K x K
// CPU reference: writes the transpose of the N x N row-major matrix 'in'
// into 'out'; produces the expected result for the GPU kernels.
void transpose_cpu(int *in, int *out)
{
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            out[col * N + row] = in[row * N + col];
        }
    }
}

// Baseline GPU kernel: a single thread transposes the entire matrix
// (launched <<<1, 1>>> from main()).
__global__ void transpose_serial(int *in, int *out)
{
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            out[col * N + row] = in[row * N + col];
        }
    }
}

// One thread per row (launched as a single block of N threads).
__global__ void transpose_parallel_per_row(int *in, int *out)
{
    int row = threadIdx.x;
    for (int col = 0; col < N; col++) {
        out[col * N + row] = in[row * N + col];
    }
}

// One thread per element on a 2-D grid; coalesced reads, strided writes.
__global__ void transpose_parallel_per_element(int *in, int *out)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    out[col * N + row] = in[row * N + col];
}

// Tiled transpose: each K x K block stages a tile in shared memory and
// writes it back transposed, so both global accesses are coalesced.
// NOTE(review): reading s_data[x][y] column-wise from an unpadded [K][K]
// tile causes shared-memory bank conflicts; padding the inner dimension to
// K+1 would remove them without changing results.
__global__ void transpose_parallel_per_element_tiled(int *in, int *out)
{
    __shared__ int s_data[K][K];
    int x = threadIdx.x, y = threadIdx.y;
    int in_corner_x = blockIdx.x * K, in_corner_y = blockIdx.y * K;
    int out_corner_x = in_corner_y, out_corner_y = in_corner_x;
    // write in[y][x] to s_data[y][x]
    s_data[y][x] = in[(in_corner_y + y) * N + (in_corner_x + x)];
    __syncthreads();
    // write s_data[x][y] to out[y][x]
    out[(out_corner_y + y) * N + (out_corner_x + x)] = s_data[x][y];
}

// Same tiled transpose with a fixed 16 x 16 tile, for comparing tile sizes.
// NOTE(review): same unpadded-tile bank-conflict caveat as above.
__global__ void transpose_parallel_per_element_tiled16(int *in, int *out)
{
    __shared__ int s_data[16][16];
    int x = threadIdx.x, y = threadIdx.y;
    int in_corner_x = blockIdx.x * 16, in_corner_y = blockIdx.y * 16;
    int out_corner_x = in_corner_y, out_corner_y = in_corner_x;
    // write in[y][x] to s_data[y][x]
    s_data[y][x] = in[(in_corner_y + y) * N + (in_corner_x + x)];
    __syncthreads();
    // write s_data[x][y] to out[y][x]
    out[(out_corner_y + y) * N + (out_corner_x + x)] = s_data[x][y];
}

// Debug helper: prints the N x N matrix row by row (not called from main()).
void print_matrix(int *in)
{
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            printf("%d ", in[row * N + col]);
        }
        printf("\n");
    }
}

// Fills the matrix with pseudo-random digits in [0, 9]; rand() is not
// seeded in this file, so the data is identical on every run.
void fill_matrix(int *in)
{
    int size = N * N;
    for (int i = 0; i < size; i++) {
        in[i] = rand() % 10;
    }
}

// return (matrix a == matrix b)
bool same_matrices(int *a, int *b)
{
    int size = N * N;
    for (int i = 0; i < size; i++) {
        if (a[i] != b[i]) {
            return false;
        }
    }
    return true;
}
/*
 * Benchmarks several GPU transpose implementations against a CPU reference:
 * serial, one-thread-per-row, one-thread-per-element, and shared-memory
 * tiled variants (K x K and 16 x 16). For each kernel it reports the run
 * time, the achieved fraction of peak memory bandwidth, and whether the
 * result matches the CPU transpose.
 *
 * Fixed: the report format strings used "\%" to print a literal percent
 * sign; that collapses to a bare '%' followed by '\n', an invalid printf
 * conversion (undefined behaviour). "%%" is the correct spelling.
 */
int main(int argc, char **argv)
{
    cudaEvent_t start, stop;
    struct timeval start_time, stop_time;
    float elapsed_time;
    int num_bytes = N * N * sizeof(int);
    int *h_in = (int*)malloc(num_bytes);
    int *h_out = (int*)malloc(num_bytes);
    int *expected_out = (int*)malloc(num_bytes);
    int *d_in, *d_out;
    dim3 blocks(N/K, N/K);          // blocks per grid
    dim3 threads(K, K);             // threads per block
    dim3 blocks16x16(N/16, N/16);   // blocks per grid
    dim3 threads16x16(16, 16);      // threads per block
    cudaDeviceProp prop;            // CUDA device properties
    int device = 0;                 // ID of device for GPU execution
    double peakMemBwGbps;           // GPU peak memory bandwidth in GB/s
    double memUtil;                 // GPU memory bandwidth utilization
    // not enough host memory (free(NULL) at 'out' is well defined)
    if (!h_in || !h_out || !expected_out) {
        goto out;
    }
    // initialize matrix with random numbers
    fill_matrix(h_in);
    // transpose the matrix on the CPU to obtain the expected result
    gettimeofday(&start_time, NULL);
    transpose_cpu(h_in, expected_out);
    gettimeofday(&stop_time, NULL);
    elapsed_time = (stop_time.tv_sec - start_time.tv_sec) * 1000 + (stop_time.tv_usec - start_time.tv_usec) / 1000.0;
    printf("CPU time: %f ms\n", elapsed_time);
    // Set device to be used for GPU executions
    checkCudaErrors(cudaSetDevice(device));
    // Get device properties (now error-checked like the other API calls)
    checkCudaErrors(cudaGetDeviceProperties(&prop, device));
    // Calculate peak memory bandwidth (GB/s) of GPU
    peakMemBwGbps = 2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6;
    printf("Peak memory bandwidth of GPU %d is %f GB/s\n", device, peakMemBwGbps);
    printf("====================================================\n");
    // allocate GPU memory
    checkCudaErrors(cudaMalloc(&d_in, num_bytes));
    checkCudaErrors(cudaMalloc(&d_out, num_bytes));
    // copy input from host memory to GPU memory
    checkCudaErrors(cudaMemcpy(d_in, h_in, num_bytes, cudaMemcpyHostToDevice));
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // launch serial kernel
    cudaEventRecord(start);
    transpose_serial<<<1, 1>>>(d_in, d_out);
    cudaEventRecord(stop);
    // copy output from GPU memory to host memory
    checkCudaErrors(cudaMemcpy(h_out, d_out, num_bytes, cudaMemcpyDeviceToHost));
    // calculate elapsed time in ms and memory utilization
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time, start, stop);
    memUtil = (2 * N * N * sizeof(int)) / (elapsed_time / 1.0e3) / (peakMemBwGbps * 1.0e9);
    printf("transpose_serial\nTime: %f ms\nMemory utilization %f%%\n%s results\n",
        elapsed_time,
        memUtil * 100,
        same_matrices(h_out, expected_out) ? "Correct" : "Wrong");
    printf("====================================================\n");
    // launch parallel per row kernel
    cudaEventRecord(start);
    transpose_parallel_per_row<<<1, N>>>(d_in, d_out);
    cudaEventRecord(stop);
    // copy output from GPU memory to host memory
    checkCudaErrors(cudaMemcpy(h_out, d_out, num_bytes, cudaMemcpyDeviceToHost));
    // calculate elapsed time in ms and memory utilization
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time, start, stop);
    memUtil = (2 * N * N * sizeof(int)) / (elapsed_time / 1.0e3) / (peakMemBwGbps * 1.0e9);
    printf("transpose_parallel_per_row\nTime: %f ms\nMemory utilization %f%%\n%s results\n",
        elapsed_time,
        memUtil * 100,
        same_matrices(h_out, expected_out) ? "Correct" : "Wrong");
    printf("====================================================\n");
    // launch parallel per element kernel
    cudaEventRecord(start);
    transpose_parallel_per_element<<<blocks, threads>>>(d_in, d_out);
    cudaEventRecord(stop);
    // copy output from GPU memory to host memory
    checkCudaErrors(cudaMemcpy(h_out, d_out, num_bytes, cudaMemcpyDeviceToHost));
    // calculate elapsed time in ms and check results
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time, start, stop);
    memUtil = (2 * N * N * sizeof(int)) / (elapsed_time / 1.0e3) / (peakMemBwGbps * 1.0e9);
    printf("transpose_parallel_per_element\nTime: %f ms\nMemory utilization %f%%\n%s results\n",
        elapsed_time,
        memUtil * 100,
        same_matrices(h_out, expected_out) ? "Correct" : "Wrong");
    printf("====================================================\n");
    // launch parallel per element tiled kernel
    cudaEventRecord(start);
    transpose_parallel_per_element_tiled<<<blocks, threads>>>(d_in, d_out);
    cudaEventRecord(stop);
    // copy output from GPU memory to host memory
    checkCudaErrors(cudaMemcpy(h_out, d_out, num_bytes, cudaMemcpyDeviceToHost));
    // calculate elapsed time in ms and check results
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time, start, stop);
    memUtil = (2 * N * N * sizeof(int)) / (elapsed_time / 1.0e3) / (peakMemBwGbps * 1.0e9);
    printf("transpose_parallel_per_element_tiled (block: %d x %d)\nTime: %f ms\nMemory utilization %f%%\n%s results\n",
        K,
        K,
        elapsed_time,
        memUtil * 100,
        same_matrices(h_out, expected_out) ? "Correct" : "Wrong");
    printf("====================================================\n");
    // launch parallel per element tiled kernel with different block size (16x16)
    cudaEventRecord(start);
    transpose_parallel_per_element_tiled16<<<blocks16x16, threads16x16>>>(d_in, d_out);
    cudaEventRecord(stop);
    // copy output from GPU memory to host memory
    checkCudaErrors(cudaMemcpy(h_out, d_out, num_bytes, cudaMemcpyDeviceToHost));
    // calculate elapsed time in ms and check results
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time, start, stop);
    memUtil = (2 * N * N * sizeof(int)) / (elapsed_time / 1.0e3) / (peakMemBwGbps * 1.0e9);
    printf("transpose_parallel_per_element_tiled (block: 16 x 16)\nTime: %f ms\nMemory utilization %f%%\n%s results\n",
        elapsed_time,
        memUtil * 100,
        same_matrices(h_out, expected_out) ? "Correct" : "Wrong");
    printf("====================================================\n");
    // release GPU resources (events were previously leaked)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_in);
    cudaFree(d_out);
out:
    free(h_in);
    free(h_out);
    free(expected_out);
    return 0;
}
30e38ae82d8915cd3cf177e654b94f0cabe7a0b4.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include "testlayers.h"
#include <array/ExtraArguments.h>
#include <array>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
using namespace sd;
// gtest fixture for the device-side buffers produced by ExtraArguments.
class CudaExtraArgumentsTests : public testing::Test {
public:
    CudaExtraArgumentsTests() {
        // Blank line keeps each test's output visually separated.
        printf("\n");
        fflush(stdout);
    }
};

// argumentsAsT<T>() should materialise the arguments {1, 2, 3} in device
// memory for both float and double; copy each buffer back and compare.
TEST_F(CudaExtraArgumentsTests, Basic_Test_1) {
    ExtraArguments args({1.0, 2.0, 3.0});
    float ef[] = {1.f, 2.f, 3.f};
    double ed[] = {1., 2., 3.};
    auto ptrFloat = reinterpret_cast<float *>(args.argumentsAsT<float>());
    auto ptrDouble = reinterpret_cast<double *>(args.argumentsAsT<double>());
    ASSERT_TRUE(ptrFloat != nullptr);
    ASSERT_TRUE(ptrDouble != nullptr);
    auto tmpFloat = new float[3];
    auto tmpDouble = new double[3];
    // NOTE(review): the hipMemcpy results are unchecked and the tmp buffers
    // leak if an ASSERT_NEAR below fires; also assumes the returned pointers
    // are device pointers — confirm against ExtraArguments' contract.
    hipMemcpy(tmpFloat, ptrFloat, 3 * sizeof(float), hipMemcpyDeviceToHost);
    hipMemcpy(tmpDouble, ptrDouble, 3 * sizeof(double), hipMemcpyDeviceToHost);
    for (int e = 0; e < 3; e++) {
        ASSERT_NEAR(ef[e], tmpFloat[e], 1e-5f);
    }
    for (int e = 0; e < 3; e++) {
        ASSERT_NEAR(ed[e], tmpDouble[e], 1e-5);
    }
    delete[] tmpFloat;
    delete[] tmpDouble;
}

// With no arguments supplied, argumentsAsT<T>() should return nullptr.
TEST_F(CudaExtraArgumentsTests, Basic_Test_2) {
    ExtraArguments args;
    auto ptrInt = args.argumentsAsT<int>();
    ASSERT_TRUE(ptrInt == nullptr);
}
| 30e38ae82d8915cd3cf177e654b94f0cabe7a0b4.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include "testlayers.h"
#include <array/ExtraArguments.h>
#include <array>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace sd;
// gtest fixture for the device-side buffers produced by ExtraArguments.
class CudaExtraArgumentsTests : public testing::Test {
public:
    CudaExtraArgumentsTests() {
        // Blank line keeps each test's output visually separated.
        printf("\n");
        fflush(stdout);
    }
};

// argumentsAsT<T>() should materialise the arguments {1, 2, 3} in device
// memory for both float and double; copy each buffer back and compare.
TEST_F(CudaExtraArgumentsTests, Basic_Test_1) {
    ExtraArguments args({1.0, 2.0, 3.0});
    float ef[] = {1.f, 2.f, 3.f};
    double ed[] = {1., 2., 3.};
    auto ptrFloat = reinterpret_cast<float *>(args.argumentsAsT<float>());
    auto ptrDouble = reinterpret_cast<double *>(args.argumentsAsT<double>());
    ASSERT_TRUE(ptrFloat != nullptr);
    ASSERT_TRUE(ptrDouble != nullptr);
    auto tmpFloat = new float[3];
    auto tmpDouble = new double[3];
    // NOTE(review): the cudaMemcpy results are unchecked and the tmp buffers
    // leak if an ASSERT_NEAR below fires; also assumes the returned pointers
    // are device pointers — confirm against ExtraArguments' contract.
    cudaMemcpy(tmpFloat, ptrFloat, 3 * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(tmpDouble, ptrDouble, 3 * sizeof(double), cudaMemcpyDeviceToHost);
    for (int e = 0; e < 3; e++) {
        ASSERT_NEAR(ef[e], tmpFloat[e], 1e-5f);
    }
    for (int e = 0; e < 3; e++) {
        ASSERT_NEAR(ed[e], tmpDouble[e], 1e-5);
    }
    delete[] tmpFloat;
    delete[] tmpDouble;
}

// With no arguments supplied, argumentsAsT<T>() should return nullptr.
TEST_F(CudaExtraArgumentsTests, Basic_Test_2) {
    ExtraArguments args;
    auto ptrInt = args.argumentsAsT<int>();
    ASSERT_TRUE(ptrInt == nullptr);
}
|
b8f81aceb315eef4cecb66d05a8cb282df316b84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Writes a two-plane coordinate map for a y-by-x grid into 'coords',
// centred on the grid middle: plane 0 (first x*y floats) holds the row
// coordinate (id_y - y/2), plane 1 holds the column coordinate (id_x - x/2).
// Launch with a 1-D grid covering at least x*y threads.
__global__ void set_coords_2D(float* coords, size_t y, size_t x){
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    size_t id_x = index % x;
    size_t id_y = index / x;
    if(index < x * y){
        // Float literals keep the arithmetic in single precision instead of
        // silently promoting it to double.
        coords[id_x + id_y * x] = id_y - (float)y/2.0f;
        coords[id_x + id_y * x + x*y] = id_x - (float)x/2.0f;
    }
    // The trailing __syncthreads() was removed: it was the kernel's final,
    // unconditionally reached statement, so it synchronised nothing.
}
// Writes a two-plane coordinate map for a y-by-x grid into 'coords',
// centred on the grid middle: plane 0 (first x*y floats) holds the row
// coordinate (id_y - y/2), plane 1 holds the column coordinate (id_x - x/2).
// Launch with a 1-D grid covering at least x*y threads.
__global__ void set_coords_2D(float* coords, size_t y, size_t x){
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    size_t id_x = index % x;
    size_t id_y = index / x;
    if(index < x * y){
        // Float literals keep the arithmetic in single precision instead of
        // silently promoting it to double.
        coords[id_x + id_y * x] = id_y - (float)y/2.0f;
        coords[id_x + id_y * x + x*y] = id_x - (float)x/2.0f;
    }
    // The trailing __syncthreads() was removed: it was the kernel's final,
    // unconditionally reached statement, so it synchronised nothing.
}
1aa1f86b0e0e061f595627f54c7b64bbfada16f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <CudaMathEngine.h>
#include <CudaMathEngineDnnConvs.h>
#include <MemoryHandleInternal.h>
#include <MathEngineCommon.h>
#include <CudaDevice.h>
#include <CudaCommon.h>
#include <Kernels/CudaDnnTimeConvKernels.h>
namespace NeoML {
CTimeConvolutionDesc* CCudaMathEngine::InitTimeConvolution( const CBlobDesc& source,
int stride, int paddingFront, int paddingBack, int dilation,
const CBlobDesc& filter, const CBlobDesc& result )
{
ASSERT_EXPR( stride > 0 );
ASSERT_EXPR( paddingFront >= 0 );
ASSERT_EXPR( paddingBack >= 0 );
ASSERT_EXPR( dilation > 0 );
ASSERT_EXPR( filter.BatchLength() == 1 );
ASSERT_EXPR( filter.Width() == 1 );
ASSERT_EXPR( filter.Depth() == 1 );
ASSERT_EXPR( filter.Channels() == source.ObjectSize() );
ASSERT_EXPR( source.BatchLength() + paddingFront + paddingBack >= ( filter.Height() - 1 ) * dilation + 1 );
ASSERT_EXPR( result.BatchLength() == ( source.BatchLength() - ( filter.Height() - 1 ) * dilation - 1 + paddingFront + paddingBack ) / stride + 1 );
ASSERT_EXPR( result.BatchWidth() == source.BatchWidth() );
ASSERT_EXPR( result.ListSize() == 1 && source.ListSize() == 1 );
ASSERT_EXPR( result.Width() == 1 );
ASSERT_EXPR( result.Height() == 1 );
ASSERT_EXPR( result.Depth() == 1 );
ASSERT_EXPR( result.Channels() == filter.BatchWidth() );
ASSERT_EXPR( paddingFront < ( filter.Height() - 1 ) * dilation + 1 );
ASSERT_EXPR( paddingBack < ( filter.Height() - 1 ) * dilation + 1 );
CCudaTimeConvolutionDesc* desc = new CCudaTimeConvolutionDesc();
desc->Internal.Source = source;
desc->Internal.Filter = filter;
desc->Internal.Result = result;
desc->Internal.Stride = stride;
desc->Internal.PaddingFront = paddingFront;
desc->Internal.PaddingBack = paddingBack;
desc->Internal.Dilation = dilation;
return desc;
}
void CCudaMathEngine::BlobTimeConvolution( const CTimeConvolutionDesc& convDesc,
const CConstFloatHandle& sourceData, const CConstFloatHandle& filterData, const CConstFloatHandle& freeTermData,
const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( filterData.GetMathEngine() == this );
ASSERT_EXPR( freeTermData.GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
const CCudaTimeConvolutionDescInternal& desc = static_cast<const CCudaTimeConvolutionDesc&>( convDesc ).Internal;
const CCudaBlobDesc& source = desc.Source;
const CCudaBlobDesc& filter = desc.Filter;
const CCudaBlobDesc& result = desc.Result;
if( filter.Height() == 1 && desc.Stride == 1 ) {
// This assert has already been checked in InitTimeConvolution
ASSERT_EXPR( desc.PaddingFront == 0 && desc.PaddingBack == 0 );
// Trivial case
MultiplyMatrixByTransposedMatrix(sourceData,
source.BatchLength() * source.BatchWidth(), source.ObjectSize(), source.ObjectSize(),
filterData, filter.ObjectCount(), source.ObjectSize(),
resultData + desc.PaddingFront * filter.ObjectCount(), filter.ObjectCount(), result.BlobSize());
} else {
// Convolution through temp matrix
const int tempMatrixWidth = filter.ObjectSize();
const int tempMatrixHeight = result.BlobSize() / filter.ObjectCount();
// Max amount of memory allowed is a half of math engine's free memory
const int maxInMemoryHeight = getCudaTempMatrixMaxHeight( tempMatrixHeight, tempMatrixWidth );
int matrixRowIndex = 0;
CFloatHandle currResult = resultData;
CFloatHandleStackVar tempMatrixPart( mathEngine(), maxInMemoryHeight * tempMatrixWidth );
// Build temp matrix part by part and add filterDiff of that part
while( matrixRowIndex < tempMatrixHeight ) {
const int currPartHeight = min( tempMatrixHeight - matrixRowIndex, maxInMemoryHeight );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D( blockCount, threadCount, currPartHeight, tempMatrixWidth );
hipLaunchKernelGGL(( BuildTempMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( sourceData ), currPartHeight,
tempMatrixWidth, GetRaw( tempMatrixPart.GetHandle() ), matrixRowIndex );
MultiplyMatrixByTransposedMatrix(tempMatrixPart, currPartHeight, tempMatrixWidth, tempMatrixWidth,
filterData, filter.ObjectCount(), tempMatrixWidth,
currResult, filter.ObjectCount(), result.BlobSize());
matrixRowIndex += currPartHeight;
currResult += currPartHeight * filter.ObjectCount();
}
}
// Free term
AddVectorToMatrixRows( 1, resultData, resultData, result.ObjectCount(), result.ObjectSize(), freeTermData );
}
void CCudaMathEngine::BlobTimeConvolutionBackward( const CTimeConvolutionDesc& convDesc,
const CConstFloatHandle& outputDiffData, const CConstFloatHandle& filterData, const CConstFloatHandle& /*freeTerm*/,
const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( filterData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaTimeConvolutionDescInternal& desc = static_cast<const CCudaTimeConvolutionDesc&>( convDesc ).Internal;
const CCudaBlobDesc& inputDiff = desc.Source;
const CCudaBlobDesc& filter = desc.Filter;
const CCudaBlobDesc& outputDiff = desc.Result;
if( desc.Stride == 1 && filter.Height() == 1 ) {
// This assert has already been checked in InitTimeConvolution
ASSERT_EXPR( desc.PaddingFront == 0 && desc.PaddingBack == 0 );
// Trivial case
MultiplyMatrixByMatrix( 1, outputDiffData, outputDiff.ObjectCount(), outputDiff.ObjectSize(),
filterData, filter.ObjectSize(), inputDiffData, inputDiff.BlobSize() );
} else {
// Let's try to build temp matrix
const int tempMatrixWidth = filter.ObjectSize();
const int tempMatrixHeight = outputDiff.BlobSize() / filter.ObjectCount();
// Max amount of memory allowed is a half of math engine's free memory
const int maxInMemoryHeight = getCudaTempMatrixMaxHeight( tempMatrixHeight, tempMatrixWidth );
int matrixRowIndex = 0;
CConstFloatHandle currOutputDiff = outputDiffData;
CFloatHandleStackVar tempMatrixPart( mathEngine(), maxInMemoryHeight * tempMatrixWidth );
VectorFill( inputDiffData, 0.f, inputDiff.BlobSize() );
const int combineCount = max( 1, BlobTimeConvolutionBackwardUnpackCombine / filter.Height() );
const int xSizeNorm = (inputDiff.ObjectSize() + combineCount - 1) / combineCount;
while( matrixRowIndex < tempMatrixHeight ) {
const int currPartHeight = min( tempMatrixHeight - matrixRowIndex, maxInMemoryHeight );
MultiplyMatrixByMatrix( 1, currOutputDiff, currPartHeight, outputDiff.ObjectSize(),
filterData, filter.ObjectSize(), tempMatrixPart, maxInMemoryHeight * tempMatrixWidth );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 512, blockCount, threadCount, inputDiff.ObjectCount(), xSizeNorm);
hipLaunchKernelGGL(( BlobTimeConvolutionBackwardUnpackKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( filterData ),
GetRaw( inputDiffData ), xSizeNorm, combineCount, GetRaw( tempMatrixPart.GetHandle() ), matrixRowIndex, currPartHeight );
currOutputDiff += currPartHeight * outputDiff.ObjectSize();
matrixRowIndex += currPartHeight;
}
}
}
void CCudaMathEngine::BlobTimeConvolutionLearnAdd( const CTimeConvolutionDesc& convDesc, const CConstFloatHandle& inputData,
const CConstFloatHandle& outputDiffData, const CFloatHandle& filterDiffData, const CFloatHandle& freeTermDiffData )
{
ASSERT_EXPR( inputData.GetMathEngine() == this );
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( filterDiffData.GetMathEngine() == this );
ASSERT_EXPR( freeTermDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaTimeConvolutionDescInternal& desc = static_cast<const CCudaTimeConvolutionDesc&>( convDesc ).Internal;
const CCudaBlobDesc& filterDiff = desc.Filter;
const CCudaBlobDesc& outputDiff = desc.Result;
// Train the filter
if( filterDiff.Height() == 1 && desc.Stride == 1 ) {
// This assert has already been checked in InitTimeConvolution
ASSERT_EXPR( desc.PaddingFront == 0 && desc.PaddingBack == 0 );
// Trivial case
MultiplyTransposedMatrixByMatrixAndAdd( outputDiffData, desc.Source.ObjectCount(),
outputDiff.ObjectSize(), outputDiff.ObjectSize(), inputData, desc.Source.ObjectSize(),
desc.Source.ObjectSize(), filterDiffData, filterDiff.ObjectSize(), filterDiff.BlobSize() );
} else {
// Let's try to build temp matrix
const int tempMatrixWidth = filterDiff.ObjectSize();
const int tempMatrixHeight = outputDiff.BlobSize() / filterDiff.ObjectCount();
// Max amount of memory allowed is a half of math engine's free memory
const int maxInMemoryHeight = getCudaTempMatrixMaxHeight( tempMatrixHeight, tempMatrixWidth );
if( maxInMemoryHeight == 0 ) {
// naive implementatino which doesn't use additional memory
int blockCount;
int threadCount;
getCudaTaskGrid( blockCount, threadCount, desc.Filter.BlobSize() );
hipLaunchKernelGGL(( BlobTimeConvolutionLearnFilterKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( inputData ),
GetRaw( outputDiffData ), GetRaw( filterDiffData ) );
} else {
int matrixRowIndex = 0;
CConstFloatHandle currOutputDiff = outputDiffData;
CFloatHandleStackVar tempMatrixPart( mathEngine(), maxInMemoryHeight * tempMatrixWidth );
const int filterCount = desc.Result.ObjectSize();
// Build temp matrix part by part and add filterDiff of that part
while( matrixRowIndex < tempMatrixHeight ) {
const int currPartHeight = min( tempMatrixHeight - matrixRowIndex, maxInMemoryHeight );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D( blockCount, threadCount, currPartHeight, tempMatrixWidth );
hipLaunchKernelGGL(( BuildTempMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( inputData ), currPartHeight,
tempMatrixWidth, GetRaw( tempMatrixPart.GetHandle() ), matrixRowIndex );
MultiplyTransposedMatrixByMatrixAndAdd( currOutputDiff, currPartHeight, filterCount, filterCount,
tempMatrixPart.GetHandle(), tempMatrixWidth, tempMatrixWidth, filterDiffData, tempMatrixWidth, filterDiff.BlobSize() );
matrixRowIndex += currPartHeight;
currOutputDiff += currPartHeight * filterCount;
}
}
}
// Train the free term
SumMatrixRowsAdd( 1, freeTermDiffData, outputDiffData, outputDiff.ObjectCount(), filterDiff.ObjectCount() );
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
| 1aa1f86b0e0e061f595627f54c7b64bbfada16f1.cu | /* Copyright © 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <CudaMathEngine.h>
#include <CudaMathEngineDnnConvs.h>
#include <MemoryHandleInternal.h>
#include <MathEngineCommon.h>
#include <CudaDevice.h>
#include <CudaCommon.h>
#include <Kernels/CudaDnnTimeConvKernels.h>
namespace NeoML {
CTimeConvolutionDesc* CCudaMathEngine::InitTimeConvolution( const CBlobDesc& source,
int stride, int paddingFront, int paddingBack, int dilation,
const CBlobDesc& filter, const CBlobDesc& result )
{
ASSERT_EXPR( stride > 0 );
ASSERT_EXPR( paddingFront >= 0 );
ASSERT_EXPR( paddingBack >= 0 );
ASSERT_EXPR( dilation > 0 );
ASSERT_EXPR( filter.BatchLength() == 1 );
ASSERT_EXPR( filter.Width() == 1 );
ASSERT_EXPR( filter.Depth() == 1 );
ASSERT_EXPR( filter.Channels() == source.ObjectSize() );
ASSERT_EXPR( source.BatchLength() + paddingFront + paddingBack >= ( filter.Height() - 1 ) * dilation + 1 );
ASSERT_EXPR( result.BatchLength() == ( source.BatchLength() - ( filter.Height() - 1 ) * dilation - 1 + paddingFront + paddingBack ) / stride + 1 );
ASSERT_EXPR( result.BatchWidth() == source.BatchWidth() );
ASSERT_EXPR( result.ListSize() == 1 && source.ListSize() == 1 );
ASSERT_EXPR( result.Width() == 1 );
ASSERT_EXPR( result.Height() == 1 );
ASSERT_EXPR( result.Depth() == 1 );
ASSERT_EXPR( result.Channels() == filter.BatchWidth() );
ASSERT_EXPR( paddingFront < ( filter.Height() - 1 ) * dilation + 1 );
ASSERT_EXPR( paddingBack < ( filter.Height() - 1 ) * dilation + 1 );
CCudaTimeConvolutionDesc* desc = new CCudaTimeConvolutionDesc();
desc->Internal.Source = source;
desc->Internal.Filter = filter;
desc->Internal.Result = result;
desc->Internal.Stride = stride;
desc->Internal.PaddingFront = paddingFront;
desc->Internal.PaddingBack = paddingBack;
desc->Internal.Dilation = dilation;
return desc;
}
void CCudaMathEngine::BlobTimeConvolution( const CTimeConvolutionDesc& convDesc,
const CConstFloatHandle& sourceData, const CConstFloatHandle& filterData, const CConstFloatHandle& freeTermData,
const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( filterData.GetMathEngine() == this );
ASSERT_EXPR( freeTermData.GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
const CCudaTimeConvolutionDescInternal& desc = static_cast<const CCudaTimeConvolutionDesc&>( convDesc ).Internal;
const CCudaBlobDesc& source = desc.Source;
const CCudaBlobDesc& filter = desc.Filter;
const CCudaBlobDesc& result = desc.Result;
if( filter.Height() == 1 && desc.Stride == 1 ) {
// This assert has already been checked in InitTimeConvolution
ASSERT_EXPR( desc.PaddingFront == 0 && desc.PaddingBack == 0 );
// Trivial case
MultiplyMatrixByTransposedMatrix(sourceData,
source.BatchLength() * source.BatchWidth(), source.ObjectSize(), source.ObjectSize(),
filterData, filter.ObjectCount(), source.ObjectSize(),
resultData + desc.PaddingFront * filter.ObjectCount(), filter.ObjectCount(), result.BlobSize());
} else {
// Convolution through temp matrix
const int tempMatrixWidth = filter.ObjectSize();
const int tempMatrixHeight = result.BlobSize() / filter.ObjectCount();
// Max amount of memory allowed is a half of math engine's free memory
const int maxInMemoryHeight = getCudaTempMatrixMaxHeight( tempMatrixHeight, tempMatrixWidth );
int matrixRowIndex = 0;
CFloatHandle currResult = resultData;
CFloatHandleStackVar tempMatrixPart( mathEngine(), maxInMemoryHeight * tempMatrixWidth );
// Build temp matrix part by part and add filterDiff of that part
while( matrixRowIndex < tempMatrixHeight ) {
const int currPartHeight = min( tempMatrixHeight - matrixRowIndex, maxInMemoryHeight );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D( blockCount, threadCount, currPartHeight, tempMatrixWidth );
BuildTempMatrixKernel<<<blockCount, threadCount>>>( desc, GetRaw( sourceData ), currPartHeight,
tempMatrixWidth, GetRaw( tempMatrixPart.GetHandle() ), matrixRowIndex );
MultiplyMatrixByTransposedMatrix(tempMatrixPart, currPartHeight, tempMatrixWidth, tempMatrixWidth,
filterData, filter.ObjectCount(), tempMatrixWidth,
currResult, filter.ObjectCount(), result.BlobSize());
matrixRowIndex += currPartHeight;
currResult += currPartHeight * filter.ObjectCount();
}
}
// Free term
AddVectorToMatrixRows( 1, resultData, resultData, result.ObjectCount(), result.ObjectSize(), freeTermData );
}
void CCudaMathEngine::BlobTimeConvolutionBackward( const CTimeConvolutionDesc& convDesc,
const CConstFloatHandle& outputDiffData, const CConstFloatHandle& filterData, const CConstFloatHandle& /*freeTerm*/,
const CFloatHandle& inputDiffData )
{
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( filterData.GetMathEngine() == this );
ASSERT_EXPR( inputDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaTimeConvolutionDescInternal& desc = static_cast<const CCudaTimeConvolutionDesc&>( convDesc ).Internal;
const CCudaBlobDesc& inputDiff = desc.Source;
const CCudaBlobDesc& filter = desc.Filter;
const CCudaBlobDesc& outputDiff = desc.Result;
if( desc.Stride == 1 && filter.Height() == 1 ) {
// This assert has already been checked in InitTimeConvolution
ASSERT_EXPR( desc.PaddingFront == 0 && desc.PaddingBack == 0 );
// Trivial case
MultiplyMatrixByMatrix( 1, outputDiffData, outputDiff.ObjectCount(), outputDiff.ObjectSize(),
filterData, filter.ObjectSize(), inputDiffData, inputDiff.BlobSize() );
} else {
// Let's try to build temp matrix
const int tempMatrixWidth = filter.ObjectSize();
const int tempMatrixHeight = outputDiff.BlobSize() / filter.ObjectCount();
// Max amount of memory allowed is a half of math engine's free memory
const int maxInMemoryHeight = getCudaTempMatrixMaxHeight( tempMatrixHeight, tempMatrixWidth );
int matrixRowIndex = 0;
CConstFloatHandle currOutputDiff = outputDiffData;
CFloatHandleStackVar tempMatrixPart( mathEngine(), maxInMemoryHeight * tempMatrixWidth );
VectorFill( inputDiffData, 0.f, inputDiff.BlobSize() );
const int combineCount = max( 1, BlobTimeConvolutionBackwardUnpackCombine / filter.Height() );
const int xSizeNorm = (inputDiff.ObjectSize() + combineCount - 1) / combineCount;
while( matrixRowIndex < tempMatrixHeight ) {
const int currPartHeight = min( tempMatrixHeight - matrixRowIndex, maxInMemoryHeight );
MultiplyMatrixByMatrix( 1, currOutputDiff, currPartHeight, outputDiff.ObjectSize(),
filterData, filter.ObjectSize(), tempMatrixPart, maxInMemoryHeight * tempMatrixWidth );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2DMinYX(1, 512, blockCount, threadCount, inputDiff.ObjectCount(), xSizeNorm);
BlobTimeConvolutionBackwardUnpackKernel<<<blockCount, threadCount>>>( desc, GetRaw( filterData ),
GetRaw( inputDiffData ), xSizeNorm, combineCount, GetRaw( tempMatrixPart.GetHandle() ), matrixRowIndex, currPartHeight );
currOutputDiff += currPartHeight * outputDiff.ObjectSize();
matrixRowIndex += currPartHeight;
}
}
}
void CCudaMathEngine::BlobTimeConvolutionLearnAdd( const CTimeConvolutionDesc& convDesc, const CConstFloatHandle& inputData,
const CConstFloatHandle& outputDiffData, const CFloatHandle& filterDiffData, const CFloatHandle& freeTermDiffData )
{
ASSERT_EXPR( inputData.GetMathEngine() == this );
ASSERT_EXPR( outputDiffData.GetMathEngine() == this );
ASSERT_EXPR( filterDiffData.GetMathEngine() == this );
ASSERT_EXPR( freeTermDiffData.GetMathEngine() == this );
SetCudaDevice( device->DeviceNumber );
const CCudaTimeConvolutionDescInternal& desc = static_cast<const CCudaTimeConvolutionDesc&>( convDesc ).Internal;
const CCudaBlobDesc& filterDiff = desc.Filter;
const CCudaBlobDesc& outputDiff = desc.Result;
// Train the filter
if( filterDiff.Height() == 1 && desc.Stride == 1 ) {
// This assert has already been checked in InitTimeConvolution
ASSERT_EXPR( desc.PaddingFront == 0 && desc.PaddingBack == 0 );
// Trivial case
MultiplyTransposedMatrixByMatrixAndAdd( outputDiffData, desc.Source.ObjectCount(),
outputDiff.ObjectSize(), outputDiff.ObjectSize(), inputData, desc.Source.ObjectSize(),
desc.Source.ObjectSize(), filterDiffData, filterDiff.ObjectSize(), filterDiff.BlobSize() );
} else {
// Let's try to build temp matrix
const int tempMatrixWidth = filterDiff.ObjectSize();
const int tempMatrixHeight = outputDiff.BlobSize() / filterDiff.ObjectCount();
// Max amount of memory allowed is a half of math engine's free memory
const int maxInMemoryHeight = getCudaTempMatrixMaxHeight( tempMatrixHeight, tempMatrixWidth );
if( maxInMemoryHeight == 0 ) {
// naive implementatino which doesn't use additional memory
int blockCount;
int threadCount;
getCudaTaskGrid( blockCount, threadCount, desc.Filter.BlobSize() );
BlobTimeConvolutionLearnFilterKernel<<<blockCount, threadCount>>>( desc, GetRaw( inputData ),
GetRaw( outputDiffData ), GetRaw( filterDiffData ) );
} else {
int matrixRowIndex = 0;
CConstFloatHandle currOutputDiff = outputDiffData;
CFloatHandleStackVar tempMatrixPart( mathEngine(), maxInMemoryHeight * tempMatrixWidth );
const int filterCount = desc.Result.ObjectSize();
// Build temp matrix part by part and add filterDiff of that part
while( matrixRowIndex < tempMatrixHeight ) {
const int currPartHeight = min( tempMatrixHeight - matrixRowIndex, maxInMemoryHeight );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D( blockCount, threadCount, currPartHeight, tempMatrixWidth );
BuildTempMatrixKernel<<<blockCount, threadCount>>>( desc, GetRaw( inputData ), currPartHeight,
tempMatrixWidth, GetRaw( tempMatrixPart.GetHandle() ), matrixRowIndex );
MultiplyTransposedMatrixByMatrixAndAdd( currOutputDiff, currPartHeight, filterCount, filterCount,
tempMatrixPart.GetHandle(), tempMatrixWidth, tempMatrixWidth, filterDiffData, tempMatrixWidth, filterDiff.BlobSize() );
matrixRowIndex += currPartHeight;
currOutputDiff += currPartHeight * filterCount;
}
}
}
// Train the free term
SumMatrixRowsAdd( 1, freeTermDiffData, outputDiffData, outputDiff.ObjectCount(), filterDiff.ObjectCount() );
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
|
2ec151d95ecb265d88490ac4f6e450b84251b9b5.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width_per_chan_hswish.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
hipStream_t stream);
| 2ec151d95ecb265d88490ac4f6e450b84251b9b5.cu | /**
* \file dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width_per_chan_hswish.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
cudaStream_t stream);
|
ed3f402bdb9d247dfb9a3decc6a05e7aab697232.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Get the fitness value of each agent according to their position.
Copyright (C) 2011 Edgard Nikitiuk <edgnik@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _GET_FITNESS_
#define _GET_FITNESS_
#include "gsa.h"
__global__ void get_fitness( int func, int n_dim, float *position, float *fitness )
{
int i, j, d, m=10, p=blockIdx.x;
float pi = 3.141592654f, a=20.0f, b=0.2f, c=2*pi;
float sum=0.0f, sum_i=0.0f, sum_j=0.0f, sum_pow=0.0f, sum_cos=0.0f, prod_cos=0.0f;
switch( func )
{
// De Jong
case 1:
for( d=0; d<n_dim; d++ )
{
sum = sum + powf(position[d+p*n_dim], 2);
}
fitness[p] = sum;
break;
// Sphere
case 2:
for( d=0; d<n_dim; d++ )
{
sum = sum + ( (d+1) * powf(position[d+p*n_dim], 2) );
}
fitness[p] = sum;
break;
// Rotated hyper-ellipsoid
case 3:
for( i=0; i<n_dim; i++ )
{
sum_j = 0.0;
for( j=0; j<(i+1); j++ )
{
sum_j = sum_j + powf(position[j+p*n_dim], 2);
}
sum_i = sum_i + sum_j;
}
fitness[p] = sum_i;
break;
// Rosenbrock
case 4:
for( d=0; d<(n_dim-1); d++ )
{
sum = sum + ( powf((1.0f - position[d+p*n_dim]), 2) + (100.0f * powf((position[d+p*n_dim+1] - powf(position[d+p*n_dim], 2)), 2)) );
}
fitness[p] = sum;
break;
// Rastrigin
case 5:
for( d=0; d<n_dim; d++ )
{
sum = sum + ( powf(position[d+p*n_dim], 2) - (10.0f * cos(2 * pi * position[d+p*n_dim])) );
}
fitness[p] = (10.0f * n_dim) + sum;
break;
// Schwefel
case 6:
for( d=0; d<n_dim; d++ )
{
sum = sum + ( -position[d+p*n_dim] * sin(powf(fabs(position[d+p*n_dim]), 0.5)) );
}
fitness[p] = sum;
break;
// Griewangk
case 7:
sum_pow = pow(position[p*n_dim], 2); prod_cos = cos(position[p*n_dim]/sqrtf(1));
for( d=1; d<n_dim; d++ )
{
sum_pow = sum_pow + powf(position[d+p*n_dim], 2);
prod_cos = prod_cos * cos(position[d+p*n_dim]/sqrtf(d+1));
}
fitness[p] = (sum_pow/4000) - prod_cos + 1;
break;
// Ackley
case 8:
for( d=0; d<n_dim; d++ )
{
sum_pow = sum_pow + powf(position[d+p*n_dim], 2);
sum_cos = sum_cos + cos(c*position[d+p*n_dim]);
}
fitness[p] = -a * expf(-b * sqrtf(sum_pow/n_dim) - expf(sum_cos/n_dim)) + a + expf(1);
break;
// Michalewicz
case 9:
for( d=0; d<n_dim; d++ )
{
sum = sum + ( sin(position[d+p*n_dim]) * powf(sin((d+1) * powf(position[d+p*n_dim], 2) / pi), 2*m) );
}
fitness[p] = -sum;
break;
}
}
#endif
| ed3f402bdb9d247dfb9a3decc6a05e7aab697232.cu | /*
Get the fitness value of each agent according to their position.
Copyright (C) 2011 Edgard Nikitiuk <edgnik@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _GET_FITNESS_
#define _GET_FITNESS_
#include "gsa.h"
__global__ void get_fitness( int func, int n_dim, float *position, float *fitness )
{
int i, j, d, m=10, p=blockIdx.x;
float pi = 3.141592654f, a=20.0f, b=0.2f, c=2*pi;
float sum=0.0f, sum_i=0.0f, sum_j=0.0f, sum_pow=0.0f, sum_cos=0.0f, prod_cos=0.0f;
switch( func )
{
// De Jong
case 1:
for( d=0; d<n_dim; d++ )
{
sum = sum + powf(position[d+p*n_dim], 2);
}
fitness[p] = sum;
break;
// Sphere
case 2:
for( d=0; d<n_dim; d++ )
{
sum = sum + ( (d+1) * powf(position[d+p*n_dim], 2) );
}
fitness[p] = sum;
break;
// Rotated hyper-ellipsoid
case 3:
for( i=0; i<n_dim; i++ )
{
sum_j = 0.0;
for( j=0; j<(i+1); j++ )
{
sum_j = sum_j + powf(position[j+p*n_dim], 2);
}
sum_i = sum_i + sum_j;
}
fitness[p] = sum_i;
break;
// Rosenbrock
case 4:
for( d=0; d<(n_dim-1); d++ )
{
sum = sum + ( powf((1.0f - position[d+p*n_dim]), 2) + (100.0f * powf((position[d+p*n_dim+1] - powf(position[d+p*n_dim], 2)), 2)) );
}
fitness[p] = sum;
break;
// Rastrigin
case 5:
for( d=0; d<n_dim; d++ )
{
sum = sum + ( powf(position[d+p*n_dim], 2) - (10.0f * cos(2 * pi * position[d+p*n_dim])) );
}
fitness[p] = (10.0f * n_dim) + sum;
break;
// Schwefel
case 6:
for( d=0; d<n_dim; d++ )
{
sum = sum + ( -position[d+p*n_dim] * sin(powf(fabs(position[d+p*n_dim]), 0.5)) );
}
fitness[p] = sum;
break;
// Griewangk
case 7:
sum_pow = pow(position[p*n_dim], 2); prod_cos = cos(position[p*n_dim]/sqrtf(1));
for( d=1; d<n_dim; d++ )
{
sum_pow = sum_pow + powf(position[d+p*n_dim], 2);
prod_cos = prod_cos * cos(position[d+p*n_dim]/sqrtf(d+1));
}
fitness[p] = (sum_pow/4000) - prod_cos + 1;
break;
// Ackley
case 8:
for( d=0; d<n_dim; d++ )
{
sum_pow = sum_pow + powf(position[d+p*n_dim], 2);
sum_cos = sum_cos + cos(c*position[d+p*n_dim]);
}
fitness[p] = -a * expf(-b * sqrtf(sum_pow/n_dim) - expf(sum_cos/n_dim)) + a + expf(1);
break;
// Michalewicz
case 9:
for( d=0; d<n_dim; d++ )
{
sum = sum + ( sin(position[d+p*n_dim]) * powf(sin((d+1) * powf(position[d+p*n_dim], 2) / pi), 2*m) );
}
fitness[p] = -sum;
break;
}
}
#endif
|
54942699c9140823bf5bd7cd87d1cbf6b7b24198.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include "CUDACore/cudaCheck.h"
#ifdef USE_BL
#include "plugin-PixelTriplets/BrokenLine.h"
#else
#include "plugin-PixelTriplets/RiemannFit.h"
#endif
#include "test_common.h"
using namespace Eigen;
namespace Rfit {
constexpr uint32_t maxNumberOfTracks() { return 5 * 1024; }
constexpr uint32_t stride() { return maxNumberOfTracks(); }
// hits
template <int N>
using Matrix3xNd = Eigen::Matrix<double, 3, N>;
template <int N>
using Map3xNd = Eigen::Map<Matrix3xNd<N>, 0, Eigen::Stride<3 * stride(), stride()>>;
// errors
template <int N>
using Matrix6xNf = Eigen::Matrix<float, 6, N>;
template <int N>
using Map6xNf = Eigen::Map<Matrix6xNf<N>, 0, Eigen::Stride<6 * stride(), stride()>>;
// fast fit
using Map4d = Eigen::Map<Vector4d, 0, Eigen::InnerStride<stride()>>;
} // namespace Rfit
template <int N>
__global__ void kernelPrintSizes(double* __restrict__ phits, float* __restrict__ phits_ge) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
Rfit::Map3xNd<N> hits(phits + i, 3, 4);
Rfit::Map6xNf<N> hits_ge(phits_ge + i, 6, 4);
if (i != 0)
return;
printf("GPU sizes %lu %lu %lu %lu %lu\n",
sizeof(hits[i]),
sizeof(hits_ge[i]),
sizeof(Vector4d),
sizeof(Rfit::line_fit),
sizeof(Rfit::circle_fit));
}
template <int N>
__global__ void kernelFastFit(double* __restrict__ phits, double* __restrict__ presults) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
Rfit::Map3xNd<N> hits(phits + i, 3, N);
Rfit::Map4d result(presults + i, 4);
#ifdef USE_BL
BrokenLine::BL_Fast_fit(hits, result);
#else
Rfit::Fast_fit(hits, result);
#endif
}
#ifdef USE_BL
template <int N>
__global__ void kernelBrokenLineFit(double* __restrict__ phits,
float* __restrict__ phits_ge,
double* __restrict__ pfast_fit_input,
double B,
Rfit::circle_fit* circle_fit,
Rfit::line_fit* line_fit) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
Rfit::Map3xNd<N> hits(phits + i, 3, N);
Rfit::Map4d fast_fit_input(pfast_fit_input + i, 4);
Rfit::Map6xNf<N> hits_ge(phits_ge + i, 6, N);
BrokenLine::PreparedBrokenLineData<N> data;
Rfit::Matrix3d Jacob;
auto& line_fit_results = line_fit[i];
auto& circle_fit_results = circle_fit[i];
BrokenLine::prepareBrokenLineData(hits, fast_fit_input, B, data);
BrokenLine::BL_Line_fit(hits_ge, fast_fit_input, B, data, line_fit_results);
BrokenLine::BL_Circle_fit(hits, hits_ge, fast_fit_input, B, data, circle_fit_results);
Jacob << 1., 0, 0, 0, 1., 0, 0, 0,
-B / std::copysign(Rfit::sqr(circle_fit_results.par(2)), circle_fit_results.par(2));
circle_fit_results.par(2) = B / std::abs(circle_fit_results.par(2));
circle_fit_results.cov = Jacob * circle_fit_results.cov * Jacob.transpose();
#ifdef TEST_DEBUG
if (0 == i) {
printf("Circle param %f,%f,%f\n", circle_fit[i].par(0), circle_fit[i].par(1), circle_fit[i].par(2));
}
#endif
}
#else
template <int N>
__global__ void kernelCircleFit(double* __restrict__ phits,
float* __restrict__ phits_ge,
double* __restrict__ pfast_fit_input,
double B,
Rfit::circle_fit* circle_fit_resultsGPU) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
Rfit::Map3xNd<N> hits(phits + i, 3, N);
Rfit::Map4d fast_fit_input(pfast_fit_input + i, 4);
Rfit::Map6xNf<N> hits_ge(phits_ge + i, 6, N);
constexpr auto n = N;
Rfit::VectorNd<N> rad = (hits.block(0, 0, 2, n).colwise().norm());
Rfit::Matrix2Nd<N> hits_cov = MatrixXd::Zero(2 * n, 2 * n);
Rfit::loadCovariance2D(hits_ge, hits_cov);
#ifdef TEST_DEBUG
if (0 == i) {
printf("hits %f, %f\n", hits.block(0, 0, 2, n)(0, 0), hits.block(0, 0, 2, n)(0, 1));
printf("hits %f, %f\n", hits.block(0, 0, 2, n)(1, 0), hits.block(0, 0, 2, n)(1, 1));
printf("fast_fit_input(0): %f\n", fast_fit_input(0));
printf("fast_fit_input(1): %f\n", fast_fit_input(1));
printf("fast_fit_input(2): %f\n", fast_fit_input(2));
printf("fast_fit_input(3): %f\n", fast_fit_input(3));
printf("rad(0,0): %f\n", rad(0, 0));
printf("rad(1,1): %f\n", rad(1, 1));
printf("rad(2,2): %f\n", rad(2, 2));
printf("hits_cov(0,0): %f\n", (*hits_cov)(0, 0));
printf("hits_cov(1,1): %f\n", (*hits_cov)(1, 1));
printf("hits_cov(2,2): %f\n", (*hits_cov)(2, 2));
printf("hits_cov(11,11): %f\n", (*hits_cov)(11, 11));
printf("B: %f\n", B);
}
#endif
circle_fit_resultsGPU[i] = Rfit::Circle_fit(hits.block(0, 0, 2, n), hits_cov, fast_fit_input, rad, B, true);
#ifdef TEST_DEBUG
if (0 == i) {
printf("Circle param %f,%f,%f\n",
circle_fit_resultsGPU[i].par(0),
circle_fit_resultsGPU[i].par(1),
circle_fit_resultsGPU[i].par(2));
}
#endif
}
template <int N>
__global__ void kernelLineFit(double* __restrict__ phits,
float* __restrict__ phits_ge,
double B,
Rfit::circle_fit* circle_fit,
double* __restrict__ pfast_fit_input,
Rfit::line_fit* line_fit) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
Rfit::Map3xNd<N> hits(phits + i, 3, N);
Rfit::Map4d fast_fit_input(pfast_fit_input + i, 4);
Rfit::Map6xNf<N> hits_ge(phits_ge + i, 6, N);
line_fit[i] = Rfit::Line_fit(hits, hits_ge, circle_fit[i], fast_fit_input, B, true);
}
#endif
template <typename M3xN, typename M6xN>
__device__ __host__ void fillHitsAndHitsCov(M3xN& hits, M6xN& hits_ge) {
constexpr uint32_t N = M3xN::ColsAtCompileTime;
if (N == 5) {
hits << 2.934787, 6.314229, 8.936963, 10.360559, 12.856387, 0.773211, 1.816356, 2.765734, 3.330824, 4.422212,
-10.980247, -23.162731, -32.759060, -38.061260, -47.518867;
hits_ge.col(0) << 1.424715e-07, -4.996975e-07, 1.752614e-06, 3.660689e-11, 1.644638e-09, 7.346080e-05;
hits_ge.col(1) << 6.899177e-08, -1.873414e-07, 5.087101e-07, -2.078806e-10, -2.210498e-11, 4.346079e-06;
hits_ge.col(2) << 1.406273e-06, 4.042467e-07, 6.391180e-07, -3.141497e-07, 6.513821e-08, 1.163863e-07;
hits_ge.col(3) << 1.176358e-06, 2.154100e-07, 5.072816e-07, -8.161219e-08, 1.437878e-07, 5.951832e-08;
hits_ge.col(4) << 2.852843e-05, 7.956492e-06, 3.117701e-06, -1.060541e-06, 8.777413e-09, 1.426417e-07;
return;
}
if (N > 3)
hits << 1.98645, 4.72598, 7.65632, 11.3151, 2.18002, 4.88864, 7.75845, 11.3134, 2.46338, 6.99838, 11.808, 17.793;
else
hits << 1.98645, 4.72598, 7.65632, 2.18002, 4.88864, 7.75845, 2.46338, 6.99838, 11.808;
hits_ge.col(0)[0] = 7.14652e-06;
hits_ge.col(1)[0] = 2.15789e-06;
hits_ge.col(2)[0] = 1.63328e-06;
if (N > 3)
hits_ge.col(3)[0] = 6.27919e-06;
hits_ge.col(0)[2] = 6.10348e-06;
hits_ge.col(1)[2] = 2.08211e-06;
hits_ge.col(2)[2] = 1.61672e-06;
if (N > 3)
hits_ge.col(3)[2] = 6.28081e-06;
hits_ge.col(0)[5] = 5.184e-05;
hits_ge.col(1)[5] = 1.444e-05;
hits_ge.col(2)[5] = 6.25e-06;
if (N > 3)
hits_ge.col(3)[5] = 3.136e-05;
hits_ge.col(0)[1] = -5.60077e-06;
hits_ge.col(1)[1] = -1.11936e-06;
hits_ge.col(2)[1] = -6.24945e-07;
if (N > 3)
hits_ge.col(3)[1] = -5.28e-06;
}
template <int N>
__global__ void kernelFillHitsAndHitsCov(double* __restrict__ phits, float* phits_ge) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
Rfit::Map3xNd<N> hits(phits + i, 3, N);
Rfit::Map6xNf<N> hits_ge(phits_ge + i, 6, N);
hits_ge = MatrixXf::Zero(6, N);
fillHitsAndHitsCov(hits, hits_ge);
}
template <int N>
void testFit() {
constexpr double B = 0.0113921;
Rfit::Matrix3xNd<N> hits;
Rfit::Matrix6xNf<N> hits_ge = MatrixXf::Zero(6, N);
double* hitsGPU = nullptr;
;
float* hits_geGPU = nullptr;
double* fast_fit_resultsGPU = nullptr;
double* fast_fit_resultsGPUret = new double[Rfit::maxNumberOfTracks() * sizeof(Vector4d)];
Rfit::circle_fit* circle_fit_resultsGPU = nullptr;
Rfit::circle_fit* circle_fit_resultsGPUret = new Rfit::circle_fit();
Rfit::line_fit* line_fit_resultsGPU = nullptr;
Rfit::line_fit* line_fit_resultsGPUret = new Rfit::line_fit();
fillHitsAndHitsCov(hits, hits_ge);
std::cout << "sizes " << N << ' ' << sizeof(hits) << ' ' << sizeof(hits_ge) << ' ' << sizeof(Vector4d) << ' '
<< sizeof(Rfit::line_fit) << ' ' << sizeof(Rfit::circle_fit) << std::endl;
std::cout << "Generated hits:\n" << hits << std::endl;
std::cout << "Generated cov:\n" << hits_ge << std::endl;
// FAST_FIT_CPU
#ifdef USE_BL
Vector4d fast_fit_results;
BrokenLine::BL_Fast_fit(hits, fast_fit_results);
#else
Vector4d fast_fit_results;
Rfit::Fast_fit(hits, fast_fit_results);
#endif
std::cout << "Fitted values (FastFit, [X0, Y0, R, tan(theta)]):\n" << fast_fit_results << std::endl;
// for timing purposes we fit 4096 tracks
constexpr uint32_t Ntracks = 4096;
cudaCheck(hipMalloc(&hitsGPU, Rfit::maxNumberOfTracks() * sizeof(Rfit::Matrix3xNd<N>)));
cudaCheck(hipMalloc(&hits_geGPU, Rfit::maxNumberOfTracks() * sizeof(Rfit::Matrix6xNf<N>)));
cudaCheck(hipMalloc(&fast_fit_resultsGPU, Rfit::maxNumberOfTracks() * sizeof(Vector4d)));
cudaCheck(hipMalloc(&line_fit_resultsGPU, Rfit::maxNumberOfTracks() * sizeof(Rfit::line_fit)));
cudaCheck(hipMalloc(&circle_fit_resultsGPU, Rfit::maxNumberOfTracks() * sizeof(Rfit::circle_fit)));
cudaCheck(hipMemset(fast_fit_resultsGPU, 0, Rfit::maxNumberOfTracks() * sizeof(Vector4d)));
cudaCheck(hipMemset(line_fit_resultsGPU, 0, Rfit::maxNumberOfTracks() * sizeof(Rfit::line_fit)));
hipLaunchKernelGGL(( kernelPrintSizes<N>), dim3(Ntracks / 64), dim3(64), 0, 0, hitsGPU, hits_geGPU);
hipLaunchKernelGGL(( kernelFillHitsAndHitsCov<N>), dim3(Ntracks / 64), dim3(64), 0, 0, hitsGPU, hits_geGPU);
// FAST_FIT GPU
hipLaunchKernelGGL(( kernelFastFit<N>), dim3(Ntracks / 64), dim3(64), 0, 0, hitsGPU, fast_fit_resultsGPU);
hipDeviceSynchronize();
cudaCheck(hipMemcpy(fast_fit_resultsGPUret,
fast_fit_resultsGPU,
Rfit::maxNumberOfTracks() * sizeof(Vector4d),
hipMemcpyDeviceToHost));
Rfit::Map4d fast_fit(fast_fit_resultsGPUret + 10, 4);
std::cout << "Fitted values (FastFit, [X0, Y0, R, tan(theta)]): GPU\n" << fast_fit << std::endl;
assert(isEqualFuzzy(fast_fit_results, fast_fit));
#ifdef USE_BL
// CIRCLE AND LINE FIT CPU
BrokenLine::PreparedBrokenLineData<N> data;
BrokenLine::karimaki_circle_fit circle_fit_results;
Rfit::line_fit line_fit_results;
Rfit::Matrix3d Jacob;
BrokenLine::prepareBrokenLineData(hits, fast_fit_results, B, data);
BrokenLine::BL_Line_fit(hits_ge, fast_fit_results, B, data, line_fit_results);
BrokenLine::BL_Circle_fit(hits, hits_ge, fast_fit_results, B, data, circle_fit_results);
Jacob << 1., 0, 0, 0, 1., 0, 0, 0,
-B / std::copysign(Rfit::sqr(circle_fit_results.par(2)), circle_fit_results.par(2));
circle_fit_results.par(2) = B / std::abs(circle_fit_results.par(2));
circle_fit_results.cov = Jacob * circle_fit_results.cov * Jacob.transpose();
// fit on GPU
hipLaunchKernelGGL(( kernelBrokenLineFit<N>)
, dim3(Ntracks / 64), dim3(64), 0, 0, hitsGPU, hits_geGPU, fast_fit_resultsGPU, B, circle_fit_resultsGPU, line_fit_resultsGPU);
hipDeviceSynchronize();
#else
// CIRCLE_FIT CPU
Rfit::VectorNd<N> rad = (hits.block(0, 0, 2, N).colwise().norm());
Rfit::Matrix2Nd<N> hits_cov = Rfit::Matrix2Nd<N>::Zero();
Rfit::loadCovariance2D(hits_ge, hits_cov);
Rfit::circle_fit circle_fit_results =
Rfit::Circle_fit(hits.block(0, 0, 2, N), hits_cov, fast_fit_results, rad, B, true);
// CIRCLE_FIT GPU
hipLaunchKernelGGL(( kernelCircleFit<N>), dim3(Ntracks / 64), dim3(64), 0, 0, hitsGPU, hits_geGPU, fast_fit_resultsGPU, B, circle_fit_resultsGPU);
hipDeviceSynchronize();
// LINE_FIT CPU
Rfit::line_fit line_fit_results = Rfit::Line_fit(hits, hits_ge, circle_fit_results, fast_fit_results, B, true);
hipLaunchKernelGGL(( kernelLineFit<N>)
, dim3(Ntracks / 64), dim3(64), 0, 0, hitsGPU, hits_geGPU, B, circle_fit_resultsGPU, fast_fit_resultsGPU, line_fit_resultsGPU);
hipDeviceSynchronize();
#endif
std::cout << "Fitted values (CircleFit):\n" << circle_fit_results.par << std::endl;
cudaCheck(
hipMemcpy(circle_fit_resultsGPUret, circle_fit_resultsGPU, sizeof(Rfit::circle_fit), hipMemcpyDeviceToHost));
std::cout << "Fitted values (CircleFit) GPU:\n" << circle_fit_resultsGPUret->par << std::endl;
assert(isEqualFuzzy(circle_fit_results.par, circle_fit_resultsGPUret->par));
std::cout << "Fitted values (LineFit):\n" << line_fit_results.par << std::endl;
// LINE_FIT GPU
cudaCheck(hipMemcpy(line_fit_resultsGPUret, line_fit_resultsGPU, sizeof(Rfit::line_fit), hipMemcpyDeviceToHost));
std::cout << "Fitted values (LineFit) GPU:\n" << line_fit_resultsGPUret->par << std::endl;
assert(isEqualFuzzy(line_fit_results.par, line_fit_resultsGPUret->par, N == 5 ? 1e-4 : 1e-6)); // requires fma on CPU
std::cout << "Fitted cov (CircleFit) CPU:\n" << circle_fit_results.cov << std::endl;
std::cout << "Fitted cov (LineFit): CPU\n" << line_fit_results.cov << std::endl;
std::cout << "Fitted cov (CircleFit) GPU:\n" << circle_fit_resultsGPUret->cov << std::endl;
std::cout << "Fitted cov (LineFit): GPU\n" << line_fit_resultsGPUret->cov << std::endl;
}
int main(int argc, char* argv[]) {
testFit<4>();
testFit<3>();
testFit<5>();
std::cout << "TEST FIT, NO ERRORS" << std::endl;
return 0;
}
| 54942699c9140823bf5bd7cd87d1cbf6b7b24198.cu | #include <iostream>
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include "CUDACore/cudaCheck.h"
#ifdef USE_BL
#include "plugin-PixelTriplets/BrokenLine.h"
#else
#include "plugin-PixelTriplets/RiemannFit.h"
#endif
#include "test_common.h"
using namespace Eigen;
namespace Rfit {
constexpr uint32_t maxNumberOfTracks() { return 5 * 1024; }
constexpr uint32_t stride() { return maxNumberOfTracks(); }
// hits
template <int N>
using Matrix3xNd = Eigen::Matrix<double, 3, N>;
template <int N>
using Map3xNd = Eigen::Map<Matrix3xNd<N>, 0, Eigen::Stride<3 * stride(), stride()>>;
// errors
template <int N>
using Matrix6xNf = Eigen::Matrix<float, 6, N>;
template <int N>
using Map6xNf = Eigen::Map<Matrix6xNf<N>, 0, Eigen::Stride<6 * stride(), stride()>>;
// fast fit
using Map4d = Eigen::Map<Vector4d, 0, Eigen::InnerStride<stride()>>;
} // namespace Rfit
template <int N>
__global__ void kernelPrintSizes(double* __restrict__ phits, float* __restrict__ phits_ge) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
Rfit::Map3xNd<N> hits(phits + i, 3, 4);
Rfit::Map6xNf<N> hits_ge(phits_ge + i, 6, 4);
if (i != 0)
return;
printf("GPU sizes %lu %lu %lu %lu %lu\n",
sizeof(hits[i]),
sizeof(hits_ge[i]),
sizeof(Vector4d),
sizeof(Rfit::line_fit),
sizeof(Rfit::circle_fit));
}
template <int N>
__global__ void kernelFastFit(double* __restrict__ phits, double* __restrict__ presults) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
Rfit::Map3xNd<N> hits(phits + i, 3, N);
Rfit::Map4d result(presults + i, 4);
#ifdef USE_BL
BrokenLine::BL_Fast_fit(hits, result);
#else
Rfit::Fast_fit(hits, result);
#endif
}
#ifdef USE_BL
template <int N>
__global__ void kernelBrokenLineFit(double* __restrict__ phits,
float* __restrict__ phits_ge,
double* __restrict__ pfast_fit_input,
double B,
Rfit::circle_fit* circle_fit,
Rfit::line_fit* line_fit) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
Rfit::Map3xNd<N> hits(phits + i, 3, N);
Rfit::Map4d fast_fit_input(pfast_fit_input + i, 4);
Rfit::Map6xNf<N> hits_ge(phits_ge + i, 6, N);
BrokenLine::PreparedBrokenLineData<N> data;
Rfit::Matrix3d Jacob;
auto& line_fit_results = line_fit[i];
auto& circle_fit_results = circle_fit[i];
BrokenLine::prepareBrokenLineData(hits, fast_fit_input, B, data);
BrokenLine::BL_Line_fit(hits_ge, fast_fit_input, B, data, line_fit_results);
BrokenLine::BL_Circle_fit(hits, hits_ge, fast_fit_input, B, data, circle_fit_results);
Jacob << 1., 0, 0, 0, 1., 0, 0, 0,
-B / std::copysign(Rfit::sqr(circle_fit_results.par(2)), circle_fit_results.par(2));
circle_fit_results.par(2) = B / std::abs(circle_fit_results.par(2));
circle_fit_results.cov = Jacob * circle_fit_results.cov * Jacob.transpose();
#ifdef TEST_DEBUG
if (0 == i) {
printf("Circle param %f,%f,%f\n", circle_fit[i].par(0), circle_fit[i].par(1), circle_fit[i].par(2));
}
#endif
}
#else
template <int N>
__global__ void kernelCircleFit(double* __restrict__ phits,
float* __restrict__ phits_ge,
double* __restrict__ pfast_fit_input,
double B,
Rfit::circle_fit* circle_fit_resultsGPU) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
Rfit::Map3xNd<N> hits(phits + i, 3, N);
Rfit::Map4d fast_fit_input(pfast_fit_input + i, 4);
Rfit::Map6xNf<N> hits_ge(phits_ge + i, 6, N);
constexpr auto n = N;
Rfit::VectorNd<N> rad = (hits.block(0, 0, 2, n).colwise().norm());
Rfit::Matrix2Nd<N> hits_cov = MatrixXd::Zero(2 * n, 2 * n);
Rfit::loadCovariance2D(hits_ge, hits_cov);
#ifdef TEST_DEBUG
if (0 == i) {
printf("hits %f, %f\n", hits.block(0, 0, 2, n)(0, 0), hits.block(0, 0, 2, n)(0, 1));
printf("hits %f, %f\n", hits.block(0, 0, 2, n)(1, 0), hits.block(0, 0, 2, n)(1, 1));
printf("fast_fit_input(0): %f\n", fast_fit_input(0));
printf("fast_fit_input(1): %f\n", fast_fit_input(1));
printf("fast_fit_input(2): %f\n", fast_fit_input(2));
printf("fast_fit_input(3): %f\n", fast_fit_input(3));
printf("rad(0,0): %f\n", rad(0, 0));
printf("rad(1,1): %f\n", rad(1, 1));
printf("rad(2,2): %f\n", rad(2, 2));
printf("hits_cov(0,0): %f\n", (*hits_cov)(0, 0));
printf("hits_cov(1,1): %f\n", (*hits_cov)(1, 1));
printf("hits_cov(2,2): %f\n", (*hits_cov)(2, 2));
printf("hits_cov(11,11): %f\n", (*hits_cov)(11, 11));
printf("B: %f\n", B);
}
#endif
circle_fit_resultsGPU[i] = Rfit::Circle_fit(hits.block(0, 0, 2, n), hits_cov, fast_fit_input, rad, B, true);
#ifdef TEST_DEBUG
if (0 == i) {
printf("Circle param %f,%f,%f\n",
circle_fit_resultsGPU[i].par(0),
circle_fit_resultsGPU[i].par(1),
circle_fit_resultsGPU[i].par(2));
}
#endif
}
template <int N>
__global__ void kernelLineFit(double* __restrict__ phits,
float* __restrict__ phits_ge,
double B,
Rfit::circle_fit* circle_fit,
double* __restrict__ pfast_fit_input,
Rfit::line_fit* line_fit) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
Rfit::Map3xNd<N> hits(phits + i, 3, N);
Rfit::Map4d fast_fit_input(pfast_fit_input + i, 4);
Rfit::Map6xNf<N> hits_ge(phits_ge + i, 6, N);
line_fit[i] = Rfit::Line_fit(hits, hits_ge, circle_fit[i], fast_fit_input, B, true);
}
#endif
template <typename M3xN, typename M6xN>
__device__ __host__ void fillHitsAndHitsCov(M3xN& hits, M6xN& hits_ge) {
constexpr uint32_t N = M3xN::ColsAtCompileTime;
if (N == 5) {
hits << 2.934787, 6.314229, 8.936963, 10.360559, 12.856387, 0.773211, 1.816356, 2.765734, 3.330824, 4.422212,
-10.980247, -23.162731, -32.759060, -38.061260, -47.518867;
hits_ge.col(0) << 1.424715e-07, -4.996975e-07, 1.752614e-06, 3.660689e-11, 1.644638e-09, 7.346080e-05;
hits_ge.col(1) << 6.899177e-08, -1.873414e-07, 5.087101e-07, -2.078806e-10, -2.210498e-11, 4.346079e-06;
hits_ge.col(2) << 1.406273e-06, 4.042467e-07, 6.391180e-07, -3.141497e-07, 6.513821e-08, 1.163863e-07;
hits_ge.col(3) << 1.176358e-06, 2.154100e-07, 5.072816e-07, -8.161219e-08, 1.437878e-07, 5.951832e-08;
hits_ge.col(4) << 2.852843e-05, 7.956492e-06, 3.117701e-06, -1.060541e-06, 8.777413e-09, 1.426417e-07;
return;
}
if (N > 3)
hits << 1.98645, 4.72598, 7.65632, 11.3151, 2.18002, 4.88864, 7.75845, 11.3134, 2.46338, 6.99838, 11.808, 17.793;
else
hits << 1.98645, 4.72598, 7.65632, 2.18002, 4.88864, 7.75845, 2.46338, 6.99838, 11.808;
hits_ge.col(0)[0] = 7.14652e-06;
hits_ge.col(1)[0] = 2.15789e-06;
hits_ge.col(2)[0] = 1.63328e-06;
if (N > 3)
hits_ge.col(3)[0] = 6.27919e-06;
hits_ge.col(0)[2] = 6.10348e-06;
hits_ge.col(1)[2] = 2.08211e-06;
hits_ge.col(2)[2] = 1.61672e-06;
if (N > 3)
hits_ge.col(3)[2] = 6.28081e-06;
hits_ge.col(0)[5] = 5.184e-05;
hits_ge.col(1)[5] = 1.444e-05;
hits_ge.col(2)[5] = 6.25e-06;
if (N > 3)
hits_ge.col(3)[5] = 3.136e-05;
hits_ge.col(0)[1] = -5.60077e-06;
hits_ge.col(1)[1] = -1.11936e-06;
hits_ge.col(2)[1] = -6.24945e-07;
if (N > 3)
hits_ge.col(3)[1] = -5.28e-06;
}
template <int N>
__global__ void kernelFillHitsAndHitsCov(double* __restrict__ phits, float* phits_ge) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
Rfit::Map3xNd<N> hits(phits + i, 3, N);
Rfit::Map6xNf<N> hits_ge(phits_ge + i, 6, N);
hits_ge = MatrixXf::Zero(6, N);
fillHitsAndHitsCov(hits, hits_ge);
}
template <int N>
void testFit() {
constexpr double B = 0.0113921;
Rfit::Matrix3xNd<N> hits;
Rfit::Matrix6xNf<N> hits_ge = MatrixXf::Zero(6, N);
double* hitsGPU = nullptr;
;
float* hits_geGPU = nullptr;
double* fast_fit_resultsGPU = nullptr;
double* fast_fit_resultsGPUret = new double[Rfit::maxNumberOfTracks() * sizeof(Vector4d)];
Rfit::circle_fit* circle_fit_resultsGPU = nullptr;
Rfit::circle_fit* circle_fit_resultsGPUret = new Rfit::circle_fit();
Rfit::line_fit* line_fit_resultsGPU = nullptr;
Rfit::line_fit* line_fit_resultsGPUret = new Rfit::line_fit();
fillHitsAndHitsCov(hits, hits_ge);
std::cout << "sizes " << N << ' ' << sizeof(hits) << ' ' << sizeof(hits_ge) << ' ' << sizeof(Vector4d) << ' '
<< sizeof(Rfit::line_fit) << ' ' << sizeof(Rfit::circle_fit) << std::endl;
std::cout << "Generated hits:\n" << hits << std::endl;
std::cout << "Generated cov:\n" << hits_ge << std::endl;
// FAST_FIT_CPU
#ifdef USE_BL
Vector4d fast_fit_results;
BrokenLine::BL_Fast_fit(hits, fast_fit_results);
#else
Vector4d fast_fit_results;
Rfit::Fast_fit(hits, fast_fit_results);
#endif
std::cout << "Fitted values (FastFit, [X0, Y0, R, tan(theta)]):\n" << fast_fit_results << std::endl;
// for timing purposes we fit 4096 tracks
constexpr uint32_t Ntracks = 4096;
cudaCheck(cudaMalloc(&hitsGPU, Rfit::maxNumberOfTracks() * sizeof(Rfit::Matrix3xNd<N>)));
cudaCheck(cudaMalloc(&hits_geGPU, Rfit::maxNumberOfTracks() * sizeof(Rfit::Matrix6xNf<N>)));
cudaCheck(cudaMalloc(&fast_fit_resultsGPU, Rfit::maxNumberOfTracks() * sizeof(Vector4d)));
cudaCheck(cudaMalloc(&line_fit_resultsGPU, Rfit::maxNumberOfTracks() * sizeof(Rfit::line_fit)));
cudaCheck(cudaMalloc(&circle_fit_resultsGPU, Rfit::maxNumberOfTracks() * sizeof(Rfit::circle_fit)));
cudaCheck(cudaMemset(fast_fit_resultsGPU, 0, Rfit::maxNumberOfTracks() * sizeof(Vector4d)));
cudaCheck(cudaMemset(line_fit_resultsGPU, 0, Rfit::maxNumberOfTracks() * sizeof(Rfit::line_fit)));
kernelPrintSizes<N><<<Ntracks / 64, 64>>>(hitsGPU, hits_geGPU);
kernelFillHitsAndHitsCov<N><<<Ntracks / 64, 64>>>(hitsGPU, hits_geGPU);
// FAST_FIT GPU
kernelFastFit<N><<<Ntracks / 64, 64>>>(hitsGPU, fast_fit_resultsGPU);
cudaDeviceSynchronize();
cudaCheck(cudaMemcpy(fast_fit_resultsGPUret,
fast_fit_resultsGPU,
Rfit::maxNumberOfTracks() * sizeof(Vector4d),
cudaMemcpyDeviceToHost));
Rfit::Map4d fast_fit(fast_fit_resultsGPUret + 10, 4);
std::cout << "Fitted values (FastFit, [X0, Y0, R, tan(theta)]): GPU\n" << fast_fit << std::endl;
assert(isEqualFuzzy(fast_fit_results, fast_fit));
#ifdef USE_BL
// CIRCLE AND LINE FIT CPU
BrokenLine::PreparedBrokenLineData<N> data;
BrokenLine::karimaki_circle_fit circle_fit_results;
Rfit::line_fit line_fit_results;
Rfit::Matrix3d Jacob;
BrokenLine::prepareBrokenLineData(hits, fast_fit_results, B, data);
BrokenLine::BL_Line_fit(hits_ge, fast_fit_results, B, data, line_fit_results);
BrokenLine::BL_Circle_fit(hits, hits_ge, fast_fit_results, B, data, circle_fit_results);
Jacob << 1., 0, 0, 0, 1., 0, 0, 0,
-B / std::copysign(Rfit::sqr(circle_fit_results.par(2)), circle_fit_results.par(2));
circle_fit_results.par(2) = B / std::abs(circle_fit_results.par(2));
circle_fit_results.cov = Jacob * circle_fit_results.cov * Jacob.transpose();
// fit on GPU
kernelBrokenLineFit<N>
<<<Ntracks / 64, 64>>>(hitsGPU, hits_geGPU, fast_fit_resultsGPU, B, circle_fit_resultsGPU, line_fit_resultsGPU);
cudaDeviceSynchronize();
#else
// CIRCLE_FIT CPU
Rfit::VectorNd<N> rad = (hits.block(0, 0, 2, N).colwise().norm());
Rfit::Matrix2Nd<N> hits_cov = Rfit::Matrix2Nd<N>::Zero();
Rfit::loadCovariance2D(hits_ge, hits_cov);
Rfit::circle_fit circle_fit_results =
Rfit::Circle_fit(hits.block(0, 0, 2, N), hits_cov, fast_fit_results, rad, B, true);
// CIRCLE_FIT GPU
kernelCircleFit<N><<<Ntracks / 64, 64>>>(hitsGPU, hits_geGPU, fast_fit_resultsGPU, B, circle_fit_resultsGPU);
cudaDeviceSynchronize();
// LINE_FIT CPU
Rfit::line_fit line_fit_results = Rfit::Line_fit(hits, hits_ge, circle_fit_results, fast_fit_results, B, true);
kernelLineFit<N>
<<<Ntracks / 64, 64>>>(hitsGPU, hits_geGPU, B, circle_fit_resultsGPU, fast_fit_resultsGPU, line_fit_resultsGPU);
cudaDeviceSynchronize();
#endif
std::cout << "Fitted values (CircleFit):\n" << circle_fit_results.par << std::endl;
cudaCheck(
cudaMemcpy(circle_fit_resultsGPUret, circle_fit_resultsGPU, sizeof(Rfit::circle_fit), cudaMemcpyDeviceToHost));
std::cout << "Fitted values (CircleFit) GPU:\n" << circle_fit_resultsGPUret->par << std::endl;
assert(isEqualFuzzy(circle_fit_results.par, circle_fit_resultsGPUret->par));
std::cout << "Fitted values (LineFit):\n" << line_fit_results.par << std::endl;
// LINE_FIT GPU
cudaCheck(cudaMemcpy(line_fit_resultsGPUret, line_fit_resultsGPU, sizeof(Rfit::line_fit), cudaMemcpyDeviceToHost));
std::cout << "Fitted values (LineFit) GPU:\n" << line_fit_resultsGPUret->par << std::endl;
assert(isEqualFuzzy(line_fit_results.par, line_fit_resultsGPUret->par, N == 5 ? 1e-4 : 1e-6)); // requires fma on CPU
std::cout << "Fitted cov (CircleFit) CPU:\n" << circle_fit_results.cov << std::endl;
std::cout << "Fitted cov (LineFit): CPU\n" << line_fit_results.cov << std::endl;
std::cout << "Fitted cov (CircleFit) GPU:\n" << circle_fit_resultsGPUret->cov << std::endl;
std::cout << "Fitted cov (LineFit): GPU\n" << line_fit_resultsGPUret->cov << std::endl;
}
int main(int argc, char* argv[]) {
testFit<4>();
testFit<3>();
testFit<5>();
std::cout << "TEST FIT, NO ERRORS" << std::endl;
return 0;
}
|
b89be832523ad457a1562acbf9a6c010f6f6d24e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
*/
////////////////////////////////////////////////////////////////////////////////
#include "../common.cuh"
#define MEM_SIZE 10
__global__ void Kernel_DataParallel(int *A, int *B, int *C, int numElements)
{
/*
* <<< , >>>
* ( )
*/
int tid = blockDim.x * blockIdx.x + threadIdx.x;
/*
* tid < numElements
* ,
*
*/
if (tid < numElements)
{
C[tid] = A[tid] + B[tid];
}
// To do .
}
void SequentialAdd(const int *a, const int *b, int *result, int numElements)
{
for (int i = 0; i < numElements; i++)
{
result[i] = a[i] + b[i];
}
}
void DataParallel()
{
int numElements = MEM_SIZE;
size_t bufferSize = numElements * sizeof(int);
/* */
int *A = (int *)malloc(bufferSize);
int *B = (int *)malloc(bufferSize);
int *C = (int *)malloc(bufferSize);
int *CSeq = (int *)malloc(bufferSize);
/* */
if (A == NULL || B == NULL || C == NULL)
{
fprintf(stderr, " ... \n");
exit(EXIT_FAILURE);
}
/* */
for (int i = 0; i < numElements; i++)
{
A[i] = i;
B[i] = i;
C[i] = 0;
}
/* */
printf("=== A ===\n");
for (int i = 0; i < numElements; i++)
{
printf("%d ", A[i]);
}
printf("\n");
printf("\n");
printf("=== B ===\n");
for (int i = 0; i < numElements; i++)
{
printf("%d ", B[i]);
}
printf("\n");
/* */
SequentialAdd(A, B, CSeq, numElements);
/* */
printf("\n");
printf("=== ===\n");
for (int i = 0; i < numElements; i++)
{
printf("%d ", CSeq[i]);
}
printf("\n");
//////////////////////////////////////////////////////////////////
printf("\n");
printf("------------------- --------------------\n");
/* */
int *devA = NULL;
int *devB = NULL;
int *devC = NULL;
/* */
CheckCudaError(hipMalloc((void **)&devA, bufferSize));
CheckCudaError(hipMalloc((void **)&devB, bufferSize));
CheckCudaError(hipMalloc((void **)&devC, bufferSize));
/* -> */
CheckCudaError(hipMemcpy(devA, A, bufferSize, hipMemcpyHostToDevice));
CheckCudaError(hipMemcpy(devB, B, bufferSize, hipMemcpyHostToDevice));
/* */
int threadsPerBlock = 5;
int blocksPerGrid = numElements;
hipLaunchKernelGGL(( Kernel_DataParallel), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, devA, devB, devC, numElements);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, " : %s ... \n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/* -> */
CheckCudaError(hipMemcpy(C, devC, bufferSize, hipMemcpyDeviceToHost));
/* */
printf("\n");
for (int i = 0; i < numElements; i++)
{
printf("%d ", C[i]);
}
printf("\n");
/* */
CheckCudaError(hipFree(devA));
CheckCudaError(hipFree(devB));
CheckCudaError(hipFree(devC));
/* */
free(A);
free(B);
free(C);
free(CSeq);
printf("\n");
printf("----------------- -----------------\n");
printf("\n");
} | b89be832523ad457a1562acbf9a6c010f6f6d24e.cu | /*
* 병렬 데이터 처리 커널 함수 이해 및 호스트 코드 작성
*/
////////////////////////////////////////////////////////////////////////////////
#include "../common.cuh"
#define MEM_SIZE 10
__global__ void Kernel_DataParallel(int *A, int *B, int *C, int numElements)
{
/*
* 커널 호출시 받은 <<<블록의 갯수,블록당 스레드의 갯수>>> 를 통해
* 전체 스레드를 일렬로 줄 세우기 (고유 번호 부여)
*/
int tid = blockDim.x * blockIdx.x + threadIdx.x;
/*
* tid < numElements 의 의미
* 연산 데이터의 갯수와 딱 맞는 갯수의 스레드를 사용하는 것은 쉽지 않으므로,
* 스레드의 갯수를 충분히 주되 연산 데이터 갯수만큼의 스레드들만 연산을 수행하도록 함
*/
if (tid < numElements)
{
C[tid] = A[tid] + B[tid];
}
// To do 다른 연산을 추가해 보세요.
}
void SequentialAdd(const int *a, const int *b, int *result, int numElements)
{
for (int i = 0; i < numElements; i++)
{
result[i] = a[i] + b[i];
}
}
void DataParallel()
{
int numElements = MEM_SIZE;
size_t bufferSize = numElements * sizeof(int);
/* 호스트 메모리 할당 */
int *A = (int *)malloc(bufferSize);
int *B = (int *)malloc(bufferSize);
int *C = (int *)malloc(bufferSize);
int *CSeq = (int *)malloc(bufferSize);
/* 호스트 메모리 할당 에러 처리 */
if (A == NULL || B == NULL || C == NULL)
{
fprintf(stderr, "호스트 메모리 할당 실패... 종료\n");
exit(EXIT_FAILURE);
}
/* 호스트 데이터 입력 */
for (int i = 0; i < numElements; i++)
{
A[i] = i;
B[i] = i;
C[i] = 0;
}
/* 호스트 입력 데이터 확인 */
printf("=== A 입력 값 ===\n");
for (int i = 0; i < numElements; i++)
{
printf("%d ", A[i]);
}
printf("\n");
printf("\n");
printf("=== B 입력 값 ===\n");
for (int i = 0; i < numElements; i++)
{
printf("%d ", B[i]);
}
printf("\n");
/* 시퀀셜 합 */
SequentialAdd(A, B, CSeq, numElements);
/* 시퀀셜 결과 출력 */
printf("\n");
printf("=== 시퀀셜 결과 출력 ===\n");
for (int i = 0; i < numElements; i++)
{
printf("%d ", CSeq[i]);
}
printf("\n");
//////////////////////////////////////////////////////////////////
printf("\n");
printf("------------------- 시작 --------------------\n");
/* 디바이스 변수 선언 */
int *devA = NULL;
int *devB = NULL;
int *devC = NULL;
/* 디바이스 메모리 할당 */
CheckCudaError(cudaMalloc((void **)&devA, bufferSize));
CheckCudaError(cudaMalloc((void **)&devB, bufferSize));
CheckCudaError(cudaMalloc((void **)&devC, bufferSize));
/* 호스트 -> 디바이스 입력 데이터 복사 */
CheckCudaError(cudaMemcpy(devA, A, bufferSize, cudaMemcpyHostToDevice));
CheckCudaError(cudaMemcpy(devB, B, bufferSize, cudaMemcpyHostToDevice));
/* 데이터 병렬 연산 커널 함수 실행 */
int threadsPerBlock = 5;
int blocksPerGrid = numElements;
Kernel_DataParallel<<<blocksPerGrid,threadsPerBlock>>>(devA, devB, devC, numElements);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "에러 코드: %s ... 종료\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* 디바이스 -> 호스트 출력 데이터 복사 */
CheckCudaError(cudaMemcpy(C, devC, bufferSize, cudaMemcpyDeviceToHost));
/* 결과 출력 */
printf("\n");
for (int i = 0; i < numElements; i++)
{
printf("%d ", C[i]);
}
printf("\n");
/* 디바이스 메모리 해제 */
CheckCudaError(cudaFree(devA));
CheckCudaError(cudaFree(devB));
CheckCudaError(cudaFree(devC));
/* 호스트 메모리 해제 */
free(A);
free(B);
free(C);
free(CSeq);
printf("\n");
printf("----------------- 정상 종료 -----------------\n");
printf("\n");
} |
8142c1797fec7d30524cd480a6ca6b30679a3d24.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_helper.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <random>
#include <iostream>
static constexpr int BLOCK_SIZE = 128;
static constexpr float SOFTENING = 1e-9f;
static constexpr float TIMESTEP = 0.0001f; // time step
static constexpr int ITERATIONS = 10; // simulation iterations
static constexpr float DELTA = ITERATIONS*0.0001; // for floating-point error
struct Body {
float x, y, z, vx, vy, vz;
};
// error checker
// (nbody with single-precision parallel Euler is not that robust, small timestep needed)
bool compare_equal_pos(Body* p1, Body* p2, int nbodies) {
for(int i=0; i<nbodies; ++i) {
float dx = p1[i].x - p2[i].x;
float dy = p1[i].y - p2[i].y;
float dz = p1[i].z - p2[i].z;
if( std::abs(dx) > DELTA
|| std::abs(dy) > DELTA
|| std::abs(dz) > DELTA ) {
std::cerr << "First mismatch"
<< " at [" << i << "]"
<< ": (" << dx <<","<< dy <<","<< dz <<")\n"
<< ": (" << p1[i].y <<","<< p2[i].y <<")\n"
;
return false;
}
}
return true;
}
void bodyCPU(Body* p, int nbodies) {
for (int iter = 1; iter <= ITERATIONS; iter++) {
// force
for(int i=0; i<nbodies; ++i) {
float Fx = 0.0f;
float Fy = 0.0f;
float Fz = 0.0f;
for(int j=0; j<nbodies; ++j) {
float dx = p[j].x - p[i].x;
float dy = p[j].y - p[i].y;
float dz = p[j].z - p[i].z;
float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
float invDist = 1.0/sqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
Fx += dx * invDist3;
Fy += dy * invDist3;
Fz += dz * invDist3;
}
p[i].vx += TIMESTEP*Fx;
p[i].vy += TIMESTEP*Fy;
p[i].vz += TIMESTEP*Fz;
}
// integrate
for(int i=0; i<nbodies; ++i) {
p[i].x += p[i].vx*TIMESTEP;
p[i].y += p[i].vy*TIMESTEP;
p[i].z += p[i].vz*TIMESTEP;
}
}
}
// Kernel
__global__
void bodyIntegrate(Body* p, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < n) {
p[i].x += p[i].vx*TIMESTEP;
p[i].y += p[i].vy*TIMESTEP;
p[i].z += p[i].vz*TIMESTEP;
}
}
__global__
void bodyForce(Body* p, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
float Fx = 0.0f;
float Fy = 0.0f;
float Fz = 0.0f;
for (int j = 0; j < n; j++) {
// 3 FLOPS
float dx = p[j].x - p[i].x;
float dy = p[j].y - p[i].y;
float dz = p[j].z - p[i].z;
// 6 FLOPS
float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
// 2 FLOPS (1 sqrt, 1 inv)
float invDist = 1.0/sqrtf(distSqr);
// 2 FLOPS
float invDist3 = invDist * invDist * invDist;
// 6 FLOPS
Fx += dx * invDist3;
Fy += dy * invDist3;
Fz += dz * invDist3;
}
p[i].vx += TIMESTEP*Fx;
p[i].vy += TIMESTEP*Fy;
p[i].vz += TIMESTEP*Fz;
}
}
// Host
int main(const int argc, const char** argv) {
int nbodies = 30000;
int dev = 0;
if (argc > 1)
nbodies = atoi(argv[1]);
if (argc > 2)
dev = atoi(argv[2]);
std::cout << "USAGE\n ./nbody [nbodies] [device-index]\n\n";
dim3 blocks( (nbodies-1)/BLOCK_SIZE+1 );
// Device information
CHECK_CUDA( hipSetDevice(dev) );
std::cout << getCUDADeviceInformations(dev).str()
<< "\n"
<< "\nThreads per block: "<< BLOCK_SIZE
<< "\nBlocks per SM: " << blocks.x << " (monolithic)"
<< "\nEpsilon: " << SOFTENING
<< "\nTimestep: " << TIMESTEP
<< "\nIterations: " << ITERATIONS
<< "\nDelta: " << DELTA
<< "\n\n"
;
// for time measurement
float milliseconds = 0;
float min_ms = std::numeric_limits<float>::max();
hipEvent_t cstart, cend;
CHECK_CUDA(hipEventCreate(&cstart));
CHECK_CUDA(hipEventCreate(&cend));
//
Body* p;
Body* d_p;
p = new Body[nbodies];
CHECK_CUDA( hipMalloc(&d_p, nbodies*sizeof(Body)) );
// C++11 random generator for uniformly distributed numbers in {1,..,42}, w seed
std::default_random_engine eng{ 1337 };
std::uniform_real_distribution<float> dist(-1.0, 1.0);
for(int i=0; i<nbodies; ++i) {
p[i].x = dist(eng);
p[i].y = dist(eng);
p[i].z = dist(eng);
p[i].vx = dist(eng);
p[i].vy = dist(eng);
p[i].vz = dist(eng);
}
CHECK_CUDA(hipMemcpy(d_p, p, nbodies*sizeof(Body), hipMemcpyHostToDevice));
// benchmark loop
for (int iter = 1; iter <= ITERATIONS; iter++) {
CHECK_CUDA(hipEventRecord(cstart));
hipLaunchKernelGGL(( bodyForce), dim3(blocks), dim3(BLOCK_SIZE), 0, 0, d_p, nbodies);
CHECK_CUDA( hipEventRecord(cend) );
CHECK_CUDA( hipEventSynchronize(cend) );
CHECK_CUDA( hipGetLastError() );
CHECK_CUDA( hipEventElapsedTime(&milliseconds, cstart, cend) );
if(milliseconds<min_ms)
min_ms = milliseconds;
// no time measurement of integration
hipLaunchKernelGGL(( bodyIntegrate), dim3(blocks), dim3(BLOCK_SIZE), 0, 0, d_p, nbodies);
CHECK_CUDA( hipGetLastError() );
}
// validate results if n<8193 (CPU is so slow)
if(nbodies<8193) {
Body* p2 = new Body[nbodies];
bodyCPU(p, nbodies);
CHECK_CUDA(hipMemcpy(p2, d_p, nbodies*sizeof(Body), hipMemcpyDeviceToHost));
if(compare_equal_pos(p, p2, nbodies)) {
std::cout << "SUCCESS\n";
} else {
std::cout << "FAILED\n";
}
delete[] p2;
} else {
std::cout << "No validation.\n";
}
std::cout << "Bodies: " << nbodies
<< "\nGFLOPs: " << 19.0 * 1e-6 * nbodies * nbodies / min_ms
<< "\n";
CHECK_CUDA(hipEventDestroy(cstart));
CHECK_CUDA(hipEventDestroy(cend));
delete[] p;
CHECK_CUDA( hipFree( d_p ) );
CHECK_CUDA(hipDeviceReset());
return 0;
}
| 8142c1797fec7d30524cd480a6ca6b30679a3d24.cu | #include "cuda_helper.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include <random>
#include <iostream>
static constexpr int BLOCK_SIZE = 128;
static constexpr float SOFTENING = 1e-9f;
static constexpr float TIMESTEP = 0.0001f; // time step
static constexpr int ITERATIONS = 10; // simulation iterations
static constexpr float DELTA = ITERATIONS*0.0001; // for floating-point error
struct Body {
float x, y, z, vx, vy, vz;
};
// error checker
// (nbody with single-precision parallel Euler is not that robust, small timestep needed)
bool compare_equal_pos(Body* p1, Body* p2, int nbodies) {
for(int i=0; i<nbodies; ++i) {
float dx = p1[i].x - p2[i].x;
float dy = p1[i].y - p2[i].y;
float dz = p1[i].z - p2[i].z;
if( std::abs(dx) > DELTA
|| std::abs(dy) > DELTA
|| std::abs(dz) > DELTA ) {
std::cerr << "First mismatch"
<< " at [" << i << "]"
<< ": (" << dx <<","<< dy <<","<< dz <<")\n"
<< ": (" << p1[i].y <<","<< p2[i].y <<")\n"
;
return false;
}
}
return true;
}
void bodyCPU(Body* p, int nbodies) {
for (int iter = 1; iter <= ITERATIONS; iter++) {
// force
for(int i=0; i<nbodies; ++i) {
float Fx = 0.0f;
float Fy = 0.0f;
float Fz = 0.0f;
for(int j=0; j<nbodies; ++j) {
float dx = p[j].x - p[i].x;
float dy = p[j].y - p[i].y;
float dz = p[j].z - p[i].z;
float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
float invDist = 1.0/sqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
Fx += dx * invDist3;
Fy += dy * invDist3;
Fz += dz * invDist3;
}
p[i].vx += TIMESTEP*Fx;
p[i].vy += TIMESTEP*Fy;
p[i].vz += TIMESTEP*Fz;
}
// integrate
for(int i=0; i<nbodies; ++i) {
p[i].x += p[i].vx*TIMESTEP;
p[i].y += p[i].vy*TIMESTEP;
p[i].z += p[i].vz*TIMESTEP;
}
}
}
// Kernel
__global__
void bodyIntegrate(Body* p, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < n) {
p[i].x += p[i].vx*TIMESTEP;
p[i].y += p[i].vy*TIMESTEP;
p[i].z += p[i].vz*TIMESTEP;
}
}
__global__
void bodyForce(Body* p, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
float Fx = 0.0f;
float Fy = 0.0f;
float Fz = 0.0f;
for (int j = 0; j < n; j++) {
// 3 FLOPS
float dx = p[j].x - p[i].x;
float dy = p[j].y - p[i].y;
float dz = p[j].z - p[i].z;
// 6 FLOPS
float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
// 2 FLOPS (1 sqrt, 1 inv)
float invDist = 1.0/sqrtf(distSqr);
// 2 FLOPS
float invDist3 = invDist * invDist * invDist;
// 6 FLOPS
Fx += dx * invDist3;
Fy += dy * invDist3;
Fz += dz * invDist3;
}
p[i].vx += TIMESTEP*Fx;
p[i].vy += TIMESTEP*Fy;
p[i].vz += TIMESTEP*Fz;
}
}
// Host
int main(const int argc, const char** argv) {
int nbodies = 30000;
int dev = 0;
if (argc > 1)
nbodies = atoi(argv[1]);
if (argc > 2)
dev = atoi(argv[2]);
std::cout << "USAGE\n ./nbody [nbodies] [device-index]\n\n";
dim3 blocks( (nbodies-1)/BLOCK_SIZE+1 );
// Device information
CHECK_CUDA( cudaSetDevice(dev) );
std::cout << getCUDADeviceInformations(dev).str()
<< "\n"
<< "\nThreads per block: "<< BLOCK_SIZE
<< "\nBlocks per SM: " << blocks.x << " (monolithic)"
<< "\nEpsilon: " << SOFTENING
<< "\nTimestep: " << TIMESTEP
<< "\nIterations: " << ITERATIONS
<< "\nDelta: " << DELTA
<< "\n\n"
;
// for time measurement
float milliseconds = 0;
float min_ms = std::numeric_limits<float>::max();
cudaEvent_t cstart, cend;
CHECK_CUDA(cudaEventCreate(&cstart));
CHECK_CUDA(cudaEventCreate(&cend));
//
Body* p;
Body* d_p;
p = new Body[nbodies];
CHECK_CUDA( cudaMalloc(&d_p, nbodies*sizeof(Body)) );
// C++11 random generator for uniformly distributed numbers in {1,..,42}, w seed
std::default_random_engine eng{ 1337 };
std::uniform_real_distribution<float> dist(-1.0, 1.0);
for(int i=0; i<nbodies; ++i) {
p[i].x = dist(eng);
p[i].y = dist(eng);
p[i].z = dist(eng);
p[i].vx = dist(eng);
p[i].vy = dist(eng);
p[i].vz = dist(eng);
}
CHECK_CUDA(cudaMemcpy(d_p, p, nbodies*sizeof(Body), cudaMemcpyHostToDevice));
// benchmark loop
for (int iter = 1; iter <= ITERATIONS; iter++) {
CHECK_CUDA(cudaEventRecord(cstart));
bodyForce<<<blocks, BLOCK_SIZE>>>(d_p, nbodies);
CHECK_CUDA( cudaEventRecord(cend) );
CHECK_CUDA( cudaEventSynchronize(cend) );
CHECK_CUDA( cudaGetLastError() );
CHECK_CUDA( cudaEventElapsedTime(&milliseconds, cstart, cend) );
if(milliseconds<min_ms)
min_ms = milliseconds;
// no time measurement of integration
bodyIntegrate<<<blocks, BLOCK_SIZE>>>(d_p, nbodies);
CHECK_CUDA( cudaGetLastError() );
}
// validate results if n<8193 (CPU is so slow)
if(nbodies<8193) {
Body* p2 = new Body[nbodies];
bodyCPU(p, nbodies);
CHECK_CUDA(cudaMemcpy(p2, d_p, nbodies*sizeof(Body), cudaMemcpyDeviceToHost));
if(compare_equal_pos(p, p2, nbodies)) {
std::cout << "SUCCESS\n";
} else {
std::cout << "FAILED\n";
}
delete[] p2;
} else {
std::cout << "No validation.\n";
}
std::cout << "Bodies: " << nbodies
<< "\nGFLOPs: " << 19.0 * 1e-6 * nbodies * nbodies / min_ms
<< "\n";
CHECK_CUDA(cudaEventDestroy(cstart));
CHECK_CUDA(cudaEventDestroy(cend));
delete[] p;
CHECK_CUDA( cudaFree( d_p ) );
CHECK_CUDA(cudaDeviceReset());
return 0;
}
|
7529c04bfb9345bc34b74f3a2b6c9e25c3aba33f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/activation/bias_gelu_grad_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/shared_inc/fast_divmod.h"
#include "orttraining/training_ops/cpu/activation/gelu_computation_mode.h"
#include "orttraining/training_ops/cuda/activation/gelu_grad_impl_common.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T, typename GeluComputationMode, int num_elements_per_thread>
__global__ void BiasGeluGradDxKernel(int64_t bias_size, const T* dY, const T* X, const T* B, T* dX) {
const auto num_elements_per_block = num_elements_per_thread * blockDim.x;
const auto input_base_idx = bias_size * blockIdx.y + num_elements_per_block * blockIdx.x + threadIdx.x;
const auto bias_base_idx = num_elements_per_block * blockIdx.x + threadIdx.x;
const auto element_stride = blockDim.x;
T reg_dY[num_elements_per_thread];
T reg_X[num_elements_per_thread];
T reg_B[num_elements_per_thread];
{
auto input_idx = input_base_idx;
auto bias_idx = bias_base_idx;
#pragma unroll
for (int element_idx = 0; element_idx < num_elements_per_thread; ++element_idx) {
if (bias_idx < bias_size) {
reg_dY[element_idx] = dY[input_idx];
reg_X[element_idx] = X[input_idx];
reg_B[element_idx] = B[bias_idx];
input_idx += element_stride;
bias_idx += element_stride;
}
}
}
{
auto input_idx = input_base_idx;
auto bias_idx = bias_base_idx;
#pragma unroll
for (int element_idx = 0; element_idx < num_elements_per_thread; ++element_idx) {
if (bias_idx < bias_size) {
dX[input_idx] = ComputeGeluGradScalar(
reg_dY[element_idx], reg_X[element_idx] + reg_B[element_idx], GeluComputationMode{});
input_idx += element_stride;
bias_idx += element_stride;
}
}
}
}
template <typename T, typename GeluComputationMode>
void LaunchBiasGeluGradDxKernel(
hipStream_t stream,
int64_t input_size, int64_t bias_size,
const T* dY, const T* X, const T* B, T* dX) {
// given a 2D grid of blocks:
// each grid row handles bias_size elements
// there are input_size / bias_size rows
constexpr int num_elements_per_thread = GridDim::maxElementsPerThread;
const int num_threads_per_block =
std::min<int>(static_cast<int>(CeilDiv(bias_size, num_elements_per_thread)), static_cast<int>(GridDim::maxThreadsPerBlock));
const auto grid_width = CeilDiv(bias_size, num_elements_per_thread * num_threads_per_block);
const auto grid_height = input_size / bias_size;
const dim3 grid_dim{static_cast<uint32_t>(grid_width), static_cast<uint32_t>(grid_height)};
hipLaunchKernelGGL(( BiasGeluGradDxKernel<T, GeluComputationMode, num_elements_per_thread>)
, dim3(grid_dim), dim3(num_threads_per_block), 0, stream, bias_size, dY, X, B, dX);
}
// explicit instantiations
#define SPECIALIZED_BIAS_GELU_GRAD_IMPL(T, GeluComputationMode) \
template void LaunchBiasGeluGradDxKernel<T, GeluComputationMode>( \
hipStream_t stream, int64_t input_size, int64_t bias_size, \
const T* dY, const T* X, const T* B, T* dX)
SPECIALIZED_BIAS_GELU_GRAD_IMPL(half, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(float, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(double, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(half, gelu_computation_mode::Approximation);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(float, gelu_computation_mode::Approximation);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(double, gelu_computation_mode::Approximation);
#if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__))
SPECIALIZED_BIAS_GELU_GRAD_IMPL(nv_bfloat16, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(nv_bfloat16, gelu_computation_mode::Approximation);
#endif
#undef SPECIALIZED_BIAS_GELU_GRAD_IMPL
} // namespace cuda
} // namespace onnxruntime
| 7529c04bfb9345bc34b74f3a2b6c9e25c3aba33f.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/activation/bias_gelu_grad_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/shared_inc/fast_divmod.h"
#include "orttraining/training_ops/cpu/activation/gelu_computation_mode.h"
#include "orttraining/training_ops/cuda/activation/gelu_grad_impl_common.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T, typename GeluComputationMode, int num_elements_per_thread>
__global__ void BiasGeluGradDxKernel(int64_t bias_size, const T* dY, const T* X, const T* B, T* dX) {
const auto num_elements_per_block = num_elements_per_thread * blockDim.x;
const auto input_base_idx = bias_size * blockIdx.y + num_elements_per_block * blockIdx.x + threadIdx.x;
const auto bias_base_idx = num_elements_per_block * blockIdx.x + threadIdx.x;
const auto element_stride = blockDim.x;
T reg_dY[num_elements_per_thread];
T reg_X[num_elements_per_thread];
T reg_B[num_elements_per_thread];
{
auto input_idx = input_base_idx;
auto bias_idx = bias_base_idx;
#pragma unroll
for (int element_idx = 0; element_idx < num_elements_per_thread; ++element_idx) {
if (bias_idx < bias_size) {
reg_dY[element_idx] = dY[input_idx];
reg_X[element_idx] = X[input_idx];
reg_B[element_idx] = B[bias_idx];
input_idx += element_stride;
bias_idx += element_stride;
}
}
}
{
auto input_idx = input_base_idx;
auto bias_idx = bias_base_idx;
#pragma unroll
for (int element_idx = 0; element_idx < num_elements_per_thread; ++element_idx) {
if (bias_idx < bias_size) {
dX[input_idx] = ComputeGeluGradScalar(
reg_dY[element_idx], reg_X[element_idx] + reg_B[element_idx], GeluComputationMode{});
input_idx += element_stride;
bias_idx += element_stride;
}
}
}
}
template <typename T, typename GeluComputationMode>
void LaunchBiasGeluGradDxKernel(
cudaStream_t stream,
int64_t input_size, int64_t bias_size,
const T* dY, const T* X, const T* B, T* dX) {
// given a 2D grid of blocks:
// each grid row handles bias_size elements
// there are input_size / bias_size rows
constexpr int num_elements_per_thread = GridDim::maxElementsPerThread;
const int num_threads_per_block =
std::min<int>(static_cast<int>(CeilDiv(bias_size, num_elements_per_thread)), static_cast<int>(GridDim::maxThreadsPerBlock));
const auto grid_width = CeilDiv(bias_size, num_elements_per_thread * num_threads_per_block);
const auto grid_height = input_size / bias_size;
const dim3 grid_dim{static_cast<uint32_t>(grid_width), static_cast<uint32_t>(grid_height)};
BiasGeluGradDxKernel<T, GeluComputationMode, num_elements_per_thread>
<<<grid_dim, num_threads_per_block, 0, stream>>>(bias_size, dY, X, B, dX);
}
// explicit instantiations
#define SPECIALIZED_BIAS_GELU_GRAD_IMPL(T, GeluComputationMode) \
template void LaunchBiasGeluGradDxKernel<T, GeluComputationMode>( \
cudaStream_t stream, int64_t input_size, int64_t bias_size, \
const T* dY, const T* X, const T* B, T* dX)
SPECIALIZED_BIAS_GELU_GRAD_IMPL(half, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(float, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(double, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(half, gelu_computation_mode::Approximation);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(float, gelu_computation_mode::Approximation);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(double, gelu_computation_mode::Approximation);
#if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__))
SPECIALIZED_BIAS_GELU_GRAD_IMPL(nv_bfloat16, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(nv_bfloat16, gelu_computation_mode::Approximation);
#endif
#undef SPECIALIZED_BIAS_GELU_GRAD_IMPL
} // namespace cuda
} // namespace onnxruntime
|
3b203269f1bb9c8e79eac9652481f433300397ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include "params.h"
__device__ int getIndex(int t_x, int t_y)
{
// calculate full index from a grid position
int indx = __mul24(t_y,blockDim.x) + t_x;
return __mul24(blockDim.y, __mul24(blockIdx.x, blockDim.x)) + indx;
}
__device__ int getIndex(int t_x)
{
// calculate full index from a grid position
return __mul24(blockDim.y, __mul24(blockIdx.x, blockDim.x)) + t_x;
}
__global__ void d_initRands(hiprandState_t *state, int seed)
{
int id = getIndex(threadIdx.x, threadIdx.y);
/* Each thread gets same seed, a different sequence
* number, no offset */
hiprand_init(seed, id, 0, &state[id]);
}
__global__ void d_updateStates(int* states, int* net, float* wg, int N_x, hiprandState_t* d_rands, int NL, int t)
{
int id = getIndex(threadIdx.x, threadIdx.y);
int edges=80;
int neigh[8][2] = { { 1, 1 }, { 1, 0 }, { 1, -1 } , { 0, 1 }, { 0, -1 }, { -1, -1 } , { -1, 0 }, { -1, 1 } };
int deltan = 0;
int bl = blockIdx.x;
int N = N_x*N_x;
int myInd = threadIdx.y*N_x + threadIdx.x;
//generate random permutation array
int permList[8] = {0,1,2,3,4,5,6,7};
int perm[8] ;//= {0,1,2,3,4,5,6,7};
/*
for (int e=0;e<edges;e++)
{
int n = hiprand_uniform(&d_rands[id])*8;
if (n==8) n==7;
bool up = (hiprand_uniform(&d_rands[id])>0.5);
while (permList[n]<0)
{
if (up) n++;
else n--;
if (n<0)
n=7;
if (n>7)
n=0;
}
perm[e]=permList[n];
permList[n]=-1;
}
// */
int nn = net[N*myInd];
for (int e=0;e<nn;e++)
{
int distance = e;
int n2 = net[N*myInd+e+1];// (((myInd + distance) % N) + N) % N;
int n2_id = getIndex(n2);
if (states[n2_id]>0.5)
deltan++;
}
// if (t>1)
// if (states[id]<0.5)
// printf("%d %d %0.3f %d \n", t, myInd, deltan/80.0, states[id]);
// deltan is N+ right now but we want (N+ - N-)
float localFrac = float(edges)*(2.0*float(deltan)/(1.0f*float(nn)) - 1.0);
deltan*=2;
deltan-=edges;
bool debug = false;
if ((debug)&&(id==0))
{
int sCount = 0;
/*
for (int x_n=0;x_n<N_x;x_n++)
for (int y_n=0;y_n<N_x;y_n++)
{
int n2_id = getIndex(x_n, y_n);
if (states[n2_id]>0.5)
sCount++;
}
*/
printf("%d %d %0.5f %d \n",t, deltan, localFrac, states[id]);
}
float cue = 1.0f + ( hiprand_normal(&d_rands[id]) * sqrtf(1.0f/(2.0f*wg[id])) ) ;
float pup = exp(-4.0f*wg[id]*cue);
float pall = pup*powf((1.0f - ws)/ws,localFrac);
int newState;
if (pall<1.0f)
newState = 1;
else
newState = 0;
__syncthreads();
if (myInd==t)
states[id] = newState;
}
__global__ void d_recordData(int* states, int* states2, hiprandState_t* d_rands, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t)
{
int group_id = threadIdx.y * N_x + threadIdx.x;
int N = N_x*N_x;
if ((group_id==0)&&(blockIdx.x==0))
for (int b=0;b<gridDim.x;b++)
{
if (t==0)
for (int i=0;i<N;i++)
states2[b * N + i] = states[b * N + i];
else
{
int totalUp = 0;
for (int i=0;i<N;i++)
if (states2[b * N + i] > 0.5)
totalUp++;
int nowDown = 0;
for (int i=0;i<N;i++)
if ((states2[b * N + i] > 0.5)&&(states[b * N + i] < 0.5))
nowDown++;
int nowUp = 0;
for (int i=0;i<N;i++)
if ((states2[b * N + i] < 0.5)&&(states[b * N + i] > 0.5))
nowUp++;
d_upcount[totalUp]+=1;
int c = d_upcount[totalUp];
// printf("%d %d %d %d\n",t, totalUp,nowDown, nowUp);
d_down[totalUp] = (nowDown/(float)N)/(float)c + (c-1)*d_down[totalUp]/(float)c;
d_up[totalUp] = (nowUp/(float)N)/(float)c + (c-1)*d_up[totalUp]/(float)c;
// res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1);
// now for something crazy!!!
// we're going to count all the uppies and then put them all in order
totalUp=0;
for (int i=0;i<N;i++)
{
if (states[b * N + i] > 0.5)
totalUp++;
// states[b * N + i] = 0;
}
// totalUp=32;
/* int nc = 0.875 * totalUp;
float frac = float(totalUp-nc)/float(N-totalUp);
for (int i=0;i<nc;i++)
states[b * N + i] = 1;
for (int i=nc;i<N;i++)
if (hiprand_uniform(&d_rands[group_id])< frac)
states[b * N + i] = 1;
*/
// int i2 = totalUp + 0.5*(N-totalUp);
// states[b * N + i2] = 1;
//
for (int i=0;i<N;i++)
states2[b * N + i] = states[b * N + i];
}
//res[t * gridDim.y + blockIdx.y] = counter;
// if (t==0)
// res[blockIdx.y] = counter;
// else
// res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1);
}
}
__global__ void block_sum(const int *input, int *per_block_results, const size_t n)
{
extern __shared__ int sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory
int x = 0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();
// contiguous range pattern
for(int offset = blockDim.x / 2;
offset > 0;
offset >>= 1)
{
if(threadIdx.x < offset)
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block hav
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0)
{
per_block_results[blockIdx.x] = sdata[0];
}
}
void initRands(dim3 threadGrid, int numBlocks, hiprandState_t *state, int seed)
{
hipLaunchKernelGGL(( d_initRands), dim3(numBlocks), dim3(threadGrid) , 0, 0, state, seed);
if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" );
}
void advanceTimestep(dim3 threadGrid, int numBlocks, hiprandState_t *rands, float* wg, int* states, int* net, int N_x, int NL, int t)
{
int r = rand() / ( RAND_MAX / (N_x*N_x) );
hipLaunchKernelGGL(( d_updateStates), dim3(numBlocks), dim3(threadGrid) , 0, 0, states, net, wg, N_x, rands, NL, r);
if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" );
}
void recordData(dim3 threadGrid, int numBlocks, int* states, int* states2, hiprandState_t *rands, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t)
{
hipLaunchKernelGGL(( d_recordData), dim3(numBlocks), dim3(threadGrid) , 0, 0, states, states2, rands, N_x, d_up, d_down, d_upcount, d_downcount, t);
if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" );
}
void countStates(int numThreads, int numBlocks, int* states, int* blockTotals, int N_ALL)
{
hipLaunchKernelGGL(( block_sum), dim3(numBlocks), dim3(numThreads), numThreads * sizeof(int) , 0, states, blockTotals, N_ALL);
if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" );
}
| 3b203269f1bb9c8e79eac9652481f433300397ce.cu |
#include <curand_kernel.h>
#include <stdio.h>
#include "params.h"
__device__ int getIndex(int t_x, int t_y)
{
// calculate full index from a grid position
int indx = __mul24(t_y,blockDim.x) + t_x;
return __mul24(blockDim.y, __mul24(blockIdx.x, blockDim.x)) + indx;
}
__device__ int getIndex(int t_x)
{
// calculate full index from a grid position
return __mul24(blockDim.y, __mul24(blockIdx.x, blockDim.x)) + t_x;
}
__global__ void d_initRands(curandState *state, int seed)
{
int id = getIndex(threadIdx.x, threadIdx.y);
/* Each thread gets same seed, a different sequence
* number, no offset */
curand_init(seed, id, 0, &state[id]);
}
__global__ void d_updateStates(int* states, int* net, float* wg, int N_x, curandState* d_rands, int NL, int t)
{
int id = getIndex(threadIdx.x, threadIdx.y);
int edges=80;
int neigh[8][2] = { { 1, 1 }, { 1, 0 }, { 1, -1 } , { 0, 1 }, { 0, -1 }, { -1, -1 } , { -1, 0 }, { -1, 1 } };
int deltan = 0;
int bl = blockIdx.x;
int N = N_x*N_x;
int myInd = threadIdx.y*N_x + threadIdx.x;
//generate random permutation array
int permList[8] = {0,1,2,3,4,5,6,7};
int perm[8] ;//= {0,1,2,3,4,5,6,7};
/*
for (int e=0;e<edges;e++)
{
int n = curand_uniform(&d_rands[id])*8;
if (n==8) n==7;
bool up = (curand_uniform(&d_rands[id])>0.5);
while (permList[n]<0)
{
if (up) n++;
else n--;
if (n<0)
n=7;
if (n>7)
n=0;
}
perm[e]=permList[n];
permList[n]=-1;
}
// */
int nn = net[N*myInd];
for (int e=0;e<nn;e++)
{
int distance = e;
int n2 = net[N*myInd+e+1];// (((myInd + distance) % N) + N) % N;
int n2_id = getIndex(n2);
if (states[n2_id]>0.5)
deltan++;
}
// if (t>1)
// if (states[id]<0.5)
// printf("%d %d %0.3f %d \n", t, myInd, deltan/80.0, states[id]);
// deltan is N+ right now but we want (N+ - N-)
float localFrac = float(edges)*(2.0*float(deltan)/(1.0f*float(nn)) - 1.0);
deltan*=2;
deltan-=edges;
bool debug = false;
if ((debug)&&(id==0))
{
int sCount = 0;
/*
for (int x_n=0;x_n<N_x;x_n++)
for (int y_n=0;y_n<N_x;y_n++)
{
int n2_id = getIndex(x_n, y_n);
if (states[n2_id]>0.5)
sCount++;
}
*/
printf("%d %d %0.5f %d \n",t, deltan, localFrac, states[id]);
}
float cue = 1.0f + ( curand_normal(&d_rands[id]) * sqrtf(1.0f/(2.0f*wg[id])) ) ;
float pup = exp(-4.0f*wg[id]*cue);
float pall = pup*powf((1.0f - ws)/ws,localFrac);
int newState;
if (pall<1.0f)
newState = 1;
else
newState = 0;
__syncthreads();
if (myInd==t)
states[id] = newState;
}
__global__ void d_recordData(int* states, int* states2, curandState* d_rands, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t)
{
int group_id = threadIdx.y * N_x + threadIdx.x;
int N = N_x*N_x;
if ((group_id==0)&&(blockIdx.x==0))
for (int b=0;b<gridDim.x;b++)
{
if (t==0)
for (int i=0;i<N;i++)
states2[b * N + i] = states[b * N + i];
else
{
int totalUp = 0;
for (int i=0;i<N;i++)
if (states2[b * N + i] > 0.5)
totalUp++;
int nowDown = 0;
for (int i=0;i<N;i++)
if ((states2[b * N + i] > 0.5)&&(states[b * N + i] < 0.5))
nowDown++;
int nowUp = 0;
for (int i=0;i<N;i++)
if ((states2[b * N + i] < 0.5)&&(states[b * N + i] > 0.5))
nowUp++;
d_upcount[totalUp]+=1;
int c = d_upcount[totalUp];
// printf("%d %d %d %d\n",t, totalUp,nowDown, nowUp);
d_down[totalUp] = (nowDown/(float)N)/(float)c + (c-1)*d_down[totalUp]/(float)c;
d_up[totalUp] = (nowUp/(float)N)/(float)c + (c-1)*d_up[totalUp]/(float)c;
// res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1);
// now for something crazy!!!
// we're going to count all the uppies and then put them all in order
totalUp=0;
for (int i=0;i<N;i++)
{
if (states[b * N + i] > 0.5)
totalUp++;
// states[b * N + i] = 0;
}
// totalUp=32;
/* int nc = 0.875 * totalUp;
float frac = float(totalUp-nc)/float(N-totalUp);
for (int i=0;i<nc;i++)
states[b * N + i] = 1;
for (int i=nc;i<N;i++)
if (curand_uniform(&d_rands[group_id])< frac)
states[b * N + i] = 1;
*/
// int i2 = totalUp + 0.5*(N-totalUp);
// states[b * N + i2] = 1;
//
for (int i=0;i<N;i++)
states2[b * N + i] = states[b * N + i];
}
//res[t * gridDim.y + blockIdx.y] = counter;
// if (t==0)
// res[blockIdx.y] = counter;
// else
// res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1);
}
}
__global__ void block_sum(const int *input, int *per_block_results, const size_t n)
{
extern __shared__ int sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory
int x = 0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();
// contiguous range pattern
for(int offset = blockDim.x / 2;
offset > 0;
offset >>= 1)
{
if(threadIdx.x < offset)
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block hav
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0)
{
per_block_results[blockIdx.x] = sdata[0];
}
}
void initRands(dim3 threadGrid, int numBlocks, curandState *state, int seed)
{
d_initRands<<< numBlocks, threadGrid >>>(state, seed);
if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" );
}
void advanceTimestep(dim3 threadGrid, int numBlocks, curandState *rands, float* wg, int* states, int* net, int N_x, int NL, int t)
{
int r = rand() / ( RAND_MAX / (N_x*N_x) );
d_updateStates<<< numBlocks, threadGrid >>>(states, net, wg, N_x, rands, NL, r);
if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" );
}
void recordData(dim3 threadGrid, int numBlocks, int* states, int* states2, curandState *rands, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t)
{
d_recordData<<< numBlocks, threadGrid >>>(states, states2, rands, N_x, d_up, d_down, d_upcount, d_downcount, t);
if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" );
}
void countStates(int numThreads, int numBlocks, int* states, int* blockTotals, int N_ALL)
{
block_sum<<< numBlocks, numThreads, numThreads * sizeof(int) >>>(states, blockTotals, N_ALL);
if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" );
}
|
5f2420ded4db6e1d67417108f87b083fbe091661.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/weighted_softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void WeightedSoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, const Dtype* sample_weight, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
Dtype w = sample_weight[n * spatial_dim + s];
// w = 1; /////// DEBUG LINE
loss[index] = -w * log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const Dtype* sample_weight = bottom[2]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( WeightedSoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, sample_weight, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void WeightedSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, const Dtype* sample_weight, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
Dtype w = sample_weight[n * spatial_dim + s];
// w = 1; /////// DEBUG LINE
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
for (int m = 0; m < channels; ++m) {
bottom_diff[n * dim + m * spatial_dim + s] *= w;
}
}
}
}
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const Dtype* sample_weight = bottom[2]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use to to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( WeightedSoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, sample_weight, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(WeightedSoftmaxWithLossLayer);
} // namespace caffe
| 5f2420ded4db6e1d67417108f87b083fbe091661.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/weighted_softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Forward kernel for the weighted softmax loss.
// One thread per (outer, spatial) position: nthreads = num * spatial_dim.
// Writes -w * log(prob[label]) into loss[index] and sets counts[index] to 1
// for positions that contribute (0 when the ignore label is hit), so the
// number of valid positions can later be recovered with a single asum.
template <typename Dtype>
__global__ void WeightedSoftmaxLossForwardGPU(const int nthreads,
          const Dtype* prob_data, const Dtype* label, const Dtype* sample_weight, Dtype* loss,
          const int num, const int dim, const int spatial_dim,
          const bool has_ignore_label_, const int ignore_label_,
          Dtype* counts) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      // Ignored positions contribute neither loss nor count.
      loss[index] = 0;
      counts[index] = 0;
    } else {
      // Per-position weight read from bottom[2].
      Dtype w = sample_weight[n * spatial_dim + s];
      // w = 1; /////// DEBUG LINE
      // Clamp the probability at FLT_MIN so log() never sees zero.
      loss[index] = -w * log(max(prob_data[n * dim + label_value * spatial_dim + s],
                      Dtype(FLT_MIN)));
      counts[index] = 1;
    }
  }
}
// GPU forward pass: runs the internal softmax, launches the per-position
// weighted-loss kernel, then reduces the partial losses into top[0],
// normalized according to the configured normalization mode.
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  // bottom[2] carries the per-sample weights.
  const Dtype* sample_weight = bottom[2]->gpu_data();
  const int dim = prob_.count() / outer_num_;
  const int nthreads = outer_num_ * inner_num_;
  // Since this memory is not used for anything until it is overwritten
  // on the backward pass, we use it here to avoid having to allocate new GPU
  // memory to accumulate intermediate results in the kernel.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  // Similarly, this memory is never used elsewhere, and thus we can use it
  // to avoid having to allocate additional GPU memory.
  Dtype* counts = prob_.mutable_gpu_diff();
  // NOLINT_NEXT_LINE(whitespace/operators)
  WeightedSoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
      CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, sample_weight, loss_data,
      outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
  Dtype loss;
  caffe_gpu_asum(nthreads, loss_data, &loss);
  Dtype valid_count = -1;
  // Only launch another CUDA kernel if we actually need the count of valid
  // outputs.
  if (normalization_ == LossParameter_NormalizationMode_VALID &&
      has_ignore_label_) {
    caffe_gpu_asum(nthreads, counts, &valid_count);
  }
  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
                                                        valid_count);
  if (top.size() == 2) {
    // Optional second top exposes the softmax probabilities.
    top[1]->ShareData(prob_);
  }
}
// Backward kernel for the weighted softmax loss.
// One thread per (outer, spatial) position. Assumes bottom_diff was
// pre-filled with the softmax probabilities; computes
//   d(loss)/d(input) = w * (prob - one_hot(label))
// by subtracting 1 at the label channel and scaling all channels at the
// position by the sample weight w. counts[] flags valid positions.
template <typename Dtype>
__global__ void WeightedSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
          const Dtype* label, const Dtype* sample_weight, Dtype* bottom_diff, const int num, const int dim,
          const int spatial_dim, const bool has_ignore_label_,
          const int ignore_label_, Dtype* counts) {
  const int channels = dim / spatial_dim;
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      // Ignored position: zero the gradient across all channels.
      for (int c = 0; c < channels; ++c) {
        bottom_diff[n * dim + c * spatial_dim + s] = 0;
      }
      counts[index] = 0;
    } else {
      Dtype w = sample_weight[n * spatial_dim + s];
      // w = 1; /////// DEBUG LINE
      bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
      counts[index] = 1;
      // Scale the whole gradient column by the sample weight.
      for (int m = 0; m < channels; ++m) {
        bottom_diff[n * dim + m * spatial_dim + s] *= w;
      }
    }
  }
}
// GPU backward pass: seeds bottom_diff with the stored probabilities,
// launches the weighted gradient kernel, and scales the result by the
// loss weight divided by the normalizer. Backprop to the label input
// (bottom[1]) is not supported.
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const Dtype* prob_data = prob_.gpu_data();
    const Dtype* top_data = top[0]->gpu_data();
    // Initialize the gradient with the softmax probabilities.
    caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
    const Dtype* label = bottom[1]->gpu_data();
    const Dtype* sample_weight = bottom[2]->gpu_data();
    const int dim = prob_.count() / outer_num_;
    const int nthreads = outer_num_ * inner_num_;
    // Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
    Dtype* counts = prob_.mutable_gpu_diff();
    // NOLINT_NEXT_LINE(whitespace/operators)
    WeightedSoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
        CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, sample_weight, bottom_diff,
        outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
    Dtype valid_count = -1;
    // Only launch another CUDA kernel if we actually need the count of valid
    // outputs.
    if (normalization_ == LossParameter_NormalizationMode_VALID &&
        has_ignore_label_) {
      caffe_gpu_asum(nthreads, counts, &valid_count);
    }
    const Dtype loss_weight = top[0]->cpu_diff()[0] /
                              get_normalizer(normalization_, valid_count);
    caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(WeightedSoftmaxWithLossLayer);
} // namespace caffe
|
12f2d436bcfa8643688c45cab0df5f6d3bbf86f5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort the process with a diagnostic when the most recent HIP runtime
// call failed; 'message' supplies caller context for the printout.
void check_error (const char* message) {
  hipError_t error = hipGetLastError ();
  if (error == hipSuccess)
    return;
  printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
  exit(-1);
}
// Generated 2-D 25-point Jacobi-style stencil kernel (5x5 neighbourhood).
// Each thread computes FOUR consecutive output rows (j .. j+3) at a single
// column i, reusing neighbour loads across the four accumulators.
// Assumes a row stride of 8196 doubles and operates on the interior
// [2, N-3] x [2, N-3]; the '&' between comparisons is a branchless bitwise
// AND of 0/1 comparison results, equivalent to && here.
// NOTE(review): because of the max(i0,2)/max(j0,2) clamps, the first blocks
// overlap their neighbours by 2 rows/columns; the overlapping threads
// recompute identical values, so the output is unaffected.
__global__ void j2d25pt (double * __restrict__ l_in, double * __restrict__ l_out, int N) {
  //Determining the block's indices
  int i0 = (int)(blockIdx.x)*(int)(blockDim.x);
  int i = max(i0,2) + (int)(threadIdx.x);
  int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y);
  int j = max(j0,2) + 4*(int)(threadIdx.y);
  double (*in)[8196] = (double (*)[8196]) l_in;
  double (*out)[8196] = (double (*)[8196]) l_out;
  if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
    double outjc0ic0 = 0.1 * in[j-2][i-2];
    outjc0ic0 += 0.1 * in[j-2][i+2];
    outjc0ic0 += 0.2 * in[j-2][i-1];
    outjc0ic0 += 0.2 * in[j-2][i+1];
    outjc0ic0 += 0.3 * in[j-2][i];
    outjc0ic0 += 0.1 * in[j+2][i-2];
    double outjp1ic0 = 1.1 * in[j+2][i-2];
    double outjp2ic0 = 2.1 * in[j+2][i-2];
    double outjp3ic0 = 1.1 * in[j+2][i-2];
    outjc0ic0 += 0.1 * in[j+2][i+2];
    outjp1ic0 += 1.1 * in[j+2][i+2];
    outjp2ic0 += 2.1 * in[j+2][i+2];
    outjp3ic0 += 1.1 * in[j+2][i+2];
    outjc0ic0 += 0.2 * in[j+2][i-1];
    outjp1ic0 += 1.2 * in[j+2][i-1];
    outjp2ic0 += 2.2 * in[j+2][i-1];
    outjp3ic0 += 1.2 * in[j+2][i-1];
    outjc0ic0 += 0.2 * in[j+2][i+1];
    outjp1ic0 += 1.2 * in[j+2][i+1];
    outjp2ic0 += 2.2 * in[j+2][i+1];
    outjp3ic0 += 1.2 * in[j+2][i+1];
    outjc0ic0 += 0.3 * in[j+2][i];
    outjp1ic0 += 1.3 * in[j+2][i];
    outjp2ic0 += 2.3 * in[j+2][i];
    outjp3ic0 += 1.3 * in[j+2][i];
    outjc0ic0 += 1.1 * in[j-1][i-2];
    outjp1ic0 += 0.1 * in[j-1][i-2];
    outjc0ic0 += 1.1 * in[j-1][i+2];
    outjp1ic0 += 0.1 * in[j-1][i+2];
    outjc0ic0 += 1.1 * in[j+1][i-2];
    outjp1ic0 += 2.1 * in[j+1][i-2];
    outjp2ic0 += 1.1 * in[j+1][i-2];
    outjp3ic0 += 0.1 * in[j+1][i-2];
    outjc0ic0 += 1.1 * in[j+1][i+2];
    outjp1ic0 += 2.1 * in[j+1][i+2];
    outjp2ic0 += 1.1 * in[j+1][i+2];
    outjp3ic0 += 0.1 * in[j+1][i+2];
    outjc0ic0 += 1.2 * in[j-1][i-1];
    outjp1ic0 += 0.2 * in[j-1][i-1];
    outjc0ic0 += 1.2 * in[j-1][i+1];
    outjp1ic0 += 0.2 * in[j-1][i+1];
    outjc0ic0 += 1.2 * in[j+1][i-1];
    outjp1ic0 += 2.2 * in[j+1][i-1];
    outjp2ic0 += 1.2 * in[j+1][i-1];
    outjp3ic0 += 0.2 * in[j+1][i-1];
    outjc0ic0 += 1.2 * in[j+1][i+1];
    outjp1ic0 += 2.2 * in[j+1][i+1];
    outjp2ic0 += 1.2 * in[j+1][i+1];
    outjp3ic0 += 0.2 * in[j+1][i+1];
    outjc0ic0 += 1.3 * in[j-1][i];
    outjp1ic0 += 0.3 * in[j-1][i];
    outjc0ic0 += 1.3 * in[j+1][i];
    outjp1ic0 += 2.3 * in[j+1][i];
    outjp2ic0 += 1.3 * in[j+1][i];
    outjp3ic0 += 0.3 * in[j+1][i];
    outjc0ic0 += 2.1 * in[j][i-2];
    outjp1ic0 += 1.1 * in[j][i-2];
    outjp2ic0 += 0.1 * in[j][i-2];
    outjc0ic0 += 2.1 * in[j][i+2];
    outjp1ic0 += 1.1 * in[j][i+2];
    outjp2ic0 += 0.1 * in[j][i+2];
    outjc0ic0 += 2.2 * in[j][i-1];
    outjp1ic0 += 1.2 * in[j][i-1];
    outjp2ic0 += 0.2 * in[j][i-1];
    outjc0ic0 += 2.2 * in[j][i+1];
    outjp1ic0 += 1.2 * in[j][i+1];
    outjp2ic0 += 0.2 * in[j][i+1];
    outjc0ic0 += 2.3 * in[j][i];
    outjp1ic0 += 1.3 * in[j][i];
    outjp2ic0 += 0.3 * in[j][i];
    outjp1ic0 += 0.1 * in[j+3][i-2];
    outjp2ic0 += 1.1 * in[j+3][i-2];
    outjp3ic0 += 2.1 * in[j+3][i-2];
    outjp1ic0 += 0.1 * in[j+3][i+2];
    outjp2ic0 += 1.1 * in[j+3][i+2];
    outjp3ic0 += 2.1 * in[j+3][i+2];
    outjp1ic0 += 0.2 * in[j+3][i-1];
    outjp2ic0 += 1.2 * in[j+3][i-1];
    outjp3ic0 += 2.2 * in[j+3][i-1];
    outjp1ic0 += 0.2 * in[j+3][i+1];
    outjp2ic0 += 1.2 * in[j+3][i+1];
    outjp3ic0 += 2.2 * in[j+3][i+1];
    outjp1ic0 += 0.3 * in[j+3][i];
    outjp2ic0 += 1.3 * in[j+3][i];
    outjp3ic0 += 2.3 * in[j+3][i];
    outjp2ic0 += 0.1 * in[j+4][i-2];
    outjp3ic0 += 1.1 * in[j+4][i-2];
    outjp2ic0 += 0.1 * in[j+4][i+2];
    outjp3ic0 += 1.1 * in[j+4][i+2];
    outjp2ic0 += 0.2 * in[j+4][i-1];
    outjp3ic0 += 1.2 * in[j+4][i-1];
    outjp2ic0 += 0.2 * in[j+4][i+1];
    outjp3ic0 += 1.2 * in[j+4][i+1];
    outjp2ic0 += 0.3 * in[j+4][i];
    outjp3ic0 += 1.3 * in[j+4][i];
    outjp3ic0 += 0.1 * in[j+5][i-2];
    outjp3ic0 += 0.1 * in[j+5][i+2];
    outjp3ic0 += 0.2 * in[j+5][i-1];
    outjp3ic0 += 0.2 * in[j+5][i+1];
    outjp3ic0 += 0.3 * in[j+5][i];
    // Commit the four output rows computed by this thread.
    out[j][i] = outjc0ic0;
    out[j+1][i] = outjp1ic0;
    out[j+2][i] = outjp2ic0;
    out[j+3][i] = outjp3ic0;
  }
}
// Host-side driver for the 25-point stencil: copies h_in to the device,
// launches j2d25pt once, and copies the result back into h_out.
// N is the edge length of the square grid; the kernel hard-codes a row
// stride of 8196 doubles, so N must not exceed 8196.
// Fix: the original checked errors only after the hipMalloc calls; the
// memcpys and the kernel launch were unchecked, so failures there went
// unnoticed. check_error() is now invoked after each runtime operation
// (launch-configuration errors only surface via hipGetLastError).
extern "C" void host_code (double *h_in, double *h_out, int N) {
  double *in;
  hipMalloc (&in, sizeof(double)*N*N);
  check_error ("Failed to allocate device memory for in\n");
  hipMemcpy (in, h_in, sizeof(double)*N*N, hipMemcpyHostToDevice);
  check_error ("Failed to copy in to device\n");
  double *out;
  hipMalloc (&out, sizeof(double)*N*N);
  check_error ("Failed to allocate device memory for out\n");
  dim3 blockconfig (16, 8);
  // Each thread covers 4 rows, hence the 4x factor in the y grid size.
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, 4*blockconfig.y));
  hipLaunchKernelGGL(( j2d25pt), dim3(gridconfig), dim3(blockconfig), 0, 0, in, out, N);
  check_error ("Kernel launch failed\n");
  hipMemcpy (h_out, out, sizeof(double)*N*N, hipMemcpyDeviceToHost);
  check_error ("Failed to copy out to host\n");
  hipFree (in);
  hipFree (out);
}
| 12f2d436bcfa8643688c45cab0df5f6d3bbf86f5.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort the process with a diagnostic when the most recent CUDA runtime
// call failed; 'message' supplies caller context for the printout.
void check_error (const char* message) {
  cudaError_t error = cudaGetLastError ();
  if (error == cudaSuccess)
    return;
  printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
  exit(-1);
}
// Generated 2-D 25-point Jacobi-style stencil kernel (5x5 neighbourhood).
// Each thread computes FOUR consecutive output rows (j .. j+3) at a single
// column i, reusing neighbour loads across the four accumulators.
// Assumes a row stride of 8196 doubles and operates on the interior
// [2, N-3] x [2, N-3]; the '&' between comparisons is a branchless bitwise
// AND of 0/1 comparison results, equivalent to && here.
// NOTE(review): because of the max(i0,2)/max(j0,2) clamps, the first blocks
// overlap their neighbours by 2 rows/columns; the overlapping threads
// recompute identical values, so the output is unaffected.
__global__ void j2d25pt (double * __restrict__ l_in, double * __restrict__ l_out, int N) {
  //Determining the block's indices
  int i0 = (int)(blockIdx.x)*(int)(blockDim.x);
  int i = max(i0,2) + (int)(threadIdx.x);
  int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y);
  int j = max(j0,2) + 4*(int)(threadIdx.y);
  double (*in)[8196] = (double (*)[8196]) l_in;
  double (*out)[8196] = (double (*)[8196]) l_out;
  if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
    double outjc0ic0 = 0.1 * in[j-2][i-2];
    outjc0ic0 += 0.1 * in[j-2][i+2];
    outjc0ic0 += 0.2 * in[j-2][i-1];
    outjc0ic0 += 0.2 * in[j-2][i+1];
    outjc0ic0 += 0.3 * in[j-2][i];
    outjc0ic0 += 0.1 * in[j+2][i-2];
    double outjp1ic0 = 1.1 * in[j+2][i-2];
    double outjp2ic0 = 2.1 * in[j+2][i-2];
    double outjp3ic0 = 1.1 * in[j+2][i-2];
    outjc0ic0 += 0.1 * in[j+2][i+2];
    outjp1ic0 += 1.1 * in[j+2][i+2];
    outjp2ic0 += 2.1 * in[j+2][i+2];
    outjp3ic0 += 1.1 * in[j+2][i+2];
    outjc0ic0 += 0.2 * in[j+2][i-1];
    outjp1ic0 += 1.2 * in[j+2][i-1];
    outjp2ic0 += 2.2 * in[j+2][i-1];
    outjp3ic0 += 1.2 * in[j+2][i-1];
    outjc0ic0 += 0.2 * in[j+2][i+1];
    outjp1ic0 += 1.2 * in[j+2][i+1];
    outjp2ic0 += 2.2 * in[j+2][i+1];
    outjp3ic0 += 1.2 * in[j+2][i+1];
    outjc0ic0 += 0.3 * in[j+2][i];
    outjp1ic0 += 1.3 * in[j+2][i];
    outjp2ic0 += 2.3 * in[j+2][i];
    outjp3ic0 += 1.3 * in[j+2][i];
    outjc0ic0 += 1.1 * in[j-1][i-2];
    outjp1ic0 += 0.1 * in[j-1][i-2];
    outjc0ic0 += 1.1 * in[j-1][i+2];
    outjp1ic0 += 0.1 * in[j-1][i+2];
    outjc0ic0 += 1.1 * in[j+1][i-2];
    outjp1ic0 += 2.1 * in[j+1][i-2];
    outjp2ic0 += 1.1 * in[j+1][i-2];
    outjp3ic0 += 0.1 * in[j+1][i-2];
    outjc0ic0 += 1.1 * in[j+1][i+2];
    outjp1ic0 += 2.1 * in[j+1][i+2];
    outjp2ic0 += 1.1 * in[j+1][i+2];
    outjp3ic0 += 0.1 * in[j+1][i+2];
    outjc0ic0 += 1.2 * in[j-1][i-1];
    outjp1ic0 += 0.2 * in[j-1][i-1];
    outjc0ic0 += 1.2 * in[j-1][i+1];
    outjp1ic0 += 0.2 * in[j-1][i+1];
    outjc0ic0 += 1.2 * in[j+1][i-1];
    outjp1ic0 += 2.2 * in[j+1][i-1];
    outjp2ic0 += 1.2 * in[j+1][i-1];
    outjp3ic0 += 0.2 * in[j+1][i-1];
    outjc0ic0 += 1.2 * in[j+1][i+1];
    outjp1ic0 += 2.2 * in[j+1][i+1];
    outjp2ic0 += 1.2 * in[j+1][i+1];
    outjp3ic0 += 0.2 * in[j+1][i+1];
    outjc0ic0 += 1.3 * in[j-1][i];
    outjp1ic0 += 0.3 * in[j-1][i];
    outjc0ic0 += 1.3 * in[j+1][i];
    outjp1ic0 += 2.3 * in[j+1][i];
    outjp2ic0 += 1.3 * in[j+1][i];
    outjp3ic0 += 0.3 * in[j+1][i];
    outjc0ic0 += 2.1 * in[j][i-2];
    outjp1ic0 += 1.1 * in[j][i-2];
    outjp2ic0 += 0.1 * in[j][i-2];
    outjc0ic0 += 2.1 * in[j][i+2];
    outjp1ic0 += 1.1 * in[j][i+2];
    outjp2ic0 += 0.1 * in[j][i+2];
    outjc0ic0 += 2.2 * in[j][i-1];
    outjp1ic0 += 1.2 * in[j][i-1];
    outjp2ic0 += 0.2 * in[j][i-1];
    outjc0ic0 += 2.2 * in[j][i+1];
    outjp1ic0 += 1.2 * in[j][i+1];
    outjp2ic0 += 0.2 * in[j][i+1];
    outjc0ic0 += 2.3 * in[j][i];
    outjp1ic0 += 1.3 * in[j][i];
    outjp2ic0 += 0.3 * in[j][i];
    outjp1ic0 += 0.1 * in[j+3][i-2];
    outjp2ic0 += 1.1 * in[j+3][i-2];
    outjp3ic0 += 2.1 * in[j+3][i-2];
    outjp1ic0 += 0.1 * in[j+3][i+2];
    outjp2ic0 += 1.1 * in[j+3][i+2];
    outjp3ic0 += 2.1 * in[j+3][i+2];
    outjp1ic0 += 0.2 * in[j+3][i-1];
    outjp2ic0 += 1.2 * in[j+3][i-1];
    outjp3ic0 += 2.2 * in[j+3][i-1];
    outjp1ic0 += 0.2 * in[j+3][i+1];
    outjp2ic0 += 1.2 * in[j+3][i+1];
    outjp3ic0 += 2.2 * in[j+3][i+1];
    outjp1ic0 += 0.3 * in[j+3][i];
    outjp2ic0 += 1.3 * in[j+3][i];
    outjp3ic0 += 2.3 * in[j+3][i];
    outjp2ic0 += 0.1 * in[j+4][i-2];
    outjp3ic0 += 1.1 * in[j+4][i-2];
    outjp2ic0 += 0.1 * in[j+4][i+2];
    outjp3ic0 += 1.1 * in[j+4][i+2];
    outjp2ic0 += 0.2 * in[j+4][i-1];
    outjp3ic0 += 1.2 * in[j+4][i-1];
    outjp2ic0 += 0.2 * in[j+4][i+1];
    outjp3ic0 += 1.2 * in[j+4][i+1];
    outjp2ic0 += 0.3 * in[j+4][i];
    outjp3ic0 += 1.3 * in[j+4][i];
    outjp3ic0 += 0.1 * in[j+5][i-2];
    outjp3ic0 += 0.1 * in[j+5][i+2];
    outjp3ic0 += 0.2 * in[j+5][i-1];
    outjp3ic0 += 0.2 * in[j+5][i+1];
    outjp3ic0 += 0.3 * in[j+5][i];
    // Commit the four output rows computed by this thread.
    out[j][i] = outjc0ic0;
    out[j+1][i] = outjp1ic0;
    out[j+2][i] = outjp2ic0;
    out[j+3][i] = outjp3ic0;
  }
}
// Host-side driver for the 25-point stencil: copies h_in to the device,
// launches j2d25pt once, and copies the result back into h_out.
// N is the edge length of the square grid; the kernel hard-codes a row
// stride of 8196 doubles, so N must not exceed 8196.
// Fix: the original checked errors only after the cudaMalloc calls; the
// memcpys and the kernel launch were unchecked, so failures there went
// unnoticed. check_error() is now invoked after each runtime operation
// (launch-configuration errors only surface via cudaGetLastError).
extern "C" void host_code (double *h_in, double *h_out, int N) {
  double *in;
  cudaMalloc (&in, sizeof(double)*N*N);
  check_error ("Failed to allocate device memory for in\n");
  cudaMemcpy (in, h_in, sizeof(double)*N*N, cudaMemcpyHostToDevice);
  check_error ("Failed to copy in to device\n");
  double *out;
  cudaMalloc (&out, sizeof(double)*N*N);
  check_error ("Failed to allocate device memory for out\n");
  dim3 blockconfig (16, 8);
  // Each thread covers 4 rows, hence the 4x factor in the y grid size.
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, 4*blockconfig.y));
  j2d25pt<<<gridconfig, blockconfig>>> (in, out, N);
  check_error ("Kernel launch failed\n");
  cudaMemcpy (h_out, out, sizeof(double)*N*N, cudaMemcpyDeviceToHost);
  check_error ("Failed to copy out to host\n");
  cudaFree (in);
  cudaFree (out);
}
|
eb2173718f256697016ed0a9b18fc719b9874a7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <atomic>
#include <limits>
#include <mutex>
#include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudastdAlgorithm.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterGPUKernel.h"
#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforGPU.h"
#include "ClusterSLOnGPU.h"
using ClusterSLView = trackerHitAssociationHeterogeneous::ClusterSLView;
using Clus2TP = ClusterSLView::Clus2TP;
// #define DUMP_TK2
// Associate each pixel digi's cluster with the simulated track(s) it came
// from. For every digi: binary-search its (detId, channel) key in the
// sorted links_d table; on a match, record the link index for the digi's
// cluster via atomicCAS in tkId_d (first track) or tkId2_d (second
// distinct track) and bump the matching occurrence counter n1_d / n2_d.
// One thread per digi; launch with >= ndigis threads.
__global__ void simLink(const SiPixelDigisCUDA::DeviceConstView* dd,
                        uint32_t ndigis,
                        TrackingRecHit2DSOAView const* hhp,
                        ClusterSLView sl,
                        uint32_t n) {
  constexpr uint32_t invTK = 0;  // std::numeric_limits<int32_t>::max();
  constexpr uint16_t InvId = 9999;  // must be > MaxNumModules
  auto const& hh = *hhp;
  auto i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= ndigis)
    return;
  auto id = dd->moduleInd(i);
  if (InvId == id)
    return;
  assert(id < 2000);
  auto ch = pixelgpudetails::pixelToChannel(dd->xx(i), dd->yy(i));
  auto first = hh.hitsModuleStart(id);
  // Global cluster index: module offset plus in-module cluster id.
  auto cl = first + dd->clus(i);
  assert(cl < 2000 * blockDim.x);
  // Search key: (detId, channel); the remaining fields are ignored by
  // the comparators below.
  const Clus2TP me{{id, ch, 0, 0, 0, 0, 0}};
  auto less = [] __host__ __device__(Clus2TP const& a, Clus2TP const& b) -> bool {
    // in this context we do not care of [2]
    return a[0] < b[0] or ((not(b[0] < a[0])) and (a[1] < b[1]));
  };
  auto equal = [] __host__ __device__(Clus2TP const& a, Clus2TP const& b) -> bool {
    // in this context we do not care of [2]
    return a[0] == b[0] and a[1] == b[1];
  };
  auto const* b = sl.links_d;
  auto const* e = b + n;
  auto p = cuda_std::lower_bound(b, e, me, less);
  int32_t j = p - sl.links_d;
  assert(j >= 0);
  // l[2] is the track id stored in the link table.
  auto getTK = [&](int i) {
    auto const& l = sl.links_d[i];
    return l[2];
  };
  // Clamp so the comparison below never reads past the table.
  j = ::min(int(j), int(n - 1));
  if (equal(me, sl.links_d[j])) {
    auto const itk = j;
    auto const tk = getTK(j);
    // First-writer wins: claim the primary slot, else fall back to the
    // secondary slot for a second distinct track.
    auto old = atomicCAS(&sl.tkId_d[cl], invTK, itk);
    if (invTK == old or tk == getTK(old)) {
      atomicAdd(&sl.n1_d[cl], 1);
    } else {
      auto old = atomicCAS(&sl.tkId2_d[cl], invTK, itk);
      if (invTK == old or tk == getTK(old))
        atomicAdd(&sl.n2_d[cl], 1);
    }
  }
}
// Reset the per-cluster association bookkeeping before simLink runs.
// One thread per slot; the four arrays each hold 'nhits' entries
// (allocated via ProductCUDA(nlinks, nhits, stream) in makeAsync).
// Fix: the original guard was 'i > nhits', which let i == nhits through
// and wrote one element past the end of each array (off-by-one).
__global__ void doZero(uint32_t nhits, ClusterSLView sl) {
  auto i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= nhits)
    return;
  sl.tkId_d[i] = 0;
  sl.n1_d[i] = 0;
  sl.tkId2_d[i] = 0;
  sl.n2_d[i] = 0;
}
// Print one CSV line per hit (column names come from printCSVHeader).
// Launched in batches from makeAsync: 'first' is the base hit index for
// this launch. tk1 (and tk2 under DUMP_TK2) are the link-table entries of
// the first (second) track associated to the hit by simLink.
// Fix: the original guard was 'i > nhits', letting i == nhits through and
// reading one element past the end of the hit arrays (off-by-one).
__global__ void dumpLink(int first, int ev, TrackingRecHit2DSOAView const* hhp, uint32_t nhits, ClusterSLView sl) {
  auto i = first + blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= nhits)
    return;
  auto const& hh = *hhp;
  auto const& tk1 = sl.links_d[sl.tkId_d[i]];
#ifdef DUMP_TK2
  auto const& tk2 = sl.links_d[sl.tkId2_d[i]];
  printf("HIT: %d %d %d %d %.4f %.4f %.4f %.4f %d %d %d %d %d %d %d %d %d %d %d %d %d %d\n",
#else
  printf("HIT: %d %d %d %d %.4f %.4f %.4f %.4f %d %d %d %d %d %d %d %d %d\n",
#endif
         ev,
         i,
         hh.detectorIndex(i),
         hh.charge(i),
         hh.xGlobal(i),
         hh.yGlobal(i),
         hh.zGlobal(i),
         hh.rGlobal(i),
         hh.iphi(i),
         hh.clusterSizeX(i),
         hh.clusterSizeY(i),
         tk1[2],
         tk1[3],
         tk1[4],
         tk1[5],
         tk1[6],
         sl.n1_d[i]
#ifdef DUMP_TK2
         ,
         tk2[2],
         tk2[3],
         tk2[4],
         tk2[5],
         tk2[6],
         sl.n2_d[i]
#endif
  );
}
namespace clusterSLOnGPU {
  // Print the column names matching dumpLink's per-hit CSV output.
  void printCSVHeader() {
#ifdef DUMP_TK2
    printf("HIT: %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
#else
    printf("HIT: %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
#endif
           "ev",
           "ind",
           "det",
           "charge",
           "xg",
           "yg",
           "zg",
           "rg",
           "iphi",
           "xsize",
           "ysize",
           "tkId",
           "pt",
           "eta",
           "z0",
           "r0",
           "n1"
#ifdef DUMP_TK2
           ,
           "tkId2",
           "pt2",
           "eta",
           "z02",
           "r02",
           "n2"
#endif
    );
  }
  // Process-wide monotonically increasing event id shared by all Kernel
  // instances; doneCSVHeader guarantees the header prints at most once.
  std::atomic<int> evId(0);
  std::once_flag doneCSVHeader;
  Kernel::Kernel(bool dump) : doDump(dump) {
    if (doDump)
      std::call_once(doneCSVHeader, printCSVHeader);
  }
  // Build the cluster -> TrackingParticle association on the GPU:
  // copy the sorted digi->TP link table to the device, zero the
  // per-cluster bookkeeping arrays, and run simLink over all digis.
  // All work is enqueued on 'stream'; when doDump is set, the per-hit CSV
  // is also printed (synchronizing the stream around each dump batch).
  trackerHitAssociationHeterogeneous::ProductCUDA Kernel::makeAsync(SiPixelDigisCUDA const& dd,
                                                                    uint32_t ndigis,
                                                                    HitsOnCPU const& hh,
                                                                    Clus2TP const* digi2tp,
                                                                    uint32_t nhits,
                                                                    uint32_t nlinks,
                                                                    hipStream_t stream) const {
    trackerHitAssociationHeterogeneous::ProductCUDA product(nlinks, nhits, stream);
    auto& csl = product.view();
    cudaCheck(hipMemcpyAsync(csl.links_d, digi2tp, sizeof(Clus2TP) * nlinks, hipMemcpyDefault, stream));
    if (0 == nhits)
      return product;
    int ev = ++evId;
    int threadsPerBlock = 256;
    int blocks = (nhits + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(( doZero), dim3(blocks), dim3(threadsPerBlock), 0, stream, nhits, csl);
    cudaCheck(hipGetLastError());
    blocks = (ndigis + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(( simLink), dim3(blocks), dim3(threadsPerBlock), 0, stream, dd.view(), ndigis, hh.view(), csl, nlinks);
    cudaCheck(hipGetLastError());
    if (doDump) {
      hipStreamSynchronize(stream);  // flush previous printf
      // one line == 200B so each kernel can print only 5K lines....
      blocks = 16;
      for (int first = 0; first < int(nhits); first += blocks * threadsPerBlock) {
        hipLaunchKernelGGL(( dumpLink), dim3(blocks), dim3(threadsPerBlock), 0, stream, first, ev, hh.view(), nhits, csl);
        cudaCheck(hipGetLastError());
        hipStreamSynchronize(stream);
      }
    }
    cudaCheck(hipGetLastError());
    return product;
  }
}  // namespace clusterSLOnGPU
| eb2173718f256697016ed0a9b18fc719b9874a7f.cu | #include <atomic>
#include <limits>
#include <mutex>
#include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudastdAlgorithm.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterGPUKernel.h"
#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforGPU.h"
#include "ClusterSLOnGPU.h"
using ClusterSLView = trackerHitAssociationHeterogeneous::ClusterSLView;
using Clus2TP = ClusterSLView::Clus2TP;
// #define DUMP_TK2
// Associate each pixel digi's cluster with the simulated track(s) it came
// from. For every digi: binary-search its (detId, channel) key in the
// sorted links_d table; on a match, record the link index for the digi's
// cluster via atomicCAS in tkId_d (first track) or tkId2_d (second
// distinct track) and bump the matching occurrence counter n1_d / n2_d.
// One thread per digi; launch with >= ndigis threads.
__global__ void simLink(const SiPixelDigisCUDA::DeviceConstView* dd,
                        uint32_t ndigis,
                        TrackingRecHit2DSOAView const* hhp,
                        ClusterSLView sl,
                        uint32_t n) {
  constexpr uint32_t invTK = 0;  // std::numeric_limits<int32_t>::max();
  constexpr uint16_t InvId = 9999;  // must be > MaxNumModules
  auto const& hh = *hhp;
  auto i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= ndigis)
    return;
  auto id = dd->moduleInd(i);
  if (InvId == id)
    return;
  assert(id < 2000);
  auto ch = pixelgpudetails::pixelToChannel(dd->xx(i), dd->yy(i));
  auto first = hh.hitsModuleStart(id);
  // Global cluster index: module offset plus in-module cluster id.
  auto cl = first + dd->clus(i);
  assert(cl < 2000 * blockDim.x);
  // Search key: (detId, channel); the remaining fields are ignored by
  // the comparators below.
  const Clus2TP me{{id, ch, 0, 0, 0, 0, 0}};
  auto less = [] __host__ __device__(Clus2TP const& a, Clus2TP const& b) -> bool {
    // in this context we do not care of [2]
    return a[0] < b[0] or ((not(b[0] < a[0])) and (a[1] < b[1]));
  };
  auto equal = [] __host__ __device__(Clus2TP const& a, Clus2TP const& b) -> bool {
    // in this context we do not care of [2]
    return a[0] == b[0] and a[1] == b[1];
  };
  auto const* b = sl.links_d;
  auto const* e = b + n;
  auto p = cuda_std::lower_bound(b, e, me, less);
  int32_t j = p - sl.links_d;
  assert(j >= 0);
  // l[2] is the track id stored in the link table.
  auto getTK = [&](int i) {
    auto const& l = sl.links_d[i];
    return l[2];
  };
  // Clamp so the comparison below never reads past the table.
  j = std::min(int(j), int(n - 1));
  if (equal(me, sl.links_d[j])) {
    auto const itk = j;
    auto const tk = getTK(j);
    // First-writer wins: claim the primary slot, else fall back to the
    // secondary slot for a second distinct track.
    auto old = atomicCAS(&sl.tkId_d[cl], invTK, itk);
    if (invTK == old or tk == getTK(old)) {
      atomicAdd(&sl.n1_d[cl], 1);
    } else {
      auto old = atomicCAS(&sl.tkId2_d[cl], invTK, itk);
      if (invTK == old or tk == getTK(old))
        atomicAdd(&sl.n2_d[cl], 1);
    }
  }
}
// Reset the per-cluster association bookkeeping before simLink runs.
// One thread per slot; the four arrays each hold 'nhits' entries
// (allocated via ProductCUDA(nlinks, nhits, stream) in makeAsync).
// Fix: the original guard was 'i > nhits', which let i == nhits through
// and wrote one element past the end of each array (off-by-one).
__global__ void doZero(uint32_t nhits, ClusterSLView sl) {
  auto i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= nhits)
    return;
  sl.tkId_d[i] = 0;
  sl.n1_d[i] = 0;
  sl.tkId2_d[i] = 0;
  sl.n2_d[i] = 0;
}
// Print one CSV line per hit (column names come from printCSVHeader).
// Launched in batches from makeAsync: 'first' is the base hit index for
// this launch. tk1 (and tk2 under DUMP_TK2) are the link-table entries of
// the first (second) track associated to the hit by simLink.
// Fix: the original guard was 'i > nhits', letting i == nhits through and
// reading one element past the end of the hit arrays (off-by-one).
__global__ void dumpLink(int first, int ev, TrackingRecHit2DSOAView const* hhp, uint32_t nhits, ClusterSLView sl) {
  auto i = first + blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= nhits)
    return;
  auto const& hh = *hhp;
  auto const& tk1 = sl.links_d[sl.tkId_d[i]];
#ifdef DUMP_TK2
  auto const& tk2 = sl.links_d[sl.tkId2_d[i]];
  printf("HIT: %d %d %d %d %.4f %.4f %.4f %.4f %d %d %d %d %d %d %d %d %d %d %d %d %d %d\n",
#else
  printf("HIT: %d %d %d %d %.4f %.4f %.4f %.4f %d %d %d %d %d %d %d %d %d\n",
#endif
         ev,
         i,
         hh.detectorIndex(i),
         hh.charge(i),
         hh.xGlobal(i),
         hh.yGlobal(i),
         hh.zGlobal(i),
         hh.rGlobal(i),
         hh.iphi(i),
         hh.clusterSizeX(i),
         hh.clusterSizeY(i),
         tk1[2],
         tk1[3],
         tk1[4],
         tk1[5],
         tk1[6],
         sl.n1_d[i]
#ifdef DUMP_TK2
         ,
         tk2[2],
         tk2[3],
         tk2[4],
         tk2[5],
         tk2[6],
         sl.n2_d[i]
#endif
  );
}
namespace clusterSLOnGPU {
  // Print the column names matching dumpLink's per-hit CSV output.
  void printCSVHeader() {
#ifdef DUMP_TK2
    printf("HIT: %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
#else
    printf("HIT: %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
#endif
           "ev",
           "ind",
           "det",
           "charge",
           "xg",
           "yg",
           "zg",
           "rg",
           "iphi",
           "xsize",
           "ysize",
           "tkId",
           "pt",
           "eta",
           "z0",
           "r0",
           "n1"
#ifdef DUMP_TK2
           ,
           "tkId2",
           "pt2",
           "eta",
           "z02",
           "r02",
           "n2"
#endif
    );
  }
  // Process-wide monotonically increasing event id shared by all Kernel
  // instances; doneCSVHeader guarantees the header prints at most once.
  std::atomic<int> evId(0);
  std::once_flag doneCSVHeader;
  Kernel::Kernel(bool dump) : doDump(dump) {
    if (doDump)
      std::call_once(doneCSVHeader, printCSVHeader);
  }
  // Build the cluster -> TrackingParticle association on the GPU:
  // copy the sorted digi->TP link table to the device, zero the
  // per-cluster bookkeeping arrays, and run simLink over all digis.
  // All work is enqueued on 'stream'; when doDump is set, the per-hit CSV
  // is also printed (synchronizing the stream around each dump batch).
  trackerHitAssociationHeterogeneous::ProductCUDA Kernel::makeAsync(SiPixelDigisCUDA const& dd,
                                                                    uint32_t ndigis,
                                                                    HitsOnCPU const& hh,
                                                                    Clus2TP const* digi2tp,
                                                                    uint32_t nhits,
                                                                    uint32_t nlinks,
                                                                    cudaStream_t stream) const {
    trackerHitAssociationHeterogeneous::ProductCUDA product(nlinks, nhits, stream);
    auto& csl = product.view();
    cudaCheck(cudaMemcpyAsync(csl.links_d, digi2tp, sizeof(Clus2TP) * nlinks, cudaMemcpyDefault, stream));
    if (0 == nhits)
      return product;
    int ev = ++evId;
    int threadsPerBlock = 256;
    int blocks = (nhits + threadsPerBlock - 1) / threadsPerBlock;
    doZero<<<blocks, threadsPerBlock, 0, stream>>>(nhits, csl);
    cudaCheck(cudaGetLastError());
    blocks = (ndigis + threadsPerBlock - 1) / threadsPerBlock;
    simLink<<<blocks, threadsPerBlock, 0, stream>>>(dd.view(), ndigis, hh.view(), csl, nlinks);
    cudaCheck(cudaGetLastError());
    if (doDump) {
      cudaStreamSynchronize(stream);  // flush previous printf
      // one line == 200B so each kernel can print only 5K lines....
      blocks = 16;
      for (int first = 0; first < int(nhits); first += blocks * threadsPerBlock) {
        dumpLink<<<blocks, threadsPerBlock, 0, stream>>>(first, ev, hh.view(), nhits, csl);
        cudaCheck(cudaGetLastError());
        cudaStreamSynchronize(stream);
      }
    }
    cudaCheck(cudaGetLastError());
    return product;
  }
}  // namespace clusterSLOnGPU
|
b4e57c0591d70a0fe793742548b15a0a51741771.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: hhchen@openailab.com
*/
#include "cuda_executor.hpp"
extern "C"
{
#include "clip_param.h"
#include "graph/tensor.h"
#include "operator/op.h"
#include "utility/log.h"
}
// Identity copy kernel: inference-time dropout simply forwards the input.
// One thread per element; threads with an index past N do nothing.
__global__ void dropout(float *y, float *x, int N)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= N)
        return;
    y[tid] = x[tid];
}
// Launch the element-wise copy kernel for a DROPOUT node: looks up the
// node's input/output GPU buffers in gpu_addr_map and covers every output
// element with one thread.
void dropout_gpu_kernel(struct graph* ir_graph, struct node* ir_node, dict_uint2voidx gpu_addr_map)
{
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    /* init grid and block: 1024 threads per block, enough blocks to cover
       elem_num elements (the +bs-1 ceiling combined with ceil() only ever
       rounds up further, which is harmless). */
    int bs = 1024;
    int s = ceil((output_tensor->elem_num + bs - 1.) / bs);
    dim3 grid = dim3(s);
    hipLaunchKernelGGL(( dropout), dim3(grid), dim3(bs), 0, 0, (float*)gpu_addr_map[output_tensor->index], (float*)gpu_addr_map[input_tensor->index], output_tensor->elem_num);
}
// Register the DROPOUT op with the CUDA engine: run the kernel once now
// and queue the same call in this->ops for replay on later graph runs.
// NOTE(review): the immediate dropout_gpu_kernel() call means the op also
// executes at graph-build time — presumably a warm-up; confirm upstream.
void CUDAEngine::AddDropoutNode(struct graph* ir_graph, struct node* ir_node)
{
    TLOG_INFO("Tengine GPU: Support OP(%d) OP_DROPOUT.\n", ir_node->index);
    dropout_gpu_kernel(ir_graph, ir_node, this->gpu_addr_map);
    this->ops.push_back(std::bind(&dropout_gpu_kernel, ir_graph, ir_node, this->gpu_addr_map));
}
| b4e57c0591d70a0fe793742548b15a0a51741771.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: hhchen@openailab.com
*/
#include "cuda_executor.hpp"
extern "C"
{
#include "clip_param.h"
#include "graph/tensor.h"
#include "operator/op.h"
#include "utility/log.h"
}
// Identity copy kernel: inference-time dropout simply forwards the input.
// One thread per element; threads with an index past N do nothing.
__global__ void dropout(float *y, float *x, int N)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= N)
        return;
    y[tid] = x[tid];
}
// Launch the element-wise copy kernel for a DROPOUT node: looks up the
// node's input/output GPU buffers in gpu_addr_map and covers every output
// element with one thread.
void dropout_gpu_kernel(struct graph* ir_graph, struct node* ir_node, dict_uint2voidx gpu_addr_map)
{
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    /* init grid and block: 1024 threads per block, enough blocks to cover
       elem_num elements (the +bs-1 ceiling combined with ceil() only ever
       rounds up further, which is harmless). */
    int bs = 1024;
    int s = ceil((output_tensor->elem_num + bs - 1.) / bs);
    dim3 grid = dim3(s);
    dropout<<<grid, bs>>>((float*)gpu_addr_map[output_tensor->index], (float*)gpu_addr_map[input_tensor->index], output_tensor->elem_num);
}
}
// Register the DROPOUT op with the CUDA engine: run the kernel once now
// and queue the same call in this->ops for replay on later graph runs.
// NOTE(review): the immediate dropout_gpu_kernel() call means the op also
// executes at graph-build time — presumably a warm-up; confirm upstream.
void CUDAEngine::AddDropoutNode(struct graph* ir_graph, struct node* ir_node)
{
    TLOG_INFO("Tengine GPU: Support OP(%d) OP_DROPOUT.\n", ir_node->index);
    dropout_gpu_kernel(ir_graph, ir_node, this->gpu_addr_map);
    this->ops.push_back(std::bind(&dropout_gpu_kernel, ir_graph, ir_node, this->gpu_addr_map));
}
|
9b201e494570e806bc58b58431d66448ba6a7f70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "mergeSort.h"
#include "mergeSortKernels.h"
#include "mergeSortSerial.h"
#include "timing.h"
#define ROLLMAX 10001
// Entry point: ./mergeSort numInts blockWidth [p]
//   numInts    - number of key/value pairs to sort (multiple of 2*blockWidth)
//   blockWidth - CUDA block width; 0 selects the serial CPU sort
//   p          - optional: print arrays before/after sorting
// Fix: 'runCpu' was only assigned when blockWidth == 0 and then read
// unconditionally — reading an uninitialized variable (undefined behavior)
// on every GPU run. It is now zero-initialized.
int main(int argc, char *argv[])
{
  hipDeviceReset();
  int shouldPrint = 0;
  // to run this program use:
  // ./mergeSort numInts blockWidth p
  if(argc < 3 || argc > 4)
  {
    usage();
    return 1;
  } else if (argc == 3) {
    shouldPrint = 0;
  } else if (argv[3][0]=='p') {
    shouldPrint = 1;
  } else {
    usage();
    return 1;
  }
  unsigned int numInts = atoi(argv[1]);
  unsigned int blockWidth = atoi(argv[2]);
  // Zero-initialized: blockWidth == 0 requests the CPU path.
  unsigned int runCpu = 0;
  if(!blockWidth) {
    runCpu = 1;
  }
  if(blockWidth > 0 && numInts % (blockWidth*2) != 0) {
    printf("numInts must be a multiple of blockWidth*2\n");
    return -1;
  }
  size_t bytes = numInts * sizeof(unsigned int);
  srand(time(NULL));
  unsigned int *h_key, *h_val, *h_dstVal, *h_dstKey;
  unsigned int *d_dstKey, *d_dstVal, *d_srcKey, *d_srcVal;
  // allocate host memory
  h_key = (unsigned int*) calloc(numInts, sizeof(unsigned int));
  h_val = (unsigned int*) calloc(numInts, sizeof(unsigned int));
  h_dstKey = (unsigned int*) calloc(numInts, sizeof(unsigned int));
  h_dstVal = (unsigned int*) calloc(numInts, sizeof(unsigned int));
  if(!h_key || !h_val || !h_dstKey || !h_dstVal )
  {
    printf("Host Memory allocation failed\n");
    exit(-1);
  }
  // fill host source key and value arrays (values track original indices)
  fillKeyArrayRandom(h_key, numInts);
  fillValArray(h_val, numInts);
  if(!runCpu){
    // allocate device memory
    hipMalloc((void**) &d_srcKey, bytes);
    hipMalloc((void**) &d_srcVal, bytes);
    hipMalloc((void**) &d_dstKey, bytes);
    hipMalloc((void**) &d_dstVal, bytes);
    if(!d_dstKey || !d_dstVal || !d_srcKey || !d_srcVal )
    {
      printf("Device Memory allocation failed\n");
      exit(-1);
    }
  }
  // print the unsorted input if requested
  if(shouldPrint) {
    printArrays(h_key, h_val, numInts);
  }
  if(!runCpu){
    // copy memory from the host to the device source arrays
    hipMemcpy(d_srcKey, h_key, bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_srcVal, h_val, bytes, hipMemcpyHostToDevice);
  }
  // nothing to sort for fewer than 2 elements
  if (numInts < 2)
  {
    return -1;
  }
  if(!runCpu){
    //unsigned int batchSize = 1;
    // each block sorts one tile of 2*blockWidth elements in shared memory
    unsigned int tileWidth = 2*blockWidth;
    // 2 elements per thread in the block,
    unsigned int memsize = tileWidth * sizeof(unsigned int) * 2;
    unsigned int numTotalBlocks = ceil((float)numInts / tileWidth);
    // split across grid y when x would exceed the 65535 block limit
    unsigned int numBlocksy = ceil((float)numTotalBlocks / 65535);
    unsigned int numBlocksx = ceil((float)numTotalBlocks / numBlocksy);
    // set up the dim3's to define grid and block size
    const dim3 blockSize(blockWidth, 1, 1);
    const dim3 numBlocks(numBlocksx, numBlocksy, 1);
    // time the kernel launches using CUDA events
    hipEvent_t launch_begin, launch_end;
    hipEventCreate(&launch_begin);
    hipEventCreate(&launch_end);
    hipEventRecord(launch_begin,0);
    hipLaunchKernelGGL(( mergeSortSharedKernel), dim3(numBlocks), dim3(blockSize), memsize, 0, d_dstKey, d_dstVal, d_srcKey, d_srcVal, tileWidth, numInts, 1);
    // synchronize the device after the kernel call
    hipDeviceSynchronize();
    hipMemcpy(h_dstKey, d_dstKey, bytes, hipMemcpyDeviceToHost);
    hipMemcpy(h_dstVal, d_dstVal, bytes, hipMemcpyDeviceToHost);
    // print the results of the per-tile sort
    if(shouldPrint) {
      printArrays(h_dstKey, h_dstVal, numInts);
    }
    // swap the addresses of the input and output pointers
    // for each merge pass: merge sorted runs of 'stride' elements
    unsigned int *tempKey, *tempVal;
    for( unsigned int stride = tileWidth; stride < numInts; stride *= 2 ) {
      tempKey = d_dstKey;
      tempVal = d_dstVal;
      d_dstKey = d_srcKey;
      d_dstVal = d_srcVal;
      d_srcKey = tempKey;
      d_srcVal = tempVal;
      // naive tile merge kernel
      hipLaunchKernelGGL(( naiveMergeSortTileKernel), dim3(numBlocks), dim3(blockSize), 0, 0, d_dstKey, d_dstVal, d_srcKey, d_srcVal, numInts, stride, 1);
      // synchronize the device after the kernel call
      hipDeviceSynchronize();
    }
    // record end time and time elapsed
    hipEventRecord(launch_end,0);
    hipEventSynchronize(launch_end);
    // measure the time spent in the kernel
    float time = 0;
    hipEventElapsedTime(&time, launch_begin, launch_end);
    printf("GPU merge sort ran in %f milliseconds\n", time);
    // copy the results from the device to the host
    hipMemcpy(h_dstKey, d_dstKey, bytes, hipMemcpyDeviceToHost);
    hipMemcpy(h_dstVal, d_dstVal, bytes, hipMemcpyDeviceToHost);
    // print the sorted results if requested
    if(shouldPrint) {
      printArrays(h_dstKey, h_dstVal, numInts);
    }
    // free device memory
    hipFree(d_srcKey);
    hipFree(d_srcVal);
    hipFree(d_dstKey);
    hipFree(d_dstVal);
  }
  // serial merge sort
  if(runCpu){
    double then = currentTime();
    partition(&h_key, &h_val, 0, numInts-1);
    double now = currentTime();
    float time = 0;
    time = (now - then) * 1000;
    printf("CPU code executed in %f milliseconds\n", time);
    if(shouldPrint) {
      printArrays(h_key, h_val, numInts);
    }
  }
  // free host memory
  free(h_val);
  free(h_key);
  free(h_dstKey);
  free(h_dstVal);
  return 0;
}
// fill an array with randomly generated unsigned int values
void fillKeyArrayRandom(unsigned int *keys, unsigned int numInts)
{
for(int i = 0; i < numInts; i++){
keys[i] = rand() % ROLLMAX;
}
}
// fill the Value array with the index of the key array to which it coresponds
void fillValArray(unsigned int *vals, unsigned int numInts)
{
for(int i = 0; i < numInts; i++) {
vals[i] = i;
}
}
// print the given arrays of keys and values of size size to the console
void printArrays(unsigned int *keys, unsigned int *vals, unsigned int size)
{
unsigned int i;
printf("keys: ");
for(i = 0; i < size; i++) {
printf("%u ", keys[i]);
if( i % 32 == 0 && i > 0){
printf("\n");
}
}
printf("\nvals: ");
for(i = 0; i < size; i++) {
printf("%u ", vals[i]);
if( i % 32 == 0 && i > 0){
printf("\n");
}
}
printf("\n\n");
}
// print the required args to the command line
void usage()
{
printf("Usage: ./progName numInts blockWidth p\n");
}
| 9b201e494570e806bc58b58431d66448ba6a7f70.cu |
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "mergeSort.h"
#include "mergeSortKernels.h"
#include "mergeSortSerial.h"
#include "timing.h"
#define ROLLMAX 10001
int main(int argc, char *argv[])
{
cudaDeviceReset();
int shouldPrint = 0;
// to run this program use:
// ./mergeSort numInts blockWidth p
if(argc < 3 || argc > 4)
{
usage();
return 1;
} else if (argc == 3) {
shouldPrint = 0;
} else if (argv[3][0]=='p') {
shouldPrint = 1;
} else {
usage();
return 1;
}
unsigned int numInts = atoi(argv[1]);
unsigned int blockWidth = atoi(argv[2]);
unsigned int runCpu;
if(!blockWidth) {
runCpu = 1;
}
if(blockWidth > 0 && numInts % (blockWidth*2) != 0) {
printf("numInts must be a multiple of blockWidth*2\n");
return -1;
}
size_t bytes = numInts * sizeof(unsigned int);
srand(time(NULL));
unsigned int *h_key, *h_val, *h_dstVal, *h_dstKey;
unsigned int *d_dstKey, *d_dstVal, *d_srcKey, *d_srcVal;
// allocate host memory
h_key = (unsigned int*) calloc(numInts, sizeof(unsigned int));
h_val = (unsigned int*) calloc(numInts, sizeof(unsigned int));
h_dstKey = (unsigned int*) calloc(numInts, sizeof(unsigned int));
h_dstVal = (unsigned int*) calloc(numInts, sizeof(unsigned int));
if(!h_key || !h_val || !h_dstKey || !h_dstVal )
{
printf("Host Memory allocation failed\n");
exit(-1);
}
// allocate host source key and value arrays
fillKeyArrayRandom(h_key, numInts);
fillValArray(h_val, numInts);
if(!runCpu){
// allocate device memory
cudaMalloc((void**) &d_srcKey, bytes);
cudaMalloc((void**) &d_srcVal, bytes);
cudaMalloc((void**) &d_dstKey, bytes);
cudaMalloc((void**) &d_dstVal, bytes);
if(!d_dstKey || !d_dstVal || !d_srcKey || !d_srcVal )
{
printf("Device Memory allocation failed\n");
exit(-1);
}
}
// print the sorted results if requested
if(shouldPrint) {
printArrays(h_key, h_val, numInts);
}
if(!runCpu){
// copy memory from the host to the device source arrays
cudaMemcpy(d_srcKey, h_key, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_srcVal, h_val, bytes, cudaMemcpyHostToDevice);
}
// call the MSITR kernel
if (numInts < 2)
{
return -1;
}
if(!runCpu){
//unsigned int batchSize = 1;
unsigned int tileWidth = 2*blockWidth;
// 2 elements per thread in the block,
unsigned int memsize = tileWidth * sizeof(unsigned int) * 2;
unsigned int numTotalBlocks = ceil((float)numInts / tileWidth);
unsigned int numBlocksy = ceil((float)numTotalBlocks / 65535);
unsigned int numBlocksx = ceil((float)numTotalBlocks / numBlocksy);
// set up the dim3's to define grid and block size
const dim3 blockSize(blockWidth, 1, 1);
const dim3 numBlocks(numBlocksx, numBlocksy, 1);
// time the kernel launches using CUDA events
cudaEvent_t launch_begin, launch_end;
cudaEventCreate(&launch_begin);
cudaEventCreate(&launch_end);
cudaEventRecord(launch_begin,0);
mergeSortSharedKernel<<<numBlocks, blockSize, memsize>>>(d_dstKey, d_dstVal, d_srcKey, d_srcVal, tileWidth, numInts, 1);
// synchronize the device after the kernel call
cudaDeviceSynchronize();
cudaMemcpy(h_dstKey, d_dstKey, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(h_dstVal, d_dstVal, bytes, cudaMemcpyDeviceToHost);
// print the results of the BSITR kernel
if(shouldPrint) {
printArrays(h_dstKey, h_dstVal, numInts);
}
// swap the addresses of the input and output pointers
// for the 2nd kernel call
unsigned int *tempKey, *tempVal;
for( unsigned int stride = tileWidth; stride < numInts; stride *= 2 ) {
tempKey = d_dstKey;
tempVal = d_dstVal;
d_dstKey = d_srcKey;
d_dstVal = d_srcVal;
d_srcKey = tempKey;
d_srcVal = tempVal;
// naive tile merge kernel
naiveMergeSortTileKernel<<<numBlocks, blockSize>>>(d_dstKey, d_dstVal, d_srcKey, d_srcVal, numInts, stride, 1);
// synchronize the device after the kernel call
cudaDeviceSynchronize();
}
// record end time and time elapsed
cudaEventRecord(launch_end,0);
cudaEventSynchronize(launch_end);
// measure the time spent in the kernel
float time = 0;
cudaEventElapsedTime(&time, launch_begin, launch_end);
printf("GPU merge sort ran in %f milliseconds\n", time);
// copy the results from the device to the host
cudaMemcpy(h_dstKey, d_dstKey, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(h_dstVal, d_dstVal, bytes, cudaMemcpyDeviceToHost);
// print the sorted results if requested
if(shouldPrint) {
printArrays(h_dstKey, h_dstVal, numInts);
}
// free device memory
cudaFree(d_srcKey);
cudaFree(d_srcVal);
cudaFree(d_dstKey);
cudaFree(d_dstVal);
}
// serial merge sort
if(runCpu){
double then = currentTime();
partition(&h_key, &h_val, 0, numInts-1);
double now = currentTime();
float time = 0;
time = (now - then) * 1000;
printf("CPU code executed in %f milliseconds\n", time);
if(shouldPrint) {
printArrays(h_key, h_val, numInts);
}
}
// free host memory
free(h_val);
free(h_key);
free(h_dstKey);
free(h_dstVal);
return 0;
}
// fill an array with randomly generated unsigned int values
void fillKeyArrayRandom(unsigned int *keys, unsigned int numInts)
{
for(int i = 0; i < numInts; i++){
keys[i] = rand() % ROLLMAX;
}
}
// fill the Value array with the index of the key array to which it coresponds
void fillValArray(unsigned int *vals, unsigned int numInts)
{
for(int i = 0; i < numInts; i++) {
vals[i] = i;
}
}
// print the given arrays of keys and values of size size to the console
void printArrays(unsigned int *keys, unsigned int *vals, unsigned int size)
{
unsigned int i;
printf("keys: ");
for(i = 0; i < size; i++) {
printf("%u ", keys[i]);
if( i % 32 == 0 && i > 0){
printf("\n");
}
}
printf("\nvals: ");
for(i = 0; i < size; i++) {
printf("%u ", vals[i]);
if( i % 32 == 0 && i > 0){
printf("\n");
}
}
printf("\n\n");
}
// print the required args to the command line
void usage()
{
printf("Usage: ./progName numInts blockWidth p\n");
}
|
e273e6f3e82c8977947dcd05844c4b4d04159e25.hip | // !!! This is a file automatically generated by hipify!!!
/* This program is for the matrix multiplication using cublas lib in column major format (v1)
Input Data : From Host to Device,
Operation : on Device,
Output Result : From Device to Host
Coded by: PUNEET DHEER*/
#include <thrust/device_vector.h>
#include <rocblas.h>
#include <iostream>
#include <cassert>
#include <cmath> //fabs
#include <ctime>
using namespace std;
// Fortran-style indexing column-major
int cm(int column, int row, int nRows)
{
return column*nRows + row;
}
void fill_mat_A(double *mat, int rows, int columns)
{
for (size_t i = 0; i < rows; i++)
{
for (size_t j = 0; j < columns; j++)
{
//mat[cm(j, i, rows)] = (i * 3) + (j * 2);
mat[cm(j, i, rows)] = rand();
}
}
}
void fill_mat_B(double *mat, int rows, int columns)
{
for (size_t i = 0; i < rows; i++)
{
for (size_t j = 0; j < columns; j++)
{
//mat[cm(j, i, rows)] = (i * 5) + (j * 9);
mat[cm(j, i, rows)] = rand();
}
}
}
void show_mat(double *mat, int rows, int columns)
{
cout << endl;
for (size_t i = 0; i < rows; i++)
{
for (size_t j = 0; j < columns; j++)
{
cout << mat[cm(j, i, rows)] << " ";
cout << "[" << cm(j, i, rows) << "]" << " ";
cout << "[" << &mat[cm(j, i, rows)] << "]" << " ";
}
cout << "\n";
}
cout << "\n";
}
void cross_check_result(double *A, double *B, double *C, int rowsA, int columnsB, int rowsB)
{
cout << endl;
double calc;
int count = 1;
int total = rowsA*columnsB;
for (size_t i = 0; i < rowsA; i++)
{
for (size_t j = 0; j < columnsB; j++)
{
calc = 0;
for (size_t x = 0; x < rowsB; x++)
{
calc += A[x*rowsA + i] * B[j*rowsB + x];
//cout << A[x*rowsA + i] << " " << B[j*rowsB + x] << " "<< calc <<"\n";
}
cout << "\r...CHECKING...Element No: " << count << "/" << total;
count++;
assert(fabs(C[j*rowsA + i] - calc) <= 0.000001);
/*cout << endl;
cout << "-----------------------------------------------" << endl;
cout << "[ Diff: "<<fabs(C[j*rowsA + i] - calc)<< " ] " <<"[ Resultant GPU Mat: "<< C[j*rowsA + i] <<" ] "<< "[ Resultant CPU Mat: "<<calc<<" ] "<< "\n";
cout << "-----------------------------------------------" << endl;*/
}
}
}
int main()
{
int row_A = 100;
int col_A = 100;
int row_B = col_A;
int col_B = 100;
int row_C = row_A;
int col_C = col_B;
clock_t start, end, tstart, tend;
double secs, msecs;
cout << fixed;
tstart = clock();
// Using "thrust"
cout << "Initialization of Host Matrices... ";
start = clock();
thrust::host_vector<double> h_A(row_A * col_A); // on the host side (CPU)
thrust::host_vector<double> h_B(row_B * col_B); // on the host side (CPU)
thrust::host_vector<double> h_C(row_C * col_C); // on the host side (CPU)
end = clock();
secs = (double(end - start)) / CLOCKS_PER_SEC;
msecs= (double(end - start)) / (CLOCKS_PER_SEC/1000);
cout << "...Done..." << " Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
// fill the matrices on the host with some simple values for further cross check
cout << "Filling Host Matrices... ";
start = clock();
fill_mat_A(thrust::raw_pointer_cast(&h_A[0]), row_A, col_A);
fill_mat_B(thrust::raw_pointer_cast(&h_B[0]), row_B, col_B);
//show_mat(thrust::raw_pointer_cast(&h_A[0]), row_A, col_A);
//show_mat(thrust::raw_pointer_cast(&h_B[0]), row_B, col_B);
end = clock();
secs = (double(end - start)) / CLOCKS_PER_SEC;
msecs = (double(end - start)) / (CLOCKS_PER_SEC / 1000);
cout << "...Done..." << " Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
// move the data from host to device
cout << "Moving Host Matrices to Device... ";
start = clock();
thrust::device_vector<double> d_A = h_A; // on the device side (GPU)
thrust::device_vector<double> d_B = h_B; // on the device side (GPU)
thrust::device_vector<double> d_C(row_C * col_C); // on the device side (GPU)
end = clock();
secs = (double(end - start)) / CLOCKS_PER_SEC;
msecs = (double(end - start)) / (CLOCKS_PER_SEC / 1000);
cout << "...Done..." << " Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
// Initialize CUBLAS
hipblasHandle_t handle;
hipblasStatus_t status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS)
{
cerr << "!!! CUBLAS initialization error !!!\n";
}
double alpha = 1.0f;
double beta = 0.0f;
// C = (alpha*A) * B + (beta*C)
// A(m*k)*B(k*n) = C(m*n)
/* hipblasDgemm(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const double *alpha,
const double *A, int lda,
const double *B, int ldb,
const double *beta,
double *C, int ldc)*/
cout << "Multiplication is running on Device... ";
start = clock();
status = hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, row_A, col_B, col_A, &alpha,
thrust::raw_pointer_cast(&d_A[0]), row_A,
thrust::raw_pointer_cast(&d_B[0]), row_B,
&beta,
thrust::raw_pointer_cast(&d_C[0]), row_C);
end = clock();
secs = (double(end - start)) / CLOCKS_PER_SEC;
msecs = (double(end - start)) / (CLOCKS_PER_SEC / 1000);
cout << "...Done..." << " Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
if (status != HIPBLAS_STATUS_SUCCESS)
{
cerr << "!!! kernel execution error !!!\n";
}
// move the result from device to host
cout << "Moving Result from Device to Host... ";
start = clock();
h_C = d_C;
end = clock();
secs = (double(end - start)) / CLOCKS_PER_SEC;
msecs = (double(end - start)) / (CLOCKS_PER_SEC / 1000);
cout << "...Done..." << " Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
//show_mat(thrust::raw_pointer_cast(&h_C[0]), row_C, col_C);
//cross check the result
cout << "SANITY check is running on Host... ";
start = clock();
cross_check_result(thrust::raw_pointer_cast(&h_A[0]), thrust::raw_pointer_cast(&h_B[0]), thrust::raw_pointer_cast(&h_C[0]), row_A, col_B, row_B);
end = clock();
secs = (double(end - start)) / CLOCKS_PER_SEC;
msecs = (double(end - start)) / (CLOCKS_PER_SEC / 1000);
cout << "...Done..." << " Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
status = hipblasDestroy(handle);
if (status != HIPBLAS_STATUS_SUCCESS)
{
cerr << "!!! shutdown error !!!\n";
}
tend = clock();
secs = (double(tend - tstart)) / CLOCKS_PER_SEC;
msecs = (double(tend - tstart)) / (CLOCKS_PER_SEC / 1000);
cout << "Total Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
return 0;
} | e273e6f3e82c8977947dcd05844c4b4d04159e25.cu | /* This program is for the matrix multiplication using cublas lib in column major format (v1)
Input Data : From Host to Device,
Operation : on Device,
Output Result : From Device to Host
Coded by: PUNEET DHEER*/
#include <thrust/device_vector.h>
#include <cublas_v2.h>
#include <iostream>
#include <cassert>
#include <cmath> //fabs
#include <ctime>
using namespace std;
// Fortran-style indexing column-major
int cm(int column, int row, int nRows)
{
return column*nRows + row;
}
void fill_mat_A(double *mat, int rows, int columns)
{
for (size_t i = 0; i < rows; i++)
{
for (size_t j = 0; j < columns; j++)
{
//mat[cm(j, i, rows)] = (i * 3) + (j * 2);
mat[cm(j, i, rows)] = rand();
}
}
}
void fill_mat_B(double *mat, int rows, int columns)
{
for (size_t i = 0; i < rows; i++)
{
for (size_t j = 0; j < columns; j++)
{
//mat[cm(j, i, rows)] = (i * 5) + (j * 9);
mat[cm(j, i, rows)] = rand();
}
}
}
void show_mat(double *mat, int rows, int columns)
{
cout << endl;
for (size_t i = 0; i < rows; i++)
{
for (size_t j = 0; j < columns; j++)
{
cout << mat[cm(j, i, rows)] << " ";
cout << "[" << cm(j, i, rows) << "]" << " ";
cout << "[" << &mat[cm(j, i, rows)] << "]" << " ";
}
cout << "\n";
}
cout << "\n";
}
void cross_check_result(double *A, double *B, double *C, int rowsA, int columnsB, int rowsB)
{
cout << endl;
double calc;
int count = 1;
int total = rowsA*columnsB;
for (size_t i = 0; i < rowsA; i++)
{
for (size_t j = 0; j < columnsB; j++)
{
calc = 0;
for (size_t x = 0; x < rowsB; x++)
{
calc += A[x*rowsA + i] * B[j*rowsB + x];
//cout << A[x*rowsA + i] << " " << B[j*rowsB + x] << " "<< calc <<"\n";
}
cout << "\r...CHECKING...Element No: " << count << "/" << total;
count++;
assert(fabs(C[j*rowsA + i] - calc) <= 0.000001);
/*cout << endl;
cout << "-----------------------------------------------" << endl;
cout << "[ Diff: "<<fabs(C[j*rowsA + i] - calc)<< " ] " <<"[ Resultant GPU Mat: "<< C[j*rowsA + i] <<" ] "<< "[ Resultant CPU Mat: "<<calc<<" ] "<< "\n";
cout << "-----------------------------------------------" << endl;*/
}
}
}
int main()
{
int row_A = 100;
int col_A = 100;
int row_B = col_A;
int col_B = 100;
int row_C = row_A;
int col_C = col_B;
clock_t start, end, tstart, tend;
double secs, msecs;
cout << fixed;
tstart = clock();
// Using "thrust"
cout << "Initialization of Host Matrices... ";
start = clock();
thrust::host_vector<double> h_A(row_A * col_A); // on the host side (CPU)
thrust::host_vector<double> h_B(row_B * col_B); // on the host side (CPU)
thrust::host_vector<double> h_C(row_C * col_C); // on the host side (CPU)
end = clock();
secs = (double(end - start)) / CLOCKS_PER_SEC;
msecs= (double(end - start)) / (CLOCKS_PER_SEC/1000);
cout << "...Done..." << " Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
// fill the matrices on the host with some simple values for further cross check
cout << "Filling Host Matrices... ";
start = clock();
fill_mat_A(thrust::raw_pointer_cast(&h_A[0]), row_A, col_A);
fill_mat_B(thrust::raw_pointer_cast(&h_B[0]), row_B, col_B);
//show_mat(thrust::raw_pointer_cast(&h_A[0]), row_A, col_A);
//show_mat(thrust::raw_pointer_cast(&h_B[0]), row_B, col_B);
end = clock();
secs = (double(end - start)) / CLOCKS_PER_SEC;
msecs = (double(end - start)) / (CLOCKS_PER_SEC / 1000);
cout << "...Done..." << " Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
// move the data from host to device
cout << "Moving Host Matrices to Device... ";
start = clock();
thrust::device_vector<double> d_A = h_A; // on the device side (GPU)
thrust::device_vector<double> d_B = h_B; // on the device side (GPU)
thrust::device_vector<double> d_C(row_C * col_C); // on the device side (GPU)
end = clock();
secs = (double(end - start)) / CLOCKS_PER_SEC;
msecs = (double(end - start)) / (CLOCKS_PER_SEC / 1000);
cout << "...Done..." << " Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
// Initialize CUBLAS
cublasHandle_t handle;
cublasStatus_t status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS)
{
cerr << "!!! CUBLAS initialization error !!!\n";
}
double alpha = 1.0f;
double beta = 0.0f;
// C = (alpha*A) * B + (beta*C)
// A(m*k)*B(k*n) = C(m*n)
/* cublasDgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k,
const double *alpha,
const double *A, int lda,
const double *B, int ldb,
const double *beta,
double *C, int ldc)*/
cout << "Multiplication is running on Device... ";
start = clock();
status = cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, row_A, col_B, col_A, &alpha,
thrust::raw_pointer_cast(&d_A[0]), row_A,
thrust::raw_pointer_cast(&d_B[0]), row_B,
&beta,
thrust::raw_pointer_cast(&d_C[0]), row_C);
end = clock();
secs = (double(end - start)) / CLOCKS_PER_SEC;
msecs = (double(end - start)) / (CLOCKS_PER_SEC / 1000);
cout << "...Done..." << " Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
if (status != CUBLAS_STATUS_SUCCESS)
{
cerr << "!!! kernel execution error !!!\n";
}
// move the result from device to host
cout << "Moving Result from Device to Host... ";
start = clock();
h_C = d_C;
end = clock();
secs = (double(end - start)) / CLOCKS_PER_SEC;
msecs = (double(end - start)) / (CLOCKS_PER_SEC / 1000);
cout << "...Done..." << " Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
//show_mat(thrust::raw_pointer_cast(&h_C[0]), row_C, col_C);
//cross check the result
cout << "SANITY check is running on Host... ";
start = clock();
cross_check_result(thrust::raw_pointer_cast(&h_A[0]), thrust::raw_pointer_cast(&h_B[0]), thrust::raw_pointer_cast(&h_C[0]), row_A, col_B, row_B);
end = clock();
secs = (double(end - start)) / CLOCKS_PER_SEC;
msecs = (double(end - start)) / (CLOCKS_PER_SEC / 1000);
cout << "...Done..." << " Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS)
{
cerr << "!!! shutdown error !!!\n";
}
tend = clock();
secs = (double(tend - tstart)) / CLOCKS_PER_SEC;
msecs = (double(tend - tstart)) / (CLOCKS_PER_SEC / 1000);
cout << "Total Execution Time: " << secs << " secs, " << msecs << " msecs" << "\n" << "\n";
return 0;
} |
be729d5b05711ef243f6af3832e19946c81f136f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include<stdio.h>
#include <math.h>
#define STRIDE_64K 65536
__global__ void init(int n, float *x, float *y) {
int lane_id = threadIdx.x & 31;
size_t warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
size_t warps_per_grid = (blockDim.x * gridDim.x) >> 5;
size_t warp_total = ((sizeof(float)*n) + STRIDE_64K-1) / STRIDE_64K;
// if(blockIdx.x==0 && threadIdx.x==0) {
// printf("\n TId[%d] ", threadIdx.x);
// printf(" WId[%u] ", warp_id);
// printf(" LId[%u] ", lane_id);
// printf(" WperG[%u] ", warps_per_grid);
// printf(" wTot[%u] ", warp_total);
// printf(" rep[%d] ", STRIDE_64K/sizeof(float)/32);
// }
for( ; warp_id < warp_total; warp_id += warps_per_grid) {
#pragma unroll
for(int rep = 0; rep < STRIDE_64K/sizeof(float)/32; rep++) {
size_t ind = warp_id * STRIDE_64K/sizeof(float) + rep * 32 + lane_id;
if (ind < n) {
x[ind] = 1.0f;
// if(blockIdx.x==0 && threadIdx.x==0) {
// printf(" \nind[%d] ", ind);
// }
y[ind] = 2.0f;
}
}
}
}
// CUDA kernel to add elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory -- accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// Launch kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
size_t warp_total = ((sizeof(float)*N) + STRIDE_64K-1) / STRIDE_64K;
int numBlocksInit = (warp_total*32) / blockSize;
hipLaunchKernelGGL(( init), dim3(numBlocksInit), dim3(blockSize), 0, 0, N, x, y);
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| be729d5b05711ef243f6af3832e19946c81f136f.cu | #include <iostream>
#include<stdio.h>
#include <math.h>
#define STRIDE_64K 65536
__global__ void init(int n, float *x, float *y) {
int lane_id = threadIdx.x & 31;
size_t warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
size_t warps_per_grid = (blockDim.x * gridDim.x) >> 5;
size_t warp_total = ((sizeof(float)*n) + STRIDE_64K-1) / STRIDE_64K;
// if(blockIdx.x==0 && threadIdx.x==0) {
// printf("\n TId[%d] ", threadIdx.x);
// printf(" WId[%u] ", warp_id);
// printf(" LId[%u] ", lane_id);
// printf(" WperG[%u] ", warps_per_grid);
// printf(" wTot[%u] ", warp_total);
// printf(" rep[%d] ", STRIDE_64K/sizeof(float)/32);
// }
for( ; warp_id < warp_total; warp_id += warps_per_grid) {
#pragma unroll
for(int rep = 0; rep < STRIDE_64K/sizeof(float)/32; rep++) {
size_t ind = warp_id * STRIDE_64K/sizeof(float) + rep * 32 + lane_id;
if (ind < n) {
x[ind] = 1.0f;
// if(blockIdx.x==0 && threadIdx.x==0) {
// printf(" \nind[%d] ", ind);
// }
y[ind] = 2.0f;
}
}
}
}
// CUDA kernel to add elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory -- accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// Launch kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
size_t warp_total = ((sizeof(float)*N) + STRIDE_64K-1) / STRIDE_64K;
int numBlocksInit = (warp_total*32) / blockSize;
init<<<numBlocksInit, blockSize>>>(N, x, y);
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
a8b9cbfb187567e106f64a6d4ae6d13ab15fc24f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
// another simple test kernel
__global__ void multiplyBy2(int *size, int *in, int *out) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < *size) {
out[ix] = in[ix] * 2;
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void sum(int *size, int *input, int *output, int *stage, int *totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
// if (ix == 0) printf("size: %d stage : %d totalStages : %d \n",*size, *stage, *totalStages);
if (*stage == 0) {
if (ix < *size) {
assert(jump == blockDim.x * gridDim.x);
int result = 0;
for (long i = ix; i < *size; i += jump) {
result += input[i];
}
input[ix] = result;
}
} else if (ix == 0) {
const long count = (*size < (long)jump) ? *size : (long)jump;
int result = 0;
for (long i = 0; i < count; ++i) {
result += input[i];
}
output[0] = result;
}
}
| a8b9cbfb187567e106f64a6d4ae6d13ab15fc24f.cu |
extern "C"
// another simple test kernel
__global__ void multiplyBy2(int *size, int *in, int *out) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < *size) {
out[ix] = in[ix] * 2;
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void sum(int *size, int *input, int *output, int *stage, int *totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
// if (ix == 0) printf("size: %d stage : %d totalStages : %d \n",*size, *stage, *totalStages);
if (*stage == 0) {
if (ix < *size) {
assert(jump == blockDim.x * gridDim.x);
int result = 0;
for (long i = ix; i < *size; i += jump) {
result += input[i];
}
input[ix] = result;
}
} else if (ix == 0) {
const long count = (*size < (long)jump) ? *size : (long)jump;
int result = 0;
for (long i = 0; i < count; ++i) {
result += input[i];
}
output[0] = result;
}
}
|
e38a5456531a84a69ab00949395a403ddec90950.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) in_d[tid] += out_d[tid] * out_x[tid];
} | e38a5456531a84a69ab00949395a403ddec90950.cu | #include "includes.h"
__global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) in_d[tid] += out_d[tid] * out_x[tid];
} |
df39df56747e8a276b48759a322a88887d6211d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<complex>
#define DEFINE_KERNEL_MACRO_TRANSPOSE(type, name)\
__global__ void dot_kernel_##name(const type *a, const type *b, type *c, int row_a, int col_a_row_b, int col_b) \
{ \
int idx = blockIdx.x * blockDim.x + threadIdx.x; \
int i; \
const int target_row = idx / col_b; \
const int target_col = idx % col_b; \
if (idx >= row_a*col_b) \
return; \
c += idx; \
*c = 0; \
a += target_row*col_a_row_b; \
b += target_col*col_b; \
for (i = 0; i < col_a_row_b; ++i, a += 1, b += 1) \
*c += *a + *b; \
}
#define DEFINE_KERNEL_MACRO(type, name)\
__global__ void dot_kernel_##name(const type *a, const type *b, type *c, int row_a, int col_a_row_b, int col_b) \
{ \
int idx = blockIdx.x * blockDim.x + threadIdx.x; \
int i; \
const int target_row = idx / col_b; \
const int target_col = idx % col_b; \
if (idx >= row_a*col_b) \
return; \
c += idx; \
*c = 0; \
a += target_row*col_a_row_b; \
b += target_col; \
for (i = 0; i < col_a_row_b; ++i, a+=1, b+=col_b) \
*c += *a + *b; \
}
#define CALL_CUDA_MACRO(type, name) \
DEFINE_KERNEL_MACRO(type, name##_) \
DEFINE_KERNEL_MACRO_TRANSPOSE(type, name##_transpose) \
void call_cuda_kernel_##name##_(int blocks, const type *a, const type *b, type *c, int row_a, int col_a_row_b, int col_b) \
hipLaunchKernelGGL(({dot_kernel_##name##_), blocks, 256>> >(a, b, c, row_a, col_a_row_b, col_b);}\
void call_cuda_kernel_##name##_transpose(int blocks, const type *a, const type *b, type *c, int row_a, int col_a_row_b, int col_b) \
({dot_kernel_##name##_)transpose, blocks, 256>> >(a, b, c, row_a, col_a_row_b, col_b);}
CALL_CUDA_MACRO(float, float)
CALL_CUDA_MACRO(double, double)
CALL_CUDA_MACRO(long long, longlong)
CALL_CUDA_MACRO(short, short)
CALL_CUDA_MACRO(int, int)
#define DEFINE_KERNEL_MACRO_COMPLEX_TRANSPOSE(type, name)\
__global__ void dot_kernel_##name(const type *a, const type *b, type *c, int row_a, int col_a_row_b, int col_b) \
{ \
int idx = blockIdx.x * blockDim.x + threadIdx.x; \
int i; \
const int target_row = idx / col_b; \
const int target_col = idx % col_b; \
if (idx >= row_a*col_b) \
return; \
c += idx*2; \
c[0] = 0; \
c[1] = 0; \
a += target_row*col_a_row_b*2; \
b += target_col*col_b*2; \
for (i = 0; i < col_a_row_b; ++i, a += 2, b += 2) \
{ \
c[0] += a[0] * b[0] - a[1] * b[1]; \
c[1] += a[1] * b[0] + a[0] * b[1]; \
} \
}
#define DEFINE_KERNEL_MACRO_COMPLEX(type, name)\
__global__ void dot_kernel_##name(const type *a, const type *b, type *c, int row_a, int col_a_row_b, int col_b) \
{ \
int idx = blockIdx.x * blockDim.x + threadIdx.x; \
int i; \
const int target_row = idx / col_b; \
const int target_col = idx % col_b; \
if (idx >= row_a*col_b) \
return; \
c += idx*2; \
c[0] = 0; \
c[1] = 0; \
a += target_row*col_a_row_b*2; \
b += target_col*2; \
for (i = 0; i < col_a_row_b; ++i, a+=2, b+=col_b*2)\
{ \
c[0] += a[0] * b[0] - a[1] * b[1]; \
c[1] += a[1] * b[0] + a[0] * b[1]; \
} \
}
#define CALL_CUDA_MACRO_COMPLEX(type, name) \
DEFINE_KERNEL_MACRO_COMPLEX(type, name##_) \
DEFINE_KERNEL_MACRO_COMPLEX_TRANSPOSE(type, name##_transpose) \
void call_cuda_kernel_##name##_(int blocks, const std::complex<type> *a, const std::complex<type> *b, std::complex<type> *c, int row_a, int col_a_row_b, int col_b) \
({dot_kernel_##name##_), blocks, 256, 0, 0, 0, reinterpret_cast<const type*>(a), reinterpret_cast<const type*>(b), reinterpret_cast<type*>(c), row_a, col_a_row_b, col_b);}\
void call_cuda_kernel_##name##_transpose(int blocks, const std::complex<type> *a, const std::complex<type> *b, std::complex<type> *c, int row_a, int col_a_row_b, int col_b) \
hipLaunchKernelGGL(({dot_kernel_##name##_transpose), dim3(blocks), dim3(256), 0, 0, reinterpret_cast<const type*>(a), reinterpret_cast<const type*>(b), reinterpret_cast<type*>(c), row_a, col_a_row_b, col_b);}
CALL_CUDA_MACRO_COMPLEX(float, complex)
CALL_CUDA_MACRO_COMPLEX(double, complexdouble)
| df39df56747e8a276b48759a322a88887d6211d1.cu | #include<complex>
#define DEFINE_KERNEL_MACRO_TRANSPOSE(type, name)\
__global__ void dot_kernel_##name(const type *a, const type *b, type *c, int row_a, int col_a_row_b, int col_b) \
{ \
int idx = blockIdx.x * blockDim.x + threadIdx.x; \
int i; \
const int target_row = idx / col_b; \
const int target_col = idx % col_b; \
if (idx >= row_a*col_b) \
return; \
c += idx; \
*c = 0; \
a += target_row*col_a_row_b; \
b += target_col*col_b; \
for (i = 0; i < col_a_row_b; ++i, a += 1, b += 1) \
*c += *a + *b; \
}
#define DEFINE_KERNEL_MACRO(type, name)\
__global__ void dot_kernel_##name(const type *a, const type *b, type *c, int row_a, int col_a_row_b, int col_b) \
{ \
int idx = blockIdx.x * blockDim.x + threadIdx.x; \
int i; \
const int target_row = idx / col_b; \
const int target_col = idx % col_b; \
if (idx >= row_a*col_b) \
return; \
c += idx; \
*c = 0; \
a += target_row*col_a_row_b; \
b += target_col; \
for (i = 0; i < col_a_row_b; ++i, a+=1, b+=col_b) \
*c += *a + *b; \
}
#define CALL_CUDA_MACRO(type, name) \
DEFINE_KERNEL_MACRO(type, name##_) \
DEFINE_KERNEL_MACRO_TRANSPOSE(type, name##_transpose) \
void call_cuda_kernel_##name##_(int blocks, const type *a, const type *b, type *c, int row_a, int col_a_row_b, int col_b) \
{dot_kernel_##name##_<<<blocks, 256>> >(a, b, c, row_a, col_a_row_b, col_b);}\
void call_cuda_kernel_##name##_transpose(int blocks, const type *a, const type *b, type *c, int row_a, int col_a_row_b, int col_b) \
{dot_kernel_##name##_transpose<<<blocks, 256>> >(a, b, c, row_a, col_a_row_b, col_b);}
CALL_CUDA_MACRO(float, float)
CALL_CUDA_MACRO(double, double)
CALL_CUDA_MACRO(long long, longlong)
CALL_CUDA_MACRO(short, short)
CALL_CUDA_MACRO(int, int)
#define DEFINE_KERNEL_MACRO_COMPLEX_TRANSPOSE(type, name)\
__global__ void dot_kernel_##name(const type *a, const type *b, type *c, int row_a, int col_a_row_b, int col_b) \
{ \
int idx = blockIdx.x * blockDim.x + threadIdx.x; \
int i; \
const int target_row = idx / col_b; \
const int target_col = idx % col_b; \
if (idx >= row_a*col_b) \
return; \
c += idx*2; \
c[0] = 0; \
c[1] = 0; \
a += target_row*col_a_row_b*2; \
b += target_col*col_b*2; \
for (i = 0; i < col_a_row_b; ++i, a += 2, b += 2) \
{ \
c[0] += a[0] * b[0] - a[1] * b[1]; \
c[1] += a[1] * b[0] + a[0] * b[1]; \
} \
}
#define DEFINE_KERNEL_MACRO_COMPLEX(type, name)\
__global__ void dot_kernel_##name(const type *a, const type *b, type *c, int row_a, int col_a_row_b, int col_b) \
{ \
int idx = blockIdx.x * blockDim.x + threadIdx.x; \
int i; \
const int target_row = idx / col_b; \
const int target_col = idx % col_b; \
if (idx >= row_a*col_b) \
return; \
c += idx*2; \
c[0] = 0; \
c[1] = 0; \
a += target_row*col_a_row_b*2; \
b += target_col*2; \
for (i = 0; i < col_a_row_b; ++i, a+=2, b+=col_b*2)\
{ \
c[0] += a[0] * b[0] - a[1] * b[1]; \
c[1] += a[1] * b[0] + a[0] * b[1]; \
} \
}
#define CALL_CUDA_MACRO_COMPLEX(type, name) \
DEFINE_KERNEL_MACRO_COMPLEX(type, name##_) \
DEFINE_KERNEL_MACRO_COMPLEX_TRANSPOSE(type, name##_transpose) \
void call_cuda_kernel_##name##_(int blocks, const std::complex<type> *a, const std::complex<type> *b, std::complex<type> *c, int row_a, int col_a_row_b, int col_b) \
{dot_kernel_##name##_<<<blocks, 256>>>(reinterpret_cast<const type*>(a), reinterpret_cast<const type*>(b), reinterpret_cast<type*>(c), row_a, col_a_row_b, col_b);}\
void call_cuda_kernel_##name##_transpose(int blocks, const std::complex<type> *a, const std::complex<type> *b, std::complex<type> *c, int row_a, int col_a_row_b, int col_b) \
{dot_kernel_##name##_transpose<<<blocks, 256>>>(reinterpret_cast<const type*>(a), reinterpret_cast<const type*>(b), reinterpret_cast<type*>(c), row_a, col_a_row_b, col_b);}
CALL_CUDA_MACRO_COMPLEX(float, complex)
CALL_CUDA_MACRO_COMPLEX(double, complexdouble)
|
c6e0ee04df1fa4054b235cf1229a15cecba23a26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
#include "helper_math.h"
__global__ void process(const cv::cuda::PtrStep<uchar3> l_src,
const cv::cuda::PtrStep<uchar3> l_src,
cv::cuda::PtrStep<uchar3> dst, int rows, int cols )
{
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dst_x < cols && dst_y < rows)
{
uchar3 val = src(dst_y, dst_x);
dst(dst_y, dst_x).x = 255-val.x;
dst(dst_y, dst_x).y = 255-val.y;
dst(dst_y, dst_x).z = 255-val.z;
}}
int divUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
void startCUDA ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst )
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( process), dim3(grid), dim3(block), 0, 0, src, dst, dst.rows, dst.cols);
}
void AnaglyphEffect(const cv::cuda::GpuMat& l_src, const cv::cuda::GpuMat& l_src, cv::cuda::GpuMat& dst){
const dim3 block(32, 8);
const dim3 grid(divU(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( process), dim3(grid), dim3(block), 0, 0, l_src, r_src, dst, dst.rows, dst.cols);
}
| c6e0ee04df1fa4054b235cf1229a15cecba23a26.cu | #include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
#include "helper_math.h"
__global__ void process(const cv::cuda::PtrStep<uchar3> l_src,
const cv::cuda::PtrStep<uchar3> l_src,
cv::cuda::PtrStep<uchar3> dst, int rows, int cols )
{
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dst_x < cols && dst_y < rows)
{
uchar3 val = src(dst_y, dst_x);
dst(dst_y, dst_x).x = 255-val.x;
dst(dst_y, dst_x).y = 255-val.y;
dst(dst_y, dst_x).z = 255-val.z;
}}
int divUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
void startCUDA ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst )
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
process<<<grid, block>>>(src, dst, dst.rows, dst.cols);
}
void AnaglyphEffect(const cv::cuda::GpuMat& l_src, const cv::cuda::GpuMat& l_src, cv::cuda::GpuMat& dst){
const dim3 block(32, 8);
const dim3 grid(divU(dst.cols, block.x), divUp(dst.rows, block.y));
process<<<grid, block>>>(l_src, r_src, dst, dst.rows, dst.cols);
}
|
53f9c2741a160e2bacae9a7a93d8732f351fd225.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_common_init_u_u0_kernel;
int xdim0_tea_leaf_common_init_u_u0_kernel_h = -1;
__constant__ int xdim1_tea_leaf_common_init_u_u0_kernel;
int xdim1_tea_leaf_common_init_u_u0_kernel_h = -1;
__constant__ int xdim2_tea_leaf_common_init_u_u0_kernel;
int xdim2_tea_leaf_common_init_u_u0_kernel_h = -1;
__constant__ int xdim3_tea_leaf_common_init_u_u0_kernel;
int xdim3_tea_leaf_common_init_u_u0_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x, y) (x + xdim0_tea_leaf_common_init_u_u0_kernel * (y))
#define OPS_ACC1(x, y) (x + xdim1_tea_leaf_common_init_u_u0_kernel * (y))
#define OPS_ACC2(x, y) (x + xdim2_tea_leaf_common_init_u_u0_kernel * (y))
#define OPS_ACC3(x, y) (x + xdim3_tea_leaf_common_init_u_u0_kernel * (y))
// user function
__device__
void
tea_leaf_common_init_u_u0_kernel_gpu(double *u, double *u0,
const double *energy,
const double *density) {
u[OPS_ACC0(0, 0)] = energy[OPS_ACC2(0, 0)] * density[OPS_ACC3(0, 0)];
u0[OPS_ACC1(0, 0)] = energy[OPS_ACC2(0, 0)] * density[OPS_ACC3(0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_tea_leaf_common_init_u_u0_kernel(
double *__restrict arg0, double *__restrict arg1,
const double *__restrict arg2, const double *__restrict arg3, int size0,
int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 +=
idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_tea_leaf_common_init_u_u0_kernel;
arg1 +=
idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_tea_leaf_common_init_u_u0_kernel;
arg2 +=
idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_tea_leaf_common_init_u_u0_kernel;
arg3 +=
idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_tea_leaf_common_init_u_u0_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_common_init_u_u0_kernel_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_tea_leaf_common_init_u_u0_kernel(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
#else
void ops_par_loop_tea_leaf_common_init_u_u0_kernel_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 4, range, 28))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(28, "tea_leaf_common_init_u_u0_kernel");
OPS_kernels[28].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
if (xdim0 != xdim0_tea_leaf_common_init_u_u0_kernel_h ||
xdim1 != xdim1_tea_leaf_common_init_u_u0_kernel_h ||
xdim2 != xdim2_tea_leaf_common_init_u_u0_kernel_h ||
xdim3 != xdim3_tea_leaf_common_init_u_u0_kernel_h) {
hipMemcpyToSymbol(xdim0_tea_leaf_common_init_u_u0_kernel, &xdim0,
sizeof(int));
xdim0_tea_leaf_common_init_u_u0_kernel_h = xdim0;
hipMemcpyToSymbol(xdim1_tea_leaf_common_init_u_u0_kernel, &xdim1,
sizeof(int));
xdim1_tea_leaf_common_init_u_u0_kernel_h = xdim1;
hipMemcpyToSymbol(xdim2_tea_leaf_common_init_u_u0_kernel, &xdim2,
sizeof(int));
xdim2_tea_leaf_common_init_u_u0_kernel_h = xdim2;
hipMemcpyToSymbol(xdim3_tea_leaf_common_init_u_u0_kernel, &xdim3,
sizeof(int));
xdim3_tea_leaf_common_init_u_u0_kernel_h = xdim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[28].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_tea_leaf_common_init_u_u0_kernel), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[28].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[28].mpi_time += t2 - t1;
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_tea_leaf_common_init_u_u0_kernel(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 28;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 28;
for (int i = 0; i < 4; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg *)malloc(4 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_tea_leaf_common_init_u_u0_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(28, "tea_leaf_common_init_u_u0_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
| 53f9c2741a160e2bacae9a7a93d8732f351fd225.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_common_init_u_u0_kernel;
int xdim0_tea_leaf_common_init_u_u0_kernel_h = -1;
__constant__ int xdim1_tea_leaf_common_init_u_u0_kernel;
int xdim1_tea_leaf_common_init_u_u0_kernel_h = -1;
__constant__ int xdim2_tea_leaf_common_init_u_u0_kernel;
int xdim2_tea_leaf_common_init_u_u0_kernel_h = -1;
__constant__ int xdim3_tea_leaf_common_init_u_u0_kernel;
int xdim3_tea_leaf_common_init_u_u0_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x, y) (x + xdim0_tea_leaf_common_init_u_u0_kernel * (y))
#define OPS_ACC1(x, y) (x + xdim1_tea_leaf_common_init_u_u0_kernel * (y))
#define OPS_ACC2(x, y) (x + xdim2_tea_leaf_common_init_u_u0_kernel * (y))
#define OPS_ACC3(x, y) (x + xdim3_tea_leaf_common_init_u_u0_kernel * (y))
// user function
__device__
void
tea_leaf_common_init_u_u0_kernel_gpu(double *u, double *u0,
const double *energy,
const double *density) {
u[OPS_ACC0(0, 0)] = energy[OPS_ACC2(0, 0)] * density[OPS_ACC3(0, 0)];
u0[OPS_ACC1(0, 0)] = energy[OPS_ACC2(0, 0)] * density[OPS_ACC3(0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_tea_leaf_common_init_u_u0_kernel(
double *__restrict arg0, double *__restrict arg1,
const double *__restrict arg2, const double *__restrict arg3, int size0,
int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 +=
idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_tea_leaf_common_init_u_u0_kernel;
arg1 +=
idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_tea_leaf_common_init_u_u0_kernel;
arg2 +=
idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_tea_leaf_common_init_u_u0_kernel;
arg3 +=
idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_tea_leaf_common_init_u_u0_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_common_init_u_u0_kernel_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_tea_leaf_common_init_u_u0_kernel(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
#else
void ops_par_loop_tea_leaf_common_init_u_u0_kernel_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 4, range, 28))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(28, "tea_leaf_common_init_u_u0_kernel");
OPS_kernels[28].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
if (xdim0 != xdim0_tea_leaf_common_init_u_u0_kernel_h ||
xdim1 != xdim1_tea_leaf_common_init_u_u0_kernel_h ||
xdim2 != xdim2_tea_leaf_common_init_u_u0_kernel_h ||
xdim3 != xdim3_tea_leaf_common_init_u_u0_kernel_h) {
cudaMemcpyToSymbol(xdim0_tea_leaf_common_init_u_u0_kernel, &xdim0,
sizeof(int));
xdim0_tea_leaf_common_init_u_u0_kernel_h = xdim0;
cudaMemcpyToSymbol(xdim1_tea_leaf_common_init_u_u0_kernel, &xdim1,
sizeof(int));
xdim1_tea_leaf_common_init_u_u0_kernel_h = xdim1;
cudaMemcpyToSymbol(xdim2_tea_leaf_common_init_u_u0_kernel, &xdim2,
sizeof(int));
xdim2_tea_leaf_common_init_u_u0_kernel_h = xdim2;
cudaMemcpyToSymbol(xdim3_tea_leaf_common_init_u_u0_kernel, &xdim3,
sizeof(int));
xdim3_tea_leaf_common_init_u_u0_kernel_h = xdim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[28].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_tea_leaf_common_init_u_u0_kernel<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
x_size, y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[28].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[28].mpi_time += t2 - t1;
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_tea_leaf_common_init_u_u0_kernel(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 28;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 28;
for (int i = 0; i < 4; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg *)malloc(4 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_tea_leaf_common_init_u_u0_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(28, "tea_leaf_common_init_u_u0_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
6eb510179de60ed0ea6033e385a63269df06ac2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include <math.h>
// Simple define to index into a 1D array from 2D space
#define I2D(num, c, r) ((r)*(num)+(c))
inline hipError_t checkCuda(hipError_t result) {
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
// assert makes the program stop
assert(result == hipSuccess);
}
return result;
}
/*
* `step_kernel_mod` is currently a direct copy of the CPU reference solution
* `step_kernel_ref` below. Accelerate it to run as a CUDA kernel.
*/
__global__ void step_kernel_mod(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
int i00, im10, ip10, i0m1, i0p1;
float d2tdx2, d2tdy2;
// the implementation below assumed i represents x and j represents y dimensions
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
dim3 stride(gridDim.x * blockDim.x, gridDim.y * blockDim.y);
// avoid boundaries & add support to stride
for (; j >= 1 && j < nj - 1; j += stride.y) {
for (; i >= 1 && i < ni - 1; i += stride.x) {
// find indices into linear memory
// for central point and neighbours
i00 = I2D(ni, i, j);
im10 = I2D(ni, i-1, j);
ip10 = I2D(ni, i+1, j);
i0m1 = I2D(ni, i, j-1);
i0p1 = I2D(ni, i, j+1);
// evaluate derivatives
d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10];
d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1];
// update temperatures
temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2);
}
}
}
void step_kernel_ref(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
int i00, im10, ip10, i0m1, i0p1;
float d2tdx2, d2tdy2;
// loop over all points in domain (except boundary)
for ( int j=1; j < nj-1; j++ ) {
for ( int i=1; i < ni-1; i++ ) {
// find indices into linear memory
// for central point and neighbours
i00 = I2D(ni, i, j);
im10 = I2D(ni, i-1, j);
ip10 = I2D(ni, i+1, j);
i0m1 = I2D(ni, i, j-1);
i0p1 = I2D(ni, i, j+1);
// evaluate derivatives
d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10];
d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1];
// update temperatures
temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2);
}
}
}
int main()
{
int istep;
int nstep = 200; // number of time steps
// Specify our 2D dimensions
const int ni = 200;
const int nj = 100;
float tfac = 8.418e-5; // thermal diffusivity of silver
float *temp1_ref, *temp2_ref, *temp1, *temp2, *temp_tmp;
const int size = ni * nj * sizeof(float);
checkCuda(hipMallocManaged(&temp1_ref, size));
checkCuda(hipMallocManaged(&temp2_ref, size));
checkCuda(hipMallocManaged(&temp1, size));
checkCuda(hipMallocManaged(&temp2, size));
// Initialize with random data
for( int i = 0; i < ni*nj; ++i) {
temp1_ref[i] = temp2_ref[i] = temp1[i] = temp2[i] = (float)rand()/(float)(RAND_MAX/100.0f);
}
// Execute the CPU-only reference version
for (istep=0; istep < nstep; istep++) {
step_kernel_ref(ni, nj, tfac, temp1_ref, temp2_ref);
// swap the temperature pointers
temp_tmp = temp1_ref;
temp1_ref = temp2_ref;
temp2_ref = temp_tmp;
}
// the implementation assumed i represents x and j represents y dimensions
dim3 threads_per_block(16, 16); // 256 threads per block
dim3 number_of_blocks(
(ni + threads_per_block.x - 1) / threads_per_block.x,
(nj + threads_per_block.y - 1) / threads_per_block.y
);
// Execute the modified version using same data
for (istep=0; istep < nstep; istep++) {
hipLaunchKernelGGL(( step_kernel_mod), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, ni, nj, tfac, temp1, temp2);
checkCuda(hipGetLastError());
checkCuda(hipDeviceSynchronize());
// swap the temperature pointers
temp_tmp = temp1;
temp1 = temp2;
temp2 = temp_tmp;
}
float maxError = 0;
// Output should always be stored in the temp1 and temp1_ref at this point
for( int i = 0; i < ni*nj; ++i ) {
if (abs(temp1[i]-temp1_ref[i]) > maxError) { maxError = abs(temp1[i]-temp1_ref[i]); }
}
// Check and see if our maxError is greater than an error bound
if (maxError > 0.0005f)
printf("Problem! The Max Error of %.5f is NOT within acceptable bounds.\n", maxError);
else
printf("The Max Error of %.5f is within acceptable bounds.\n", maxError);
checkCuda(hipFree( temp1_ref ));
checkCuda(hipFree( temp2_ref ));
checkCuda(hipFree( temp1 ));
checkCuda(hipFree( temp2 ));
return 0;
}
| 6eb510179de60ed0ea6033e385a63269df06ac2b.cu | #include <stdio.h>
#include <assert.h>
#include <math.h>
// Simple define to index into a 1D array from 2D space
#define I2D(num, c, r) ((r)*(num)+(c))
inline cudaError_t checkCuda(cudaError_t result) {
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
// assert makes the program stop
assert(result == cudaSuccess);
}
return result;
}
/*
* `step_kernel_mod` is currently a direct copy of the CPU reference solution
* `step_kernel_ref` below. Accelerate it to run as a CUDA kernel.
*/
__global__ void step_kernel_mod(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
int i00, im10, ip10, i0m1, i0p1;
float d2tdx2, d2tdy2;
// the implementation below assumed i represents x and j represents y dimensions
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
dim3 stride(gridDim.x * blockDim.x, gridDim.y * blockDim.y);
// avoid boundaries & add support to stride
for (; j >= 1 && j < nj - 1; j += stride.y) {
for (; i >= 1 && i < ni - 1; i += stride.x) {
// find indices into linear memory
// for central point and neighbours
i00 = I2D(ni, i, j);
im10 = I2D(ni, i-1, j);
ip10 = I2D(ni, i+1, j);
i0m1 = I2D(ni, i, j-1);
i0p1 = I2D(ni, i, j+1);
// evaluate derivatives
d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10];
d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1];
// update temperatures
temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2);
}
}
}
void step_kernel_ref(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
int i00, im10, ip10, i0m1, i0p1;
float d2tdx2, d2tdy2;
// loop over all points in domain (except boundary)
for ( int j=1; j < nj-1; j++ ) {
for ( int i=1; i < ni-1; i++ ) {
// find indices into linear memory
// for central point and neighbours
i00 = I2D(ni, i, j);
im10 = I2D(ni, i-1, j);
ip10 = I2D(ni, i+1, j);
i0m1 = I2D(ni, i, j-1);
i0p1 = I2D(ni, i, j+1);
// evaluate derivatives
d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10];
d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1];
// update temperatures
temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2);
}
}
}
int main()
{
int istep;
int nstep = 200; // number of time steps
// Specify our 2D dimensions
const int ni = 200;
const int nj = 100;
float tfac = 8.418e-5; // thermal diffusivity of silver
float *temp1_ref, *temp2_ref, *temp1, *temp2, *temp_tmp;
const int size = ni * nj * sizeof(float);
checkCuda(cudaMallocManaged(&temp1_ref, size));
checkCuda(cudaMallocManaged(&temp2_ref, size));
checkCuda(cudaMallocManaged(&temp1, size));
checkCuda(cudaMallocManaged(&temp2, size));
// Initialize with random data
for( int i = 0; i < ni*nj; ++i) {
temp1_ref[i] = temp2_ref[i] = temp1[i] = temp2[i] = (float)rand()/(float)(RAND_MAX/100.0f);
}
// Execute the CPU-only reference version
for (istep=0; istep < nstep; istep++) {
step_kernel_ref(ni, nj, tfac, temp1_ref, temp2_ref);
// swap the temperature pointers
temp_tmp = temp1_ref;
temp1_ref = temp2_ref;
temp2_ref = temp_tmp;
}
// the implementation assumed i represents x and j represents y dimensions
dim3 threads_per_block(16, 16); // 256 threads per block
dim3 number_of_blocks(
(ni + threads_per_block.x - 1) / threads_per_block.x,
(nj + threads_per_block.y - 1) / threads_per_block.y
);
// Execute the modified version using same data
for (istep=0; istep < nstep; istep++) {
step_kernel_mod<<<number_of_blocks, threads_per_block>>>(ni, nj, tfac, temp1, temp2);
checkCuda(cudaGetLastError());
checkCuda(cudaDeviceSynchronize());
// swap the temperature pointers
temp_tmp = temp1;
temp1 = temp2;
temp2 = temp_tmp;
}
float maxError = 0;
// Output should always be stored in the temp1 and temp1_ref at this point
for( int i = 0; i < ni*nj; ++i ) {
if (abs(temp1[i]-temp1_ref[i]) > maxError) { maxError = abs(temp1[i]-temp1_ref[i]); }
}
// Check and see if our maxError is greater than an error bound
if (maxError > 0.0005f)
printf("Problem! The Max Error of %.5f is NOT within acceptable bounds.\n", maxError);
else
printf("The Max Error of %.5f is within acceptable bounds.\n", maxError);
checkCuda(cudaFree( temp1_ref ));
checkCuda(cudaFree( temp2_ref ));
checkCuda(cudaFree( temp1 ));
checkCuda(cudaFree( temp2 ));
return 0;
}
|
6537253c18519e5e914ca850db68a2aaba747999.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <THH/THHAtomics.cuh>
using namespace at; // temporal fix for pytorch<=0.4.1 (see #9848)
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
// Host helper: number of 1-D thread blocks needed to cover N work items,
// capped at 65536.  CUDA_1D_KERNEL_LOOP is a grid-stride loop, so the cap
// only limits parallelism, never correctness.
inline int GET_BLOCKS(const int N) {
  const int needed = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;  // ceil(N / block)
  const int cap = 65536;
  return needed < cap ? needed : cap;
}
// Flattens an (n, c, h, w) coordinate into a linear offset for a contiguous
// NCHW tensor with `channel_num` channels and spatial size height x width.
__device__ inline int Loc2Index(const int n, const int c, const int h,
                                const int w, const int channel_num,
                                const int height, const int width) {
  // Equivalent to ((n * C + c) * H + h) * W + w, written inside-out.
  int index = w + (h + (c + n * channel_num) * height) * width;
  return index;
}
// CARAFE (naive) forward kernel.
// One thread per output element of top_data (N x C x H x W layout,
// nthreads = N*C*H*W).  Each output value is the inner product of a
// kernel_size x kernel_size window of the low-resolution feature map
// bottom_data (spatial size H/scale_factor x W/scale_factor) with the
// per-output-position weights in bottom_masks (kernel_size^2 * group_size
// mask channels).  Grid-stride CUDA_1D_KERNEL_LOOP: any grid size is valid.
template <typename scalar_t>
__global__ void CARAFENAIVEForward(const int nthreads,
                                   const scalar_t *bottom_data,
                                   const scalar_t *bottom_masks,
                                   const int kernel_size, const int group_size,
                                   const int scale_factor, const int channels,
                                   const int height, const int width,
                                   scalar_t *top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is the coordinate of this thread's output element.
    int pw = index % width;
    int ph = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    int mask_channels = kernel_size * kernel_size * group_size;
    // Channels are split into group_size groups; each group shares one mask.
    int mask_group = c / (channels / group_size);
    // Corresponding pixel in the low-resolution input feature map.
    int down_pw = pw / scale_factor;
    int down_ph = ph / scale_factor;
    int down_width = width / scale_factor;
    int down_height = height / scale_factor;
    // Sampling window [start, end) around the input pixel.
    int start_w = down_pw - (kernel_size - 1) / 2;
    int end_w = down_pw + (kernel_size - 1) / 2 + 1;
    int start_h = down_ph - (kernel_size - 1) / 2;
    int end_h = down_ph + (kernel_size - 1) / 2 + 1;
    scalar_t output_val = 0;
    for (int iy = start_h; iy < end_h; iy++) {
      for (int ix = start_w; ix < end_w; ix++) {
        // Taps falling outside the input are skipped (implicit zero padding).
        if (iy < 0 || iy > down_height - 1 || ix < 0 || ix > down_width - 1) {
          continue;
        }
        // Position of this tap inside the kernel window, in [0, kernel_size).
        int mask_iy = iy - down_ph + (kernel_size - 1) / 2;
        int mask_ix = ix - down_pw + (kernel_size - 1) / 2;
        int mask_c =
            (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix;
        int feat_index =
            Loc2Index(n, c, iy, ix, channels, down_height, down_width);
        int mask_index =
            Loc2Index(n, mask_c, ph, pw, mask_channels, height, width);
        output_val += bottom_data[feat_index] * bottom_masks[mask_index];
      }
    }
    top_data[index] = output_val;
  }
}
// Host launcher for CARAFENAIVEForward (HIP build).
// Dispatches on the floating-point dtype of `features` (including half),
// launches one thread per output element, checks the launch result and
// terminates the process on error.  Always returns 1 on success.
// NOTE(review): the kernel is launched on the default stream (the trailing
// 0, 0 launch arguments), not PyTorch's current stream -- confirm callers
// rely on default-stream synchronization semantics.
int CARAFENAIVEForwardLaucher(const at::Tensor features, const at::Tensor masks,
                              const int kernel_size, const int group_size,
                              const int scale_factor, const int batch_size,
                              const int channels, const int height,
                              const int width, at::Tensor output) {
  const int output_size = batch_size * channels * height * width;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      features.scalar_type(), "CARAFENAIVELaucherForward", ([&] {
        const scalar_t *bottom_data = features.data_ptr<scalar_t>();
        const scalar_t *bottom_masks = masks.data_ptr<scalar_t>();
        scalar_t *top_data = output.data_ptr<scalar_t>();
        hipLaunchKernelGGL(( CARAFENAIVEForward<scalar_t>)
            , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0,
                output_size, bottom_data, bottom_masks, kernel_size, group_size,
                scale_factor, channels, height, width, top_data);
      }));
  // Launch-configuration errors surface here; asynchronous execution errors
  // only appear at the next synchronizing call.
  hipError_t err = hipGetLastError();
  if (hipSuccess != err) {
    fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err));
    exit(-1);
  }
  return 1;
}
// CARAFE (naive) backward kernel.
// One thread per element of top_diff (the upstream gradient, N*C*H*W
// elements).  Each thread revisits the same window as the forward pass and
// scatters its contribution into both bottom_diff (grad w.r.t. features)
// and mask_diff (grad w.r.t. masks) via atomicAdd, because several output
// positions touch the same input feature / mask entry.  Atomic float
// accumulation makes results bitwise non-deterministic across runs.
// bottom_diff and mask_diff are only accumulated into, never assigned --
// presumably pre-zeroed by the caller; TODO confirm.
template <typename scalar_t>
__global__ void CARAFENAIVEBackward(
    const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_data,
    const scalar_t *bottom_masks, const int kernel_size, const int group_size,
    const int scale_factor, const int channels, const int height,
    const int width, scalar_t *bottom_diff, scalar_t *mask_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is the coordinate of this thread's output-grad element.
    int pw = index % width;
    int ph = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    int mask_channels = kernel_size * kernel_size * group_size;
    int mask_group = c / (channels / group_size);
    // Same window geometry as the forward kernel.
    int down_pw = pw / scale_factor;
    int down_ph = ph / scale_factor;
    int down_width = width / scale_factor;
    int down_height = height / scale_factor;
    int start_w = down_pw - (kernel_size - 1) / 2;
    int end_w = down_pw + (kernel_size - 1) / 2 + 1;
    int start_h = down_ph - (kernel_size - 1) / 2;
    int end_h = down_ph + (kernel_size - 1) / 2 + 1;
    for (int iy = start_h; iy < end_h; iy++) {
      for (int ix = start_w; ix < end_w; ix++) {
        if (iy < 0 || iy > down_height - 1 || ix < 0 || ix > down_width - 1) {
          continue;
        }
        int mask_iy = iy - down_ph + (kernel_size - 1) / 2;
        int mask_ix = ix - down_pw + (kernel_size - 1) / 2;
        int mask_c =
            (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix;
        int feat_index =
            Loc2Index(n, c, iy, ix, channels, down_height, down_width);
        int mask_index =
            Loc2Index(n, mask_c, ph, pw, mask_channels, height, width);
        // d out / d feature = mask weight; d out / d mask = feature value.
        atomicAdd(bottom_diff + feat_index,
                  bottom_masks[mask_index] * top_diff[index]);
        atomicAdd(mask_diff + mask_index,
                  bottom_data[feat_index] * top_diff[index]);
      }
    }
  }
}
// Host launcher for CARAFENAIVEBackward (HIP build).
// Dispatches on the floating-point dtype of `top_grad` (including half),
// launches one thread per upstream-gradient element, checks the launch
// result and terminates the process on error.  Always returns 1 on success.
// NOTE(review): launches on the default stream (trailing 0, 0 arguments),
// not PyTorch's current stream -- confirm callers expect this.
int CARAFENAIVEBackwardLaucher(const at::Tensor top_grad,
                               const at::Tensor features,
                               const at::Tensor masks, const int kernel_size,
                               const int group_size, const int scale_factor,
                               const int batch_size, const int channels,
                               const int height, const int width,
                               at::Tensor bottom_grad, at::Tensor mask_grad) {
  const int output_size = batch_size * channels * height * width;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      top_grad.scalar_type(), "CARAFENAIVELaucherBackward", ([&] {
        const scalar_t *top_diff = top_grad.data_ptr<scalar_t>();
        const scalar_t *bottom_data = features.data_ptr<scalar_t>();
        const scalar_t *bottom_masks = masks.data_ptr<scalar_t>();
        scalar_t *bottom_diff = bottom_grad.data_ptr<scalar_t>();
        scalar_t *mask_diff = mask_grad.data_ptr<scalar_t>();
        hipLaunchKernelGGL(( CARAFENAIVEBackward<scalar_t>)
            , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0,
                output_size, top_diff, bottom_data, bottom_masks, kernel_size,
                group_size, scale_factor, channels, height, width, bottom_diff,
                mask_diff);
      }));
  // Launch-configuration errors surface here; asynchronous execution errors
  // only appear at the next synchronizing call.
  hipError_t err = hipGetLastError();
  if (hipSuccess != err) {
    fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err));
    exit(-1);
  }
  return 1;
}
| 6537253c18519e5e914ca850db68a2aaba747999.cu | #include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>
using namespace at; // temporal fix for pytorch<=0.4.1 (see #9848)
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
// Number of one-dimensional blocks required to cover N work items, clamped
// to 65536; the kernels' grid-stride loops pick up any leftover items.
inline int GET_BLOCKS(const int N) {
  const int kMaxBlocks = 65536;
  int blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;  // ceil-div
  if (blocks > kMaxBlocks) blocks = kMaxBlocks;
  return blocks;
}
// Linear offset of element (n, c, h, w) in a contiguous NCHW tensor with
// `channel_num` channels and spatial size height x width.
__device__ inline int Loc2Index(const int n, const int c, const int h,
                                const int w, const int channel_num,
                                const int height, const int width) {
  // Horner-style flattening: ((n * C + c) * H + h) * W + w.
  const int plane = c + n * channel_num;
  const int row = h + plane * height;
  return w + row * width;
}
// CARAFE (naive) forward kernel: every output element of top_data is the
// inner product of a kernel_size x kernel_size window of the low-resolution
// feature map bottom_data with the per-position weights in bottom_masks.
// Launched 1-D over nthreads = N*C*H*W; CUDA_1D_KERNEL_LOOP is grid-stride,
// so any launch configuration is valid.
template <typename scalar_t>
__global__ void CARAFENAIVEForward(const int nthreads,
                                   const scalar_t *bottom_data,
                                   const scalar_t *bottom_masks,
                                   const int kernel_size, const int group_size,
                                   const int scale_factor, const int channels,
                                   const int height, const int width,
                                   scalar_t *top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat output index into (n, c, ph, pw), NCHW order.
    const int pw = index % width;
    const int ph = (index / width) % height;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;

    const int mask_channels = kernel_size * kernel_size * group_size;
    // Each of the group_size channel groups shares one set of mask weights.
    const int mask_group = c / (channels / group_size);

    // Corresponding pixel in the low-resolution input feature map.
    const int down_pw = pw / scale_factor;
    const int down_ph = ph / scale_factor;
    const int down_width = width / scale_factor;
    const int down_height = height / scale_factor;

    const int half_kernel = (kernel_size - 1) / 2;
    scalar_t output_val = 0;
    for (int iy = down_ph - half_kernel; iy < down_ph + half_kernel + 1; iy++) {
      if (iy < 0 || iy >= down_height) continue;  // implicit zero padding
      for (int ix = down_pw - half_kernel; ix < down_pw + half_kernel + 1; ix++) {
        if (ix < 0 || ix >= down_width) continue;  // implicit zero padding
        // Tap position inside the kernel window selects the mask channel.
        const int mask_c =
            (mask_group * kernel_size + (iy - down_ph + half_kernel)) *
                kernel_size +
            (ix - down_pw + half_kernel);
        const int feat_index =
            Loc2Index(n, c, iy, ix, channels, down_height, down_width);
        const int mask_index =
            Loc2Index(n, mask_c, ph, pw, mask_channels, height, width);
        output_val += bottom_data[feat_index] * bottom_masks[mask_index];
      }
    }
    top_data[index] = output_val;
  }
}
// Host launcher for CARAFENAIVEForward.
// Dispatches on the floating-point dtype of `features` (including half),
// launches one thread per output element, checks the launch result and
// terminates the process on error.  Always returns 1 on success.
// NOTE(review): the kernel is launched on the default stream, not PyTorch's
// current stream -- confirm callers rely on default-stream semantics.
int CARAFENAIVEForwardLaucher(const at::Tensor features, const at::Tensor masks,
                              const int kernel_size, const int group_size,
                              const int scale_factor, const int batch_size,
                              const int channels, const int height,
                              const int width, at::Tensor output) {
  const int output_size = batch_size * channels * height * width;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      features.scalar_type(), "CARAFENAIVELaucherForward", ([&] {
        const scalar_t *bottom_data = features.data_ptr<scalar_t>();
        const scalar_t *bottom_masks = masks.data_ptr<scalar_t>();
        scalar_t *top_data = output.data_ptr<scalar_t>();
        CARAFENAIVEForward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
                output_size, bottom_data, bottom_masks, kernel_size, group_size,
                scale_factor, channels, height, width, top_data);
      }));
  // Launch-configuration errors surface here; asynchronous execution errors
  // only appear at the next synchronizing call.
  cudaError_t err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
  return 1;
}
// CARAFE (naive) backward kernel.
// One thread per element of top_diff (N*C*H*W upstream-gradient elements).
// Each thread revisits the same window as the forward pass and scatters its
// contribution into bottom_diff (grad w.r.t. features) and mask_diff (grad
// w.r.t. masks) via atomicAdd, since several output positions touch the same
// input feature / mask entry.  Atomic float accumulation makes results
// bitwise non-deterministic across runs.
// bottom_diff and mask_diff are only accumulated into, never assigned --
// presumably pre-zeroed by the caller; TODO confirm.
template <typename scalar_t>
__global__ void CARAFENAIVEBackward(
    const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_data,
    const scalar_t *bottom_masks, const int kernel_size, const int group_size,
    const int scale_factor, const int channels, const int height,
    const int width, scalar_t *bottom_diff, scalar_t *mask_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is the coordinate of this thread's output-grad element.
    int pw = index % width;
    int ph = (index / width) % height;
    int c = (index / width / height) % channels;
    int n = index / width / height / channels;
    int mask_channels = kernel_size * kernel_size * group_size;
    int mask_group = c / (channels / group_size);
    // Same window geometry as the forward kernel.
    int down_pw = pw / scale_factor;
    int down_ph = ph / scale_factor;
    int down_width = width / scale_factor;
    int down_height = height / scale_factor;
    int start_w = down_pw - (kernel_size - 1) / 2;
    int end_w = down_pw + (kernel_size - 1) / 2 + 1;
    int start_h = down_ph - (kernel_size - 1) / 2;
    int end_h = down_ph + (kernel_size - 1) / 2 + 1;
    for (int iy = start_h; iy < end_h; iy++) {
      for (int ix = start_w; ix < end_w; ix++) {
        if (iy < 0 || iy > down_height - 1 || ix < 0 || ix > down_width - 1) {
          continue;
        }
        int mask_iy = iy - down_ph + (kernel_size - 1) / 2;
        int mask_ix = ix - down_pw + (kernel_size - 1) / 2;
        int mask_c =
            (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix;
        int feat_index =
            Loc2Index(n, c, iy, ix, channels, down_height, down_width);
        int mask_index =
            Loc2Index(n, mask_c, ph, pw, mask_channels, height, width);
        // d out / d feature = mask weight; d out / d mask = feature value.
        atomicAdd(bottom_diff + feat_index,
                  bottom_masks[mask_index] * top_diff[index]);
        atomicAdd(mask_diff + mask_index,
                  bottom_data[feat_index] * top_diff[index]);
      }
    }
  }
}
// Host launcher for CARAFENAIVEBackward.
// Dispatches on the floating-point dtype of `top_grad` (including half),
// launches one thread per upstream-gradient element, checks the launch
// result and terminates the process on error.  Always returns 1 on success.
// NOTE(review): launches on the default stream, not PyTorch's current
// stream -- confirm callers expect this.
int CARAFENAIVEBackwardLaucher(const at::Tensor top_grad,
                               const at::Tensor features,
                               const at::Tensor masks, const int kernel_size,
                               const int group_size, const int scale_factor,
                               const int batch_size, const int channels,
                               const int height, const int width,
                               at::Tensor bottom_grad, at::Tensor mask_grad) {
  const int output_size = batch_size * channels * height * width;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      top_grad.scalar_type(), "CARAFENAIVELaucherBackward", ([&] {
        const scalar_t *top_diff = top_grad.data_ptr<scalar_t>();
        const scalar_t *bottom_data = features.data_ptr<scalar_t>();
        const scalar_t *bottom_masks = masks.data_ptr<scalar_t>();
        scalar_t *bottom_diff = bottom_grad.data_ptr<scalar_t>();
        scalar_t *mask_diff = mask_grad.data_ptr<scalar_t>();
        CARAFENAIVEBackward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
                output_size, top_diff, bottom_data, bottom_masks, kernel_size,
                group_size, scale_factor, channels, height, width, bottom_diff,
                mask_diff);
      }));
  // Launch-configuration errors surface here; asynchronous execution errors
  // only appear at the next synchronizing call.
  cudaError_t err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
  return 1;
}
|
36ee6d5ee1ef0a3b41e58726b4da21fb2fd6d0e8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <experimental/include_cuco_static_map.cuh>
#include <experimental/detail/graph_utils.cuh>
#include <experimental/graph_functions.hpp>
#include <experimental/graph_view.hpp>
#include <utilities/device_comm.cuh>
#include <utilities/error.hpp>
#include <utilities/host_scalar_comm.cuh>
#include <utilities/shuffle_comm.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <iterator>
#include <numeric>
#include <tuple>
#include <utility>
namespace cugraph {
namespace experimental {
namespace detail {
#ifdef CUCO_STATIC_MAP_DEFINED
// Computes a renumbering map: the unique vertex labels appearing as edge
// endpoints in the given partitions (plus, when `vertices` != nullptr, any
// isolated vertices from that list), sorted by decreasing major-endpoint
// occurrence count so that frequent (high-degree) vertices receive the
// smallest renumbered IDs.  In the multi_gpu path, per-partition
// (label, count) pairs are gathered over the column sub-communicator and
// minor labels are shuffled over the row sub-communicator before the merge.
template <typename vertex_t, typename edge_t, bool multi_gpu>
rmm::device_uvector<vertex_t> compute_renumber_map(
  raft::handle_t const& handle,
  vertex_t const* vertices,
  vertex_t num_local_vertices /* relevant only if vertices != nullptr */,
  std::vector<vertex_t const*> const& edgelist_major_vertices,
  std::vector<vertex_t const*> const& edgelist_minor_vertices,
  std::vector<edge_t> const& edgelist_edge_counts)
{
  // FIXME: compare this sort based approach with hash based approach in both speed and memory
  // footprint
  // 1. acquire (unique major label, count) pairs
  rmm::device_uvector<vertex_t> major_labels(0, handle.get_stream());
  rmm::device_uvector<edge_t> major_counts(0, handle.get_stream());
  for (size_t i = 0; i < edgelist_major_vertices.size(); ++i) {
    rmm::device_uvector<vertex_t> tmp_major_labels(0, handle.get_stream());
    rmm::device_uvector<edge_t> tmp_major_counts(0, handle.get_stream());
    {
      // Sort partition i's major endpoints, then run-length-compress them
      // into (label, count) pairs.
      rmm::device_uvector<vertex_t> sorted_major_labels(edgelist_edge_counts[i],
                                                        handle.get_stream());
      thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                   edgelist_major_vertices[i],
                   edgelist_major_vertices[i] + edgelist_edge_counts[i],
                   sorted_major_labels.begin());
      thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                   sorted_major_labels.begin(),
                   sorted_major_labels.end());
      // A new run starts wherever the sorted label differs from its
      // predecessor (or at position 0).
      auto num_unique_labels =
        thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                         thrust::make_counting_iterator(size_t{0}),
                         thrust::make_counting_iterator(sorted_major_labels.size()),
                         [labels = sorted_major_labels.data()] __device__(auto i) {
                           return (i == 0) || (labels[i - 1] != labels[i]);
                         });
      tmp_major_labels.resize(num_unique_labels, handle.get_stream());
      tmp_major_counts.resize(tmp_major_labels.size(), handle.get_stream());
      thrust::reduce_by_key(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                            sorted_major_labels.begin(),
                            sorted_major_labels.end(),
                            thrust::make_constant_iterator(edge_t{1}),
                            tmp_major_labels.begin(),
                            tmp_major_counts.begin());
    }
    if (multi_gpu) {
      // Gather every rank's (label, count) pairs for partition i on the
      // column-communicator rank that owns it, then combine duplicates.
      auto& col_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
      auto const col_comm_rank = col_comm.get_rank();
      auto const col_comm_size = col_comm.get_size();
      rmm::device_uvector<vertex_t> rx_major_labels(0, handle.get_stream());
      rmm::device_uvector<edge_t> rx_major_counts(0, handle.get_stream());
      auto rx_sizes = host_scalar_gather(
        col_comm, tmp_major_labels.size(), static_cast<int>(i), handle.get_stream());
      std::vector<size_t> rx_displs{};
      if (static_cast<int>(i) == col_comm_rank) {
        rx_displs.assign(col_comm_size, size_t{0});
        std::partial_sum(rx_sizes.begin(), rx_sizes.end() - 1, rx_displs.begin() + 1);
        rx_major_labels.resize(rx_displs.back() + rx_sizes.back(), handle.get_stream());
        rx_major_counts.resize(rx_major_labels.size(), handle.get_stream());
      }
      device_gatherv(col_comm,
                     thrust::make_zip_iterator(
                       thrust::make_tuple(tmp_major_labels.begin(), tmp_major_counts.begin())),
                     thrust::make_zip_iterator(
                       thrust::make_tuple(rx_major_labels.begin(), rx_major_counts.begin())),
                     tmp_major_labels.size(),
                     rx_sizes,
                     rx_displs,
                     static_cast<int>(i),
                     handle.get_stream());
      if (static_cast<int>(i) == col_comm_rank) {
        thrust::sort_by_key(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                            rx_major_labels.begin(),
                            rx_major_labels.end(),
                            rx_major_counts.begin());
        major_labels.resize(rx_major_labels.size(), handle.get_stream());
        major_counts.resize(major_labels.size(), handle.get_stream());
        auto pair_it =
          thrust::reduce_by_key(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                                rx_major_labels.begin(),
                                rx_major_labels.end(),
                                rx_major_counts.begin(),
                                major_labels.begin(),
                                major_counts.begin());
        major_labels.resize(thrust::distance(major_labels.begin(), thrust::get<0>(pair_it)),
                            handle.get_stream());
        major_counts.resize(major_labels.size(), handle.get_stream());
        major_labels.shrink_to_fit(handle.get_stream());
        major_counts.shrink_to_fit(handle.get_stream());
      }
    } else {
      tmp_major_labels.shrink_to_fit(handle.get_stream());
      tmp_major_counts.shrink_to_fit(handle.get_stream());
      major_labels = std::move(tmp_major_labels);
      major_counts = std::move(tmp_major_counts);
    }
  }
  // 2. acquire unique minor labels
  std::vector<edge_t> minor_displs(edgelist_minor_vertices.size(), edge_t{0});
  std::partial_sum(
    edgelist_edge_counts.begin(), edgelist_edge_counts.end() - 1, minor_displs.begin() + 1);
  rmm::device_uvector<vertex_t> minor_labels(minor_displs.back() + edgelist_edge_counts.back(),
                                             handle.get_stream());
  for (size_t i = 0; i < edgelist_minor_vertices.size(); ++i) {
    thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                 edgelist_minor_vertices[i],
                 edgelist_minor_vertices[i] + edgelist_edge_counts[i],
                 minor_labels.begin() + minor_displs[i]);
  }
  thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
               minor_labels.begin(),
               minor_labels.end());
  minor_labels.resize(
    thrust::distance(minor_labels.begin(),
                     thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                                    minor_labels.begin(),
                                    minor_labels.end())),
    handle.get_stream());
  if (multi_gpu) {
    // Route each minor label to its owning GPU (hash over the row
    // communicator) and deduplicate again after the shuffle.
    auto& row_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
    auto const row_comm_size = row_comm.get_size();
    rmm::device_uvector<vertex_t> rx_minor_labels(0, handle.get_stream());
    std::tie(rx_minor_labels, std::ignore) = groupby_gpuid_and_shuffle_values(
      row_comm,
      minor_labels.begin(),
      minor_labels.end(),
      [key_func = detail::compute_gpu_id_from_vertex_t<vertex_t>{row_comm_size}] __device__(
        auto val) { return key_func(val); },
      handle.get_stream());
    thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                 rx_minor_labels.begin(),
                 rx_minor_labels.end());
    rx_minor_labels.resize(
      thrust::distance(
        rx_minor_labels.begin(),
        thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                       rx_minor_labels.begin(),
                       rx_minor_labels.end())),
      handle.get_stream());
    minor_labels = std::move(rx_minor_labels);
  }
  minor_labels.shrink_to_fit(handle.get_stream());
  // 3. merge major and minor labels and vertex labels
  // Minor-only labels contribute count 0 (constant iterator below), so only
  // major-endpoint occurrences determine the final ordering.
  rmm::device_uvector<vertex_t> merged_labels(major_labels.size() + minor_labels.size(),
                                              handle.get_stream());
  rmm::device_uvector<edge_t> merged_counts(merged_labels.size(), handle.get_stream());
  thrust::merge_by_key(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                       major_labels.begin(),
                       major_labels.end(),
                       minor_labels.begin(),
                       minor_labels.end(),
                       major_counts.begin(),
                       thrust::make_constant_iterator(edge_t{0}),
                       merged_labels.begin(),
                       merged_counts.begin());
  major_labels.resize(0, handle.get_stream());
  major_counts.resize(0, handle.get_stream());
  minor_labels.resize(0, handle.get_stream());
  major_labels.shrink_to_fit(handle.get_stream());
  major_counts.shrink_to_fit(handle.get_stream());
  minor_labels.shrink_to_fit(handle.get_stream());
  rmm::device_uvector<vertex_t> labels(merged_labels.size(), handle.get_stream());
  rmm::device_uvector<edge_t> counts(labels.size(), handle.get_stream());
  auto pair_it =
    thrust::reduce_by_key(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                          merged_labels.begin(),
                          merged_labels.end(),
                          merged_counts.begin(),
                          labels.begin(),
                          counts.begin());
  merged_labels.resize(0, handle.get_stream());
  merged_counts.resize(0, handle.get_stream());
  merged_labels.shrink_to_fit(handle.get_stream());
  merged_counts.shrink_to_fit(handle.get_stream());
  labels.resize(thrust::distance(labels.begin(), thrust::get<0>(pair_it)), handle.get_stream());
  counts.resize(labels.size(), handle.get_stream());
  labels.shrink_to_fit(handle.get_stream());
  counts.shrink_to_fit(handle.get_stream());
  // 4. if vertices != nullptr, add isolated vertices
  rmm::device_uvector<vertex_t> isolated_vertices(0, handle.get_stream());
  if (vertices != nullptr) {
    // `labels` is sorted at this point, so binary_search is valid.
    auto num_isolated_vertices = thrust::count_if(
      rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
      vertices,
      vertices + num_local_vertices,
      [label_first = labels.begin(), label_last = labels.end()] __device__(auto v) {
        return !thrust::binary_search(thrust::seq, label_first, label_last, v);
      });
    isolated_vertices.resize(num_isolated_vertices, handle.get_stream());
    thrust::copy_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                    vertices,
                    vertices + num_local_vertices,
                    isolated_vertices.begin(),
                    [label_first = labels.begin(), label_last = labels.end()] __device__(auto v) {
                      return !thrust::binary_search(thrust::seq, label_first, label_last, v);
                    });
  }
  if (isolated_vertices.size() > 0) {
    // Append isolated vertices with a degree count of 0.
    labels.resize(labels.size() + isolated_vertices.size(), handle.get_stream());
    counts.resize(labels.size(), handle.get_stream());
    thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                 isolated_vertices.begin(),
                 isolated_vertices.end(),
                 labels.end() - isolated_vertices.size());
    thrust::fill(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                 counts.end() - isolated_vertices.size(),
                 counts.end(),
                 edge_t{0});
  }
  // 5. sort labels by count (descending) so high-count vertices come first
  thrust::sort_by_key(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                      counts.begin(),
                      counts.end(),
                      labels.begin(),
                      thrust::greater<edge_t>());
  return labels;
}
// Expensive (debug-only) validation of renumber_edgelist() inputs:
//  * local_vertices, when provided, contains no duplicates;
//  * in multi-GPU mode, vertices and edges are placed on the GPU that the
//    2D edge partitioning assigns them to (pre-shuffled);
//  * every edge endpoint appears in the (gathered) vertex list.
// Throws via CUGRAPH_EXPECTS on the first violation.
template <typename vertex_t, typename edge_t, bool multi_gpu>
void expensive_check_edgelist(
  raft::handle_t const& handle,
  vertex_t const* local_vertices,
  vertex_t num_local_vertices /* relevant only if local_vertices != nullptr */,
  std::vector<vertex_t const*> const& edgelist_major_vertices,
  std::vector<vertex_t const*> const& edgelist_minor_vertices,
  std::vector<edge_t> const& edgelist_edge_counts)
{
  rmm::device_uvector<vertex_t> sorted_local_vertices(
    local_vertices != nullptr ? num_local_vertices : vertex_t{0}, handle.get_stream());
  // BUGFIX: num_local_vertices is meaningful only when local_vertices !=
  // nullptr; previously the copy/sort ran unconditionally and iterated a
  // null-pointer range (undefined behavior) when no vertex list was given.
  if (local_vertices != nullptr) {
    thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                 local_vertices,
                 local_vertices + num_local_vertices,
                 sorted_local_vertices.begin());
    thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                 sorted_local_vertices.begin(),
                 sorted_local_vertices.end());
  }
  // unique() on the sorted list must not shrink it, i.e. no duplicates.
  CUGRAPH_EXPECTS(static_cast<size_t>(thrust::distance(
                    sorted_local_vertices.begin(),
                    thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                                   sorted_local_vertices.begin(),
                                   sorted_local_vertices.end()))) == sorted_local_vertices.size(),
                  "Invalid input argument: local_vertices should not have duplicates.");
  if (multi_gpu) {
    auto& comm               = handle.get_comms();
    auto const comm_size     = comm.get_size();
    auto const comm_rank     = comm.get_rank();
    auto& row_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
    auto const row_comm_size = row_comm.get_size();
    auto const row_comm_rank = row_comm.get_rank();
    auto& col_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
    auto const col_comm_size = col_comm.get_size();
    auto const col_comm_rank = col_comm.get_rank();
    CUGRAPH_EXPECTS((edgelist_major_vertices.size() == edgelist_minor_vertices.size()) &&
                      (edgelist_major_vertices.size() == static_cast<size_t>(col_comm_size)),
                    "Invalid input argument: both edgelist_major_vertices.size() & "
                    "edgelist_minor_vertices.size() should coincide with col_comm_size.");
    // BUGFIX: only verify vertex placement when a vertex list was given;
    // num_local_vertices is undefined when local_vertices == nullptr.
    if (local_vertices != nullptr) {
      // Every local vertex must hash to this GPU's rank.
      CUGRAPH_EXPECTS(
        thrust::count_if(
          rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
          local_vertices,
          local_vertices + num_local_vertices,
          [comm_rank,
           key_func =
             detail::compute_gpu_id_from_vertex_t<vertex_t>{comm_size}] __device__(auto val) {
            return key_func(val) != comm_rank;
          }) == 0,
        "Invalid input argument: local_vertices should be pre-shuffled.");
    }
    for (size_t i = 0; i < edgelist_major_vertices.size(); ++i) {
      auto edge_first = thrust::make_zip_iterator(
        thrust::make_tuple(edgelist_major_vertices[i], edgelist_minor_vertices[i]));
      // Every edge must hash to this GPU and to partition i of this rank.
      CUGRAPH_EXPECTS(
        thrust::count_if(
          rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
          edge_first,
          edge_first + edgelist_edge_counts[i],
          [comm_size,
           comm_rank,
           row_comm_rank,
           col_comm_size,
           col_comm_rank,
           i,
           gpu_id_key_func =
             detail::compute_gpu_id_from_edge_t<vertex_t>{comm_size, row_comm_size, col_comm_size},
           partition_id_key_func =
             detail::compute_partition_id_from_edge_t<vertex_t>{
               comm_size, row_comm_size, col_comm_size}] __device__(auto edge) {
            return (gpu_id_key_func(thrust::get<0>(edge), thrust::get<1>(edge)) != comm_rank) ||
                   (partition_id_key_func(thrust::get<0>(edge), thrust::get<1>(edge)) !=
                    row_comm_rank * col_comm_size + col_comm_rank + i * comm_size);
          }) == 0,
        "Invalid input argument: edgelist_major_vertices & edgelist_minor_vertices should be "
        "pre-shuffled.");
      auto aggregate_vertexlist_size = host_scalar_allreduce(
        comm,
        local_vertices != nullptr ? num_local_vertices : vertex_t{0},
        handle.get_stream());  // local_vertices != nullptr is insufficient in multi-GPU as only a
                               // subset of GPUs may have a non-zero vertices
      if (aggregate_vertexlist_size > 0) {
        auto& row_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
        auto& col_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
        // Gather the vertex lists that can legally appear as majors (column
        // communicator) and minors (row communicator) of this partition.
        rmm::device_uvector<vertex_t> sorted_major_vertices(0, handle.get_stream());
        {
          auto recvcounts =
            host_scalar_allgather(col_comm, sorted_local_vertices.size(), handle.get_stream());
          std::vector<size_t> displacements(recvcounts.size(), size_t{0});
          std::partial_sum(recvcounts.begin(), recvcounts.end() - 1, displacements.begin() + 1);
          sorted_major_vertices.resize(displacements.back() + recvcounts.back(),
                                       handle.get_stream());
          device_allgatherv(col_comm,
                            sorted_local_vertices.data(),
                            sorted_major_vertices.data(),
                            recvcounts,
                            displacements,
                            handle.get_stream());
          thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                       sorted_major_vertices.begin(),
                       sorted_major_vertices.end());
        }
        rmm::device_uvector<vertex_t> sorted_minor_vertices(0, handle.get_stream());
        {
          auto recvcounts =
            host_scalar_allgather(row_comm, sorted_local_vertices.size(), handle.get_stream());
          std::vector<size_t> displacements(recvcounts.size(), size_t{0});
          std::partial_sum(recvcounts.begin(), recvcounts.end() - 1, displacements.begin() + 1);
          sorted_minor_vertices.resize(displacements.back() + recvcounts.back(),
                                       handle.get_stream());
          device_allgatherv(row_comm,
                            sorted_local_vertices.data(),
                            sorted_minor_vertices.data(),
                            recvcounts,
                            displacements,
                            handle.get_stream());
          thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                       sorted_minor_vertices.begin(),
                       sorted_minor_vertices.end());
        }
        auto edge_first = thrust::make_zip_iterator(
          thrust::make_tuple(edgelist_major_vertices[i], edgelist_minor_vertices[i]));
        CUGRAPH_EXPECTS(
          thrust::count_if(
            rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
            edge_first,
            edge_first + edgelist_edge_counts[i],
            [num_major_vertices    = static_cast<vertex_t>(sorted_major_vertices.size()),
             sorted_major_vertices = sorted_major_vertices.data(),
             num_minor_vertices    = static_cast<vertex_t>(sorted_minor_vertices.size()),
             sorted_minor_vertices = sorted_minor_vertices.data()] __device__(auto e) {
              return !thrust::binary_search(thrust::seq,
                                            sorted_major_vertices,
                                            sorted_major_vertices + num_major_vertices,
                                            thrust::get<0>(e)) ||
                     !thrust::binary_search(thrust::seq,
                                            sorted_minor_vertices,
                                            sorted_minor_vertices + num_minor_vertices,
                                            thrust::get<1>(e));
            }) == 0,
          // BUGFIX: corrected "edgelist_mior_vertices" typo in the message.
          "Invalid input argument: edgelist_major_vertices and/or edgelist_minor_vertices have "
          "invalid vertex ID(s).");
      }
    }
  } else {
    assert(edgelist_major_vertices.size() == 1);
    assert(edgelist_minor_vertices.size() == 1);
    if (local_vertices != nullptr) {
      auto edge_first = thrust::make_zip_iterator(
        thrust::make_tuple(edgelist_major_vertices[0], edgelist_minor_vertices[0]));
      // Single-GPU: both endpoints of every edge must be in local_vertices.
      CUGRAPH_EXPECTS(
        thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                         edge_first,
                         edge_first + edgelist_edge_counts[0],
                         [num_local_vertices,
                          sorted_local_vertices = sorted_local_vertices.data()] __device__(auto e) {
                           return !thrust::binary_search(thrust::seq,
                                                         sorted_local_vertices,
                                                         sorted_local_vertices + num_local_vertices,
                                                         thrust::get<0>(e)) ||
                                  !thrust::binary_search(thrust::seq,
                                                         sorted_local_vertices,
                                                         sorted_local_vertices + num_local_vertices,
                                                         thrust::get<1>(e));
                         }) == 0,
        "Invalid input argument: edgelist_major_vertices and/or edgelist_minor_vertices have "
        "invalid vertex ID(s).");
    }
  }
}
#endif
template <typename vertex_t, typename edge_t, bool multi_gpu>
std::enable_if_t<multi_gpu,
std::tuple<rmm::device_uvector<vertex_t>, partition_t<vertex_t>, vertex_t, edge_t>>
renumber_edgelist(raft::handle_t const& handle,
vertex_t const* local_vertices,
vertex_t num_local_vertices /* relevant only if local_vertices != nullptr */,
std::vector<vertex_t*> const& edgelist_major_vertices /* [INOUT] */,
std::vector<vertex_t*> const& edgelist_minor_vertices /* [INOUT] */,
std::vector<edge_t> const& edgelist_edge_counts,
bool do_expensive_check)
{
// FIXME: remove this check once we drop Pascal support
CUGRAPH_EXPECTS(
handle.get_device_properties().major >= 7,
"This version of enumber_edgelist not supported on Pascal and older architectures.");
#ifdef CUCO_STATIC_MAP_DEFINED
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
auto const comm_rank = comm.get_rank();
auto& row_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
auto const row_comm_size = row_comm.get_size();
auto const row_comm_rank = row_comm.get_rank();
auto& col_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
auto const col_comm_size = col_comm.get_size();
auto const col_comm_rank = col_comm.get_rank();
std::vector<vertex_t const*> edgelist_const_major_vertices(edgelist_major_vertices.size());
std::vector<vertex_t const*> edgelist_const_minor_vertices(edgelist_const_major_vertices.size());
for (size_t i = 0; i < edgelist_const_major_vertices.size(); ++i) {
edgelist_const_major_vertices[i] = edgelist_major_vertices[i];
edgelist_const_minor_vertices[i] = edgelist_minor_vertices[i];
}
if (do_expensive_check) {
expensive_check_edgelist<vertex_t, edge_t, multi_gpu>(handle,
local_vertices,
num_local_vertices,
edgelist_const_major_vertices,
edgelist_const_minor_vertices,
edgelist_edge_counts);
}
// 1. compute renumber map
auto renumber_map_labels =
detail::compute_renumber_map<vertex_t, edge_t, multi_gpu>(handle,
local_vertices,
num_local_vertices,
edgelist_const_major_vertices,
edgelist_const_minor_vertices,
edgelist_edge_counts);
// 2. initialize partition_t object, number_of_vertices, and number_of_edges for the coarsened
// graph
auto vertex_counts = host_scalar_allgather(
comm, static_cast<vertex_t>(renumber_map_labels.size()), handle.get_stream());
std::vector<vertex_t> vertex_partition_offsets(comm_size + 1, 0);
std::partial_sum(
vertex_counts.begin(), vertex_counts.end(), vertex_partition_offsets.begin() + 1);
partition_t<vertex_t> partition(
vertex_partition_offsets, row_comm_size, col_comm_size, row_comm_rank, col_comm_rank);
auto number_of_vertices = vertex_partition_offsets.back();
auto number_of_edges = host_scalar_allreduce(
comm,
std::accumulate(edgelist_edge_counts.begin(), edgelist_edge_counts.end(), edge_t{0}),
handle.get_stream());
// 3. renumber edges
double constexpr load_factor = 0.7;
// FIXME: compare this hash based approach with a binary search based approach in both memory
// footprint and execution time
for (size_t i = 0; i < edgelist_major_vertices.size(); ++i) {
rmm::device_uvector<vertex_t> renumber_map_major_labels(
col_comm_rank == static_cast<int>(i) ? vertex_t{0}
: partition.get_matrix_partition_major_size(i),
handle.get_stream());
device_bcast(col_comm,
renumber_map_labels.data(),
renumber_map_major_labels.data(),
partition.get_matrix_partition_major_size(i),
i,
handle.get_stream());
CUDA_TRY(hipStreamSynchronize(
handle.get_stream())); // cuco::static_map currently does not take stream
cuco::static_map<vertex_t, vertex_t> renumber_map{
// FIXME: ::max(..., ...) as a temporary workaround for
// https://github.com/NVIDIA/cuCollections/issues/72 and
// https://github.com/NVIDIA/cuCollections/issues/73
::max(static_cast<size_t>(
static_cast<double>(partition.get_matrix_partition_major_size(i)) / load_factor),
static_cast<size_t>(partition.get_matrix_partition_major_size(i)) + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value};
auto pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(thrust::make_tuple(
col_comm_rank == static_cast<int>(i) ? renumber_map_labels.begin()
: renumber_map_major_labels.begin(),
thrust::make_counting_iterator(partition.get_matrix_partition_major_first(i)))),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvaildConfiguration.
if (partition.get_matrix_partition_major_size(i) > 0) {
renumber_map.insert(pair_first, pair_first + partition.get_matrix_partition_major_size(i));
}
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvaildConfiguration.
if (edgelist_edge_counts[i]) {
renumber_map.find(edgelist_major_vertices[i],
edgelist_major_vertices[i] + edgelist_edge_counts[i],
edgelist_major_vertices[i]);
}
}
{
rmm::device_uvector<vertex_t> renumber_map_minor_labels(
partition.get_matrix_partition_minor_size(), handle.get_stream());
std::vector<size_t> recvcounts(row_comm_size);
for (int i = 0; i < row_comm_size; ++i) {
recvcounts[i] = partition.get_vertex_partition_size(col_comm_rank * row_comm_size + i);
}
std::vector<size_t> displacements(recvcounts.size(), 0);
std::partial_sum(recvcounts.begin(), recvcounts.end() - 1, displacements.begin() + 1);
device_allgatherv(row_comm,
renumber_map_labels.begin(),
renumber_map_minor_labels.begin(),
recvcounts,
displacements,
handle.get_stream());
CUDA_TRY(hipStreamSynchronize(
handle.get_stream())); // cuco::static_map currently does not take stream
cuco::static_map<vertex_t, vertex_t> renumber_map{
// FIXME: ::max(..., ...) as a temporary workaround for
// https://github.com/NVIDIA/cuCollections/issues/72 and
// https://github.com/NVIDIA/cuCollections/issues/73
::max(
static_cast<size_t>(static_cast<double>(renumber_map_minor_labels.size()) / load_factor),
renumber_map_minor_labels.size() + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value};
auto pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(thrust::make_tuple(
renumber_map_minor_labels.begin(),
thrust::make_counting_iterator(partition.get_matrix_partition_minor_first()))),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
// size is 0; this leads to cudaErrorInvaildConfiguration.
if (renumber_map_minor_labels.size()) {
renumber_map.insert(pair_first, pair_first + renumber_map_minor_labels.size());
}
for (size_t i = 0; i < edgelist_major_vertices.size(); ++i) {
// FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the
// grid size is 0; this leads to cudaErrorInvaildConfiguration.
if (edgelist_edge_counts[i]) {
renumber_map.find(edgelist_minor_vertices[i],
edgelist_minor_vertices[i] + edgelist_edge_counts[i],
edgelist_minor_vertices[i]);
}
}
}
return std::make_tuple(
std::move(renumber_map_labels), partition, number_of_vertices, number_of_edges);
#else
return std::make_tuple(rmm::device_uvector<vertex_t>(0, handle.get_stream()),
partition_t<vertex_t>{},
vertex_t{0},
edge_t{0});
#endif
}
// Single-GPU renumbering (detail implementation).
//
// Computes a renumber map over all vertex labels appearing in the edge list (plus `vertices`,
// if non-null, whose extra labels become isolated vertices), then relabels the edge list in
// place via a cuco::static_map lookup so that edge endpoints hold new (dense, degree-sorted)
// vertex IDs.
//
// @param vertices            optional list of all local vertices (nullptr => derive from edges)
// @param num_vertices        size of `vertices`; ignored when vertices == nullptr
// @param edgelist_major_vertices  [INOUT] overwritten with renumbered major endpoints
// @param edgelist_minor_vertices  [INOUT] overwritten with renumbered minor endpoints
// @return renumber map: element i is the original label assigned new ID i
template <typename vertex_t, typename edge_t, bool multi_gpu>
std::enable_if_t<!multi_gpu, rmm::device_uvector<vertex_t>> renumber_edgelist(
  raft::handle_t const& handle,
  vertex_t const* vertices,
  vertex_t num_vertices /* relevant only if vertices != nullptr */,
  vertex_t* edgelist_major_vertices /* [INOUT] */,
  vertex_t* edgelist_minor_vertices /* [INOUT] */,
  edge_t num_edgelist_edges,
  bool do_expensive_check)
{
  // FIXME: remove this check once we drop Pascal support
  CUGRAPH_EXPECTS(
    handle.get_device_properties().major >= 7,
    "This version of renumber_edgelist not supported on Pascal and older architectures.");
#ifdef CUCO_STATIC_MAP_DEFINED
  if (do_expensive_check) {
    // validates no-duplicate vertices and that all edge endpoints are known vertices
    expensive_check_edgelist<vertex_t, edge_t, multi_gpu>(
      handle,
      vertices,
      num_vertices,
      std::vector<vertex_t const*>{edgelist_major_vertices},
      std::vector<vertex_t const*>{edgelist_minor_vertices},
      std::vector<edge_t>{num_edgelist_edges});
  }

  // old label -> position in renumber_map_labels defines the new vertex ID
  auto renumber_map_labels = detail::compute_renumber_map<vertex_t, edge_t, multi_gpu>(
    handle,
    vertices,
    num_vertices,
    std::vector<vertex_t const*>{edgelist_major_vertices},
    std::vector<vertex_t const*>{edgelist_minor_vertices},
    std::vector<edge_t>{num_edgelist_edges});

  double constexpr load_factor = 0.7;

  // FIXME: compare this hash based approach with a binary search based approach in both memory
  // footprint and execution time
  cuco::static_map<vertex_t, vertex_t> renumber_map{
    // FIXME: ::max(..., ...) as a temporary workaround for
    // https://github.com/NVIDIA/cuCollections/issues/72 and
    // https://github.com/NVIDIA/cuCollections/issues/73
    ::max(static_cast<size_t>(static_cast<double>(renumber_map_labels.size()) / load_factor),
          renumber_map_labels.size() + 1),
    invalid_vertex_id<vertex_t>::value,
    invalid_vertex_id<vertex_t>::value};
  // (old label, new ID) pairs; the counting iterator supplies the new dense IDs 0..n-1
  auto pair_first = thrust::make_transform_iterator(
    thrust::make_zip_iterator(
      thrust::make_tuple(renumber_map_labels.begin(), thrust::make_counting_iterator(vertex_t{0}))),
    [] __device__(auto val) {
      return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
    });
  // FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
  // size is 0; this leads to cudaErrorInvalidConfiguration.
  if (renumber_map_labels.size()) {
    renumber_map.insert(pair_first, pair_first + renumber_map_labels.size());
  }
  // FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
  // size is 0; this leads to cudaErrorInvalidConfiguration.
  if (num_edgelist_edges > 0) {
    // in-place relabel: find() writes the mapped value over the input range
    renumber_map.find(edgelist_major_vertices,
                      edgelist_major_vertices + num_edgelist_edges,
                      edgelist_major_vertices);
    renumber_map.find(edgelist_minor_vertices,
                      edgelist_minor_vertices + num_edgelist_edges,
                      edgelist_minor_vertices);
  }

  return renumber_map_labels;
#else
  return rmm::device_uvector<vertex_t>(0, handle.get_stream());
#endif
}
} // namespace detail
// Public multi-GPU entry point without an explicit vertex list: every vertex must appear in the
// edge list at least once. Rejects pre-Volta devices, then forwards to the detail::
// implementation with a null vertex pointer and a dummy count.
template <typename vertex_t, typename edge_t, bool multi_gpu>
std::enable_if_t<multi_gpu,
                 std::tuple<rmm::device_uvector<vertex_t>, partition_t<vertex_t>, vertex_t, edge_t>>
renumber_edgelist(raft::handle_t const& handle,
                  std::vector<vertex_t*> const& edgelist_major_vertices /* [INOUT] */,
                  std::vector<vertex_t*> const& edgelist_minor_vertices /* [INOUT] */,
                  std::vector<edge_t> const& edgelist_edge_counts,
                  bool do_expensive_check)
{
  // FIXME: drop this guard once Pascal support is removed
  auto const device_major = handle.get_device_properties().major;
  CUGRAPH_EXPECTS(
    device_major >= 7,
    "This version of renumber_edgelist not supported on Pascal and older architectures.");
  return detail::renumber_edgelist<vertex_t, edge_t, multi_gpu>(
    handle,
    static_cast<vertex_t*>(nullptr),
    vertex_t{0},
    edgelist_major_vertices,
    edgelist_minor_vertices,
    edgelist_edge_counts,
    do_expensive_check);
}
// Public single-GPU entry point without an explicit vertex list: every vertex must appear in the
// edge list at least once. Rejects pre-Volta devices, then forwards to the detail::
// implementation with a null vertex pointer and a dummy count.
template <typename vertex_t, typename edge_t, bool multi_gpu>
std::enable_if_t<!multi_gpu, rmm::device_uvector<vertex_t>> renumber_edgelist(
  raft::handle_t const& handle,
  vertex_t* edgelist_major_vertices /* [INOUT] */,
  vertex_t* edgelist_minor_vertices /* [INOUT] */,
  edge_t num_edgelist_edges,
  bool do_expensive_check)
{
  // FIXME: drop this guard once Pascal support is removed
  auto const device_major = handle.get_device_properties().major;
  CUGRAPH_EXPECTS(
    device_major >= 7,
    "This version of renumber_edgelist not supported on Pascal and older architectures.");
  return detail::renumber_edgelist<vertex_t, edge_t, multi_gpu>(
    handle,
    static_cast<vertex_t*>(nullptr),
    vertex_t{0} /* dummy */,
    edgelist_major_vertices,
    edgelist_minor_vertices,
    num_edgelist_edges,
    do_expensive_check);
}
// Public multi-GPU entry point with an explicit local vertex list; labels in `local_vertices`
// absent from the edge list are renumbered as isolated vertices. Rejects pre-Volta devices,
// then forwards verbatim to the detail:: implementation.
template <typename vertex_t, typename edge_t, bool multi_gpu>
std::enable_if_t<multi_gpu,
                 std::tuple<rmm::device_uvector<vertex_t>, partition_t<vertex_t>, vertex_t, edge_t>>
renumber_edgelist(raft::handle_t const& handle,
                  vertex_t const* local_vertices,
                  vertex_t num_local_vertices,
                  std::vector<vertex_t*> const& edgelist_major_vertices /* [INOUT] */,
                  std::vector<vertex_t*> const& edgelist_minor_vertices /* [INOUT] */,
                  std::vector<edge_t> const& edgelist_edge_counts,
                  bool do_expensive_check)
{
  // FIXME: drop this guard once Pascal support is removed
  auto const device_major = handle.get_device_properties().major;
  CUGRAPH_EXPECTS(
    device_major >= 7,
    "This version of renumber_edgelist not supported on Pascal and older architectures.");
  return detail::renumber_edgelist<vertex_t, edge_t, multi_gpu>(
    handle,
    local_vertices,
    num_local_vertices,
    edgelist_major_vertices,
    edgelist_minor_vertices,
    edgelist_edge_counts,
    do_expensive_check);
}
// Public single-GPU entry point with an explicit vertex list; labels in `vertices` absent from
// the edge list are renumbered as isolated vertices. Rejects pre-Volta devices, then forwards
// verbatim to the detail:: implementation.
template <typename vertex_t, typename edge_t, bool multi_gpu>
std::enable_if_t<!multi_gpu, rmm::device_uvector<vertex_t>> renumber_edgelist(
  raft::handle_t const& handle,
  vertex_t const* vertices,
  vertex_t num_vertices,
  vertex_t* edgelist_major_vertices /* [INOUT] */,
  vertex_t* edgelist_minor_vertices /* [INOUT] */,
  edge_t num_edgelist_edges,
  bool do_expensive_check)
{
  // FIXME: drop this guard once Pascal support is removed
  auto const device_major = handle.get_device_properties().major;
  CUGRAPH_EXPECTS(
    device_major >= 7,
    "This version of renumber_edgelist not supported on Pascal and older architectures.");
  return detail::renumber_edgelist<vertex_t, edge_t, multi_gpu>(
    handle,
    vertices,
    num_vertices,
    edgelist_major_vertices,
    edgelist_minor_vertices,
    num_edgelist_edges,
    do_expensive_check);
}
// explicit instantiation directives (EIDir's):
//
// These force the compiler to emit object code for each supported (vertex_t, edge_t, multi_gpu)
// combination of the four renumber_edgelist overloads defined above, so that callers linking
// against this translation unit need not see the template definitions.
//
// instantiations for <vertex_t == int32_t, edge_t == int32_t>
//
template std::tuple<rmm::device_uvector<int32_t>, partition_t<int32_t>, int32_t, int32_t>
renumber_edgelist<int32_t, int32_t, true>(
  raft::handle_t const& handle,
  std::vector<int32_t*> const& edgelist_major_vertices /* [INOUT] */,
  std::vector<int32_t*> const& edgelist_minor_vertices /* [INOUT] */,
  std::vector<int32_t> const& edgelist_edge_counts,
  bool do_expensive_check);

template rmm::device_uvector<int32_t> renumber_edgelist<int32_t, int32_t, false>(
  raft::handle_t const& handle,
  int32_t* edgelist_major_vertices /* [INOUT] */,
  int32_t* edgelist_minor_vertices /* [INOUT] */,
  int32_t num_edgelist_edges,
  bool do_expensive_check);

template std::tuple<rmm::device_uvector<int32_t>, partition_t<int32_t>, int32_t, int32_t>
renumber_edgelist<int32_t, int32_t, true>(
  raft::handle_t const& handle,
  int32_t const* local_vertices,
  int32_t num_local_vertices,
  std::vector<int32_t*> const& edgelist_major_vertices /* [INOUT] */,
  std::vector<int32_t*> const& edgelist_minor_vertices /* [INOUT] */,
  std::vector<int32_t> const& edgelist_edge_counts,
  bool do_expensive_check);

template rmm::device_uvector<int32_t> renumber_edgelist<int32_t, int32_t, false>(
  raft::handle_t const& handle,
  int32_t const* vertices,
  int32_t num_vertices,
  int32_t* edgelist_major_vertices /* [INOUT] */,
  int32_t* edgelist_minor_vertices /* [INOUT] */,
  int32_t num_edgelist_edges,
  bool do_expensive_check);

// instantiations for <vertex_t == int32_t, edge_t == int64_t>
//
template std::tuple<rmm::device_uvector<int32_t>, partition_t<int32_t>, int32_t, int64_t>
renumber_edgelist<int32_t, int64_t, true>(
  raft::handle_t const& handle,
  std::vector<int32_t*> const& edgelist_major_vertices /* [INOUT] */,
  std::vector<int32_t*> const& edgelist_minor_vertices /* [INOUT] */,
  std::vector<int64_t> const& edgelist_edge_counts,
  bool do_expensive_check);

template rmm::device_uvector<int32_t> renumber_edgelist<int32_t, int64_t, false>(
  raft::handle_t const& handle,
  int32_t* edgelist_major_vertices /* [INOUT] */,
  int32_t* edgelist_minor_vertices /* [INOUT] */,
  int64_t num_edgelist_edges,
  bool do_expensive_check);

template std::tuple<rmm::device_uvector<int32_t>, partition_t<int32_t>, int32_t, int64_t>
renumber_edgelist<int32_t, int64_t, true>(
  raft::handle_t const& handle,
  int32_t const* local_vertices,
  int32_t num_local_vertices,
  std::vector<int32_t*> const& edgelist_major_vertices /* [INOUT] */,
  std::vector<int32_t*> const& edgelist_minor_vertices /* [INOUT] */,
  std::vector<int64_t> const& edgelist_edge_counts,
  bool do_expensive_check);

template rmm::device_uvector<int32_t> renumber_edgelist<int32_t, int64_t, false>(
  raft::handle_t const& handle,
  int32_t const* vertices,
  int32_t num_vertices,
  int32_t* edgelist_major_vertices /* [INOUT] */,
  int32_t* edgelist_minor_vertices /* [INOUT] */,
  int64_t num_edgelist_edges,
  bool do_expensive_check);

// instantiations for <vertex_t == int64_t, edge_t == int64_t>
//
template std::tuple<rmm::device_uvector<int64_t>, partition_t<int64_t>, int64_t, int64_t>
renumber_edgelist<int64_t, int64_t, true>(
  raft::handle_t const& handle,
  std::vector<int64_t*> const& edgelist_major_vertices /* [INOUT] */,
  std::vector<int64_t*> const& edgelist_minor_vertices /* [INOUT] */,
  std::vector<int64_t> const& edgelist_edge_counts,
  bool do_expensive_check);

template rmm::device_uvector<int64_t> renumber_edgelist<int64_t, int64_t, false>(
  raft::handle_t const& handle,
  int64_t* edgelist_major_vertices /* [INOUT] */,
  int64_t* edgelist_minor_vertices /* [INOUT] */,
  int64_t num_edgelist_edges,
  bool do_expensive_check);

template std::tuple<rmm::device_uvector<int64_t>, partition_t<int64_t>, int64_t, int64_t>
renumber_edgelist<int64_t, int64_t, true>(
  raft::handle_t const& handle,
  int64_t const* local_vertices,
  int64_t num_local_vertices,
  std::vector<int64_t*> const& edgelist_major_vertices /* [INOUT] */,
  std::vector<int64_t*> const& edgelist_minor_vertices /* [INOUT] */,
  std::vector<int64_t> const& edgelist_edge_counts,
  bool do_expensive_check);

template rmm::device_uvector<int64_t> renumber_edgelist<int64_t, int64_t, false>(
  raft::handle_t const& handle,
  int64_t const* vertices,
  int64_t num_vertices,
  int64_t* edgelist_major_vertices /* [INOUT] */,
  int64_t* edgelist_minor_vertices /* [INOUT] */,
  int64_t num_edgelist_edges,
  bool do_expensive_check);
} // namespace experimental
} // namespace cugraph
| 36ee6d5ee1ef0a3b41e58726b4da21fb2fd6d0e8.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <experimental/include_cuco_static_map.cuh>
#include <experimental/detail/graph_utils.cuh>
#include <experimental/graph_functions.hpp>
#include <experimental/graph_view.hpp>
#include <utilities/device_comm.cuh>
#include <utilities/error.hpp>
#include <utilities/host_scalar_comm.cuh>
#include <utilities/shuffle_comm.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <iterator>
#include <numeric>
#include <tuple>
#include <utility>
namespace cugraph {
namespace experimental {
namespace detail {
#ifdef CUCO_STATIC_MAP_DEFINED
template <typename vertex_t, typename edge_t, bool multi_gpu>
rmm::device_uvector<vertex_t> compute_renumber_map(
raft::handle_t const& handle,
vertex_t const* vertices,
vertex_t num_local_vertices /* relevant only if vertices != nullptr */,
std::vector<vertex_t const*> const& edgelist_major_vertices,
std::vector<vertex_t const*> const& edgelist_minor_vertices,
std::vector<edge_t> const& edgelist_edge_counts)
{
// FIXME: compare this sort based approach with hash based approach in both speed and memory
// footprint
// 1. acquire (unique major label, count) pairs
rmm::device_uvector<vertex_t> major_labels(0, handle.get_stream());
rmm::device_uvector<edge_t> major_counts(0, handle.get_stream());
for (size_t i = 0; i < edgelist_major_vertices.size(); ++i) {
rmm::device_uvector<vertex_t> tmp_major_labels(0, handle.get_stream());
rmm::device_uvector<edge_t> tmp_major_counts(0, handle.get_stream());
{
rmm::device_uvector<vertex_t> sorted_major_labels(edgelist_edge_counts[i],
handle.get_stream());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
edgelist_major_vertices[i],
edgelist_major_vertices[i] + edgelist_edge_counts[i],
sorted_major_labels.begin());
thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
sorted_major_labels.begin(),
sorted_major_labels.end());
auto num_unique_labels =
thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
thrust::make_counting_iterator(size_t{0}),
thrust::make_counting_iterator(sorted_major_labels.size()),
[labels = sorted_major_labels.data()] __device__(auto i) {
return (i == 0) || (labels[i - 1] != labels[i]);
});
tmp_major_labels.resize(num_unique_labels, handle.get_stream());
tmp_major_counts.resize(tmp_major_labels.size(), handle.get_stream());
thrust::reduce_by_key(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
sorted_major_labels.begin(),
sorted_major_labels.end(),
thrust::make_constant_iterator(edge_t{1}),
tmp_major_labels.begin(),
tmp_major_counts.begin());
}
if (multi_gpu) {
auto& col_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
auto const col_comm_rank = col_comm.get_rank();
auto const col_comm_size = col_comm.get_size();
rmm::device_uvector<vertex_t> rx_major_labels(0, handle.get_stream());
rmm::device_uvector<edge_t> rx_major_counts(0, handle.get_stream());
auto rx_sizes = host_scalar_gather(
col_comm, tmp_major_labels.size(), static_cast<int>(i), handle.get_stream());
std::vector<size_t> rx_displs{};
if (static_cast<int>(i) == col_comm_rank) {
rx_displs.assign(col_comm_size, size_t{0});
std::partial_sum(rx_sizes.begin(), rx_sizes.end() - 1, rx_displs.begin() + 1);
rx_major_labels.resize(rx_displs.back() + rx_sizes.back(), handle.get_stream());
rx_major_counts.resize(rx_major_labels.size(), handle.get_stream());
}
device_gatherv(col_comm,
thrust::make_zip_iterator(
thrust::make_tuple(tmp_major_labels.begin(), tmp_major_counts.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(rx_major_labels.begin(), rx_major_counts.begin())),
tmp_major_labels.size(),
rx_sizes,
rx_displs,
static_cast<int>(i),
handle.get_stream());
if (static_cast<int>(i) == col_comm_rank) {
thrust::sort_by_key(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
rx_major_labels.begin(),
rx_major_labels.end(),
rx_major_counts.begin());
major_labels.resize(rx_major_labels.size(), handle.get_stream());
major_counts.resize(major_labels.size(), handle.get_stream());
auto pair_it =
thrust::reduce_by_key(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
rx_major_labels.begin(),
rx_major_labels.end(),
rx_major_counts.begin(),
major_labels.begin(),
major_counts.begin());
major_labels.resize(thrust::distance(major_labels.begin(), thrust::get<0>(pair_it)),
handle.get_stream());
major_counts.resize(major_labels.size(), handle.get_stream());
major_labels.shrink_to_fit(handle.get_stream());
major_counts.shrink_to_fit(handle.get_stream());
}
} else {
tmp_major_labels.shrink_to_fit(handle.get_stream());
tmp_major_counts.shrink_to_fit(handle.get_stream());
major_labels = std::move(tmp_major_labels);
major_counts = std::move(tmp_major_counts);
}
}
// 2. acquire unique minor labels
std::vector<edge_t> minor_displs(edgelist_minor_vertices.size(), edge_t{0});
std::partial_sum(
edgelist_edge_counts.begin(), edgelist_edge_counts.end() - 1, minor_displs.begin() + 1);
rmm::device_uvector<vertex_t> minor_labels(minor_displs.back() + edgelist_edge_counts.back(),
handle.get_stream());
for (size_t i = 0; i < edgelist_minor_vertices.size(); ++i) {
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
edgelist_minor_vertices[i],
edgelist_minor_vertices[i] + edgelist_edge_counts[i],
minor_labels.begin() + minor_displs[i]);
}
thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
minor_labels.begin(),
minor_labels.end());
minor_labels.resize(
thrust::distance(minor_labels.begin(),
thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
minor_labels.begin(),
minor_labels.end())),
handle.get_stream());
if (multi_gpu) {
auto& row_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
auto const row_comm_size = row_comm.get_size();
rmm::device_uvector<vertex_t> rx_minor_labels(0, handle.get_stream());
std::tie(rx_minor_labels, std::ignore) = groupby_gpuid_and_shuffle_values(
row_comm,
minor_labels.begin(),
minor_labels.end(),
[key_func = detail::compute_gpu_id_from_vertex_t<vertex_t>{row_comm_size}] __device__(
auto val) { return key_func(val); },
handle.get_stream());
thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
rx_minor_labels.begin(),
rx_minor_labels.end());
rx_minor_labels.resize(
thrust::distance(
rx_minor_labels.begin(),
thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
rx_minor_labels.begin(),
rx_minor_labels.end())),
handle.get_stream());
minor_labels = std::move(rx_minor_labels);
}
minor_labels.shrink_to_fit(handle.get_stream());
// 3. merge major and minor labels and vertex labels
rmm::device_uvector<vertex_t> merged_labels(major_labels.size() + minor_labels.size(),
handle.get_stream());
rmm::device_uvector<edge_t> merged_counts(merged_labels.size(), handle.get_stream());
thrust::merge_by_key(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
major_labels.begin(),
major_labels.end(),
minor_labels.begin(),
minor_labels.end(),
major_counts.begin(),
thrust::make_constant_iterator(edge_t{0}),
merged_labels.begin(),
merged_counts.begin());
major_labels.resize(0, handle.get_stream());
major_counts.resize(0, handle.get_stream());
minor_labels.resize(0, handle.get_stream());
major_labels.shrink_to_fit(handle.get_stream());
major_counts.shrink_to_fit(handle.get_stream());
minor_labels.shrink_to_fit(handle.get_stream());
rmm::device_uvector<vertex_t> labels(merged_labels.size(), handle.get_stream());
rmm::device_uvector<edge_t> counts(labels.size(), handle.get_stream());
auto pair_it =
thrust::reduce_by_key(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
merged_labels.begin(),
merged_labels.end(),
merged_counts.begin(),
labels.begin(),
counts.begin());
merged_labels.resize(0, handle.get_stream());
merged_counts.resize(0, handle.get_stream());
merged_labels.shrink_to_fit(handle.get_stream());
merged_counts.shrink_to_fit(handle.get_stream());
labels.resize(thrust::distance(labels.begin(), thrust::get<0>(pair_it)), handle.get_stream());
counts.resize(labels.size(), handle.get_stream());
labels.shrink_to_fit(handle.get_stream());
counts.shrink_to_fit(handle.get_stream());
// 4. if vertices != nullptr, add isolated vertices
rmm::device_uvector<vertex_t> isolated_vertices(0, handle.get_stream());
if (vertices != nullptr) {
auto num_isolated_vertices = thrust::count_if(
rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_local_vertices,
[label_first = labels.begin(), label_last = labels.end()] __device__(auto v) {
return !thrust::binary_search(thrust::seq, label_first, label_last, v);
});
isolated_vertices.resize(num_isolated_vertices, handle.get_stream());
thrust::copy_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
vertices,
vertices + num_local_vertices,
isolated_vertices.begin(),
[label_first = labels.begin(), label_last = labels.end()] __device__(auto v) {
return !thrust::binary_search(thrust::seq, label_first, label_last, v);
});
}
if (isolated_vertices.size() > 0) {
labels.resize(labels.size() + isolated_vertices.size(), handle.get_stream());
counts.resize(labels.size(), handle.get_stream());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
isolated_vertices.begin(),
isolated_vertices.end(),
labels.end() - isolated_vertices.size());
thrust::fill(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
counts.end() - isolated_vertices.size(),
counts.end(),
edge_t{0});
}
// 6. sort by degree
thrust::sort_by_key(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
counts.begin(),
counts.end(),
labels.begin(),
thrust::greater<edge_t>());
return labels;
}
// Expensive input validation for renumber_edgelist() (run only when do_expensive_check is set).
// Verifies that:
// - local_vertices (if non-null) contains no duplicates;
// - in multi-GPU runs, local_vertices and every (major, minor) edge partition have been
//   pre-shuffled to the GPU / matrix partition that owns them; and
// - every edge endpoint refers to a vertex in the supplied vertex list, when one is given.
// Throws (via CUGRAPH_EXPECTS) on the first violated condition.
// NOTE(review): in the multi-GPU branch the host_scalar_allreduce and the two allgathervs inside
// the per-partition loop look loop-invariant; all ranks iterate the same number of times so the
// collectives are correctly matched, but hoisting them out of the loop would save redundant
// communication — candidate for a follow-up.
template <typename vertex_t, typename edge_t, bool multi_gpu>
void expensive_check_edgelist(
  raft::handle_t const& handle,
  vertex_t const* local_vertices,
  vertex_t num_local_vertices /* relevant only if local_vertices != nullptr */,
  std::vector<vertex_t const*> const& edgelist_major_vertices,
  std::vector<vertex_t const*> const& edgelist_minor_vertices,
  std::vector<edge_t> const& edgelist_edge_counts)
{
  // sorted copy of local_vertices; when local_vertices == nullptr the buffer is empty and the
  // copy below is a no-op (callers pass num_local_vertices == 0 in that case)
  rmm::device_uvector<vertex_t> sorted_local_vertices(
    local_vertices != nullptr ? num_local_vertices : vertex_t{0}, handle.get_stream());
  thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
               local_vertices,
               local_vertices + num_local_vertices,
               sorted_local_vertices.begin());
  thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
               sorted_local_vertices.begin(),
               sorted_local_vertices.end());
  // unique() shrinking the range would indicate duplicates
  CUGRAPH_EXPECTS(static_cast<size_t>(thrust::distance(
                    sorted_local_vertices.begin(),
                    thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                                   sorted_local_vertices.begin(),
                                   sorted_local_vertices.end()))) == sorted_local_vertices.size(),
                  "Invalid input argument: local_vertices should not have duplicates.");

  if (multi_gpu) {
    auto& comm               = handle.get_comms();
    auto const comm_size     = comm.get_size();
    auto const comm_rank     = comm.get_rank();
    auto& row_comm           = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
    auto const row_comm_size = row_comm.get_size();
    auto const row_comm_rank = row_comm.get_rank();
    auto& col_comm           = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
    auto const col_comm_size = col_comm.get_size();
    auto const col_comm_rank = col_comm.get_rank();

    // each rank holds exactly one edge partition per column-communicator rank
    CUGRAPH_EXPECTS((edgelist_major_vertices.size() == edgelist_minor_vertices.size()) &&
                      (edgelist_major_vertices.size() == static_cast<size_t>(col_comm_size)),
                    "Invalid input argument: both edgelist_major_vertices.size() & "
                    "edgelist_minor_vertices.size() should coincide with col_comm_size.");

    // every local vertex must hash to this GPU
    CUGRAPH_EXPECTS(
      thrust::count_if(
        rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
        local_vertices,
        local_vertices + num_local_vertices,
        [comm_rank,
         key_func =
           detail::compute_gpu_id_from_vertex_t<vertex_t>{comm_size}] __device__(auto val) {
          return key_func(val) != comm_rank;
        }) == 0,
      "Invalid input argument: local_vertices should be pre-shuffled.");

    for (size_t i = 0; i < edgelist_major_vertices.size(); ++i) {
      auto edge_first = thrust::make_zip_iterator(
        thrust::make_tuple(edgelist_major_vertices[i], edgelist_minor_vertices[i]));
      // every edge must hash to this GPU and to matrix partition i
      CUGRAPH_EXPECTS(
        thrust::count_if(
          rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
          edge_first,
          edge_first + edgelist_edge_counts[i],
          [comm_size,
           comm_rank,
           row_comm_rank,
           col_comm_size,
           col_comm_rank,
           i,
           gpu_id_key_func =
             detail::compute_gpu_id_from_edge_t<vertex_t>{comm_size, row_comm_size, col_comm_size},
           partition_id_key_func =
             detail::compute_partition_id_from_edge_t<vertex_t>{
               comm_size, row_comm_size, col_comm_size}] __device__(auto edge) {
            return (gpu_id_key_func(thrust::get<0>(edge), thrust::get<1>(edge)) != comm_rank) ||
                   (partition_id_key_func(thrust::get<0>(edge), thrust::get<1>(edge)) !=
                    row_comm_rank * col_comm_size + col_comm_rank + i * comm_size);
          }) == 0,
        "Invalid input argument: edgelist_major_vertices & edgelist_minor_vertices should be "
        "pre-shuffled.");

      auto aggregate_vertexlist_size = host_scalar_allreduce(
        comm,
        local_vertices != nullptr ? num_local_vertices : vertex_t{0},
        handle.get_stream());  // local_vertices != nullptr is insufficient in multi-GPU as only a
                               // subset of GPUs may have a non-zero vertices
      if (aggregate_vertexlist_size > 0) {
        auto& row_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
        auto& col_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name());

        // gather all vertex lists visible along the column (major side) and row (minor side)
        // communicators so endpoint membership can be checked locally
        rmm::device_uvector<vertex_t> sorted_major_vertices(0, handle.get_stream());
        {
          auto recvcounts =
            host_scalar_allgather(col_comm, sorted_local_vertices.size(), handle.get_stream());
          std::vector<size_t> displacements(recvcounts.size(), size_t{0});
          std::partial_sum(recvcounts.begin(), recvcounts.end() - 1, displacements.begin() + 1);
          sorted_major_vertices.resize(displacements.back() + recvcounts.back(),
                                       handle.get_stream());
          device_allgatherv(col_comm,
                            sorted_local_vertices.data(),
                            sorted_major_vertices.data(),
                            recvcounts,
                            displacements,
                            handle.get_stream());
          thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                       sorted_major_vertices.begin(),
                       sorted_major_vertices.end());
        }

        rmm::device_uvector<vertex_t> sorted_minor_vertices(0, handle.get_stream());
        {
          auto recvcounts =
            host_scalar_allgather(row_comm, sorted_local_vertices.size(), handle.get_stream());
          std::vector<size_t> displacements(recvcounts.size(), size_t{0});
          std::partial_sum(recvcounts.begin(), recvcounts.end() - 1, displacements.begin() + 1);
          sorted_minor_vertices.resize(displacements.back() + recvcounts.back(),
                                       handle.get_stream());
          device_allgatherv(row_comm,
                            sorted_local_vertices.data(),
                            sorted_minor_vertices.data(),
                            recvcounts,
                            displacements,
                            handle.get_stream());
          thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                       sorted_minor_vertices.begin(),
                       sorted_minor_vertices.end());
        }

        auto edge_first = thrust::make_zip_iterator(
          thrust::make_tuple(edgelist_major_vertices[i], edgelist_minor_vertices[i]));
        CUGRAPH_EXPECTS(
          thrust::count_if(
            rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
            edge_first,
            edge_first + edgelist_edge_counts[i],
            [num_major_vertices    = static_cast<vertex_t>(sorted_major_vertices.size()),
             sorted_major_vertices = sorted_major_vertices.data(),
             num_minor_vertices    = static_cast<vertex_t>(sorted_minor_vertices.size()),
             sorted_minor_vertices = sorted_minor_vertices.data()] __device__(auto e) {
              return !thrust::binary_search(thrust::seq,
                                            sorted_major_vertices,
                                            sorted_major_vertices + num_major_vertices,
                                            thrust::get<0>(e)) ||
                     !thrust::binary_search(thrust::seq,
                                            sorted_minor_vertices,
                                            sorted_minor_vertices + num_minor_vertices,
                                            thrust::get<1>(e));
            }) == 0,
          // fixed typo: previously read "edgelist_mior_vertices"
          "Invalid input argument: edgelist_major_vertices and/or edgelist_minor_vertices have "
          "invalid vertex ID(s).");
      }
    }
  } else {
    assert(edgelist_major_vertices.size() == 1);
    assert(edgelist_minor_vertices.size() == 1);

    if (local_vertices != nullptr) {
      // single GPU: both endpoints of every edge must be in the local vertex list
      auto edge_first = thrust::make_zip_iterator(
        thrust::make_tuple(edgelist_major_vertices[0], edgelist_minor_vertices[0]));
      CUGRAPH_EXPECTS(
        thrust::count_if(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
                         edge_first,
                         edge_first + edgelist_edge_counts[0],
                         [num_local_vertices,
                          sorted_local_vertices = sorted_local_vertices.data()] __device__(auto e) {
                           return !thrust::binary_search(thrust::seq,
                                                         sorted_local_vertices,
                                                         sorted_local_vertices + num_local_vertices,
                                                         thrust::get<0>(e)) ||
                                  !thrust::binary_search(thrust::seq,
                                                         sorted_local_vertices,
                                                         sorted_local_vertices + num_local_vertices,
                                                         thrust::get<1>(e));
                         }) == 0,
        "Invalid input argument: edgelist_major_vertices and/or edgelist_minor_vertices have "
        "invalid vertex ID(s).");
    }
  }
}
#endif
// Multi-GPU renumbering (detail implementation).
//
// Computes a distributed renumber map over the locally owned vertices (or over
// the vertices appearing in the local edge partitions if `local_vertices` is
// nullptr), builds the 2D graph-partition bookkeeping, and renumbers the edge
// endpoint arrays in place using cuco::static_map lookups (one map per matrix
// partition for the major side, one row-communicator-wide map for the minor side).
//
// @param local_vertices      optional device array of locally owned vertices (may be nullptr)
// @param num_local_vertices  relevant only if local_vertices != nullptr
// @param edgelist_major_vertices / edgelist_minor_vertices  [INOUT] per-partition
//        edge endpoint arrays, overwritten with the renumbered IDs on return
// @param edgelist_edge_counts  number of edges in each partition
// @return tuple of (renumber map labels, partition descriptor, global vertex
//         count, global edge count)
template <typename vertex_t, typename edge_t, bool multi_gpu>
std::enable_if_t<multi_gpu,
                 std::tuple<rmm::device_uvector<vertex_t>, partition_t<vertex_t>, vertex_t, edge_t>>
renumber_edgelist(raft::handle_t const& handle,
                  vertex_t const* local_vertices,
                  vertex_t num_local_vertices /* relevant only if local_vertices != nullptr */,
                  std::vector<vertex_t*> const& edgelist_major_vertices /* [INOUT] */,
                  std::vector<vertex_t*> const& edgelist_minor_vertices /* [INOUT] */,
                  std::vector<edge_t> const& edgelist_edge_counts,
                  bool do_expensive_check)
{
  // FIXME: remove this check once we drop Pascal support
  // BUG FIX: the message previously read "enumber_edgelist" (typo); it now matches
  // the spelling used by the sibling overloads.
  CUGRAPH_EXPECTS(
    handle.get_device_properties().major >= 7,
    "This version of renumber_edgelist not supported on Pascal and older architectures.");
#ifdef CUCO_STATIC_MAP_DEFINED
  auto& comm               = handle.get_comms();
  auto const comm_size     = comm.get_size();
  auto const comm_rank     = comm.get_rank();
  auto& row_comm           = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
  auto const row_comm_size = row_comm.get_size();
  auto const row_comm_rank = row_comm.get_rank();
  auto& col_comm           = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
  auto const col_comm_size = col_comm.get_size();
  auto const col_comm_rank = col_comm.get_rank();

  // const views of the endpoint arrays for the check/map-computation helpers
  std::vector<vertex_t const*> edgelist_const_major_vertices(edgelist_major_vertices.size());
  std::vector<vertex_t const*> edgelist_const_minor_vertices(edgelist_const_major_vertices.size());
  for (size_t i = 0; i < edgelist_const_major_vertices.size(); ++i) {
    edgelist_const_major_vertices[i] = edgelist_major_vertices[i];
    edgelist_const_minor_vertices[i] = edgelist_minor_vertices[i];
  }

  if (do_expensive_check) {
    expensive_check_edgelist<vertex_t, edge_t, multi_gpu>(handle,
                                                          local_vertices,
                                                          num_local_vertices,
                                                          edgelist_const_major_vertices,
                                                          edgelist_const_minor_vertices,
                                                          edgelist_edge_counts);
  }

  // 1. compute renumber map

  auto renumber_map_labels =
    detail::compute_renumber_map<vertex_t, edge_t, multi_gpu>(handle,
                                                              local_vertices,
                                                              num_local_vertices,
                                                              edgelist_const_major_vertices,
                                                              edgelist_const_minor_vertices,
                                                              edgelist_edge_counts);

  // 2. initialize partition_t object, number_of_vertices, and number_of_edges for the coarsened
  // graph

  auto vertex_counts = host_scalar_allgather(
    comm, static_cast<vertex_t>(renumber_map_labels.size()), handle.get_stream());
  std::vector<vertex_t> vertex_partition_offsets(comm_size + 1, 0);
  std::partial_sum(
    vertex_counts.begin(), vertex_counts.end(), vertex_partition_offsets.begin() + 1);

  partition_t<vertex_t> partition(
    vertex_partition_offsets, row_comm_size, col_comm_size, row_comm_rank, col_comm_rank);

  auto number_of_vertices = vertex_partition_offsets.back();
  auto number_of_edges    = host_scalar_allreduce(
    comm,
    std::accumulate(edgelist_edge_counts.begin(), edgelist_edge_counts.end(), edge_t{0}),
    handle.get_stream());

  // 3. renumber edges

  double constexpr load_factor = 0.7;

  // FIXME: compare this hash based approach with a binary search based approach in both memory
  // footprint and execution time

  for (size_t i = 0; i < edgelist_major_vertices.size(); ++i) {
    // the partition-i owner broadcasts its labels along the column communicator;
    // the owner reads from renumber_map_labels directly and so allocates nothing
    rmm::device_uvector<vertex_t> renumber_map_major_labels(
      col_comm_rank == static_cast<int>(i) ? vertex_t{0}
                                           : partition.get_matrix_partition_major_size(i),
      handle.get_stream());
    device_bcast(col_comm,
                 renumber_map_labels.data(),
                 renumber_map_major_labels.data(),
                 partition.get_matrix_partition_major_size(i),
                 i,
                 handle.get_stream());

    CUDA_TRY(cudaStreamSynchronize(
      handle.get_stream()));  // cuco::static_map currently does not take stream

    cuco::static_map<vertex_t, vertex_t> renumber_map{
      // FIXME: std::max(..., ...) as a temporary workaround for
      // https://github.com/NVIDIA/cuCollections/issues/72 and
      // https://github.com/NVIDIA/cuCollections/issues/73
      std::max(static_cast<size_t>(
                 static_cast<double>(partition.get_matrix_partition_major_size(i)) / load_factor),
               static_cast<size_t>(partition.get_matrix_partition_major_size(i)) + 1),
      invalid_vertex_id<vertex_t>::value,
      invalid_vertex_id<vertex_t>::value};
    // key = original vertex ID, value = new (renumbered) ID
    auto pair_first = thrust::make_transform_iterator(
      thrust::make_zip_iterator(thrust::make_tuple(
        col_comm_rank == static_cast<int>(i) ? renumber_map_labels.begin()
                                             : renumber_map_major_labels.begin(),
        thrust::make_counting_iterator(partition.get_matrix_partition_major_first(i)))),
      [] __device__(auto val) {
        return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
      });
    // FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
    // size is 0; this leads to cudaErrorInvaildConfiguration.
    if (partition.get_matrix_partition_major_size(i) > 0) {
      renumber_map.insert(pair_first, pair_first + partition.get_matrix_partition_major_size(i));
    }
    // FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
    // size is 0; this leads to cudaErrorInvaildConfiguration.
    if (edgelist_edge_counts[i]) {
      renumber_map.find(edgelist_major_vertices[i],
                        edgelist_major_vertices[i] + edgelist_edge_counts[i],
                        edgelist_major_vertices[i]);
    }
  }

  {
    // minor side: gather the labels of this processor row along the row communicator
    rmm::device_uvector<vertex_t> renumber_map_minor_labels(
      partition.get_matrix_partition_minor_size(), handle.get_stream());
    std::vector<size_t> recvcounts(row_comm_size);
    for (int i = 0; i < row_comm_size; ++i) {
      recvcounts[i] = partition.get_vertex_partition_size(col_comm_rank * row_comm_size + i);
    }
    std::vector<size_t> displacements(recvcounts.size(), 0);
    std::partial_sum(recvcounts.begin(), recvcounts.end() - 1, displacements.begin() + 1);
    device_allgatherv(row_comm,
                      renumber_map_labels.begin(),
                      renumber_map_minor_labels.begin(),
                      recvcounts,
                      displacements,
                      handle.get_stream());

    CUDA_TRY(cudaStreamSynchronize(
      handle.get_stream()));  // cuco::static_map currently does not take stream

    cuco::static_map<vertex_t, vertex_t> renumber_map{
      // FIXME: std::max(..., ...) as a temporary workaround for
      // https://github.com/NVIDIA/cuCollections/issues/72 and
      // https://github.com/NVIDIA/cuCollections/issues/73
      std::max(
        static_cast<size_t>(static_cast<double>(renumber_map_minor_labels.size()) / load_factor),
        renumber_map_minor_labels.size() + 1),
      invalid_vertex_id<vertex_t>::value,
      invalid_vertex_id<vertex_t>::value};
    auto pair_first = thrust::make_transform_iterator(
      thrust::make_zip_iterator(thrust::make_tuple(
        renumber_map_minor_labels.begin(),
        thrust::make_counting_iterator(partition.get_matrix_partition_minor_first()))),
      [] __device__(auto val) {
        return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
      });
    // FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
    // size is 0; this leads to cudaErrorInvaildConfiguration.
    if (renumber_map_minor_labels.size()) {
      renumber_map.insert(pair_first, pair_first + renumber_map_minor_labels.size());
    }
    for (size_t i = 0; i < edgelist_major_vertices.size(); ++i) {
      // FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the
      // grid size is 0; this leads to cudaErrorInvaildConfiguration.
      if (edgelist_edge_counts[i]) {
        renumber_map.find(edgelist_minor_vertices[i],
                          edgelist_minor_vertices[i] + edgelist_edge_counts[i],
                          edgelist_minor_vertices[i]);
      }
    }
  }

  return std::make_tuple(
    std::move(renumber_map_labels), partition, number_of_vertices, number_of_edges);
#else
  return std::make_tuple(rmm::device_uvector<vertex_t>(0, handle.get_stream()),
                         partition_t<vertex_t>{},
                         vertex_t{0},
                         edge_t{0});
#endif
}
// Single-GPU renumbering (detail implementation).
//
// Builds a renumber map over the (optional) vertex list and the vertices that
// appear in the edge list, then renumbers the edge endpoint arrays in place via
// a single cuco::static_map lookup.  The new ID of a vertex is its position in
// the returned `renumber_map_labels` array.
template <typename vertex_t, typename edge_t, bool multi_gpu>
std::enable_if_t<!multi_gpu, rmm::device_uvector<vertex_t>> renumber_edgelist(
  raft::handle_t const& handle,
  vertex_t const* vertices,
  vertex_t num_vertices /* relevant only if vertices != nullptr */,
  vertex_t* edgelist_major_vertices /* [INOUT] */,
  vertex_t* edgelist_minor_vertices /* [INOUT] */,
  edge_t num_edgelist_edges,
  bool do_expensive_check)
{
  // FIXME: remove this check once we drop Pascal support
  CUGRAPH_EXPECTS(
    handle.get_device_properties().major >= 7,
    "This version of renumber_edgelist not supported on Pascal and older architectures.");
#ifdef CUCO_STATIC_MAP_DEFINED
  if (do_expensive_check) {
    // validates that every edge endpoint is covered by `vertices` (if provided)
    expensive_check_edgelist<vertex_t, edge_t, multi_gpu>(
      handle,
      vertices,
      num_vertices,
      std::vector<vertex_t const*>{edgelist_major_vertices},
      std::vector<vertex_t const*>{edgelist_minor_vertices},
      std::vector<edge_t>{num_edgelist_edges});
  }
  auto renumber_map_labels = detail::compute_renumber_map<vertex_t, edge_t, multi_gpu>(
    handle,
    vertices,
    num_vertices,
    std::vector<vertex_t const*>{edgelist_major_vertices},
    std::vector<vertex_t const*>{edgelist_minor_vertices},
    std::vector<edge_t>{num_edgelist_edges});
  double constexpr load_factor = 0.7;
  // FIXME: compare this hash based approach with a binary search based approach in both memory
  // footprint and execution time
  cuco::static_map<vertex_t, vertex_t> renumber_map{
    // FIXME: std::max(..., ...) as a temporary workaround for
    // https://github.com/NVIDIA/cuCollections/issues/72 and
    // https://github.com/NVIDIA/cuCollections/issues/73
    std::max(static_cast<size_t>(static_cast<double>(renumber_map_labels.size()) / load_factor),
             renumber_map_labels.size() + 1),
    invalid_vertex_id<vertex_t>::value,
    invalid_vertex_id<vertex_t>::value};
  // key = original vertex ID, value = its index (the new ID)
  auto pair_first = thrust::make_transform_iterator(
    thrust::make_zip_iterator(
      thrust::make_tuple(renumber_map_labels.begin(), thrust::make_counting_iterator(vertex_t{0}))),
    [] __device__(auto val) {
      return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
    });
  // FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
  // size is 0; this leads to cudaErrorInvaildConfiguration.
  if (renumber_map_labels.size()) {
    renumber_map.insert(pair_first, pair_first + renumber_map_labels.size());
  }
  // FIXME: a temporary workaround. cuco::static_map currently launches a kernel even if the grid
  // size is 0; this leads to cudaErrorInvaildConfiguration.
  if (num_edgelist_edges > 0) {
    // overwrite both endpoint arrays in place with the renumbered IDs
    renumber_map.find(edgelist_major_vertices,
                      edgelist_major_vertices + num_edgelist_edges,
                      edgelist_major_vertices);
    renumber_map.find(edgelist_minor_vertices,
                      edgelist_minor_vertices + num_edgelist_edges,
                      edgelist_minor_vertices);
  }
  return renumber_map_labels;
#else
  return rmm::device_uvector<vertex_t>(0, handle.get_stream());
#endif
}
} // namespace detail
// Public multi-GPU overload without an explicit local-vertex list: every vertex
// that appears in the edge partitions is renumbered.  Forwards to the detail
// implementation with a null vertex pointer and a zero count.
template <typename vertex_t, typename edge_t, bool multi_gpu>
std::enable_if_t<multi_gpu,
                 std::tuple<rmm::device_uvector<vertex_t>, partition_t<vertex_t>, vertex_t, edge_t>>
renumber_edgelist(raft::handle_t const& handle,
                  std::vector<vertex_t*> const& edgelist_major_vertices /* [INOUT] */,
                  std::vector<vertex_t*> const& edgelist_minor_vertices /* [INOUT] */,
                  std::vector<edge_t> const& edgelist_edge_counts,
                  bool do_expensive_check)
{
  // FIXME: remove this check once we drop Pascal support
  auto const compute_capability_major = handle.get_device_properties().major;
  CUGRAPH_EXPECTS(
    compute_capability_major >= 7,
    "This version of renumber_edgelist not supported on Pascal and older architectures.");

  vertex_t* no_local_vertices{nullptr};
  return detail::renumber_edgelist<vertex_t, edge_t, multi_gpu>(handle,
                                                                no_local_vertices,
                                                                vertex_t{0},
                                                                edgelist_major_vertices,
                                                                edgelist_minor_vertices,
                                                                edgelist_edge_counts,
                                                                do_expensive_check);
}
// Public single-GPU overload without an explicit vertex list: every vertex that
// appears in the edge list is renumbered.  Forwards to the detail
// implementation with a null vertex pointer and a dummy count.
template <typename vertex_t, typename edge_t, bool multi_gpu>
std::enable_if_t<!multi_gpu, rmm::device_uvector<vertex_t>> renumber_edgelist(
  raft::handle_t const& handle,
  vertex_t* edgelist_major_vertices /* [INOUT] */,
  vertex_t* edgelist_minor_vertices /* [INOUT] */,
  edge_t num_edgelist_edges,
  bool do_expensive_check)
{
  // FIXME: remove this check once we drop Pascal support
  auto const compute_capability_major = handle.get_device_properties().major;
  CUGRAPH_EXPECTS(
    compute_capability_major >= 7,
    "This version of renumber_edgelist not supported on Pascal and older architectures.");

  vertex_t* no_vertices{nullptr};
  return detail::renumber_edgelist<vertex_t, edge_t, multi_gpu>(handle,
                                                                no_vertices,
                                                                vertex_t{0} /* dummy */,
                                                                edgelist_major_vertices,
                                                                edgelist_minor_vertices,
                                                                num_edgelist_edges,
                                                                do_expensive_check);
}
// Public multi-GPU overload taking an explicit list of locally owned vertices;
// thin pass-through to the detail implementation.
template <typename vertex_t, typename edge_t, bool multi_gpu>
std::enable_if_t<multi_gpu,
                 std::tuple<rmm::device_uvector<vertex_t>, partition_t<vertex_t>, vertex_t, edge_t>>
renumber_edgelist(raft::handle_t const& handle,
                  vertex_t const* local_vertices,
                  vertex_t num_local_vertices,
                  std::vector<vertex_t*> const& edgelist_major_vertices /* [INOUT] */,
                  std::vector<vertex_t*> const& edgelist_minor_vertices /* [INOUT] */,
                  std::vector<edge_t> const& edgelist_edge_counts,
                  bool do_expensive_check)
{
  // FIXME: remove this check once we drop Pascal support
  auto const compute_capability_major = handle.get_device_properties().major;
  CUGRAPH_EXPECTS(
    compute_capability_major >= 7,
    "This version of renumber_edgelist not supported on Pascal and older architectures.");

  return detail::renumber_edgelist<vertex_t, edge_t, multi_gpu>(handle,
                                                                local_vertices,
                                                                num_local_vertices,
                                                                edgelist_major_vertices,
                                                                edgelist_minor_vertices,
                                                                edgelist_edge_counts,
                                                                do_expensive_check);
}
// Public single-GPU overload taking an explicit vertex list; thin pass-through
// to the detail implementation.
template <typename vertex_t, typename edge_t, bool multi_gpu>
std::enable_if_t<!multi_gpu, rmm::device_uvector<vertex_t>> renumber_edgelist(
  raft::handle_t const& handle,
  vertex_t const* vertices,
  vertex_t num_vertices,
  vertex_t* edgelist_major_vertices /* [INOUT] */,
  vertex_t* edgelist_minor_vertices /* [INOUT] */,
  edge_t num_edgelist_edges,
  bool do_expensive_check)
{
  // FIXME: remove this check once we drop Pascal support
  auto const compute_capability_major = handle.get_device_properties().major;
  CUGRAPH_EXPECTS(
    compute_capability_major >= 7,
    "This version of renumber_edgelist not supported on Pascal and older architectures.");

  return detail::renumber_edgelist<vertex_t, edge_t, multi_gpu>(handle,
                                                                vertices,
                                                                num_vertices,
                                                                edgelist_major_vertices,
                                                                edgelist_minor_vertices,
                                                                num_edgelist_edges,
                                                                do_expensive_check);
}
// explicit instantiation directives (EIDir's):
// (force code generation for each supported (vertex_t, edge_t) combination so
// the template definitions above can stay in this translation unit)
//
// instantiations for <vertex_t == int32_t, edge_t == int32_t>
//
template std::tuple<rmm::device_uvector<int32_t>, partition_t<int32_t>, int32_t, int32_t>
renumber_edgelist<int32_t, int32_t, true>(
  raft::handle_t const& handle,
  std::vector<int32_t*> const& edgelist_major_vertices /* [INOUT] */,
  std::vector<int32_t*> const& edgelist_minor_vertices /* [INOUT] */,
  std::vector<int32_t> const& edgelist_edge_counts,
  bool do_expensive_check);

template rmm::device_uvector<int32_t> renumber_edgelist<int32_t, int32_t, false>(
  raft::handle_t const& handle,
  int32_t* edgelist_major_vertices /* [INOUT] */,
  int32_t* edgelist_minor_vertices /* [INOUT] */,
  int32_t num_edgelist_edges,
  bool do_expensive_check);

template std::tuple<rmm::device_uvector<int32_t>, partition_t<int32_t>, int32_t, int32_t>
renumber_edgelist<int32_t, int32_t, true>(
  raft::handle_t const& handle,
  int32_t const* local_vertices,
  int32_t num_local_vertices,
  std::vector<int32_t*> const& edgelist_major_vertices /* [INOUT] */,
  std::vector<int32_t*> const& edgelist_minor_vertices /* [INOUT] */,
  std::vector<int32_t> const& edgelist_edge_counts,
  bool do_expensive_check);

template rmm::device_uvector<int32_t> renumber_edgelist<int32_t, int32_t, false>(
  raft::handle_t const& handle,
  int32_t const* vertices,
  int32_t num_vertices,
  int32_t* edgelist_major_vertices /* [INOUT] */,
  int32_t* edgelist_minor_vertices /* [INOUT] */,
  int32_t num_edgelist_edges,
  bool do_expensive_check);

// instantiations for <vertex_t == int32_t, edge_t == int64_t>
//
template std::tuple<rmm::device_uvector<int32_t>, partition_t<int32_t>, int32_t, int64_t>
renumber_edgelist<int32_t, int64_t, true>(
  raft::handle_t const& handle,
  std::vector<int32_t*> const& edgelist_major_vertices /* [INOUT] */,
  std::vector<int32_t*> const& edgelist_minor_vertices /* [INOUT] */,
  std::vector<int64_t> const& edgelist_edge_counts,
  bool do_expensive_check);

template rmm::device_uvector<int32_t> renumber_edgelist<int32_t, int64_t, false>(
  raft::handle_t const& handle,
  int32_t* edgelist_major_vertices /* [INOUT] */,
  int32_t* edgelist_minor_vertices /* [INOUT] */,
  int64_t num_edgelist_edges,
  bool do_expensive_check);

template std::tuple<rmm::device_uvector<int32_t>, partition_t<int32_t>, int32_t, int64_t>
renumber_edgelist<int32_t, int64_t, true>(
  raft::handle_t const& handle,
  int32_t const* local_vertices,
  int32_t num_local_vertices,
  std::vector<int32_t*> const& edgelist_major_vertices /* [INOUT] */,
  std::vector<int32_t*> const& edgelist_minor_vertices /* [INOUT] */,
  std::vector<int64_t> const& edgelist_edge_counts,
  bool do_expensive_check);

template rmm::device_uvector<int32_t> renumber_edgelist<int32_t, int64_t, false>(
  raft::handle_t const& handle,
  int32_t const* vertices,
  int32_t num_vertices,
  int32_t* edgelist_major_vertices /* [INOUT] */,
  int32_t* edgelist_minor_vertices /* [INOUT] */,
  int64_t num_edgelist_edges,
  bool do_expensive_check);

// instantiations for <vertex_t == int64_t, edge_t == int64_t>
//
template std::tuple<rmm::device_uvector<int64_t>, partition_t<int64_t>, int64_t, int64_t>
renumber_edgelist<int64_t, int64_t, true>(
  raft::handle_t const& handle,
  std::vector<int64_t*> const& edgelist_major_vertices /* [INOUT] */,
  std::vector<int64_t*> const& edgelist_minor_vertices /* [INOUT] */,
  std::vector<int64_t> const& edgelist_edge_counts,
  bool do_expensive_check);

template rmm::device_uvector<int64_t> renumber_edgelist<int64_t, int64_t, false>(
  raft::handle_t const& handle,
  int64_t* edgelist_major_vertices /* [INOUT] */,
  int64_t* edgelist_minor_vertices /* [INOUT] */,
  int64_t num_edgelist_edges,
  bool do_expensive_check);

template std::tuple<rmm::device_uvector<int64_t>, partition_t<int64_t>, int64_t, int64_t>
renumber_edgelist<int64_t, int64_t, true>(
  raft::handle_t const& handle,
  int64_t const* local_vertices,
  int64_t num_local_vertices,
  std::vector<int64_t*> const& edgelist_major_vertices /* [INOUT] */,
  std::vector<int64_t*> const& edgelist_minor_vertices /* [INOUT] */,
  std::vector<int64_t> const& edgelist_edge_counts,
  bool do_expensive_check);

template rmm::device_uvector<int64_t> renumber_edgelist<int64_t, int64_t, false>(
  raft::handle_t const& handle,
  int64_t const* vertices,
  int64_t num_vertices,
  int64_t* edgelist_major_vertices /* [INOUT] */,
  int64_t* edgelist_minor_vertices /* [INOUT] */,
  int64_t num_edgelist_edges,
  bool do_expensive_check);
} // namespace experimental
} // namespace cugraph
|
cc977374710307eb09e5592a55383af61e44c443.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgeellmv.cu, normal z -> c, Wed Jan 2 14:18:53 2019
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
// ELLPACK SpMV kernel
//Michael Garland
//
// Computes dy[row] = alpha * A(row,:) * dx + beta * dy[row] with A in ELLPACK
// layout: every row owns exactly num_cols_per_row (value, column) slots,
// zero-padded and stored row-major.  One thread per row.
__global__ void
cgeellmv_kernel(
    int num_rows,
    int num_cols,
    int num_cols_per_row,
    magmaFloatComplex alpha,
    magmaFloatComplex * dval,
    magma_index_t * dcolind,
    magmaFloatComplex * dx,
    magmaFloatComplex beta,
    magmaFloatComplex * dy)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= num_rows) return;  // grid tail guard

    int base = num_cols_per_row * row;  // first ELL slot of this row
    magmaFloatComplex acc = MAGMA_C_MAKE(0.0, 0.0);
    for (int n = 0; n < num_cols_per_row; n++) {
        int col               = dcolind[base + n];
        magmaFloatComplex val = dval[base + n];
        if (val != 0)  // skip explicit zero padding
            acc += val * dx[col];
    }
    dy[row] = acc * alpha + beta * dy[row];
}
// shifted ELLPACK SpMV kernel
//Michael Garland
//
// Computes dy[row] = alpha * A(row,:) * dx - lambda * dx[shift index]
//                    + beta * dy[row].
// Rows below `blocksize` take their shift entry from dx[offset + row]; the
// remaining rows go through the addrows indirection table.
__global__ void
cgeellmv_kernel_shift(
    int num_rows,
    int num_cols,
    int num_cols_per_row,
    magmaFloatComplex alpha,
    magmaFloatComplex lambda,
    magmaFloatComplex * dval,
    magma_index_t * dcolind,
    magmaFloatComplex * dx,
    magmaFloatComplex beta,
    int offset,
    int blocksize,
    magma_index_t * addrows,
    magmaFloatComplex * dy)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= num_rows) return;  // grid tail guard

    int base = num_cols_per_row * row;  // first ELL slot of this row
    magmaFloatComplex acc = MAGMA_C_MAKE(0.0, 0.0);
    for (int n = 0; n < num_cols_per_row; n++) {
        int col               = dcolind[base + n];
        magmaFloatComplex val = dval[base + n];
        if (val != 0)  // skip explicit zero padding
            acc += val * dx[col];
    }
    if (row < blocksize)
        dy[row] = acc * alpha - lambda * dx[offset + row] + beta * dy[row];
    else
        dy[row] = acc * alpha - lambda * dx[addrows[row - blocksize]] + beta * dy[row];
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLPACK.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELLPACK
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgeellmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t nnz_per_row,
    magmaFloatComplex alpha,
    magmaFloatComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaFloatComplex_ptr dx,
    magmaFloatComplex beta,
    magmaFloatComplex_ptr dy,
    magma_queue_t queue )
{
    // one thread per row of A; 1-D grid sized by ceiling division
    magma_int_t threads = BLOCK_SIZE;
    dim3 grid(magma_ceildiv(m, BLOCK_SIZE));
    // NOTE(review): transA is accepted but never consulted here; the kernel
    // always applies the un-transposed operator.
    hipLaunchKernelGGL(cgeellmv_kernel, grid, dim3(threads), 0, queue->cuda_stream(),
                       m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy);
    return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELLPACK.
It is the shifted version of the ELLPACK SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
lambda magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELLPACK
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[in]
offset magma_int_t
in case not the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrixpowerskernel is used
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgeellmv_shift(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t nnz_per_row,
    magmaFloatComplex alpha,
    magmaFloatComplex lambda,
    magmaFloatComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaFloatComplex_ptr dx,
    magmaFloatComplex beta,
    magma_int_t offset,
    magma_int_t blocksize,
    magmaIndex_ptr addrows,
    magmaFloatComplex_ptr dy,
    magma_queue_t queue )
{
    // one thread per row of A; 1-D grid sized by ceiling division
    magma_int_t threads = BLOCK_SIZE;
    dim3 grid(magma_ceildiv(m, BLOCK_SIZE));
    // NOTE(review): transA is accepted but never consulted here; the kernel
    // always applies the un-transposed operator.
    hipLaunchKernelGGL(cgeellmv_kernel_shift, grid, dim3(threads), 0, queue->cuda_stream(),
                       m, n, nnz_per_row, alpha, lambda, dval, dcolind, dx,
                       beta, offset, blocksize, addrows, dy);
    return MAGMA_SUCCESS;
}
| cc977374710307eb09e5592a55383af61e44c443.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgeellmv.cu, normal z -> c, Wed Jan 2 14:18:53 2019
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
// ELLPACK SpMV kernel
//Michael Garland
//
// Computes dy = alpha * A * dx + beta * dy with A stored in ELLPACK format:
// each row owns exactly num_cols_per_row (value, column) slots, zero-padded,
// laid out row-major in dval/dcolind.  One thread per row.
__global__ void
cgeellmv_kernel(
    int num_rows,
    int num_cols,
    int num_cols_per_row,
    magmaFloatComplex alpha,
    magmaFloatComplex * dval,
    magma_index_t * dcolind,
    magmaFloatComplex * dx,
    magmaFloatComplex beta,
    magmaFloatComplex * dy)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row < num_rows) {  // guard: grid may overhang num_rows
        magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
        for ( int n = 0; n < num_cols_per_row; n++ ) {
            int col = dcolind [ num_cols_per_row * row + n ];
            magmaFloatComplex val = dval [ num_cols_per_row * row + n ];
            if ( val != 0)  // skip explicit zero padding
                dot += val * dx[col ];
        }
        dy[ row ] = dot * alpha + beta * dy [ row ];
    }
}
// shifted ELLPACK SpMV kernel
//Michael Garland
//
// Computes dy[row] = alpha * A(row,:) * dx - lambda * dx[shift index]
//                    + beta * dy[row].
// Rows below `blocksize` take their shift entry from dx[offset + row]; the
// remaining rows are redirected through the addrows table (used by the
// matrix-powers kernel, per the Purpose notes below).
__global__ void
cgeellmv_kernel_shift(
    int num_rows,
    int num_cols,
    int num_cols_per_row,
    magmaFloatComplex alpha,
    magmaFloatComplex lambda,
    magmaFloatComplex * dval,
    magma_index_t * dcolind,
    magmaFloatComplex * dx,
    magmaFloatComplex beta,
    int offset,
    int blocksize,
    magma_index_t * addrows,
    magmaFloatComplex * dy)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row < num_rows) {  // guard: grid may overhang num_rows
        magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
        for ( int n = 0; n < num_cols_per_row; n++ ) {
            int col = dcolind [ num_cols_per_row * row + n ];
            magmaFloatComplex val = dval [ num_cols_per_row * row + n ];
            if ( val != 0)  // skip explicit zero padding
                dot += val * dx[col ];
        }
        if ( row < blocksize )
            dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ];
        else
            dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ];
    }
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLPACK.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELLPACK
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
// Host launcher: one thread per row, 1-D grid of BLOCK_SIZE-thread blocks.
// NOTE(review): transA is accepted but never consulted here; the kernel always
// applies the un-transposed operator.
extern "C" magma_int_t
magma_cgeellmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t nnz_per_row,
    magmaFloatComplex alpha,
    magmaFloatComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaFloatComplex_ptr dx,
    magmaFloatComplex beta,
    magmaFloatComplex_ptr dy,
    magma_queue_t queue )
{
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );  // ceil(m / BLOCK_SIZE) blocks
    magma_int_t threads = BLOCK_SIZE;
    // launch on the queue's stream so work is ordered with other queue operations
    cgeellmv_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
        ( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
    return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELLPACK.
It is the shifted version of the ELLPACK SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
lambda magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELLPACK
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[in]
offset magma_int_t
in case not the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrixpowerskernel is used
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
// Host launcher for the shifted SpMV: one thread per row, 1-D grid of
// BLOCK_SIZE-thread blocks.
// NOTE(review): transA is accepted but never consulted here; the kernel always
// applies the un-transposed operator.
extern "C" magma_int_t
magma_cgeellmv_shift(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t nnz_per_row,
    magmaFloatComplex alpha,
    magmaFloatComplex lambda,
    magmaFloatComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaFloatComplex_ptr dx,
    magmaFloatComplex beta,
    magma_int_t offset,
    magma_int_t blocksize,
    magmaIndex_ptr addrows,
    magmaFloatComplex_ptr dy,
    magma_queue_t queue )
{
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );  // ceil(m / BLOCK_SIZE) blocks
    magma_int_t threads = BLOCK_SIZE;
    // launch on the queue's stream so work is ordered with other queue operations
    cgeellmv_kernel_shift<<< grid, threads, 0, queue->cuda_stream() >>>
        ( m, n, nnz_per_row, alpha, lambda, dval, dcolind, dx,
          beta, offset, blocksize, addrows, dy );
    return MAGMA_SUCCESS;
}
|
363a55761c941abbe7ed6e39b44cfb45433dbf9c.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <string.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include "table_item.h"
int count = 0;
// Wall-clock time in milliseconds: seconds*1000 plus the millisecond part of
// the microsecond field.
long getCurrentTime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    long millis = now.tv_sec * 1000;
    millis += now.tv_usec / 1000;
    return millis;
}
// Nested-loop equi-join of `order` and `user` on their col1 fields (fixed
// 10-char, NUL-terminated buffers).  Allocates a worst-case result buffer,
// counts matching pairs, and hands the buffer to the caller through `result`.
//
// NOTE(review): despite the name this routine runs entirely on the host; the
// per-row materialization stores are intentionally commented out (kept below
// for reference), so only the match count is produced.
//
// @param order, user      input tables
// @param result           [OUT] receives the match count and the (caller-owned)
//                         result buffer
// @param o_size, u_size   row counts of the two tables
extern "C" void to_gpu(struct order_item* order, struct user_item* user, struct join_acce_result *result, int o_size, int u_size)
{
    int i, j, wh, k;
    // use size_t for the allocation so large o_size*u_size cannot overflow int
    size_t sum_size = (size_t)o_size * (size_t)u_size;
    struct result_item* g_result = (struct result_item*)malloc(sizeof(struct result_item) * sum_size);
    if (g_result == NULL) {
        // robustness: report an empty result instead of crashing in memset
        result->result_len = 0;
        result->result_addr = NULL;
        return;
    }
    memset(g_result, 0, sizeof(struct result_item) * sum_size);

    int matches = 0;  // matches found by THIS call
    for (i = 0; i < o_size; i++)
        for (j = 0; j < u_size; j++)
        {
            wh = 0;  // 0 => the two col1 strings are equal
            for (k = 0; k < 10; k++)
            {
                if (order[i].col1[k] == '\0' && user[j].col1[k] == '\0')
                    break;  // both strings ended together -> equal
                if (order[i].col1[k] != user[j].col1[k])
                {
                    wh = 1;
                    break;
                }
            }
            if (wh == 0)
            {
                /* row materialization intentionally disabled; kept for reference:
                for (k = 0; k < 10; k++)
                    if (order[i].col1[k] != '\0') g_result[count].col1[k] = order[i].col1[k];
                for (k = 0; k < 10; k++)
                    if (user[j].col1[k] != '\0') g_result[count].col2[k] = user[j].col1[k];
                for (k = 0; k < 10; k++)
                    if (order[i].col2[k] != '\0') g_result[count].col3[k] = order[i].col2[k];
                for (k = 0; k < 10; k++)
                    if (user[j].col2[k] != '\0') g_result[count].col4[k] = user[j].col2[k];
                g_result[count].flag = 1;
                */
                matches++;
                count++;  // keep updating the file-global running total as before
            }
        }

    // BUG FIX: result_len was previously hardcoded to 100 regardless of how
    // many pairs actually matched; report the real per-call match count.
    result->result_len = matches;
    result->result_addr = g_result;  // caller takes ownership of this allocation
    return;
}
| 363a55761c941abbe7ed6e39b44cfb45433dbf9c.cu | #include<stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <string.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include "table_item.h"
int count = 0;
// Wall-clock time in milliseconds (seconds*1000 + microseconds/1000).
long getCurrentTime()
{
    struct timeval tv;
    gettimeofday(&tv,NULL);
    return tv.tv_sec * 1000 + tv.tv_usec / 1000;
}
// Nested-loop equi-join of `order` and `user` on their col1 fields (fixed
// 10-char, NUL-terminated buffers).  NOTE(review): despite the name this
// routine runs entirely on the host.
extern "C" void to_gpu(struct order_item* order, struct user_item* user,struct join_acce_result *result,int o_size,int u_size)
{
    int sum_size=o_size*u_size;  // worst-case number of joined rows
    int i,j,wh,k;
    struct result_item* g_result = (struct result_item*)malloc(sizeof(struct result_item)*sum_size);
    memset(g_result,0,sizeof(struct result_item)*sum_size);
    for(i=0;i<o_size;i++)
        for(j=0;j<u_size;j++)
        {
            wh = 0;  // 0 => the two col1 strings are equal
            for(k=0;k<10;k++)
            {
                if(order[i].col1[k]=='\0'&&user[j].col1[k]=='\0')
                    break;  // both strings ended together -> equal
                if(order[i].col1[k]!=user[j].col1[k])
                {
                    wh = 1;
                    break;
                }
            }
            if(wh==0)
            {
                // row materialization is commented out: only the match count is kept
                for(k=0;k<10;k++)
                {
                    if(order[i].col1[k]!='\0')
                    {
                        //g_result[count].col1[k]=order[i].col1[k];
                    }
                }
                for(k=0;k<10;k++)
                {
                    if(user[j].col1[k]!='\0')
                    {
                        //g_result[count].col2[k]=user[j].col1[k];
                    }
                }
                for(k=0;k<10;k++)
                {
                    if(order[i].col2[k]!='\0')
                    {
                        //g_result[count].col3[k]=order[i].col2[k];
                    }
                }
                for(k=0;k<10;k++)
                {
                    if(user[j].col2[k]!='\0')
                    {
                        //g_result[count].col4[k]=user[j].col2[k];
                    }
                }
                //g_result[count].flag=1;
                // NOTE(review): `count` is a file-scope global that is never reset,
                // so repeated calls accumulate across invocations
                count++;
            }
        }
    // NOTE(review): result_len is hardcoded to 100 although `count` matches were
    // found above -- looks like a bug; confirm intent.
    result->result_len=100;
    result->result_addr = g_result;  // caller takes ownership of this allocation
    return ;
}
|
873c98ab78377bbba9b6573e2fdac55b5f56efe4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//http://nothings.org/stb/stb_image_write.h
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include "stdio.h"
#include <vector>
int __host__ __device__ mandel(double p, double q, double maxv, int maxk)
{
double x0 = p;
double y0 = q;
double r;
int k = 0;
do
{
double x = x0 * x0 - y0 * y0 + p;
double y = 2 * x0 * y0 + q;
x0 = x;
y0 = y;
r = x * x + y * y;
k++;
}
while (r <= maxv && k < maxk);
return k;
}
int iceil(int a, int b)
{
return (a+b-1)/b;
}
void __global__ kernel(unsigned char * out, int w,int h, double pmin, double pmax, double qmin, double qmax, double maxv, int maxk)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int index = j + i * w;
if(i < h && j < w)
{
int k = mandel(i*((pmax-pmin)/h)+pmin,j*((qmax-qmin)/w)+qmin, maxv, maxk);
//i*w+j
out[index] = k >= maxk ? 0 : k;
}
}
int main(int argc, char const *argv[])
{
double pmin = -2.25, pmax = 0.75, qmin = -1.5, qmax = 1.5;
double maxv = 100;
int maxk = 128;
int w = 640;
int h = 480;
hipSetDevice(0);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
//double tp = omp_get_wtime();
std::vector<unsigned char> image(w*h);
unsigned char * dev_i;
//std::vector<unsigned char> image2(w*h);
//double t0 = omp_get_wtime();
for(int i = 0; i < h; i++)
for(int j = 0; j < w; j++)
{
int k = mandel(i*((pmax-pmin)/h)+pmin,j*((qmax-qmin)/w)+qmin, maxv, maxk);
image[i*w+j] = k >= maxk ? 0 : k;
//image2[i*w+j] = omp_get_thread_num()*256/n;
}
stbi_write_png("out.png",w,h,1,&image[0],w);
// GPU
// transfer
hipMalloc(&dev_i,w*h);
hipMemset(dev_i,128,w*h);
// malloc
unsigned int kernelTime;
//cutCreateTimer(&kernelTime);
//cutResetTimer(kernelTime);
//cutStartTim,1);
dim3 dimBlock(32, 32,1);
dim3 dimGrid(iceil(w,dimBlock.x),iceil(h,dimBlock.y));
printf("Blocks %d %d %d Threads %d %d %d\n",dimGrid.x,dimGrid.y,dimGrid.z,dimBlock.x,dimBlock.y,dimBlock.z);
hipLaunchKernelGGL(( kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_i,w,h,pmin,pmax,qmin,qmax,maxv,maxk);
{
hipError_t cudaerr = hipPeekAtLastError();
if (cudaerr != 0)
printf("kernel launch failed with error \"%s\".\n",
hipGetErrorString(cudaerr));
}
hipMemcpy(&image[0],dev_i,w*h,hipMemcpyDeviceToHost);
//cutStopTimer(kernelTime);
hipFree(dev_i);
//printf ("Time for the kernel: %f ms\n", cutGetTimerValue(kernelTime));
stbi_write_png("outGPU.png",w,h,1,&image[0],w);
/// double t1 = omp_get_wtime();
//printf("Output is %f and warmup %f\n",t1-t0,t0-tp);
return 0;
} | 873c98ab78377bbba9b6573e2fdac55b5f56efe4.cu | //http://nothings.org/stb/stb_image_write.h
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include "stdio.h"
#include <vector>
int __host__ __device__ mandel(double p, double q, double maxv, int maxk)
{
double x0 = p;
double y0 = q;
double r;
int k = 0;
do
{
double x = x0 * x0 - y0 * y0 + p;
double y = 2 * x0 * y0 + q;
x0 = x;
y0 = y;
r = x * x + y * y;
k++;
}
while (r <= maxv && k < maxk);
return k;
}
int iceil(int a, int b)
{
return (a+b-1)/b;
}
void __global__ kernel(unsigned char * out, int w,int h, double pmin, double pmax, double qmin, double qmax, double maxv, int maxk)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int index = j + i * w;
if(i < h && j < w)
{
int k = mandel(i*((pmax-pmin)/h)+pmin,j*((qmax-qmin)/w)+qmin, maxv, maxk);
//i*w+j
out[index] = k >= maxk ? 0 : k;
}
}
int main(int argc, char const *argv[])
{
double pmin = -2.25, pmax = 0.75, qmin = -1.5, qmax = 1.5;
double maxv = 100;
int maxk = 128;
int w = 640;
int h = 480;
cudaSetDevice(0);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
//double tp = omp_get_wtime();
std::vector<unsigned char> image(w*h);
unsigned char * dev_i;
//std::vector<unsigned char> image2(w*h);
//double t0 = omp_get_wtime();
for(int i = 0; i < h; i++)
for(int j = 0; j < w; j++)
{
int k = mandel(i*((pmax-pmin)/h)+pmin,j*((qmax-qmin)/w)+qmin, maxv, maxk);
image[i*w+j] = k >= maxk ? 0 : k;
//image2[i*w+j] = omp_get_thread_num()*256/n;
}
stbi_write_png("out.png",w,h,1,&image[0],w);
// GPU
// transfer
cudaMalloc(&dev_i,w*h);
cudaMemset(dev_i,128,w*h);
// malloc
unsigned int kernelTime;
//cutCreateTimer(&kernelTime);
//cutResetTimer(kernelTime);
//cutStartTim,1);
dim3 dimBlock(32, 32,1);
dim3 dimGrid(iceil(w,dimBlock.x),iceil(h,dimBlock.y));
printf("Blocks %d %d %d Threads %d %d %d\n",dimGrid.x,dimGrid.y,dimGrid.z,dimBlock.x,dimBlock.y,dimBlock.z);
kernel<<<dimGrid,dimBlock>>>(dev_i,w,h,pmin,pmax,qmin,qmax,maxv,maxk);
{
cudaError_t cudaerr = cudaPeekAtLastError();
if (cudaerr != 0)
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
}
cudaMemcpy(&image[0],dev_i,w*h,cudaMemcpyDeviceToHost);
//cutStopTimer(kernelTime);
cudaFree(dev_i);
//printf ("Time for the kernel: %f ms\n", cutGetTimerValue(kernelTime));
stbi_write_png("outGPU.png",w,h,1,&image[0],w);
/// double t1 = omp_get_wtime();
//printf("Output is %f and warmup %f\n",t1-t0,t0-tp);
return 0;
} |
sum_op.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <paddle/fluid/platform/device_context.h>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/sum_op.h"
#include "paddle/fluid/platform/float16.h"
namespace plat = paddle::platform;
namespace paddle {
namespace operators {
#define CEIL_DIV(x, y) (((x) + (y)-1) / (y))
using LoDTensor = framework::LoDTensor;
template <class T>
__global__ void Sum2CUDAKernel(const T *in_0, const T *in_1, T *out,
int64_t N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
while (id < N) {
out[id] = in_0[id] + in_1[id];
id += blockDim.x * gridDim.x;
}
}
template <class T>
__global__ void SumArrayCUDAKernel(T **in, T *out, int64_t N, size_t in_size,
bool read_dst) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
while (id < N) {
T total(read_dst ? out[id] : static_cast<T>(0));
for (int i = 0; i < in_size; ++i) {
const T *tmp = in[i];
if (tmp) {
total += tmp[id];
}
}
out[id] = total;
id += blockDim.x * gridDim.x;
}
}
template <class T>
__global__ void SumSelectedRowsCUDAKernel(T **sr_in_out, int64_t N,
size_t rows) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
while (id < N) {
for (int i = 0; i < 2 * rows; i += 2) {
const T *tmp = sr_in_out[i];
T *tmp_out = sr_in_out[i + 1];
if (tmp && tmp_out) {
tmp_out[id] += tmp[id];
}
}
id += blockDim.x * gridDim.x;
}
}
template <class T>
__global__ void SumAlign4CUDAKernel(const T *in_0, const T *in_1, T *out,
int64_t N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = id; i < N / 4; i += blockDim.x * gridDim.x) {
const float4 *in0_4 = reinterpret_cast<float4 *>(in_0);
const float4 *in1_4 = reinterpret_cast<float4 *>(in_1);
float4 tmp;
tmp.x = in0_4[i].x + in1_4[i].x;
tmp.y = in0_4[i].y + in1_4[i].y;
tmp.z = in0_4[i].z + in1_4[i].z;
tmp.w = in0_4[i].w + in1_4[i].w;
reinterpret_cast<float4 *>(out)[i] = tmp;
}
}
template <class T>
void SumToLoDTensor(const framework::ExecutionContext &context) {
auto in_vars = context.MultiInputVar("X");
const size_t in_num = in_vars.size();
constexpr size_t theory_sm_threads = 1024;
auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
auto stream = dev_ctx.stream();
auto max_threads = dev_ctx.GetMaxPhysicalThreadCount();
auto sm_count = max_threads / theory_sm_threads;
size_t tile_size = 0;
dim3 grids;
dim3 blocks;
auto ComputeKernelParameter = [&](size_t length) {
if (length >= max_threads)
tile_size = 1024;
else if (length < max_threads && length > sm_count * 128)
tile_size = 512;
else if (length <= sm_count * 128)
tile_size = 256;
grids = dim3(CEIL_DIV(length, tile_size), 1, 1);
blocks = dim3(tile_size, 1, 1);
};
auto *out = context.Output<LoDTensor>("Out");
bool in_place = in_vars[0] == context.OutputVar("Out");
if (!in_place) {
auto *out_ptr = out->mutable_data<T>(context.GetPlace());
if (in_num >= 1 && in_vars[0]->IsType<framework::LoDTensor>()) {
auto &in_0_tensor = in_vars[0]->Get<framework::LoDTensor>();
if (in_0_tensor.numel() > 0) {
in_place = (in_0_tensor.data<T>() == out_ptr);
}
}
}
// Sum of two tensors
if (in_num == 2 && in_vars[0]->IsType<framework::LoDTensor>() &&
in_vars[1]->IsType<framework::LoDTensor>()) {
auto &in_0 = in_vars[0]->Get<framework::LoDTensor>();
auto &in_1 = in_vars[1]->Get<framework::LoDTensor>();
auto length = in_0.numel();
if (length && in_0.IsInitialized() && in_1.IsInitialized()) {
auto result = EigenVector<T>::Flatten(*out);
auto &place = *dev_ctx.eigen_device();
auto in_0_e = EigenVector<T>::Flatten(in_0);
auto in_1_e = EigenVector<T>::Flatten(in_1);
result.device(place) = in_0_e + in_1_e;
} else if (length && in_0.IsInitialized()) {
auto result = EigenVector<T>::Flatten(*out);
auto &place = *dev_ctx.eigen_device();
result.device(place) = EigenVector<T>::Flatten(in_0);
} else if (length && in_1.IsInitialized()) {
auto result = EigenVector<T>::Flatten(*out);
auto &place = *dev_ctx.eigen_device();
result.device(place) = EigenVector<T>::Flatten(in_1);
}
return;
}
int start = in_place ? 1 : 0;
if (!in_place) {
math::SetConstant<platform::CUDADeviceContext, T> constant_functor;
constant_functor(
context.template device_context<platform::CUDADeviceContext>(), out,
static_cast<T>(0));
}
std::vector<const T *> in_data;
std::vector<int> selectrow_index;
int64_t lod_length = 0;
bool dst_write = false;
for (int i = start; i < in_num; ++i) {
if (in_vars[i]->IsType<framework::LoDTensor>()) {
auto &in_i = in_vars[i]->Get<framework::LoDTensor>();
in_data.emplace_back(in_i.data<T>());
lod_length = in_i.numel();
} else if (in_vars[i]->IsType<framework::SelectedRows>()) {
selectrow_index.push_back(i);
}
}
// compute select rows seperately.
if (!selectrow_index.empty()) {
std::vector<const T *> sr_in_out_data;
size_t rows = 0;
int64_t length = 0;
for (auto index : selectrow_index) {
auto &sr = in_vars[index]->Get<framework::SelectedRows>();
auto &sr_value = sr.value();
auto &sr_rows = sr.rows();
auto row_numel = sr_value.numel() / sr_rows.size();
auto out_dims = out->dims();
PADDLE_ENFORCE_EQ(sr.height(), out_dims[0]);
PADDLE_ENFORCE_EQ(row_numel, out->numel() / sr.height());
auto *sr_data = sr_value.data<T>();
auto *sr_out_data = out->data<T>();
rows += sr_rows.size();
length = row_numel;
for (size_t i = 0; i < sr_rows.size(); ++i) {
sr_in_out_data.emplace_back(&sr_data[i * row_numel]);
sr_in_out_data.emplace_back(&sr_out_data[sr_rows[i] * row_numel]);
}
}
if (!sr_in_out_data.empty()) {
auto tmp_sr_in_out_array =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx).Allocate(
sr_in_out_data.size() * sizeof(T *));
memory::Copy(boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()),
tmp_sr_in_out_array->ptr(), platform::CPUPlace(),
reinterpret_cast<void *>(sr_in_out_data.data()),
sr_in_out_data.size() * sizeof(T *), dev_ctx.stream());
T **sr_in_out_array_data =
reinterpret_cast<T **>(tmp_sr_in_out_array->ptr());
ComputeKernelParameter(length);
hipLaunchKernelGGL(( SumSelectedRowsCUDAKernel<T>), dim3(grids), dim3(blocks), 0, stream,
sr_in_out_array_data, length, rows);
dst_write = true;
}
}
// if indata not null, merge into one kernel call.
if (!in_data.empty()) {
auto tmp_in_array =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx).Allocate(
in_data.size() * sizeof(T *));
memory::Copy(boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()),
tmp_in_array->ptr(), platform::CPUPlace(),
reinterpret_cast<void *>(in_data.data()),
in_data.size() * sizeof(T *), dev_ctx.stream());
T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr());
ComputeKernelParameter(lod_length);
hipLaunchKernelGGL(( SumArrayCUDAKernel<T>), dim3(grids), dim3(blocks), 0, stream,
in_array_data, out->data<T>(), lod_length, in_data.size(),
dst_write | in_place);
}
}
template <typename T>
class SumKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto out_var = context.OutputVar("Out");
if (out_var->IsType<framework::LoDTensor>()) {
SumToLoDTensor<T>(context);
} else if (out_var->IsType<framework::SelectedRows>()) {
SelectedRowsCompute<platform::CUDADeviceContext, T>(context);
} else if (out_var->IsType<framework::LoDTensorArray>()) {
LodTensorArrayCompute<platform::CUDADeviceContext, T>(context);
} else {
PADDLE_THROW("Unexpected branch, output variable type is %s",
framework::ToTypeName(out_var->Type()));
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
sum, ops::SumKernel<paddle::platform::CUDADeviceContext, float>,
ops::SumKernel<paddle::platform::CUDADeviceContext, double>,
ops::SumKernel<paddle::platform::CUDADeviceContext, int>,
ops::SumKernel<paddle::platform::CUDADeviceContext, int64_t>,
ops::SumKernel<paddle::platform::CUDADeviceContext, plat::float16>);
| sum_op.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <paddle/fluid/platform/device_context.h>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/sum_op.h"
#include "paddle/fluid/platform/float16.h"
namespace plat = paddle::platform;
namespace paddle {
namespace operators {
#define CEIL_DIV(x, y) (((x) + (y)-1) / (y))
using LoDTensor = framework::LoDTensor;
template <class T>
__global__ void Sum2CUDAKernel(const T *in_0, const T *in_1, T *out,
int64_t N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
while (id < N) {
out[id] = in_0[id] + in_1[id];
id += blockDim.x * gridDim.x;
}
}
template <class T>
__global__ void SumArrayCUDAKernel(T **in, T *out, int64_t N, size_t in_size,
bool read_dst) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
while (id < N) {
T total(read_dst ? out[id] : static_cast<T>(0));
for (int i = 0; i < in_size; ++i) {
const T *tmp = in[i];
if (tmp) {
total += tmp[id];
}
}
out[id] = total;
id += blockDim.x * gridDim.x;
}
}
template <class T>
__global__ void SumSelectedRowsCUDAKernel(T **sr_in_out, int64_t N,
size_t rows) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
while (id < N) {
for (int i = 0; i < 2 * rows; i += 2) {
const T *tmp = sr_in_out[i];
T *tmp_out = sr_in_out[i + 1];
if (tmp && tmp_out) {
tmp_out[id] += tmp[id];
}
}
id += blockDim.x * gridDim.x;
}
}
template <class T>
__global__ void SumAlign4CUDAKernel(const T *in_0, const T *in_1, T *out,
int64_t N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = id; i < N / 4; i += blockDim.x * gridDim.x) {
const float4 *in0_4 = reinterpret_cast<float4 *>(in_0);
const float4 *in1_4 = reinterpret_cast<float4 *>(in_1);
float4 tmp;
tmp.x = in0_4[i].x + in1_4[i].x;
tmp.y = in0_4[i].y + in1_4[i].y;
tmp.z = in0_4[i].z + in1_4[i].z;
tmp.w = in0_4[i].w + in1_4[i].w;
reinterpret_cast<float4 *>(out)[i] = tmp;
}
}
template <class T>
void SumToLoDTensor(const framework::ExecutionContext &context) {
auto in_vars = context.MultiInputVar("X");
const size_t in_num = in_vars.size();
constexpr size_t theory_sm_threads = 1024;
auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
auto stream = dev_ctx.stream();
auto max_threads = dev_ctx.GetMaxPhysicalThreadCount();
auto sm_count = max_threads / theory_sm_threads;
size_t tile_size = 0;
dim3 grids;
dim3 blocks;
auto ComputeKernelParameter = [&](size_t length) {
if (length >= max_threads)
tile_size = 1024;
else if (length < max_threads && length > sm_count * 128)
tile_size = 512;
else if (length <= sm_count * 128)
tile_size = 256;
grids = dim3(CEIL_DIV(length, tile_size), 1, 1);
blocks = dim3(tile_size, 1, 1);
};
auto *out = context.Output<LoDTensor>("Out");
bool in_place = in_vars[0] == context.OutputVar("Out");
if (!in_place) {
auto *out_ptr = out->mutable_data<T>(context.GetPlace());
if (in_num >= 1 && in_vars[0]->IsType<framework::LoDTensor>()) {
auto &in_0_tensor = in_vars[0]->Get<framework::LoDTensor>();
if (in_0_tensor.numel() > 0) {
in_place = (in_0_tensor.data<T>() == out_ptr);
}
}
}
// Sum of two tensors
if (in_num == 2 && in_vars[0]->IsType<framework::LoDTensor>() &&
in_vars[1]->IsType<framework::LoDTensor>()) {
auto &in_0 = in_vars[0]->Get<framework::LoDTensor>();
auto &in_1 = in_vars[1]->Get<framework::LoDTensor>();
auto length = in_0.numel();
if (length && in_0.IsInitialized() && in_1.IsInitialized()) {
auto result = EigenVector<T>::Flatten(*out);
auto &place = *dev_ctx.eigen_device();
auto in_0_e = EigenVector<T>::Flatten(in_0);
auto in_1_e = EigenVector<T>::Flatten(in_1);
result.device(place) = in_0_e + in_1_e;
} else if (length && in_0.IsInitialized()) {
auto result = EigenVector<T>::Flatten(*out);
auto &place = *dev_ctx.eigen_device();
result.device(place) = EigenVector<T>::Flatten(in_0);
} else if (length && in_1.IsInitialized()) {
auto result = EigenVector<T>::Flatten(*out);
auto &place = *dev_ctx.eigen_device();
result.device(place) = EigenVector<T>::Flatten(in_1);
}
return;
}
int start = in_place ? 1 : 0;
if (!in_place) {
math::SetConstant<platform::CUDADeviceContext, T> constant_functor;
constant_functor(
context.template device_context<platform::CUDADeviceContext>(), out,
static_cast<T>(0));
}
std::vector<const T *> in_data;
std::vector<int> selectrow_index;
int64_t lod_length = 0;
bool dst_write = false;
for (int i = start; i < in_num; ++i) {
if (in_vars[i]->IsType<framework::LoDTensor>()) {
auto &in_i = in_vars[i]->Get<framework::LoDTensor>();
in_data.emplace_back(in_i.data<T>());
lod_length = in_i.numel();
} else if (in_vars[i]->IsType<framework::SelectedRows>()) {
selectrow_index.push_back(i);
}
}
// compute select rows seperately.
if (!selectrow_index.empty()) {
std::vector<const T *> sr_in_out_data;
size_t rows = 0;
int64_t length = 0;
for (auto index : selectrow_index) {
auto &sr = in_vars[index]->Get<framework::SelectedRows>();
auto &sr_value = sr.value();
auto &sr_rows = sr.rows();
auto row_numel = sr_value.numel() / sr_rows.size();
auto out_dims = out->dims();
PADDLE_ENFORCE_EQ(sr.height(), out_dims[0]);
PADDLE_ENFORCE_EQ(row_numel, out->numel() / sr.height());
auto *sr_data = sr_value.data<T>();
auto *sr_out_data = out->data<T>();
rows += sr_rows.size();
length = row_numel;
for (size_t i = 0; i < sr_rows.size(); ++i) {
sr_in_out_data.emplace_back(&sr_data[i * row_numel]);
sr_in_out_data.emplace_back(&sr_out_data[sr_rows[i] * row_numel]);
}
}
if (!sr_in_out_data.empty()) {
auto tmp_sr_in_out_array =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx).Allocate(
sr_in_out_data.size() * sizeof(T *));
memory::Copy(boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()),
tmp_sr_in_out_array->ptr(), platform::CPUPlace(),
reinterpret_cast<void *>(sr_in_out_data.data()),
sr_in_out_data.size() * sizeof(T *), dev_ctx.stream());
T **sr_in_out_array_data =
reinterpret_cast<T **>(tmp_sr_in_out_array->ptr());
ComputeKernelParameter(length);
SumSelectedRowsCUDAKernel<T><<<grids, blocks, 0, stream>>>(
sr_in_out_array_data, length, rows);
dst_write = true;
}
}
// if indata not null, merge into one kernel call.
if (!in_data.empty()) {
auto tmp_in_array =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx).Allocate(
in_data.size() * sizeof(T *));
memory::Copy(boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()),
tmp_in_array->ptr(), platform::CPUPlace(),
reinterpret_cast<void *>(in_data.data()),
in_data.size() * sizeof(T *), dev_ctx.stream());
T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr());
ComputeKernelParameter(lod_length);
SumArrayCUDAKernel<T><<<grids, blocks, 0, stream>>>(
in_array_data, out->data<T>(), lod_length, in_data.size(),
dst_write | in_place);
}
}
template <typename T>
class SumKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto out_var = context.OutputVar("Out");
if (out_var->IsType<framework::LoDTensor>()) {
SumToLoDTensor<T>(context);
} else if (out_var->IsType<framework::SelectedRows>()) {
SelectedRowsCompute<platform::CUDADeviceContext, T>(context);
} else if (out_var->IsType<framework::LoDTensorArray>()) {
LodTensorArrayCompute<platform::CUDADeviceContext, T>(context);
} else {
PADDLE_THROW("Unexpected branch, output variable type is %s",
framework::ToTypeName(out_var->Type()));
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
sum, ops::SumKernel<paddle::platform::CUDADeviceContext, float>,
ops::SumKernel<paddle::platform::CUDADeviceContext, double>,
ops::SumKernel<paddle::platform::CUDADeviceContext, int>,
ops::SumKernel<paddle::platform::CUDADeviceContext, int64_t>,
ops::SumKernel<paddle::platform::CUDADeviceContext, plat::float16>);
|
e6145bc6915d7d80f9e821ae026b59f472a34e36.hip | // !!! This is a file automatically generated by hipify!!!
/*!
Count triangles using the per-edge binary search
*/
#include <iostream>
#include <thread>
#include <vector>
#include <roctracer/roctx.h>
#include <clara/clara.hpp>
#include <fmt/format.h>
#include "pangolin/algorithm/tc_edge_binary.cuh"
#include "pangolin/bounded_buffer.hpp"
#include "pangolin/configure.hpp"
#include "pangolin/cuda_cxx/stream.hpp"
#include "pangolin/file/edge_list_file.hpp"
#include "pangolin/init.hpp"
#include "pangolin/sparse/csr_coo.hpp"
// Buffer is a BoundedBuffer with two entries (double buffer)
template <typename T> using Buffer = pangolin::BoundedBuffer<T, 2>;
using pangolin::Stream;
template <typename V> void print_vec(const V &vec, const std::string &sep) {
for (const auto &e : vec) {
fmt::print("{}{}", sep, e);
}
}
template <typename Edge> void produce(const std::string path, Buffer<std::vector<Edge>> &queue) {
double readTime = 0, queueTime = 0;
pangolin::EdgeListFile file(path);
std::vector<Edge> edges;
while (true) {
auto readStart = std::chrono::system_clock::now();
size_t readCount = file.get_edges(edges, 500);
auto readEnd = std::chrono::system_clock::now();
readTime += (readEnd - readStart).count() / 1e9;
SPDLOG_TRACE(pangolin::logger::console(), "reader: read {} edges", edges.size());
if (0 == readCount) {
break;
}
auto queueStart = std::chrono::system_clock::now();
queue.push(std::move(edges));
auto queueEnd = std::chrono::system_clock::now();
queueTime += (queueEnd - queueStart).count() / 1e9;
SPDLOG_TRACE(pangolin::logger::console(), "reader: pushed edges");
}
SPDLOG_TRACE(pangolin::logger::console(), "reader: closing queue");
queue.close();
LOG(debug, "reader: {}s I/O, {}s blocked", readTime, queueTime);
}
template <typename Mat> void consume(Buffer<std::vector<typename Mat::edge_type>> &queue, Mat &mat) {
typedef typename Mat::index_type Index;
typedef typename Mat::edge_type Edge;
double queueTime = 0, csrTime = 0;
auto upperTriangular = [](const Edge &e) { return e.src < e.dst; };
// keep grabbing while queue is filling
Index maxNode = 0;
while (true) {
std::vector<Edge> edges;
bool popped;
SPDLOG_TRACE(pangolin::logger::console(), "builder: trying to pop...");
auto queueStart = std::chrono::system_clock::now();
edges = queue.pop(popped);
auto queueEnd = std::chrono::system_clock::now();
queueTime += (queueEnd - queueStart).count() / 1e9;
if (popped) {
SPDLOG_TRACE(pangolin::logger::console(), "builder: popped {} edges", edges.size());
auto csrStart = std::chrono::system_clock::now();
for (const auto &edge : edges) {
maxNode = max(edge.src, maxNode);
maxNode = max(edge.dst, maxNode);
if (upperTriangular(edge)) {
// SPDLOG_TRACE(pangolin::logger::console(), "{} {}", edge.src, edge.dst);
mat.add_next_edge(edge);
}
}
auto csrEnd = std::chrono::system_clock::now();
csrTime += (csrEnd - csrStart).count() / 1e9;
} else {
SPDLOG_TRACE(pangolin::logger::console(), "builder: no edges after pop");
assert(queue.empty());
assert(queue.closed());
break;
}
}
auto csrStart = std::chrono::system_clock::now();
mat.finish_edges(maxNode);
auto csrEnd = std::chrono::system_clock::now();
csrTime += (csrEnd - csrStart).count() / 1e9;
LOG(debug, "builder: {}s csr {}s blocked", csrTime, queueTime);
}
struct RunOptions {
int iters;
std::vector<int> gpus;
std::string path; //!< path for graph
std::string sep; //!< seperator for output
int blockSize;
int coarsening;
bool readMostly;
bool accessedBy;
bool prefetchAsync;
bool shrinkToFit;
bool preCountBarrier;
};
void print_header(const RunOptions &opts) {
fmt::print("bmark{0}bs{0}gpus{0}graph{0}nodes{0}edges{0}tris", opts.sep);
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}total_time{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}total_teps{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}gpu_time{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}gpu_teps{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}count_time{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}count_teps{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}kernel_time{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}kernel_teps{}", opts.sep, i);
}
fmt::print("\n");
}
template <typename Index> int run(RunOptions &opts) {
typedef pangolin::DiEdge<Index> Edge;
auto gpus = opts.gpus;
if (gpus.empty()) {
LOG(warn, "no GPUs provided on command line, using GPU 0");
gpus.push_back(0);
}
// create a stream for each GPU
std::vector<Stream> streams;
for (const auto &gpu : gpus) {
streams.push_back(Stream(gpu));
LOG(debug, "created stream {} for gpu {}", streams.back(), gpu);
}
std::vector<double> totalTimes;
std::vector<double> gpuTimes;
std::vector<double> countTimes;
std::vector<double> kernelTimes;
uint64_t nnz;
uint64_t numRows;
uint64_t tris;
// create csr and count `opts.iters` times
for (int i = 0; i < opts.iters; ++i) {
// read data
const auto totalStart = std::chrono::system_clock::now();
Buffer<std::vector<Edge>> queue;
pangolin::CSRCOO<Index> csr;
// start a thread to read the matrix data
LOG(debug, "start disk reader");
std::thread reader(produce<Edge>, opts.path, std::ref(queue));
// start a thread to build the matrix
LOG(debug, "start csr build");
std::thread builder(consume<pangolin::CSRCOO<Index>>, std::ref(queue), std::ref(csr));
// consume(queue, csr, &readerActive);
LOG(debug, "waiting for disk reader...");
reader.join();
LOG(debug, "waiting for CSR builder...");
builder.join();
assert(queue.empty());
if (opts.shrinkToFit) {
LOG(debug, "shrink CSR");
csr.shrink_to_fit();
}
double elapsed = (std::chrono::system_clock::now() - totalStart).count() / 1e9;
LOG(info, "io/csr time {}s", elapsed);
LOG(debug, "CSR nnz = {} rows = {}", csr.nnz(), csr.num_rows());
LOG(debug, "CSR cap = {}MB size = {}MB", csr.capacity_bytes() / 1024 / 1024, csr.size_bytes() / 1024 / 1024);
const auto gpuStart = std::chrono::system_clock::now();
// read-mostly
roctxRangePush("read-mostly");
auto start = std::chrono::system_clock::now();
if (opts.readMostly) {
csr.read_mostly();
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
roctxRangePop();
LOG(info, "read-mostly CSR time {}s", elapsed);
// accessed-by
start = std::chrono::system_clock::now();
if (opts.accessedBy) {
for (const auto &gpu : gpus) {
csr.accessed_by(gpu);
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "accessed-by CSR time {}s", elapsed);
// prefetch
start = std::chrono::system_clock::now();
if (opts.prefetchAsync) {
for (size_t gpuIdx = 0; gpuIdx < gpus.size(); ++gpuIdx) {
auto &gpu = gpus[gpuIdx];
hipStream_t stream = streams[gpuIdx].stream();
csr.prefetch_async(gpu, stream);
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "prefetch CSR time {}s", elapsed);
if (opts.preCountBarrier) {
LOG(debug, "sync streams after hints");
for (auto &stream : streams) {
stream.sync();
}
}
// count triangles
roctxRangePush("count");
const auto countStart = std::chrono::system_clock::now();
// create async counters
std::vector<pangolin::BinaryTC> counters;
for (size_t gpuIdx = 0; gpuIdx < gpus.size(); ++gpuIdx) {
auto dev = gpus[gpuIdx];
hipStream_t stream = streams[gpuIdx].stream();
LOG(debug, "create device {} counter", dev);
counters.push_back(std::move(pangolin::BinaryTC(dev, stream)));
}
// determine the number of edges per gpu
const size_t edgesPerGPU = (csr.nnz() + gpus.size() - 1) / gpus.size();
LOG(debug, "{} edges per GPU", edgesPerGPU);
// launch counting operations
size_t edgeStart = 0;
for (auto &counter : counters) {
const size_t edgeStop = ::min(edgeStart + edgesPerGPU, csr.nnz());
const size_t numEdges = edgeStop - edgeStart;
LOG(debug, "start async count on GPU {} ({} edges)", counter.device(), numEdges);
counter.count_async(csr.view(), numEdges, edgeStart, opts.blockSize, opts.coarsening);
edgeStart += edgesPerGPU;
}
// wait for counting operations to finish
uint64_t total = 0;
for (auto &counter : counters) {
LOG(debug, "wait for counter on GPU {}", counter.device());
counter.sync();
total += counter.count();
}
const auto stop = std::chrono::system_clock::now();
roctxRangePop(); // count
LOG(info, "{} triangles", total);
// record graph stats
tris = total;
nnz = csr.nnz();
numRows = csr.num_rows();
const double totalElapsed = (stop - totalStart).count() / 1e9;
const double gpuElapsed = (stop - gpuStart).count() / 1e9;
const double countElapsed = (stop - countStart).count() / 1e9;
LOG(info, "total time {}s ({} teps)", totalElapsed, nnz / totalElapsed);
LOG(info, "gpu time {}s ({} teps)", gpuElapsed, nnz / gpuElapsed);
LOG(info, "count time {}s ({} teps)", countElapsed, nnz / countElapsed);
totalTimes.push_back(totalElapsed);
gpuTimes.push_back(gpuElapsed);
countTimes.push_back(countElapsed);
for (auto &counter : counters) {
double secs = counter.kernel_time();
int dev = counter.device();
LOG(info, "gpu {} kernel time {}s ({} teps)", dev, secs, nnz / secs);
}
if (counters.size() == 1) {
kernelTimes.push_back(counters[0].kernel_time());
} else {
kernelTimes.push_back(0);
}
}
if (opts.iters > 0) {
fmt::print("binary");
fmt::print("{}{}", opts.sep, opts.blockSize);
std::string gpuStr;
for (auto gpu : gpus) {
gpuStr += std::to_string(gpu);
}
fmt::print("{}{}", opts.sep, gpuStr);
fmt::print("{}{}", opts.sep, opts.path);
fmt::print("{}{}", opts.sep, numRows);
fmt::print("{}{}", opts.sep, nnz);
fmt::print("{}{}", opts.sep, tris);
print_vec(totalTimes, opts.sep);
for (const auto &s : totalTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(gpuTimes, opts.sep);
for (const auto &s : gpuTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(countTimes, opts.sep);
for (const auto &s : countTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(kernelTimes, opts.sep);
for (const auto &s : kernelTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
fmt::print("\n");
}
return 0;
}
int main(int argc, char **argv) {
// Initialize the pangolin runtime before anything else.
pangolin::init();
// Defaults for the benchmark; the command-line flags parsed below override them.
RunOptions opts;
opts.sep = ",";
opts.blockSize = 512;
opts.coarsening = 1;
opts.iters = 1;
opts.shrinkToFit = false;
opts.readMostly = false;
opts.accessedBy = false;
opts.prefetchAsync = false;
opts.preCountBarrier = true;
bool help = false;
bool debug = false;
bool verbose = false;
bool onlyPrintHeader = false;
bool wide = false; // use 64-bit node ids when set
// Assemble the command-line interface.
clara::Parser cli;
cli = cli | clara::Help(help);
cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr");
cli = cli | clara::Opt(verbose)["--verbose"]("print verbose messages to stderr");
cli = cli | clara::Opt(onlyPrintHeader)["--header"]("print the header for the times output and quit");
cli = cli | clara::Opt(wide)["--wide"]("64-bit node IDs");
cli = cli | clara::Opt(opts.gpus, "dev ids")["-g"]("gpus to use");
cli = cli | clara::Opt(opts.coarsening, "coarsening")["-c"]("Number of elements per thread");
cli = cli | clara::Opt(opts.blockSize, "block-dim")["--bs"]("Number of threads in a block");
cli = cli | clara::Opt(opts.shrinkToFit)["--shrink-to-fit"]("shrink allocations to fit data");
cli = cli | clara::Opt(opts.readMostly)["--read-mostly"]("mark data as read-mostly by all gpus before kernel");
cli = cli | clara::Opt(opts.accessedBy)["--accessed-by"]("mark data as accessed-by all GPUs before kernel");
cli = cli | clara::Opt(opts.prefetchAsync)["--prefetch-async"]("prefetch data to all GPUs before kernel");
cli = cli | clara::Opt(opts.iters, "N")["-n"]("number of counts");
cli = cli | clara::Arg(opts.path, "graph file")("Path to adjacency list").required();
auto result = cli.parse(clara::Args(argc, argv));
if (!result) {
LOG(error, "Error in command line: {}", result.errorMessage());
exit(1);
}
if (help) {
std::cout << cli;
return 0;
}
// set logging level
if (verbose) {
pangolin::logger::set_level(pangolin::logger::Level::TRACE);
} else if (debug) {
pangolin::logger::set_level(pangolin::logger::Level::DEBUG);
}
// log command line before much else happens
{
std::string cmd;
for (int i = 0; i < argc; ++i) {
if (i != 0) {
cmd += " ";
}
cmd += argv[i];
}
LOG(debug, cmd);
}
// Record build provenance so benchmark runs are reproducible.
LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR, PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH);
LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC);
LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH);
LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES);
#ifndef NDEBUG
LOG(warn, "Not a release build");
#endif
if (onlyPrintHeader) {
print_header(opts);
return 0;
}
// Dispatch on node-id width: run<> is templated on the index type.
if (wide) {
return run<uint64_t>(opts);
} else {
return run<uint32_t>(opts);
}
}
| e6145bc6915d7d80f9e821ae026b59f472a34e36.cu | /*!
Count triangles using the per-edge binary search
*/
#include <iostream>
#include <thread>
#include <vector>
#include <nvToolsExt.h>
#include <clara/clara.hpp>
#include <fmt/format.h>
#include "pangolin/algorithm/tc_edge_binary.cuh"
#include "pangolin/bounded_buffer.hpp"
#include "pangolin/configure.hpp"
#include "pangolin/cuda_cxx/stream.hpp"
#include "pangolin/file/edge_list_file.hpp"
#include "pangolin/init.hpp"
#include "pangolin/sparse/csr_coo.hpp"
// Buffer is a BoundedBuffer with two entries (double buffer)
template <typename T> using Buffer = pangolin::BoundedBuffer<T, 2>;
using pangolin::Stream;
/// Print each element of `vec` preceded by `sep` (no trailing separator);
/// used to emit one CSV cell per measurement.
template <typename V> void print_vec(const V &vec, const std::string &sep) {
  for (auto it = vec.begin(); it != vec.end(); ++it) {
    fmt::print("{}{}", sep, *it);
  }
}
// Reader-thread body: stream edges from the file at `path` in batches of up
// to 500 and push each batch into the bounded queue. Closes the queue at EOF
// so the consumer thread can terminate. Logs the split between time spent on
// file I/O and time spent blocked on a full queue.
// NOTE(review): the "/ 1e9" conversions assume system_clock durations tick
// in nanoseconds -- true for libstdc++, but implementation-defined.
template <typename Edge> void produce(const std::string path, Buffer<std::vector<Edge>> &queue) {
double readTime = 0, queueTime = 0;
pangolin::EdgeListFile file(path);
std::vector<Edge> edges;
while (true) {
auto readStart = std::chrono::system_clock::now();
size_t readCount = file.get_edges(edges, 500);
auto readEnd = std::chrono::system_clock::now();
readTime += (readEnd - readStart).count() / 1e9;
SPDLOG_TRACE(pangolin::logger::console(), "reader: read {} edges", edges.size());
if (0 == readCount) {
break; // EOF: no more edges
}
auto queueStart = std::chrono::system_clock::now();
queue.push(std::move(edges));
auto queueEnd = std::chrono::system_clock::now();
queueTime += (queueEnd - queueStart).count() / 1e9;
SPDLOG_TRACE(pangolin::logger::console(), "reader: pushed edges");
}
SPDLOG_TRACE(pangolin::logger::console(), "reader: closing queue");
queue.close();
LOG(debug, "reader: {}s I/O, {}s blocked", readTime, queueTime);
}
// Builder-thread body: pop edge batches from the queue and feed them into the
// matrix builder. Only upper-triangular edges (src < dst) are inserted, so
// each undirected edge is stored exactly once; the maximum node id is tracked
// over ALL edges (kept or not) and passed to finish_edges to size the matrix.
template <typename Mat> void consume(Buffer<std::vector<typename Mat::edge_type>> &queue, Mat &mat) {
typedef typename Mat::index_type Index;
typedef typename Mat::edge_type Edge;
double queueTime = 0, csrTime = 0;
auto upperTriangular = [](const Edge &e) { return e.src < e.dst; };
// keep grabbing while queue is filling
Index maxNode = 0;
while (true) {
std::vector<Edge> edges;
bool popped;
SPDLOG_TRACE(pangolin::logger::console(), "builder: trying to pop...");
auto queueStart = std::chrono::system_clock::now();
edges = queue.pop(popped);
auto queueEnd = std::chrono::system_clock::now();
queueTime += (queueEnd - queueStart).count() / 1e9;
if (popped) {
SPDLOG_TRACE(pangolin::logger::console(), "builder: popped {} edges", edges.size());
auto csrStart = std::chrono::system_clock::now();
for (const auto &edge : edges) {
maxNode = max(edge.src, maxNode);
maxNode = max(edge.dst, maxNode);
if (upperTriangular(edge)) {
// SPDLOG_TRACE(pangolin::logger::console(), "{} {}", edge.src, edge.dst);
mat.add_next_edge(edge);
}
}
auto csrEnd = std::chrono::system_clock::now();
csrTime += (csrEnd - csrStart).count() / 1e9;
} else {
// pop failed: producer closed the queue and no batches remain
SPDLOG_TRACE(pangolin::logger::console(), "builder: no edges after pop");
assert(queue.empty());
assert(queue.closed());
break;
}
}
auto csrStart = std::chrono::system_clock::now();
mat.finish_edges(maxNode);
auto csrEnd = std::chrono::system_clock::now();
csrTime += (csrEnd - csrStart).count() / 1e9;
LOG(debug, "builder: {}s csr {}s blocked", csrTime, queueTime);
}
// Aggregated command-line configuration for one benchmark invocation.
struct RunOptions {
int iters; //!< number of build+count iterations to run
std::vector<int> gpus; //!< device ids used for counting (GPU 0 if empty)
std::string path; //!< path for graph
std::string sep; //!< seperator for output
int blockSize; //!< threads per block for the count kernel
int coarsening; //!< elements processed per thread
bool readMostly; //!< call csr.read_mostly() before counting
bool accessedBy; //!< call csr.accessed_by(gpu) for every GPU before counting
bool prefetchAsync; //!< call csr.prefetch_async(gpu, stream) before counting
bool shrinkToFit; //!< shrink CSR allocations to fit after the build
bool preCountBarrier; //!< sync all streams after hints, before count timing
};
// Print the CSV header row matching the columns emitted by run():
// fixed graph-description columns, then one column per iteration for each
// measured phase (time and TEPS for total, gpu, count, and kernel).
void print_header(const RunOptions &opts) {
  fmt::print("bmark{0}bs{0}gpus{0}graph{0}nodes{0}edges{0}tris", opts.sep);
  // Phase column prefixes, in exactly the order run() prints its results.
  const char *phases[] = {"total_time", "total_teps", "gpu_time",    "gpu_teps",
                          "count_time", "count_teps", "kernel_time", "kernel_teps"};
  for (const char *phase : phases) {
    for (auto i = 0; i < opts.iters; ++i) {
      fmt::print("{}{}{}", opts.sep, phase, i);
    }
  }
  fmt::print("\n");
}
// Run the full benchmark `opts.iters` times with Index-width node ids:
// read the graph and build the CSR concurrently (reader + builder threads),
// apply the requested memory hints, partition edges evenly across GPUs,
// count triangles asynchronously on each GPU, then print one CSV row whose
// columns match print_header().
// NOTE(review): the "/ 1e9" conversions assume system_clock durations tick
// in nanoseconds -- true for libstdc++, but implementation-defined.
template <typename Index> int run(RunOptions &opts) {
typedef pangolin::DiEdge<Index> Edge;
auto gpus = opts.gpus;
if (gpus.empty()) {
LOG(warn, "no GPUs provided on command line, using GPU 0");
gpus.push_back(0);
}
// create a stream for each GPU
std::vector<Stream> streams;
for (const auto &gpu : gpus) {
streams.push_back(Stream(gpu));
LOG(debug, "created stream {} for gpu {}", streams.back(), gpu);
}
// per-iteration measurements, emitted as CSV columns after the loop
std::vector<double> totalTimes;
std::vector<double> gpuTimes;
std::vector<double> countTimes;
std::vector<double> kernelTimes;
uint64_t nnz;
uint64_t numRows;
uint64_t tris;
// create csr and count `opts.iters` times
for (int i = 0; i < opts.iters; ++i) {
// read data
const auto totalStart = std::chrono::system_clock::now();
Buffer<std::vector<Edge>> queue;
pangolin::CSRCOO<Index> csr;
// start a thread to read the matrix data
LOG(debug, "start disk reader");
std::thread reader(produce<Edge>, opts.path, std::ref(queue));
// start a thread to build the matrix
LOG(debug, "start csr build");
std::thread builder(consume<pangolin::CSRCOO<Index>>, std::ref(queue), std::ref(csr));
// consume(queue, csr, &readerActive);
LOG(debug, "waiting for disk reader...");
reader.join();
LOG(debug, "waiting for CSR builder...");
builder.join();
assert(queue.empty());
if (opts.shrinkToFit) {
LOG(debug, "shrink CSR");
csr.shrink_to_fit();
}
double elapsed = (std::chrono::system_clock::now() - totalStart).count() / 1e9;
LOG(info, "io/csr time {}s", elapsed);
LOG(debug, "CSR nnz = {} rows = {}", csr.nnz(), csr.num_rows());
LOG(debug, "CSR cap = {}MB size = {}MB", csr.capacity_bytes() / 1024 / 1024, csr.size_bytes() / 1024 / 1024);
const auto gpuStart = std::chrono::system_clock::now();
// read-mostly
nvtxRangePush("read-mostly");
auto start = std::chrono::system_clock::now();
if (opts.readMostly) {
csr.read_mostly();
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
nvtxRangePop();
LOG(info, "read-mostly CSR time {}s", elapsed);
// accessed-by
start = std::chrono::system_clock::now();
if (opts.accessedBy) {
for (const auto &gpu : gpus) {
csr.accessed_by(gpu);
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "accessed-by CSR time {}s", elapsed);
// prefetch
start = std::chrono::system_clock::now();
if (opts.prefetchAsync) {
for (size_t gpuIdx = 0; gpuIdx < gpus.size(); ++gpuIdx) {
auto &gpu = gpus[gpuIdx];
cudaStream_t stream = streams[gpuIdx].stream();
csr.prefetch_async(gpu, stream);
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "prefetch CSR time {}s", elapsed);
// barrier so outstanding hint/prefetch work is excluded from count timing
if (opts.preCountBarrier) {
LOG(debug, "sync streams after hints");
for (auto &stream : streams) {
stream.sync();
}
}
// count triangles
nvtxRangePush("count");
const auto countStart = std::chrono::system_clock::now();
// create async counters
std::vector<pangolin::BinaryTC> counters;
for (size_t gpuIdx = 0; gpuIdx < gpus.size(); ++gpuIdx) {
auto dev = gpus[gpuIdx];
cudaStream_t stream = streams[gpuIdx].stream();
LOG(debug, "create device {} counter", dev);
counters.push_back(std::move(pangolin::BinaryTC(dev, stream)));
}
// determine the number of edges per gpu
const size_t edgesPerGPU = (csr.nnz() + gpus.size() - 1) / gpus.size();
LOG(debug, "{} edges per GPU", edgesPerGPU);
// launch counting operations
size_t edgeStart = 0;
for (auto &counter : counters) {
// the final partition may hold fewer than edgesPerGPU edges
const size_t edgeStop = std::min(edgeStart + edgesPerGPU, csr.nnz());
const size_t numEdges = edgeStop - edgeStart;
LOG(debug, "start async count on GPU {} ({} edges)", counter.device(), numEdges);
counter.count_async(csr.view(), numEdges, edgeStart, opts.blockSize, opts.coarsening);
edgeStart += edgesPerGPU;
}
// wait for counting operations to finish
uint64_t total = 0;
for (auto &counter : counters) {
LOG(debug, "wait for counter on GPU {}", counter.device());
counter.sync();
total += counter.count();
}
const auto stop = std::chrono::system_clock::now();
nvtxRangePop(); // count
LOG(info, "{} triangles", total);
// record graph stats
tris = total;
nnz = csr.nnz();
numRows = csr.num_rows();
const double totalElapsed = (stop - totalStart).count() / 1e9;
const double gpuElapsed = (stop - gpuStart).count() / 1e9;
const double countElapsed = (stop - countStart).count() / 1e9;
LOG(info, "total time {}s ({} teps)", totalElapsed, nnz / totalElapsed);
LOG(info, "gpu time {}s ({} teps)", gpuElapsed, nnz / gpuElapsed);
LOG(info, "count time {}s ({} teps)", countElapsed, nnz / countElapsed);
totalTimes.push_back(totalElapsed);
gpuTimes.push_back(gpuElapsed);
countTimes.push_back(countElapsed);
// per-GPU kernel timings (informational)
for (auto &counter : counters) {
double secs = counter.kernel_time();
int dev = counter.device();
LOG(info, "gpu {} kernel time {}s ({} teps)", dev, secs, nnz / secs);
}
// a single kernel-time column is only meaningful for single-GPU runs
if (counters.size() == 1) {
kernelTimes.push_back(counters[0].kernel_time());
} else {
kernelTimes.push_back(0);
}
}
// emit one CSV row; column order must match print_header()
if (opts.iters > 0) {
fmt::print("binary");
fmt::print("{}{}", opts.sep, opts.blockSize);
std::string gpuStr;
for (auto gpu : gpus) {
gpuStr += std::to_string(gpu);
}
fmt::print("{}{}", opts.sep, gpuStr);
fmt::print("{}{}", opts.sep, opts.path);
fmt::print("{}{}", opts.sep, numRows);
fmt::print("{}{}", opts.sep, nnz);
fmt::print("{}{}", opts.sep, tris);
print_vec(totalTimes, opts.sep);
for (const auto &s : totalTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(gpuTimes, opts.sep);
for (const auto &s : gpuTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(countTimes, opts.sep);
for (const auto &s : countTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(kernelTimes, opts.sep);
for (const auto &s : kernelTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
fmt::print("\n");
}
return 0;
}
int main(int argc, char **argv) {
// Initialize the pangolin runtime before anything else.
pangolin::init();
// Defaults for the benchmark; the command-line flags parsed below override them.
RunOptions opts;
opts.sep = ",";
opts.blockSize = 512;
opts.coarsening = 1;
opts.iters = 1;
opts.shrinkToFit = false;
opts.readMostly = false;
opts.accessedBy = false;
opts.prefetchAsync = false;
opts.preCountBarrier = true;
bool help = false;
bool debug = false;
bool verbose = false;
bool onlyPrintHeader = false;
bool wide = false; // use 64-bit node ids when set
// Assemble the command-line interface.
clara::Parser cli;
cli = cli | clara::Help(help);
cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr");
cli = cli | clara::Opt(verbose)["--verbose"]("print verbose messages to stderr");
cli = cli | clara::Opt(onlyPrintHeader)["--header"]("print the header for the times output and quit");
cli = cli | clara::Opt(wide)["--wide"]("64-bit node IDs");
cli = cli | clara::Opt(opts.gpus, "dev ids")["-g"]("gpus to use");
cli = cli | clara::Opt(opts.coarsening, "coarsening")["-c"]("Number of elements per thread");
cli = cli | clara::Opt(opts.blockSize, "block-dim")["--bs"]("Number of threads in a block");
cli = cli | clara::Opt(opts.shrinkToFit)["--shrink-to-fit"]("shrink allocations to fit data");
cli = cli | clara::Opt(opts.readMostly)["--read-mostly"]("mark data as read-mostly by all gpus before kernel");
cli = cli | clara::Opt(opts.accessedBy)["--accessed-by"]("mark data as accessed-by all GPUs before kernel");
cli = cli | clara::Opt(opts.prefetchAsync)["--prefetch-async"]("prefetch data to all GPUs before kernel");
cli = cli | clara::Opt(opts.iters, "N")["-n"]("number of counts");
cli = cli | clara::Arg(opts.path, "graph file")("Path to adjacency list").required();
auto result = cli.parse(clara::Args(argc, argv));
if (!result) {
LOG(error, "Error in command line: {}", result.errorMessage());
exit(1);
}
if (help) {
std::cout << cli;
return 0;
}
// set logging level
if (verbose) {
pangolin::logger::set_level(pangolin::logger::Level::TRACE);
} else if (debug) {
pangolin::logger::set_level(pangolin::logger::Level::DEBUG);
}
// log command line before much else happens
{
std::string cmd;
for (int i = 0; i < argc; ++i) {
if (i != 0) {
cmd += " ";
}
cmd += argv[i];
}
LOG(debug, cmd);
}
// Record build provenance so benchmark runs are reproducible.
LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR, PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH);
LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC);
LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH);
LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES);
#ifndef NDEBUG
LOG(warn, "Not a release build");
#endif
if (onlyPrintHeader) {
print_header(opts);
return 0;
}
// Dispatch on node-id width: run<> is templated on the index type.
if (wide) {
return run<uint64_t>(opts);
} else {
return run<uint32_t>(opts);
}
}
|
988f5eb404a41246a8e09d02e51d10a66a4aa8d5.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <stdio.h>
#include <time.h>
#include "cuHeader.h"
#include <math.h>
#include <hip/hip_runtime.h>
#include "sortlib.h"
/// Seed the C standard PRNG from the current wall-clock time.
void initRand(){
  time_t now = time(NULL);
  srand((unsigned) now);
}
/// using malloced nElem*3*sizeof(float) buffer,write uniform particles postiion.
/// and Mold buffer pointer to V3Buf
/// nElem must be 4(k)^3 !!! now Implemention is nElem == 256
V3Buf CreateUniformParticles(float* buf,float rho,int nElem,float* p_length){
// Cells per box edge: cube root of nElem/4 (4 atoms per cell); the +0.1f
// guards against the float cube root landing just below an integer.
int nloop = (int)powf((float)(nElem/4)+0.1f,1.0f/3);
printf("%d\n",nloop); // debug print: cells per edge
// Box edge length chosen so nElem particles give number density rho.
float length = powf(nElem/rho,1.0f/3);
*p_length = length;
float a = length/nloop; // unit-cell edge length
float ah = a/2;
// 4-atom basis offsets within each cell (FCC-like arrangement -- TODO confirm)
float px[4] = {0,0,ah,ah};
float py[4] = {0,ah,ah,0};
float pz[4] = {0,ah,0,ah};
// SoA layout: buf holds all x, then all y, then all z.
float *h_x,*h_y,*h_z;
h_x = buf;
h_y = h_x + nElem;
h_z = h_y + nElem;
for (int i=0; i<nElem;i++){
h_x[i]=0;
h_y[i]=0;
h_z[i]=0;
}
// Replicate the 4-atom basis over an nloop^3 grid of cells.
int i=0;
for (int ix = 0;ix<nloop;ix++){
for (int iy = 0;iy<nloop;iy++){
for(int iz=0;iz<nloop;iz++){
for (int ia=0;ia<4;ia++){
h_x[i] = px[ia] + ix * a;
h_y[i] = py[ia] + iy * a;
h_z[i] = pz[ia] + iz * a;
i++;
}
}
}
}
V3Buf v3Buf = {h_x,h_y,h_z,nElem};
return v3Buf;
}
/// using malloced nElem*3*sizeof(float) buffer,write random particle position.
/// and Mold buffer pointer to V3Buf
/// nElem must be integer.
/// Fill a pre-allocated nElem*3 float buffer (SoA layout: all x, then all y,
/// then all z) with velocity components drawn uniformly from [-1, 1), and
/// wrap the buffer in a V3Buf descriptor.
V3Buf CreateRandomVelocity(float* buf,int nElem){
  float *vx = buf;
  float *vy = vx + nElem;
  float *vz = vy + nElem;
  for (int i = 0; i < nElem; i++) {
    // rand()/(RAND_MAX+1) is uniform in [0,1); scale and shift to [-1,1).
    // rand() is consumed in x, y, z order, matching the original sequence.
    vx[i] = ((float)rand() / ((float)RAND_MAX + 1)) *2 -1;
    vy[i] = ((float)rand() / ((float)RAND_MAX + 1)) *2 -1;
    vz[i] = ((float)rand() / ((float)RAND_MAX + 1)) *2 -1;
  }
  V3Buf out = {vx, vy, vz, nElem};
  return out;
}
/// CPU reference force computation: Lennard-Jones interactions over all
/// pairs i<j with minimum-image periodic boundaries and a cutoff at r = 4.
/// Fills `force` (SoA planes fx, fy, fz) and the summed pair potential in
/// *potential; the returned V3Buf aliases `force`.
V3Buf CalculateForce(float *force,float *pos,int nElem,float length,double *potential){
float *h_fx,*h_fy,*h_fz,*h_px,*h_py,*h_pz;
h_fx = force;
h_fy = h_fx + nElem;
h_fz = h_fy + nElem;
h_px = pos;
h_py = h_px + nElem;
h_pz = h_py + nElem;
for (int i=0;i<nElem*3;i++){// initialization: zero the force accumulators
force[i]=0.0f;
}
*potential = 0.0;
// LJ coefficients for U(r) = ce12/r^12 - ce06/r^6 with eps = sigma = 1.
float eps = 1.0f;
float sigma = 1.0f;
float ce12 = 4.0f*eps*powf(sigma,12);
float ce06 = 4.0f*eps*powf(sigma, 6);
float cf12 = ce12 * 12.0f;
float cf06 = ce06 * 6.0f;
for (int j=0;j<nElem;j++){
for (int i=0;i<j;i++){
float dx,dy,dz,r2,r2i,r06i,r12i,fc,fx,fy,fz;
dx = h_px[i]-h_px[j];
dy = h_py[i]-h_py[j];
dz = h_pz[i]-h_pz[j];
// minimum-image convention: interact with the nearest periodic image
if(dx<-length/2) dx+=length;
if(dx> length/2) dx-=length;
if(dy<-length/2) dy+=length;
if(dy> length/2) dy-=length;
if(dz<-length/2) dz+=length;
if(dz> length/2) dz-=length;
r2 = dx*dx+dy*dy+dz*dz;
if (r2 > 4*4)continue; // interaction cutoff at radius 4
r2i = 1.0f/r2;
r06i = r2i * r2i * r2i;
r12i = r06i * r06i;
*potential += (ce12 * r12i - ce06 * r06i);
fc = (cf12*r12i-cf06*r06i)*r2i;
fx = fc * dx;
fy = fc * dy;
fz = fc * dz;
// Newton's third law: equal and opposite forces on i and j
h_fx[i]+=fx;
h_fy[i]+=fy;
h_fz[i]+=fz;
h_fx[j]-=fx;
h_fy[j]-=fy;
h_fz[j]-=fz;
}
}
V3Buf v3Buf = {h_fx,h_fy,h_fz,nElem};
return v3Buf;
}
/// Compute and print the total energy H = kinetic + potential.
/// Kinetic energy is sum(v^2)/2 per particle (unit mass); `potential` is
/// supplied by the force computation. Prints "H kinetic potential" to
/// stdout and returns H.
/// The `pos` argument is unused: the per-particle radius previously derived
/// from it was dead code. It is kept so existing call sites remain valid.
double CalculateHamiltonian(float* pos,float* vel,int nElem,double potential){
  (void)pos; // intentionally unused (interface stability)
  float *h_vx = vel;
  float *h_vy = h_vx + nElem;
  float *h_vz = h_vy + nElem;
  double energy = 0.0;
  for (int i = 0; i < nElem; i++) {
    float vx = h_vx[i], vy = h_vy[i], vz = h_vz[i];
    float v2 = vx*vx + vy*vy + vz*vz;
    energy += (double)(v2/2);
  }
  printf("%lf %lf %lf\n", energy+potential, energy, potential);
  return energy+potential;
}
/// Exchange two float-buffer pointers (used to swap the current and
/// previous force buffers between integration steps).
void SwapFloatPointer(float** a,float **b){
  float *held = *a;
  *a = *b;
  *b = held;
}
/// All-pairs Lennard-Jones force kernel: one thread per particle i sums the
/// force from every other particle j under minimum-image periodic boundaries.
/// Launch: 1-D grid of 1-D blocks. The host wrapper sizes the grid with
/// ceil-division, so trailing threads may have idx >= nElem and must exit
/// before touching memory.
__global__ void CalculateForce_GPUNaive(float *force,float *pos,int nElem,float length){
  int idx = threadIdx.x + blockIdx.x * blockDim.x; //1dim grid, 1dim block
  if (idx >= nElem) return; // guard: ceil-div launches can overshoot nElem
  // SoA layout: fx/fy/fz and x/y/z planes of nElem floats each.
  float *d_fx,*d_fy,*d_fz,*d_px,*d_py,*d_pz;
  d_fx = force;
  d_fy = d_fx + nElem;
  d_fz = d_fy + nElem;
  d_px = pos;
  d_py = d_px + nElem;
  d_pz = d_py + nElem;
  // per-thread force accumulators (written back once at the end)
  float t_fx=0.0f,t_fy=0.0f,t_fz=0.0f;
  //*potential = 0.0; must implement to calculate Hamiltoniam...
  // LJ coefficients for eps = sigma = 1; could live in constant memory.
  float eps = 1.0f;
  float sigma = 1.0f;
  float ce12 = 4.0f*eps*powf(sigma,12);
  float ce06 = 4.0f*eps*powf(sigma, 6);
  float cf12 = ce12 * 12.0f;
  float cf06 = ce06 * 6.0f;
  int i = idx;
  for (int j=0;j<nElem;j++){// accumulate force of j on i
    if (i==j)continue;
    float dx,dy,dz,r2,r2i,r06i,r12i,fc,fx,fy,fz;
    dx = d_px[i]-d_px[j];
    dy = d_py[i]-d_py[j];
    dz = d_pz[i]-d_pz[j];
    // minimum-image convention for the periodic box
    if(dx<-length/2) dx+=length;
    if(dx> length/2) dx-=length;
    if(dy<-length/2) dy+=length;
    if(dy> length/2) dy-=length;
    if(dz<-length/2) dz+=length;
    if(dz> length/2) dz-=length;
    r2 = dx*dx+dy*dy+dz*dz;
    //if (r2 > 4*4)continue; //Cut force with far from cut radius;this may be MEANINGLESS in GPU.
    r2i = 1.0f/r2;
    r06i = r2i * r2i * r2i;
    r12i = r06i * r06i;
    //*potential += (ce12 * r12i - ce06 * r06i);
    fc = (cf12*r12i-cf06*r06i)*r2i;
    fx = fc * dx;
    fy = fc * dy;
    fz = fc * dz;
    t_fx+=fx;
    t_fy+=fy;
    t_fz+=fz;
  }
  d_fx[i]=t_fx;
  d_fy[i]=t_fy;
  d_fz[i]=t_fz;
}
/// Host wrapper for the all-pairs force kernel: copy positions to the
/// device, launch one thread per particle, and copy forces back.
/// The final blocking hipMemcpy makes the call synchronous.
void CalculateForce_UseGPU_Naive(float* h_p,float *h_f,float* d_p,float *d_f,int nElem,float length){
  int nBytes = nElem * sizeof(float);
  hipMemcpy(d_p,h_p,nBytes*3,hipMemcpyHostToDevice);
  dim3 block(256);
  // ceil-division: covers any nElem, not just multiples of the block size
  dim3 grid((nElem+block.x-1)/block.x);
  hipLaunchKernelGGL(( CalculateForce_GPUNaive), dim3(grid),dim3(block), 0, 0, d_f,d_p,nElem,length);
  CHECK(hipGetLastError()); // surface launch errors, consistent with CalculateForce_UseGPU
  hipMemcpy(h_f,d_f,nBytes*3,hipMemcpyDeviceToHost);
}
#define HSF 6//hash size = 1<<HSF
/// Map one coordinate to its grid-cell index along a single axis.
/// The result is clamped to [0, gC-1]: after periodic wrapping, float
/// rounding can make v/hL reach gC exactly (the "x == hL * gC" hazard the
/// original code flagged at getHash), which would otherwise produce an
/// out-of-range cell index.
__host__ __device__ inline int getHashPart(float v,int gC,float hL){
  int cell = (int)floorf(v/hL);
  if (cell < 0) cell = 0;
  if (cell > gC - 1) cell = gC - 1;
  return cell;
}
/// Pack three per-axis cell indices into a single linear cell hash, with
/// HSF bits reserved per axis (valid only while the grid count stays below
/// 1<<HSF, as the original implementation notes).
__host__ __device__ inline int HashParts2Hash(int Hx,int Hy,int Hz){
  const int side = 1 << HSF;
  return Hx + side * (Hy + side * Hz); // Horner form of Hx + Hy*side + Hz*side^2
}
// Compute the linear spatial-cell hash of point (x, y, z) by hashing each
// axis independently and packing the three results.
__host__ __device__ inline int getHash(float x,float y,float z,int gC,float hL){
// gC is grid Count(1dim)
// hL is grid Length(one grid,1dim)
int Hx = getHashPart(x,gC,hL); //!!! if x == hL * gC ,it cause error!!!
int Hy = getHashPart(y,gC,hL);
int Hz = getHashPart(z,gC,hL);
return HashParts2Hash(Hx,Hy,Hz);
}
/// Inverse of HashParts2Hash: unpack the three HSF-bit per-axis cell indices
/// from a linear cell hash. (Previously an unimplemented stub that left
/// x, y, z untouched.)
__host__ __device__ inline void recoverHash(int hash,int &x,int &y,int &z){
  const int mask = (1 << HSF) - 1;
  x = hash & mask;
  y = (hash >> HSF) & mask;
  z = (hash >> (2*HSF)) & mask;
}
/// One thread per particle: compute the spatial-cell hash of particle idx
/// and store it with the particle index for the later key/value sort.
/// d_hash layout: [0, nElem) holds hashes, [nElem, 2*nElem) holds indices.
__global__ void HashFromPosition(float *d_p,int *d_hash,int nElem,int gC,float hL){
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx >= nElem) return; // guard against grids that overshoot nElem
  float x = d_p[idx];
  float y = d_p[nElem+idx];
  float z = d_p[2*nElem+idx];
  int h = getHash(x,y,z,gC,hL);
  d_hash[idx] = h;
  d_hash[idx+nElem] = idx; // value half of the (hash, particle-index) pair
}
/// Build the Hash Range Reference Map from the SORTED hash array: for each
/// hash value h present, d_HRRM[h] receives the first index of its run in
/// d_hash and d_HRRM[h+nHash] receives one past the last. Each thread
/// inspects the boundary between element idx and idx+1.
__global__ void GenerateHRRM(int *d_hash,int *d_HRRM,int nElem){
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx >= nElem) return; // guard against grids that overshoot nElem
  int nHash = 1<<(HSF*3);
  int ths = d_hash[idx];
  if (idx==0){
    d_HRRM[ths] = 0; // first run always starts at index 0
  }
  if(idx+1<nElem){
    int nxt = d_hash[idx+1];
    if (ths != nxt){
      // run boundary: close the current run and open the next one
      d_HRRM[ths+nHash] = idx+1;
      d_HRRM[nxt] = idx+1;
    }
  }else{//idx = nElem-1
    d_HRRM[ths+nHash] = nElem; // last run ends at nElem
  }
}
/// Cell-list force kernel: one thread per particle. Using the sorted
/// (hash, index) array and the hash-range map `hrrm`, each thread visits
/// only the particles in the 27 cells neighboring its own cell instead of
/// all nElem particles. Per-particle LJ potential contributions go to
/// d_pot (each pair counted twice; the host divides the sum by 2).
/// Single-character printf's ("D","H","G","E") are in-kernel consistency
/// checks left in for debugging.
/// NOTE(review): no idx bounds guard -- relies on the caller launching an
/// exact grid (nElem a multiple of the block size); confirm before reuse.
__global__ void CalculateForce_GPUSort(float *force,float *pos,int* hrrm,int *hash,int nElem,float length,int gC,float hL,float *d_pot){
int idx = threadIdx.x + blockIdx.x * blockDim.x; //1dim grid ,1im block
float *d_fx,*d_fy,*d_fz,*d_px,*d_py,*d_pz;
d_fx = force;
d_fy = d_fx + nElem;
d_fz = d_fy + nElem;
d_px = pos;
d_py = d_px + nElem;
d_pz = d_py + nElem;
// per-thread force accumulators
float t_fx=0.0f,t_fy=0.0f,t_fz=0.0f;
double potential = 0.0;// per-particle potential contribution, for the Hamiltonian
//Is it better to load this constants from ConstantMemory?
float eps = 1.0f;
float sigma = 1.0f;
float ce12 = 4.0f*eps*powf(sigma,12);
float ce06 = 4.0f*eps*powf(sigma, 6);
float cf12 = ce12 * 12.0f;
float cf06 = ce06 * 6.0f;
int i = idx;
float px = d_px[i];
float py = d_py[i];
float pz = d_pz[i];
// cell coordinates of this particle
int Hx = getHashPart(px,gC,hL);
int Hy = getHashPart(py,gC,hL);
int Hz = getHashPart(pz,gC,hL);
int nHash = 1<<(HSF*3);
for (int c=0;c<27;c++){
// decode c into neighbor-cell offsets in {-1,0,1}^3
int dHx = c%3-1;
int dHy = (c/3)%3-1;
int dHz = (c/3/3)%3-1;
// periodic wrap of the neighbor cell coordinates
int pHx = (Hx+gC+dHx)%gC;
int pHy = (Hy+gC+dHy)%gC;
int pHz = (Hz+gC+dHz)%gC;
int h = HashParts2Hash(pHx,pHy,pHz);
// [start, end) range of this cell's particles in the sorted array
int start = hrrm[h];
int end = hrrm[h+nHash];
if(!(start<=end & end-start<1000000)){printf("D");};
for (int k=start;k<end;k++){
int j = hash[k+nElem]; // particle index stored alongside the hash
if (hash[k]!=h)printf("H");
if (i==j)continue;
float dx,dy,dz,r2,r2i,r06i,r12i,fc,fx,fy,fz;
float qx,qy,qz;
qx = d_px[j];
qy = d_py[j];
qz = d_pz[j];
dx = px-qx;
dy = py-qy;
dz = pz-qz;
// minimum-image convention for the periodic box
if(dx<-length/2) dx+=length;
if(dx> length/2) dx-=length;
if(dy<-length/2) dy+=length;
if(dy> length/2) dy-=length;
if(dz<-length/2) dz+=length;
if(dz> length/2) dz-=length;
if (c==13 & dx>hL*3)printf("G");
if (getHash(qx,qy,qz,gC,hL)!=h)printf("E");
r2 = dx*dx+dy*dy+dz*dz;
r2i = 1.0f/r2;
r06i = r2i * r2i * r2i;
r12i = r06i * r06i;
potential += (ce12 * r12i - ce06 * r06i);
fc = (cf12*r12i-cf06*r06i)*r2i;
fx = fc * dx;
fy = fc * dy;
fz = fc * dz;
t_fx+=fx;
t_fy+=fy;
t_fz+=fz;
}
}
d_fx[i]=t_fx;
d_fy[i]=t_fy;
d_fz[i]=t_fz;
d_pot[i]=potential;
}
// Scratch buffers reused by CalculateForce_UseGPU across integration steps.
typedef struct{
float * h_pot; // host copy of the per-particle potential
float * d_pot; // device per-particle potential, written by the force kernel
int *d_HashKey; //nElem * 2 (pair of hash and key);
int *d_HashKeyWork; // work buffer for the sort
int *d_HRRM; // hash-range reference map: start/end index per hash value
int *h_HashKeyTest;//TestBuf
int *h_HRRMTest; // host copy of the HRRM, for debugging/verification
float *h_nElemF; // host copy of device positions, for debugging/verification
} WorkBufList;
/// Cell-list force pipeline: copy positions to the device, hash each
/// particle into a spatial grid cell, sort the (hash, index) pairs, build
/// the hash-range map, then run the 27-neighbor-cell force kernel. Forces
/// and per-particle potentials are copied back to host buffers; the
/// blocking memcpys make the call synchronous.
void CalculateForce_UseGPU(float* h_p,float *h_f,float* d_p,float *d_f,int nElem,float length,WorkBufList wbl){
float cutRadius = 4.0f;
int gC = floor(length/cutRadius); //grid Size(1dim)
float hL = length/gC; // cell edge length (>= cutRadius)
int nHash = 1<<(HSF*3);
int nBytes = nElem * sizeof(float);
hipMemcpy(d_p,h_p,nBytes*3,hipMemcpyHostToDevice);
dim3 block(256);
dim3 grid(nElem/block.x); // exact division: nElem must be a multiple of 256
//printf("G<%d>",grid.x);
CHECK(hipGetLastError());
//Kernel:Hashing (Generate Hash And Embed key)
hipLaunchKernelGGL(( HashFromPosition), dim3(grid),dim3(block), 0, 0, d_p,wbl.d_HashKey,nElem,gC,hL);
hipDeviceSynchronize();
CHECK(hipGetLastError());
//Kernel:Sort Key based on Hash.
int nLog2Elem = (int)log2f((float)(nElem)+0.1f);
//printf ("%d",nLog2Elem);
sort(wbl.d_HashKey,wbl.d_HashKeyWork,nLog2Elem); // CAUTION: second parameter is a work buffer only (first is sorted in place)
CHECK(hipGetLastError());
//Kernel:Generate Hash range reference map(HRRM).
hipMemset(wbl.d_HRRM,0,nHash*2*sizeof(int));
hipLaunchKernelGGL(( GenerateHRRM), dim3(grid),dim3(block), 0, 0, wbl.d_HashKey,wbl.d_HRRM,nElem);
hipDeviceSynchronize();
CHECK(hipGetLastError());
// host-side copies for debugging/verification only
hipMemcpy(wbl.h_HRRMTest,wbl.d_HRRM,nHash*2*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(wbl.h_HashKeyTest,wbl.d_HashKey,nElem*2*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(wbl.h_nElemF,d_p,nBytes*3,hipMemcpyDeviceToHost);
//Kernel:using Non-Aligned data and HRRM and Sorted Key-Hash,calculate Force fast.
hipLaunchKernelGGL(( CalculateForce_GPUSort), dim3(grid),dim3(block), 0, 0, d_f,d_p,wbl.d_HRRM,wbl.d_HashKey,nElem,length,gC,hL,wbl.d_pot);
//CalculateForce_GPUNaive<<<grid,block>>>(d_f,d_p,nElem,length);
hipMemcpy(wbl.h_pot,wbl.d_pot,nBytes,hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
hipMemcpy(h_f,d_f,nBytes*3,hipMemcpyDeviceToHost);
CHECK(hipGetLastError());
}
/// Velocity-Verlet MD driver: allocate host/device buffers, lay particles
/// on a uniform lattice with random velocities, then integrate forever,
/// handing the position buffer to the external graphics callback every
/// 20 steps. Never returns.
/// NOTE(review): the loop never exits, so none of the buffers are freed.
void cuMain(void (*grpc)(V3Buf buf) ){
initRand();
//Buffer Initialization (CPU)
int nElem = 256*8*8*8;
int nBytes = nElem * sizeof(float);
float *h_p,*h_v,*h_f,*h_fd,*h_pot;
h_p = (float*)malloc(nBytes*3);
h_v = (float*)malloc(nBytes*3);
h_f = (float*)malloc(nBytes*3);
h_fd= (float*)malloc(nBytes*3);
h_pot = (float*)malloc(nBytes);
//Buffer Initialization (GPU)
float *d_p,*d_f,*d_pot;
int *d_HashKeyIn,*d_HashKeyOut,*d_HRRM;
hipMalloc(&d_p,nBytes*3);
hipMalloc(&d_f,nBytes*3);
hipMalloc(&d_HashKeyIn,nElem*2*sizeof(int));
hipMalloc(&d_HashKeyOut,nElem*2*sizeof(int));
hipMalloc(&d_HRRM,(1<<(HSF*3))*2*sizeof(int));//HRRM size is determined by HashMax * 2
hipMalloc(&d_pot,nBytes);
float *h_df; //Test Buf;
int *h_dh,*h_d_HRRM; // Test Buf;
h_df = (float *)malloc(nBytes*3);
h_dh = (int *)malloc(nElem*2*sizeof(int));
h_d_HRRM = (int *)malloc((1<<(HSF*3))*2*sizeof(int));
WorkBufList wbl = {h_pot,d_pot,d_HashKeyIn,d_HashKeyOut,d_HRRM,h_dh,h_d_HRRM,h_df};
//Buffer Setting
float length;
V3Buf h_v3pos = CreateUniformParticles(h_p,0.5f,nElem,&length);
V3Buf h_v3vel = CreateRandomVelocity(h_v,nElem);
printf("length%f\n",length);
// overwrite the random velocities with a deterministic pattern in [-2, 2)
for (int i=0;i<nElem*3;i++){
//h_v[i]*=10.0f;
h_v[i]=(float)((i*7)%13)/13.0f*2.0f-1.0f;
h_v[i]*=2.0f;
}
double potential;
// initial forces so the first Verlet step has f(t)
CalculateForce_UseGPU_Naive(h_p,h_f,d_p,d_f,nElem,length);
float dt = 0.005;
int it = 0;
while(true){
//Graphics Functon(External) :transfer postion buffer to graphics function;
if (it%20==19) (*grpc)(h_v3pos);//20
//Position Update: x += dt*(v + dt/2 * f), wrapped into the periodic box
for (int i=0;i<nElem*3;i++){
float p = h_p[i];
p+=dt*(h_v[i]+0.5f*dt*h_f[i]);
p = p- floorf(p/length)*length;
h_p[i] = p;
}
//Force buffer becomes old because of position updated.
SwapFloatPointer(&h_f,&h_fd);
//CalculateForce_UseGPU_Naive(h_p,h_f,d_p,d_f,nElem,length);
CalculateForce_UseGPU(h_p,h_f,d_p,d_f,nElem,length,wbl);
// velocity update: v += dt/2 * (f(t) + f(t+dt))
for (int i=0;i<nElem*3;i++){
h_v[i]+=dt*0.5*(h_f[i]+h_fd[i]);
}
//printf("%f",h_p[1000]);
// sum per-particle potential; halve because each pair was counted twice
potential = 0;
for (int i=0;i<nElem;i++){
potential += h_pot[i];
}
potential /=2.;
CalculateHamiltonian(h_p,h_v,nElem,potential);
it++;
}
} | 988f5eb404a41246a8e09d02e51d10a66a4aa8d5.cu | #pragma once
#include <stdio.h>
#include <time.h>
#include "cuHeader.h"
#include <math.h>
#include <cuda_runtime.h>
#include "sortlib.h"
/// Seed the C standard PRNG from the current wall-clock time.
void initRand(){
  time_t now = time(NULL);
  srand((unsigned) now);
}
/// using malloced nElem*3*sizeof(float) buffer,write uniform particles postiion.
/// and Mold buffer pointer to V3Buf
/// nElem must be 4(k)^3 !!! now Implemention is nElem == 256
V3Buf CreateUniformParticles(float* buf,float rho,int nElem,float* p_length){
// Cells per box edge: cube root of nElem/4 (4 atoms per cell); the +0.1f
// guards against the float cube root landing just below an integer.
int nloop = (int)powf((float)(nElem/4)+0.1f,1.0f/3);
printf("%d\n",nloop); // debug print: cells per edge
// Box edge length chosen so nElem particles give number density rho.
float length = powf(nElem/rho,1.0f/3);
*p_length = length;
float a = length/nloop; // unit-cell edge length
float ah = a/2;
// 4-atom basis offsets within each cell (FCC-like arrangement -- TODO confirm)
float px[4] = {0,0,ah,ah};
float py[4] = {0,ah,ah,0};
float pz[4] = {0,ah,0,ah};
// SoA layout: buf holds all x, then all y, then all z.
float *h_x,*h_y,*h_z;
h_x = buf;
h_y = h_x + nElem;
h_z = h_y + nElem;
for (int i=0; i<nElem;i++){
h_x[i]=0;
h_y[i]=0;
h_z[i]=0;
}
// Replicate the 4-atom basis over an nloop^3 grid of cells.
int i=0;
for (int ix = 0;ix<nloop;ix++){
for (int iy = 0;iy<nloop;iy++){
for(int iz=0;iz<nloop;iz++){
for (int ia=0;ia<4;ia++){
h_x[i] = px[ia] + ix * a;
h_y[i] = py[ia] + iy * a;
h_z[i] = pz[ia] + iz * a;
i++;
}
}
}
}
V3Buf v3Buf = {h_x,h_y,h_z,nElem};
return v3Buf;
}
/// using malloced nElem*3*sizeof(float) buffer,write random particle position.
/// and Mold buffer pointer to V3Buf
/// nElem must be integer.
/// Fill a pre-allocated nElem*3 float buffer (SoA layout: all x, then all y,
/// then all z) with velocity components drawn uniformly from [-1, 1), and
/// wrap the buffer in a V3Buf descriptor.
V3Buf CreateRandomVelocity(float* buf,int nElem){
  float *vx = buf;
  float *vy = vx + nElem;
  float *vz = vy + nElem;
  for (int i = 0; i < nElem; i++) {
    // rand()/(RAND_MAX+1) is uniform in [0,1); scale and shift to [-1,1).
    // rand() is consumed in x, y, z order, matching the original sequence.
    vx[i] = ((float)rand() / ((float)RAND_MAX + 1)) *2 -1;
    vy[i] = ((float)rand() / ((float)RAND_MAX + 1)) *2 -1;
    vz[i] = ((float)rand() / ((float)RAND_MAX + 1)) *2 -1;
  }
  V3Buf out = {vx, vy, vz, nElem};
  return out;
}
/// CPU reference force computation: Lennard-Jones interactions over all
/// pairs i<j with minimum-image periodic boundaries and a cutoff at r = 4.
/// Fills `force` (SoA planes fx, fy, fz) and the summed pair potential in
/// *potential; the returned V3Buf aliases `force`.
V3Buf CalculateForce(float *force,float *pos,int nElem,float length,double *potential){
float *h_fx,*h_fy,*h_fz,*h_px,*h_py,*h_pz;
h_fx = force;
h_fy = h_fx + nElem;
h_fz = h_fy + nElem;
h_px = pos;
h_py = h_px + nElem;
h_pz = h_py + nElem;
for (int i=0;i<nElem*3;i++){// initialization: zero the force accumulators
force[i]=0.0f;
}
*potential = 0.0;
// LJ coefficients for U(r) = ce12/r^12 - ce06/r^6 with eps = sigma = 1.
float eps = 1.0f;
float sigma = 1.0f;
float ce12 = 4.0f*eps*powf(sigma,12);
float ce06 = 4.0f*eps*powf(sigma, 6);
float cf12 = ce12 * 12.0f;
float cf06 = ce06 * 6.0f;
for (int j=0;j<nElem;j++){
for (int i=0;i<j;i++){
float dx,dy,dz,r2,r2i,r06i,r12i,fc,fx,fy,fz;
dx = h_px[i]-h_px[j];
dy = h_py[i]-h_py[j];
dz = h_pz[i]-h_pz[j];
// minimum-image convention: interact with the nearest periodic image
if(dx<-length/2) dx+=length;
if(dx> length/2) dx-=length;
if(dy<-length/2) dy+=length;
if(dy> length/2) dy-=length;
if(dz<-length/2) dz+=length;
if(dz> length/2) dz-=length;
r2 = dx*dx+dy*dy+dz*dz;
if (r2 > 4*4)continue; // interaction cutoff at radius 4
r2i = 1.0f/r2;
r06i = r2i * r2i * r2i;
r12i = r06i * r06i;
*potential += (ce12 * r12i - ce06 * r06i);
fc = (cf12*r12i-cf06*r06i)*r2i;
fx = fc * dx;
fy = fc * dy;
fz = fc * dz;
// Newton's third law: equal and opposite forces on i and j
h_fx[i]+=fx;
h_fy[i]+=fy;
h_fz[i]+=fz;
h_fx[j]-=fx;
h_fy[j]-=fy;
h_fz[j]-=fz;
}
}
V3Buf v3Buf = {h_fx,h_fy,h_fz,nElem};
return v3Buf;
}
/// Compute and print the total energy H = kinetic + potential.
/// Kinetic energy is sum(v^2)/2 per particle (unit mass); `potential` is
/// supplied by the force computation. Prints "H kinetic potential" to
/// stdout and returns H.
/// The `pos` argument is unused: the per-particle radius previously derived
/// from it was dead code. It is kept so existing call sites remain valid.
double CalculateHamiltonian(float* pos,float* vel,int nElem,double potential){
  (void)pos; // intentionally unused (interface stability)
  float *h_vx = vel;
  float *h_vy = h_vx + nElem;
  float *h_vz = h_vy + nElem;
  double energy = 0.0;
  for (int i = 0; i < nElem; i++) {
    float vx = h_vx[i], vy = h_vy[i], vz = h_vz[i];
    float v2 = vx*vx + vy*vy + vz*vz;
    energy += (double)(v2/2);
  }
  printf("%lf %lf %lf\n", energy+potential, energy, potential);
  return energy+potential;
}
/* Exchange the float* values stored at *a and *b (pointer swap only; no
 * array data is copied). Used to rotate the current/previous force buffers
 * between integration steps. */
void SwapFloatPointer(float** a,float **b){
    float *old_a = *a;
    *a = *b;
    *b = old_a;
}
/* Naive O(N^2) Lennard-Jones force kernel: one thread per particle i,
 * summing contributions from every other particle j with minimum-image
 * periodic boundaries (no distance cutoff — see comment below).
 * force/pos are SoA device buffers of nElem*3 floats (x|y|z blocks).
 * Launch: 1-D grid of 1-D blocks; any grid >= nElem threads is safe. */
__global__ void CalculateForce_GPUNaive(float *force,float *pos,int nElem,float length){
int idx = threadIdx.x + blockIdx.x * blockDim.x; //1dim grid, 1dim block
/* Fix: the launcher uses ceil-division, so surplus threads exist whenever
 * nElem is not a multiple of blockDim.x; the original wrote force[idx]
 * out of bounds for them. */
if (idx >= nElem) return;
float *d_fx,*d_fy,*d_fz,*d_px,*d_py,*d_pz;
d_fx = force;
d_fy = d_fx + nElem;
d_fz = d_fy + nElem;
d_px = pos;
d_py = d_px + nElem;
d_pz = d_py + nElem;
/* per-thread accumulators (registers); written to global memory once */
float t_fx=0.0f,t_fy=0.0f,t_fz=0.0f;
//*potential = 0.0; must implement to calculate the Hamiltonian...
//Is it better to load these constants from constant memory?
float eps = 1.0f;
float sigma = 1.0f;
float ce12 = 4.0f*eps*powf(sigma,12);
float ce06 = 4.0f*eps*powf(sigma, 6);
float cf12 = ce12 * 12.0f;
float cf06 = ce06 * 6.0f;
int i = idx;
for (int j=0;j<nElem;j++){// accumulate the force on i from every j
if (i==j)continue;
float dx,dy,dz,r2,r2i,r06i,r12i,fc,fx,fy,fz;
dx = d_px[i]-d_px[j];
dy = d_py[i]-d_py[j];
dz = d_pz[i]-d_pz[j];
/* minimum-image convention: fold displacements into [-L/2, L/2] */
if(dx<-length/2) dx+=length;
if(dx> length/2) dx-=length;
if(dy<-length/2) dy+=length;
if(dy> length/2) dy-=length;
if(dz<-length/2) dz+=length;
if(dz> length/2) dz-=length;
r2 = dx*dx+dy*dy+dz*dz;
//if (r2 > 4*4)continue; //Cutting at the cutoff radius may be MEANINGLESS on GPU (branch, no work saved per warp).
r2i = 1.0f/r2;
r06i = r2i * r2i * r2i;
r12i = r06i * r06i;
//*potential += (ce12 * r12i - ce06 * r06i);
fc = (cf12*r12i-cf06*r06i)*r2i;
fx = fc * dx;
fy = fc * dy;
fz = fc * dz;
t_fx+=fx;
t_fy+=fy;
t_fz+=fz;
}
d_fx[i]=t_fx;
d_fy[i]=t_fy;
d_fz[i]=t_fz;
}
/* Host wrapper: copy positions H2D, launch the naive O(N^2) force kernel,
 * and copy forces D2H. h_p/h_f/d_p/d_f are nElem*3-float SoA buffers
 * (x|y|z blocks). The blocking D2H memcpy also synchronizes the kernel. */
void CalculateForce_UseGPU_Naive(float* h_p,float *h_f,float* d_p,float *d_f,int nElem,float length){
int nBytes = nElem * sizeof(float);
cudaMemcpy(d_p,h_p,nBytes*3,cudaMemcpyHostToDevice);
dim3 block(256);
dim3 grid((nElem+block.x-1)/block.x); //ceil-div: covers nElem not a multiple of 256
CalculateForce_GPUNaive<<<grid,block>>>(d_f,d_p,nElem,length);
CHECK(cudaGetLastError()); //surface launch-config errors (consistent with CalculateForce_UseGPU)
cudaMemcpy(h_f,d_f,nBytes*3,cudaMemcpyDeviceToHost);
}
#define HSF 6//hash size = 1<<HSF : bits reserved per spatial axis; total hash space = 1<<(3*HSF)
/* Map one coordinate to its cell index along an axis. hL is the cell edge
 * length. gC (grid count) is accepted but unused here — the result is NOT
 * clamped or wrapped; the caller must keep 0 <= v < gC*hL (see getHash). */
__host__ __device__ inline int getHashPart(float v,int gC,float hL){
return (int)floorf(v/hL);
}
/* Pack three per-axis cell indices into one linear hash.
 * Valid only for 0 <= H{x,y,z} < (1<<HSF). */
__host__ __device__ inline int HashParts2Hash(int Hx,int Hy,int Hz){
return Hx + Hy*(1<<HSF) + Hz*(1<<(2*HSF)); //this implementation requires gC <= (1<<HSF);
}
/* Combined 3-D cell hash for a position (x,y,z): per-axis cell index via
 * getHashPart, packed by HashParts2Hash. */
__host__ __device__ inline int getHash(float x,float y,float z,int gC,float hL){
// gC is grid Count(1dim)
// hL is grid Length(one grid,1dim)
int Hx = getHashPart(x,gC,hL); //!!! if x == hL * gC exactly, the cell index goes out of range !!!
int Hy = getHashPart(y,gC,hL);
int Hz = getHashPart(z,gC,hL);
return HashParts2Hash(Hx,Hy,Hz);
}
/* Inverse of HashParts2Hash: unpack a linear hash into the three per-axis
 * cell indices. Valid for indices in [0, 1<<HSF), matching the packing
 * Hx + Hy*(1<<HSF) + Hz*(1<<(2*HSF)).
 * Fix: this was a declared-but-empty stub ("Not Implement"). */
__host__ __device__ inline void recoverHash(int hash,int &x,int &y,int &z){
x = hash & ((1<<HSF)-1);
y = (hash >> HSF) & ((1<<HSF)-1);
z = hash >> (2*HSF);
}
/* Compute each particle's cell hash and record its particle index as the
 * sort key. d_p is the SoA position buffer (x|y|z blocks).
 * d_hash layout: slots [0,nElem) = hash values, [nElem,2*nElem) = indices.
 * No bounds guard: the launcher must supply exactly nElem threads
 * (grid = nElem/256 in CalculateForce_UseGPU, exact for its nElem). */
__global__ void HashFromPosition(float *d_p,int *d_hash,int nElem,int gC,float hL){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
float x = d_p[idx];
float y = d_p[nElem+idx];
float z = d_p[2*nElem+idx];
int h = getHash(x,y,z,gC,hL);
d_hash[idx] = h;
d_hash[idx+nElem] = idx; //the sort key: the particle's original index
}
/* Build the Hash Range Reference Map from the (already sorted) hash array:
 * for each hash value h that occurs, d_HRRM[h] = first slot of its run and
 * d_HRRM[h+nHash] = one-past-last slot. Absent hashes must be pre-zeroed
 * by the caller (cudaMemset) so start==end==0 yields an empty range.
 * Requires exactly nElem threads and d_hash[0..nElem) sorted ascending. */
__global__ void GenerateHRRM(int *d_hash,int *d_HRRM,int nElem){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int nHash = 1<<(HSF*3);
int ths = d_hash[idx];
if (idx==0){
d_HRRM[ths] = 0; /* the first sorted slot opens the first run */
}
if(idx+1<nElem){
int nxt = d_hash[idx+1];
if (ths != nxt){
/* boundary between two runs: close run `ths`, open run `nxt` */
d_HRRM[ths+nHash] = idx+1;
d_HRRM[nxt] = idx+1;
}
}else{//idx = nElem-1: close the final run
d_HRRM[ths+nHash] = nElem;
}
}
/* Cell-list (spatial-hash) Lennard-Jones force kernel: one thread per
 * particle, scanning only the 27 neighboring cells via the sorted
 * hash/key arrays and the hash-range reference map.
 *   hrrm[h] / hrrm[h+nHash]  = [start,end) range of cell h in sorted order
 *   hash[k] / hash[k+nElem]  = hash value / particle index at sorted slot k
 * d_pot[i] receives particle i's pair-potential sum; each pair is counted
 * once per endpoint, so the host divides the total by 2 (see cuMain).
 * The single-letter printfs ("D","H","G","E") are in-kernel consistency
 * checks left in for debugging. Assumes exactly nElem threads launched. */
__global__ void CalculateForce_GPUSort(float *force,float *pos,int* hrrm,int *hash,int nElem,float length,int gC,float hL,float *d_pot){
int idx = threadIdx.x + blockIdx.x * blockDim.x; //1dim grid, 1dim block
float *d_fx,*d_fy,*d_fz,*d_px,*d_py,*d_pz;
d_fx = force;
d_fy = d_fx + nElem;
d_fz = d_fy + nElem;
d_px = pos;
d_py = d_px + nElem;
d_pz = d_py + nElem;
//initialization: per-thread accumulators
float t_fx=0.0f,t_fy=0.0f,t_fz=0.0f;
double potential = 0.0;// accumulated so the host can compute the Hamiltonian
//Is it better to load these constants from constant memory?
float eps = 1.0f;
float sigma = 1.0f;
float ce12 = 4.0f*eps*powf(sigma,12);
float ce06 = 4.0f*eps*powf(sigma, 6);
float cf12 = ce12 * 12.0f;
float cf06 = ce06 * 6.0f;
int i = idx;
float px = d_px[i];
float py = d_py[i];
float pz = d_pz[i];
int Hx = getHashPart(px,gC,hL);
int Hy = getHashPart(py,gC,hL);
int Hz = getHashPart(pz,gC,hL);
int nHash = 1<<(HSF*3);
for (int c=0;c<27;c++){ /* iterate the 3x3x3 cell neighborhood */
int dHx = c%3-1;
int dHy = (c/3)%3-1;
int dHz = (c/3/3)%3-1; /* dH* each in {-1,0,1}; c==13 is the home cell */
int pHx = (Hx+gC+dHx)%gC; /* wrap cell indices periodically */
int pHy = (Hy+gC+dHy)%gC;
int pHz = (Hz+gC+dHz)%gC;
int h = HashParts2Hash(pHx,pHy,pHz);
int start = hrrm[h];
int end = hrrm[h+nHash];
if(!(start<=end & end-start<1000000)){printf("D");}; /* debug: corrupt HRRM range */
for (int k=start;k<end;k++){
int j = hash[k+nElem]; /* particle index stored next to the sorted hash */
if (hash[k]!=h)printf("H"); /* debug: sorted run does not match its cell */
if (i==j)continue;
float dx,dy,dz,r2,r2i,r06i,r12i,fc,fx,fy,fz;
float qx,qy,qz;
qx = d_px[j];
qy = d_py[j];
qz = d_pz[j];
dx = px-qx;
dy = py-qy;
dz = pz-qz;
/* minimum-image convention */
if(dx<-length/2) dx+=length;
if(dx> length/2) dx-=length;
if(dy<-length/2) dy+=length;
if(dy> length/2) dy-=length;
if(dz<-length/2) dz+=length;
if(dz> length/2) dz-=length;
if (c==13 & dx>hL*3)printf("G"); /* debug: home-cell neighbor too far away */
if (getHash(qx,qy,qz,gC,hL)!=h)printf("E"); /* debug: particle hashed to the wrong cell */
r2 = dx*dx+dy*dy+dz*dz;
r2i = 1.0f/r2;
r06i = r2i * r2i * r2i;
r12i = r06i * r06i;
potential += (ce12 * r12i - ce06 * r06i);
fc = (cf12*r12i-cf06*r06i)*r2i;
fx = fc * dx;
fy = fc * dy;
fz = fc * dz;
t_fx+=fx;
t_fy+=fy;
t_fz+=fz;
}
}
d_fx[i]=t_fx;
d_fy[i]=t_fy;
d_fz[i]=t_fz;
d_pot[i]=potential;
}
/* Scratch and staging buffers reused across CalculateForce_UseGPU calls
 * (allocated once in cuMain). */
typedef struct{
float * h_pot; /* host: per-particle potential, copied back each step */
float * d_pot; /* device: per-particle potential written by the kernel */
int *d_HashKey; //nElem * 2 (pair of hash and key);
int *d_HashKeyWork; /* scratch buffer consumed by sort() */
int *d_HRRM;//hash range reference map: nHash starts followed by nHash ends
int *h_HashKeyTest;//TestBuf: host debug copy of d_HashKey
int *h_HRRMTest; /* host debug copy of d_HRRM */
float *h_nElemF; /* host debug copy of the device position buffer */
} WorkBufList;
/* Full GPU force pipeline: hash particles into cells, sort by hash, build
 * the hash-range map, then run the cell-list force kernel.
 * Copies positions H2D and forces + per-particle potentials D2H.
 * Assumes nElem is an exact multiple of 256 AND a power of two (the grid
 * uses floor division and sort() takes log2(nElem)).
 * The three mid-pipeline D2H copies fill the wbl.h_* debug buffers only. */
void CalculateForce_UseGPU(float* h_p,float *h_f,float* d_p,float *d_f,int nElem,float length,WorkBufList wbl){
float cutRadius = 4.0f;
int gC = floor(length/cutRadius); //grid size (cells per axis)
float hL = length/gC; //cell edge length (>= cutRadius)
int nHash = 1<<(HSF*3);
int nBytes = nElem * sizeof(float);
cudaMemcpy(d_p,h_p,nBytes*3,cudaMemcpyHostToDevice);
dim3 block(256);
dim3 grid(nElem/block.x); /* exact division assumed — no remainder guard in the kernels */
//printf("G<%d>",grid.x);
CHECK(cudaGetLastError());
//Kernel: hashing (generate hash and embed the sort key)
HashFromPosition<<<grid,block>>>(d_p,wbl.d_HashKey,nElem,gC,hL);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
//Kernel: sort keys by hash.
int nLog2Elem = (int)log2f((float)(nElem)+0.1f); /* +0.1 guards float rounding below the integer log */
//printf ("%d",nLog2Elem);
sort(wbl.d_HashKey,wbl.d_HashKeyWork,nLog2Elem); //CAUTION! the second parameter is scratch only (first: in&out, second: work)
CHECK(cudaGetLastError());
//Kernel: generate the hash range reference map (HRRM).
cudaMemset(wbl.d_HRRM,0,nHash*2*sizeof(int)); /* absent hashes must read as empty ranges */
GenerateHRRM<<<grid,block>>>(wbl.d_HashKey,wbl.d_HRRM,nElem);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
/* debug snapshots for host-side inspection (not used by the pipeline) */
cudaMemcpy(wbl.h_HRRMTest,wbl.d_HRRM,nHash*2*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(wbl.h_HashKeyTest,wbl.d_HashKey,nElem*2*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(wbl.h_nElemF,d_p,nBytes*3,cudaMemcpyDeviceToHost);
//Kernel: using the unsorted position data plus HRRM and sorted key-hash, calculate forces fast.
CalculateForce_GPUSort<<<grid,block>>>(d_f,d_p,wbl.d_HRRM,wbl.d_HashKey,nElem,length,gC,hL,wbl.d_pot);
//CalculateForce_GPUNaive<<<grid,block>>>(d_f,d_p,nElem,length);
cudaMemcpy(wbl.h_pot,wbl.d_pot,nBytes,cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
cudaMemcpy(h_f,d_f,nBytes*3,cudaMemcpyDeviceToHost);
CHECK(cudaGetLastError());
}
/* Simulation driver: velocity-Verlet MD of nElem Lennard-Jones particles
 * in a periodic cube, with forces computed on the GPU by the spatial-hash
 * pipeline. `grpc` is an external graphics callback fed the position view.
 * Runs forever (while(true)); buffers are consequently never freed. */
void cuMain(void (*grpc)(V3Buf buf) ){
initRand();
//Buffer Initialization (CPU)
int nElem = 256*8*8*8; /* 131072 particles: a multiple of 256 and a power of two, as the GPU path requires */
int nBytes = nElem * sizeof(float);
float *h_p,*h_v,*h_f,*h_fd,*h_pot; /* positions, velocities, new forces, old forces, per-particle potentials */
h_p = (float*)malloc(nBytes*3);
h_v = (float*)malloc(nBytes*3);
h_f = (float*)malloc(nBytes*3);
h_fd= (float*)malloc(nBytes*3);
h_pot = (float*)malloc(nBytes);
//Buffer Initialization (GPU)
float *d_p,*d_f,*d_pot;
int *d_HashKeyIn,*d_HashKeyOut,*d_HRRM;
cudaMalloc(&d_p,nBytes*3);
cudaMalloc(&d_f,nBytes*3);
cudaMalloc(&d_HashKeyIn,nElem*2*sizeof(int));
cudaMalloc(&d_HashKeyOut,nElem*2*sizeof(int));
cudaMalloc(&d_HRRM,(1<<(HSF*3))*2*sizeof(int));//HRRM size is determined by HashMax * 2
cudaMalloc(&d_pot,nBytes);
float *h_df; //Test Buf;
int *h_dh,*h_d_HRRM; // Test Buf;
h_df = (float *)malloc(nBytes*3);
h_dh = (int *)malloc(nElem*2*sizeof(int));
h_d_HRRM = (int *)malloc((1<<(HSF*3))*2*sizeof(int));
WorkBufList wbl = {h_pot,d_pot,d_HashKeyIn,d_HashKeyOut,d_HRRM,h_dh,h_d_HRRM,h_df};
//Buffer Setting
float length;
V3Buf h_v3pos = CreateUniformParticles(h_p,0.5f,nElem,&length); /* also sets the box length */
V3Buf h_v3vel = CreateRandomVelocity(h_v,nElem);
printf("length%f\n",length);
/* overwrite the random velocities with a deterministic pattern in [-2,2) */
for (int i=0;i<nElem*3;i++){
//h_v[i]*=10.0f;
h_v[i]=(float)((i*7)%13)/13.0f*2.0f-1.0f;
h_v[i]*=2.0f;
}
double potential;
CalculateForce_UseGPU_Naive(h_p,h_f,d_p,d_f,nElem,length); /* initial forces for the first Verlet step */
float dt = 0.005;
int it = 0;
while(true){
//Graphics function (external): hand the position buffer view to the renderer;
if (it%20==19) (*grpc)(h_v3pos);//render once every 20 steps: with many particles the display becomes the bottleneck.
//Position update: x += dt*(v + dt/2 * f), then wrap into [0,length)
for (int i=0;i<nElem*3;i++){
float p = h_p[i];
p+=dt*(h_v[i]+0.5f*dt*h_f[i]);
p = p- floorf(p/length)*length;
h_p[i] = p;
}
//The force buffer is now stale (positions changed): keep it in h_fd as "old".
SwapFloatPointer(&h_f,&h_fd);
//CalculateForce_UseGPU_Naive(h_p,h_f,d_p,d_f,nElem,length);
CalculateForce_UseGPU(h_p,h_f,d_p,d_f,nElem,length,wbl);
/* velocity-Verlet second half: v += dt/2 * (f_new + f_old) */
for (int i=0;i<nElem*3;i++){
h_v[i]+=dt*0.5*(h_f[i]+h_fd[i]);
}
//printf("%f",h_p[1000]);
potential = 0;
for (int i=0;i<nElem;i++){
potential += h_pot[i];
}
potential /=2.; /* the GPU kernel counts each pair once per endpoint */
CalculateHamiltonian(h_p,h_v,nElem,potential); /* prints H / kinetic / potential for energy-drift monitoring */
it++;
}
}
73c32df21df97bb6cad635963557f34cfc4b59d5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* main.c
*
* Created on: 26/01/2011
* Author: einstein/carneiro
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define mat_h(i,j) mat_h[i*N+j] // row-major access into the global cost matrix (binds to N in scope)
#define mat_d(i,j) mat_d[i*N_l+j] // device-matrix accessor (unused in this translation unit)
#define mat_block(i,j) mat_block[i*N_l+j] // block-matrix accessor (unused in this translation unit)
#define proximo(x) x+1 // "next" level (unparenthesized: use only as a standalone expression)
#define anterior(x) x-1 // "previous" level
#define MAX 8192 // capacity of the flattened N*N cost matrix
#define INFINITO 999999 // sentinel "infinite" cost
#define ZERO 0
#define ONE 1
#define _VAZIO_ -1 // "empty" slot marker in the path array
#define _VISITADO_ 1 // vertex already on the current path
#define _NAO_VISITADO_ 0 // vertex not yet on the current path
int qtd = 0; // NOTE(review): appears unused in this translation unit
int custo = 0; // running path cost; written by dfs2 (looks like a leftover from a non-reentrant version)
int N; // number of vertices, set by read()
int melhor = INFINITO; // best (incumbent) tour cost
int upper_bound; // its scanf in read() is commented out; currently unused
int mat_h[MAX]; // cost matrix, row-major N*N
/* Read the problem instance from stdin: the vertex count N followed by
 * the N*N cost matrix in row-major order, stored into the globals. */
void read() {
    //scanf("%d", &upper_bound);
    scanf("%d", &N);
    int total = N * N;
    for (int k = 0; k < total; ++k) {
        scanf("%d", &mat_h[k]);
    }
}
/* Branching factor at depth `nivelDesejado` of the search tree over N
 * vertices: the number of children a node at that level spawns. */
int fatorBranchingNivelDesejado(int nivelDesejado, int N){
    return (N - nivelDesejado) + 1;
}
/* Number of path prefixes of depth nivelPrefixo rooted at vertex 0:
 * (n-1)*(n-2)*...*(n-nivelPrefixo+1) falling factorial. */
unsigned int calculaNPrefixos(int nivelPrefixo, int nVertice) {
    unsigned int count = (unsigned int)(nVertice - 1);
    for (int depth = 1; depth + 1 < nivelPrefixo; ++depth) {
        count *= (unsigned int)(nVertice - depth - 1);
    }
    return count;
}
/* Number of descendant prefixes at depth `nivelDesejado` under one prefix
 * of depth `nivelInicial`: the product of branching factors for levels
 * nivelInicial+1 .. nivelDesejado (each factor is nVertice - level + 1).
 * Fix: the original ignored the nVertice parameter and read the global N,
 * leaving the parameter dead; every caller passes N, so behavior for
 * existing callers is unchanged. */
unsigned int calculaNPrefixosNivelDesejado(int nivelInicial,int nivelDesejado, int nVertice) {
    unsigned int nprefixos = 1;
    for (int i = nivelInicial + 1; i <= nivelDesejado; ++i) {
        nprefixos *= (unsigned int)(nVertice - i + 1); /* branching factor at level i */
    }
    return nprefixos;
}
/* Enumerate every simple-path prefix of depth nivelPrefixo that starts at
 * vertex 0 (iterative backtracking over the global N vertices) and store
 * them consecutively as preFixo[prefixIdx*nivelPrefixo + level].
 * The caller must size preFixo for calculaNPrefixos(nivelPrefixo, N)
 * prefixes. NOTE(review): flag[16]/vertice[16] assume N <= 16 — confirm. */
void fillFixedPaths(short* preFixo, const int nivelPrefixo) {
char flag[16]; /* visited marks, indexed by vertex */
int vertice[16]; //the current partial path: vertice[level] = chosen vertex
int cont = 0; /* prefixes emitted so far */
int i, nivel; //to treat edge 0-1 as already visited and start the search at 1, set this to 2
for (i = 0; i < N; ++i) {
flag[i] = 0;
vertice[i] = -1;
}
vertice[0] = 0; //root the search at vertex 0: vertice[nivel] = idx, flag[idx] = 1
flag[0] = 1;
nivel = 1;
while (nivel >= 1) { // change here to start the search at a different level
if (vertice[nivel] != -1) {
flag[vertice[nivel]] = 0; /* un-visit the previous candidate at this level */
}
do {
vertice[nivel]++;
} while (vertice[nivel] < N && flag[vertice[nivel]]); //advance to the next unvisited vertex
if (vertice[nivel] < N) { //vertice[x] is the vertex placed at level x
flag[vertice[nivel]] = 1;
nivel++;
if (nivel == nivelPrefixo) {
/* a complete prefix: copy it out, then backtrack for the next one */
for (i = 0; i < nivelPrefixo; ++i) {
preFixo[cont * nivelPrefixo + i] = vertice[i];
// printf("%d ", vertice[i]);
}
// printf("\n");
cont++;
nivel--;
}
} else {
vertice[nivel] = -1; /* level exhausted: backtrack */
nivel--;
}//else
}//while
}
/* Exhaustively extend one fixed prefix (row `idx` of preFixos, length
 * nivelPrefixo) to complete tours of depth N, counting them and tracking
 * the cheapest tour cost. *melhor_sol is lowered if a better tour is found.
 * Returns the number of complete tours under this prefix.
 * Note: the parameter N shadows the global, so the mat_h(i,j) macro binds
 * to it here; assumes N <= 16 (fixed-size flag/vertice arrays). */
unsigned int finalDFS(const int N, const short *preFixos, const int idx, int *melhor_sol, const int nPreFixos, const int nivelPrefixo){
int flag[16];
int vertice[16]; //represents the cycle under construction
int i, nivel; //to treat edge 0-1 as already visited and start the search at 1, set this to 2
int custo=0; /* running cost of the partial path (local, unlike dfs2) */
unsigned int qtd_solucoes_thread = 0;
int UB_local = INFINITO; /* best tour cost seen under this prefix */
int nivelGlobal = nivelPrefixo;
if (idx < nPreFixos) { //(@) guard so out-of-range indices do not descend
for (i = 0; i < N; ++i) {
vertice[i] = _VAZIO_;
flag[i] = _NAO_VISITADO_;
}
vertice[0] = 0;
flag[0] = _VISITADO_;
custo= ZERO;
/* replay the prefix: mark its vertices visited and accumulate its cost */
for (i = 1; i < nivelGlobal; ++i) {
vertice[i] = preFixos[idx * nivelGlobal + i];
flag[vertice[i]] = _VISITADO_;
custo += mat_h(vertice[i-1],vertice[i]);
}
nivel=nivelPrefixo;
while (nivel >= nivelGlobal ) { // change here to start the search at a different level
if (vertice[nivel] != _VAZIO_) {
flag[vertice[nivel]] = _NAO_VISITADO_;
custo -= mat_h(vertice[anterior(nivel)],vertice[nivel]); /* undo the edge being replaced */
}
do {
vertice[nivel]++;
} while (vertice[nivel] < N && flag[vertice[nivel]]); //advance to the next unvisited vertex
if (vertice[nivel] < N) { //vertice[x] is the vertex placed at level x
custo += mat_h(vertice[anterior(nivel)],vertice[nivel]);
flag[vertice[nivel]] = _VISITADO_;
nivel++;
if (nivel == N) { //depth N: a full tour; close the cycle by adding the edge back to vertex 0
++qtd_solucoes_thread;
if (custo + mat_h(vertice[anterior(nivel)],0) < UB_local) {
UB_local = custo + mat_h(vertice[anterior(nivel)],0);
}
nivel--;
}
//else {
//if (custo > custoMin_d[0])
//nivel--; //pruning: LB greater than UB (disabled)
//}
}
else {
vertice[nivel] = _VAZIO_;
nivel--;
}//else
}//while
// sols_d[idx] = qtd_solucoes_thread;
// melhorSol_d[idx] = UB_local;
}//dfs
if(UB_local < (*melhor_sol)){
*melhor_sol = UB_local;
}
return qtd_solucoes_thread;
}
/* Middle stage: expand prefix `idx` (depth nivelInicial, from preFixos)
 * into every descendant prefix of depth nivelDesejado, then hand each one
 * to finalDFS for exhaustive completion. Accumulates the tour count and
 * lowers *otimo_global if a cheaper tour was found.
 * Uses the global N. NOTE(review): `custo = ZERO` below writes the GLOBAL
 * custo — no local is declared; looks like a leftover since cost is never
 * read here (finalDFS recomputes it). Assumes N <= 16. */
int dfs2(const short *preFixos, const int idx, const int nivelInicial, const int nivelDesejado, int *otimo_global) {
//the idx parameter could be removed
register int flag[16];
register int vertice[16]; //represents the cycle under construction
register int i, nivel; //to treat edge 0-1 as already visited and start the search at 1, set this to 2
unsigned int qtd_solucoes_local = 0;
unsigned int qtd_solucoes_filho = 0;
int cont = 0; /* number of depth-nivelDesejado prefixes generated */
unsigned int qtd_prefixos_locais = calculaNPrefixosNivelDesejado(nivelInicial,nivelDesejado,N);
int melhor_sol = INFINITO; /* best tour cost over all children of this prefix */
short *path_local;
path_local = (short*)malloc(sizeof(short) * nivelDesejado * qtd_prefixos_locais);
for (i = 0; i < N; ++i) {
vertice[i] = _VAZIO_;
flag[i] = _NAO_VISITADO_;
}
vertice[0] = 0;
flag[0] = _VISITADO_;
custo= ZERO;
/* replay the parent prefix: mark its vertices visited */
for (i = 1; i < nivelInicial; ++i) {
vertice[i] = preFixos[idx * nivelInicial + i];
flag[vertice[i]] = _VISITADO_;
}
nivel=nivelInicial;
while (nivel >= nivelInicial ) { // change here to start the search at a different level
if (vertice[nivel] != _VAZIO_) {
flag[vertice[nivel]] = _NAO_VISITADO_;
}
do {
vertice[nivel]++;
} while (vertice[nivel] < N && flag[vertice[nivel]]); //advance to the next unvisited vertex
if (vertice[nivel] < N) { //vertice[x] is the vertex placed at level x
flag[vertice[nivel]] = _VISITADO_;
nivel++;
if (nivel == nivelDesejado) { //reached the target depth: record this child prefix
for (i = 0; i < nivelDesejado; ++i) {
path_local[cont * nivelDesejado + i] = vertice[i];
}
++cont;
nivel--;
}
}
else {
vertice[nivel] = _VAZIO_;
nivel--;
}//else
}//while
// for(int pref = 0; pref < qtd_prefixos_locais; ++pref){
// for(int j = 0; j<nivelDesejado;++j){
// printf(" %d ", path_local[pref*nivelDesejado + j]);
// }
// printf("\n");
// }
/* solve each child prefix to the end, accumulating tour counts */
for(int pref = 0; pref < qtd_prefixos_locais; ++pref){
qtd_solucoes_filho = finalDFS(N, path_local,pref, &melhor_sol, qtd_prefixos_locais,nivelDesejado);
// printf("\nQtd de sols encontrada pelo prefixo %d: %d.\n",pref,qtd_solucoes_filho);
qtd_solucoes_local+=qtd_solucoes_filho;
}
if(melhor_sol < (*otimo_global)){
*otimo_global = melhor_sol;
}
free(path_local);
return qtd_solucoes_local;
}//dfs2
/* Program entry: read the cost matrix from stdin, enumerate all prefixes
 * of depth nivelPreFixos, then for each one run the two-stage DFS (expand
 * to nivelDesejado, solve exhaustively), accumulating the global tour
 * count and the optimal tour cost.
 * Fixes: the original leaked path_h and never checked malloc. */
int main() {
    read();
    int otimo_global = INFINITO;
    int qtd_sols_global = ZERO;
    int nivelPreFixos = 5;//number of prefixed levels; this is what lets us use more parallel roots.
    int nivelDesejado = 8; /* depth at which finalDFS takes over */
    unsigned int nPreFixos = calculaNPrefixos(nivelPreFixos,N);
    short * path_h = (short*) malloc(sizeof(short) * nPreFixos * nivelPreFixos);
    if (path_h == NULL) { /* robustness: allocation can fail for large instances */
        fprintf(stderr, "allocation failure\n");
        return 1;
    }
    fillFixedPaths(path_h, nivelPreFixos);
    printf("\nNivel inicial: %d.", nivelPreFixos);
    printf("\nQuantidade de prefixos no nivel inicial: %d\n", nPreFixos);
    printf("\n Nivel desejado: %d.", nivelDesejado);
    printf("\n Prefixos individuais por pai no nivel desejado: %d .", calculaNPrefixosNivelDesejado(nivelPreFixos,nivelDesejado,N));
    printf("\n Prefixos totais no nivel desejado: %d .\n", calculaNPrefixosNivelDesejado(nivelPreFixos,nivelDesejado,N)*nPreFixos);
    /* sequentially solve each prefix root; dfs2 updates otimo_global in place */
    for(int vertex = 0; vertex < nPreFixos; ++vertex){
        qtd_sols_global += dfs2(path_h, vertex, nivelPreFixos,nivelDesejado, &otimo_global) ;
    }
    printf("\n Quantidade de solucoes Global: %d.\n", qtd_sols_global);
    printf("\nOtimo global: %d. \n", otimo_global);
    free(path_h); /* fix: the original leaked this buffer */
    return 0;
}
| 73c32df21df97bb6cad635963557f34cfc4b59d5.cu |
/*
* main.c
*
* Created on: 26/01/2011
* Author: einstein/carneiro
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <cuda.h>
#define mat_h(i,j) mat_h[i*N+j] // row-major access into the global cost matrix (binds to N in scope)
#define mat_d(i,j) mat_d[i*N_l+j] // device-matrix accessor (unused in this translation unit)
#define mat_block(i,j) mat_block[i*N_l+j] // block-matrix accessor (unused in this translation unit)
#define proximo(x) x+1 // "next" level (unparenthesized: use only as a standalone expression)
#define anterior(x) x-1 // "previous" level
#define MAX 8192 // capacity of the flattened N*N cost matrix
#define INFINITO 999999 // sentinel "infinite" cost
#define ZERO 0
#define ONE 1
#define _VAZIO_ -1 // "empty" slot marker in the path array
#define _VISITADO_ 1 // vertex already on the current path
#define _NAO_VISITADO_ 0 // vertex not yet on the current path
int qtd = 0; // NOTE(review): appears unused in this translation unit
int custo = 0; // running path cost; written by dfs2 (looks like a leftover from a non-reentrant version)
int N; // number of vertices, set by read()
int melhor = INFINITO; // best (incumbent) tour cost
int upper_bound; // its scanf in read() is commented out; currently unused
int mat_h[MAX]; // cost matrix, row-major N*N
/* Read the problem instance from stdin: the vertex count N followed by
 * the N*N cost matrix in row-major order, stored into the globals. */
void read() {
    //scanf("%d", &upper_bound);
    scanf("%d", &N);
    int total = N * N;
    for (int k = 0; k < total; ++k) {
        scanf("%d", &mat_h[k]);
    }
}
/* Branching factor at depth `nivelDesejado` of the search tree over N
 * vertices: the number of children a node at that level spawns. */
int fatorBranchingNivelDesejado(int nivelDesejado, int N){
    return (N - nivelDesejado) + 1;
}
/* Number of path prefixes of depth nivelPrefixo rooted at vertex 0:
 * (n-1)*(n-2)*...*(n-nivelPrefixo+1) falling factorial. */
unsigned int calculaNPrefixos(int nivelPrefixo, int nVertice) {
    unsigned int count = (unsigned int)(nVertice - 1);
    for (int depth = 1; depth + 1 < nivelPrefixo; ++depth) {
        count *= (unsigned int)(nVertice - depth - 1);
    }
    return count;
}
/* Number of descendant prefixes at depth `nivelDesejado` under one prefix
 * of depth `nivelInicial`: the product of branching factors for levels
 * nivelInicial+1 .. nivelDesejado (each factor is nVertice - level + 1).
 * Fix: the original ignored the nVertice parameter and read the global N,
 * leaving the parameter dead; every caller passes N, so behavior for
 * existing callers is unchanged. */
unsigned int calculaNPrefixosNivelDesejado(int nivelInicial,int nivelDesejado, int nVertice) {
    unsigned int nprefixos = 1;
    for (int i = nivelInicial + 1; i <= nivelDesejado; ++i) {
        nprefixos *= (unsigned int)(nVertice - i + 1); /* branching factor at level i */
    }
    return nprefixos;
}
/* Enumerate every simple-path prefix of depth nivelPrefixo that starts at
 * vertex 0 (iterative backtracking over the global N vertices) and store
 * them consecutively as preFixo[prefixIdx*nivelPrefixo + level].
 * The caller must size preFixo for calculaNPrefixos(nivelPrefixo, N)
 * prefixes. NOTE(review): flag[16]/vertice[16] assume N <= 16 — confirm. */
void fillFixedPaths(short* preFixo, const int nivelPrefixo) {
char flag[16]; /* visited marks, indexed by vertex */
int vertice[16]; //the current partial path: vertice[level] = chosen vertex
int cont = 0; /* prefixes emitted so far */
int i, nivel; //to treat edge 0-1 as already visited and start the search at 1, set this to 2
for (i = 0; i < N; ++i) {
flag[i] = 0;
vertice[i] = -1;
}
vertice[0] = 0; //root the search at vertex 0: vertice[nivel] = idx, flag[idx] = 1
flag[0] = 1;
nivel = 1;
while (nivel >= 1) { // change here to start the search at a different level
if (vertice[nivel] != -1) {
flag[vertice[nivel]] = 0; /* un-visit the previous candidate at this level */
}
do {
vertice[nivel]++;
} while (vertice[nivel] < N && flag[vertice[nivel]]); //advance to the next unvisited vertex
if (vertice[nivel] < N) { //vertice[x] is the vertex placed at level x
flag[vertice[nivel]] = 1;
nivel++;
if (nivel == nivelPrefixo) {
/* a complete prefix: copy it out, then backtrack for the next one */
for (i = 0; i < nivelPrefixo; ++i) {
preFixo[cont * nivelPrefixo + i] = vertice[i];
// printf("%d ", vertice[i]);
}
// printf("\n");
cont++;
nivel--;
}
} else {
vertice[nivel] = -1; /* level exhausted: backtrack */
nivel--;
}//else
}//while
}
/* Exhaustively extend one fixed prefix (row `idx` of preFixos, length
 * nivelPrefixo) to complete tours of depth N, counting them and tracking
 * the cheapest tour cost. *melhor_sol is lowered if a better tour is found.
 * Returns the number of complete tours under this prefix.
 * Note: the parameter N shadows the global, so the mat_h(i,j) macro binds
 * to it here; assumes N <= 16 (fixed-size flag/vertice arrays). */
unsigned int finalDFS(const int N, const short *preFixos, const int idx, int *melhor_sol, const int nPreFixos, const int nivelPrefixo){
int flag[16];
int vertice[16]; //represents the cycle under construction
int i, nivel; //to treat edge 0-1 as already visited and start the search at 1, set this to 2
int custo=0; /* running cost of the partial path (local, unlike dfs2) */
unsigned int qtd_solucoes_thread = 0;
int UB_local = INFINITO; /* best tour cost seen under this prefix */
int nivelGlobal = nivelPrefixo;
if (idx < nPreFixos) { //(@) guard so out-of-range indices do not descend
for (i = 0; i < N; ++i) {
vertice[i] = _VAZIO_;
flag[i] = _NAO_VISITADO_;
}
vertice[0] = 0;
flag[0] = _VISITADO_;
custo= ZERO;
/* replay the prefix: mark its vertices visited and accumulate its cost */
for (i = 1; i < nivelGlobal; ++i) {
vertice[i] = preFixos[idx * nivelGlobal + i];
flag[vertice[i]] = _VISITADO_;
custo += mat_h(vertice[i-1],vertice[i]);
}
nivel=nivelPrefixo;
while (nivel >= nivelGlobal ) { // change here to start the search at a different level
if (vertice[nivel] != _VAZIO_) {
flag[vertice[nivel]] = _NAO_VISITADO_;
custo -= mat_h(vertice[anterior(nivel)],vertice[nivel]); /* undo the edge being replaced */
}
do {
vertice[nivel]++;
} while (vertice[nivel] < N && flag[vertice[nivel]]); //advance to the next unvisited vertex
if (vertice[nivel] < N) { //vertice[x] is the vertex placed at level x
custo += mat_h(vertice[anterior(nivel)],vertice[nivel]);
flag[vertice[nivel]] = _VISITADO_;
nivel++;
if (nivel == N) { //depth N: a full tour; close the cycle by adding the edge back to vertex 0
++qtd_solucoes_thread;
if (custo + mat_h(vertice[anterior(nivel)],0) < UB_local) {
UB_local = custo + mat_h(vertice[anterior(nivel)],0);
}
nivel--;
}
//else {
//if (custo > custoMin_d[0])
//nivel--; //pruning: LB greater than UB (disabled)
//}
}
else {
vertice[nivel] = _VAZIO_;
nivel--;
}//else
}//while
// sols_d[idx] = qtd_solucoes_thread;
// melhorSol_d[idx] = UB_local;
}//dfs
if(UB_local < (*melhor_sol)){
*melhor_sol = UB_local;
}
return qtd_solucoes_thread;
}
/* Middle stage: expand prefix `idx` (depth nivelInicial, from preFixos)
 * into every descendant prefix of depth nivelDesejado, then hand each one
 * to finalDFS for exhaustive completion. Accumulates the tour count and
 * lowers *otimo_global if a cheaper tour was found.
 * Uses the global N. NOTE(review): `custo = ZERO` below writes the GLOBAL
 * custo — no local is declared; looks like a leftover since cost is never
 * read here (finalDFS recomputes it). Assumes N <= 16. */
int dfs2(const short *preFixos, const int idx, const int nivelInicial, const int nivelDesejado, int *otimo_global) {
//the idx parameter could be removed
register int flag[16];
register int vertice[16]; //represents the cycle under construction
register int i, nivel; //to treat edge 0-1 as already visited and start the search at 1, set this to 2
unsigned int qtd_solucoes_local = 0;
unsigned int qtd_solucoes_filho = 0;
int cont = 0; /* number of depth-nivelDesejado prefixes generated */
unsigned int qtd_prefixos_locais = calculaNPrefixosNivelDesejado(nivelInicial,nivelDesejado,N);
int melhor_sol = INFINITO; /* best tour cost over all children of this prefix */
short *path_local;
path_local = (short*)malloc(sizeof(short) * nivelDesejado * qtd_prefixos_locais);
for (i = 0; i < N; ++i) {
vertice[i] = _VAZIO_;
flag[i] = _NAO_VISITADO_;
}
vertice[0] = 0;
flag[0] = _VISITADO_;
custo= ZERO;
/* replay the parent prefix: mark its vertices visited */
for (i = 1; i < nivelInicial; ++i) {
vertice[i] = preFixos[idx * nivelInicial + i];
flag[vertice[i]] = _VISITADO_;
}
nivel=nivelInicial;
while (nivel >= nivelInicial ) { // change here to start the search at a different level
if (vertice[nivel] != _VAZIO_) {
flag[vertice[nivel]] = _NAO_VISITADO_;
}
do {
vertice[nivel]++;
} while (vertice[nivel] < N && flag[vertice[nivel]]); //advance to the next unvisited vertex
if (vertice[nivel] < N) { //vertice[x] is the vertex placed at level x
flag[vertice[nivel]] = _VISITADO_;
nivel++;
if (nivel == nivelDesejado) { //reached the target depth: record this child prefix
for (i = 0; i < nivelDesejado; ++i) {
path_local[cont * nivelDesejado + i] = vertice[i];
}
++cont;
nivel--;
}
}
else {
vertice[nivel] = _VAZIO_;
nivel--;
}//else
}//while
// for(int pref = 0; pref < qtd_prefixos_locais; ++pref){
// for(int j = 0; j<nivelDesejado;++j){
// printf(" %d ", path_local[pref*nivelDesejado + j]);
// }
// printf("\n");
// }
/* solve each child prefix to the end, accumulating tour counts */
for(int pref = 0; pref < qtd_prefixos_locais; ++pref){
qtd_solucoes_filho = finalDFS(N, path_local,pref, &melhor_sol, qtd_prefixos_locais,nivelDesejado);
// printf("\nQtd de sols encontrada pelo prefixo %d: %d.\n",pref,qtd_solucoes_filho);
qtd_solucoes_local+=qtd_solucoes_filho;
}
if(melhor_sol < (*otimo_global)){
*otimo_global = melhor_sol;
}
free(path_local);
return qtd_solucoes_local;
}//dfs2
/* Program entry: read the cost matrix from stdin, enumerate all prefixes
 * of depth nivelPreFixos, then for each one run the two-stage DFS (expand
 * to nivelDesejado, solve exhaustively), accumulating the global tour
 * count and the optimal tour cost.
 * Fixes: the original leaked path_h and never checked malloc. */
int main() {
    read();
    int otimo_global = INFINITO;
    int qtd_sols_global = ZERO;
    int nivelPreFixos = 5;//number of prefixed levels; this is what lets us use more parallel roots.
    int nivelDesejado = 8; /* depth at which finalDFS takes over */
    unsigned int nPreFixos = calculaNPrefixos(nivelPreFixos,N);
    short * path_h = (short*) malloc(sizeof(short) * nPreFixos * nivelPreFixos);
    if (path_h == NULL) { /* robustness: allocation can fail for large instances */
        fprintf(stderr, "allocation failure\n");
        return 1;
    }
    fillFixedPaths(path_h, nivelPreFixos);
    printf("\nNivel inicial: %d.", nivelPreFixos);
    printf("\nQuantidade de prefixos no nivel inicial: %d\n", nPreFixos);
    printf("\n Nivel desejado: %d.", nivelDesejado);
    printf("\n Prefixos individuais por pai no nivel desejado: %d .", calculaNPrefixosNivelDesejado(nivelPreFixos,nivelDesejado,N));
    printf("\n Prefixos totais no nivel desejado: %d .\n", calculaNPrefixosNivelDesejado(nivelPreFixos,nivelDesejado,N)*nPreFixos);
    /* sequentially solve each prefix root; dfs2 updates otimo_global in place */
    for(int vertex = 0; vertex < nPreFixos; ++vertex){
        qtd_sols_global += dfs2(path_h, vertex, nivelPreFixos,nivelDesejado, &otimo_global) ;
    }
    printf("\n Quantidade de solucoes Global: %d.\n", qtd_sols_global);
    printf("\nOtimo global: %d. \n", otimo_global);
    free(path_h); /* fix: the original leaked this buffer */
    return 0;
}
|
8235be7d33b24948562d3d3858b7f40baaddef43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <benchmark/benchmark.h>
#include "init/init.hpp"
#include "reduction/args.hpp"
#include "utils/utils.hpp"
#include "kernel_hip.cuh"
using namespace wmma_reduction;
/* Benchmark driver for the 256-element segmented WMMA reduction kernel
 * (HIP build). state.range(0) = segment count, state.range(1) = segment
 * size (must equal WMMA_TILE_SIZE == 256). Allocates fp16 input/output on
 * the device, times the kernel with device events (manual timing), and
 * reports counters. Exceptions propagate to the caller, which converts
 * them into benchmark errors. */
template <int SEGMENTS_PER_WARP, int WARPS_PER_BLOCK>
void tryCUDA_WMMA_SEGMENTED_REDUCTION_256(benchmark::State &state) {
  const size_t num_segments = state.range(0);
  const size_t segment_size = state.range(1);
  if (segment_size != WMMA_TILE_SIZE) {
    state.SkipWithError("segment size must be WMMA_TILE_SIZE (256)");
    /* Fix: SkipWithError does not abort the function; without this return
     * the benchmark kept allocating and launching with an invalid size
     * (the gridDim check below already returned). */
    return;
  }
  const int BLOCK_DIM = WARPS_PER_BLOCK * WARP_SIZE;
  const size_t num_elements = num_segments * segment_size;
  const int segments_per_block = WARPS_PER_BLOCK * SEGMENTS_PER_WARP;
  half *d_in_fp16 = nullptr;
  half *d_out = nullptr;
  try {
    PRINT_IF_ERROR(hipMalloc(&d_in_fp16, num_elements * sizeof(half)));
    PRINT_IF_ERROR(hipMalloc(&d_out, num_segments * sizeof(half)));
    cuda_memory_set(d_in_fp16, 0.001f, num_elements);
    dim3 gridDim, blockDim;
    blockDim.x = BLOCK_DIM;
    gridDim.x = (num_segments + segments_per_block - 1) / segments_per_block; /* ceil-div */
    if (gridDim.x >= CUDA_MAX_GRID_SIZE) {
      state.SkipWithError(
          fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", gridDim.x)
              .c_str());
      return;
    }
    hipEvent_t start, stop;
    PRINT_IF_ERROR(hipEventCreate(&start));
    PRINT_IF_ERROR(hipEventCreate(&stop));
    defer(hipEventDestroy(start));
    defer(hipEventDestroy(stop));
    for (auto _ : state) {
      /* bracket only the kernel with events; report that as iteration time */
      PRINT_IF_ERROR(hipEventRecord(start));
      hipLaunchKernelGGL(( compute_wmma_segmented_reduction_256<SEGMENTS_PER_WARP, WARPS_PER_BLOCK, BLOCK_DIM>)
          , dim3(gridDim), dim3(blockDim), 0, 0, d_in_fp16, d_out, num_segments);
      PRINT_IF_ERROR(hipEventRecord(stop));
      PRINT_IF_ERROR(hipEventSynchronize(stop));
      state.PauseTiming();
      float msecTotal = 0.0f;
      PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop));
      state.SetIterationTime(msecTotal / 1000);
      state.ResumeTiming();
    }
    state.counters.insert({{"num_segments", num_segments},
                           {"segment_size", segment_size},
                           {"num_elements", num_segments * segment_size},
                           {"segmented_per_warp", SEGMENTS_PER_WARP},
                           {"warps_per_block", WARPS_PER_BLOCK},
                           {"flops",
                            {state.iterations() * 1.0 * num_segments * segment_size,
                             benchmark::Counter::kAvgThreadsRate}}});
#if 0
    /* NOTE(review): this disabled verification references h_in, which is
     * not declared in this function — it will not compile if re-enabled. */
    half *h_out = new half[num_segments];
    PRINT_IF_ERROR(hipMemcpy(h_out, d_out, num_segments * sizeof(half),
                             hipMemcpyDeviceToHost));
    int errors = 0;
    for (int j = 0; j < num_segments; j++) {
      float correct_segment_sum = 0;
      for (int i = 0; i < segment_size; i++) {
        correct_segment_sum += h_in[j * segment_size + i];
      }
      if (fabs(half_to_float(h_out[j]) - correct_segment_sum) > 0.1) {
        errors++;
        printf("Expected %f, get h_out[%d] = %f\n", correct_segment_sum, j,
               half_to_float(h_out[j]));
      }
    }
    if (errors > 0) {
      printf(
          "CUDA_WMMA_SEGMENTED_REDUCTION_256 does not agree with SEQUENTIAL! %d errors!\n",
          errors);
    } else {
      printf("Results verified: they agree.\n\n");
    }
    delete[] h_out; /* fix: was `delete` on a new[] array */
#endif
    hipFree(d_in_fp16);
    hipFree(d_out);
  } catch (...) {
    /* release device state before rethrowing so later benchmarks start clean */
    hipFree(d_in_fp16);
    hipFree(d_out);
    hipDeviceReset();
    const auto p = std::current_exception();
    std::rethrow_exception(p);
  }
}
/* Public benchmark entry (HIP build): reset the device for a clean start,
 * then run the try* driver, converting any exception into a benchmark
 * error so a failing configuration is skipped instead of aborting the
 * whole suite. Catch-clause order matters: std::exception, std::string,
 * then catch-all. */
template <int SEGMENTS_PER_WARP, int WARPS_PER_BLOCK>
void CUDA_WMMA_SEGMENTED_REDUCTION_256(benchmark::State &state) {
hipDeviceReset();
try {
tryCUDA_WMMA_SEGMENTED_REDUCTION_256<SEGMENTS_PER_WARP, WARPS_PER_BLOCK>(state);
} catch (const std::exception &e) {
state.SkipWithError(e.what());
} catch (const std::string &e) {
state.SkipWithError(e.c_str());
} catch (...) {
state.SkipWithError("unknown exception");
}
}
// Instantiate one benchmark per (SEGMENTS_PER_WARP, WARPS_PER_BLOCK) pair.
// SEG_256_ARGS supplies the (num_segments, segment_size) argument grid and
// UseManualTime() makes the event-measured kernel time authoritative.
#define RUN_CUDA_WMMA0(SEGMENTS_PER_WARP, WARPS_PER_BLOCK) \
  BENCHMARK_TEMPLATE( \
      CUDA_WMMA_SEGMENTED_REDUCTION_256, SEGMENTS_PER_WARP, WARPS_PER_BLOCK) \
      ->SEG_256_ARGS() \
      ->UseManualTime();
// Sweep WARPS_PER_BLOCK over {1,2,4,8} for one SEGMENTS_PER_WARP value.
#define RUN_CUDA_WMMA(SEGMENTS_PER_WARP) \
  RUN_CUDA_WMMA0(SEGMENTS_PER_WARP, 1); \
  RUN_CUDA_WMMA0(SEGMENTS_PER_WARP, 2); \
  RUN_CUDA_WMMA0(SEGMENTS_PER_WARP, 4); \
  RUN_CUDA_WMMA0(SEGMENTS_PER_WARP, 8);
// Register the full 4x4 configuration sweep.
RUN_CUDA_WMMA(1);
RUN_CUDA_WMMA(2);
RUN_CUDA_WMMA(4);
RUN_CUDA_WMMA(8);
| 8235be7d33b24948562d3d3858b7f40baaddef43.cu |
#include <benchmark/benchmark.h>
#include "init/init.hpp"
#include "reduction/args.hpp"
#include "utils/utils.hpp"
#include "kernel.cuh"
using namespace wmma_reduction;
/* Benchmark driver for the 256-element segmented WMMA reduction kernel
 * (CUDA build). state.range(0) = segment count, state.range(1) = segment
 * size (must equal WMMA_TILE_SIZE == 256). Allocates fp16 input/output on
 * the device, times the kernel with CUDA events (manual timing), and
 * reports counters. Exceptions propagate to the caller, which converts
 * them into benchmark errors. */
template <int SEGMENTS_PER_WARP, int WARPS_PER_BLOCK>
void tryCUDA_WMMA_SEGMENTED_REDUCTION_256(benchmark::State &state) {
  const size_t num_segments = state.range(0);
  const size_t segment_size = state.range(1);
  if (segment_size != WMMA_TILE_SIZE) {
    state.SkipWithError("segment size must be WMMA_TILE_SIZE (256)");
    /* Fix: SkipWithError does not abort the function; without this return
     * the benchmark kept allocating and launching with an invalid size
     * (the gridDim check below already returned). */
    return;
  }
  const int BLOCK_DIM = WARPS_PER_BLOCK * WARP_SIZE;
  const size_t num_elements = num_segments * segment_size;
  const int segments_per_block = WARPS_PER_BLOCK * SEGMENTS_PER_WARP;
  half *d_in_fp16 = nullptr;
  half *d_out = nullptr;
  try {
    PRINT_IF_ERROR(cudaMalloc(&d_in_fp16, num_elements * sizeof(half)));
    PRINT_IF_ERROR(cudaMalloc(&d_out, num_segments * sizeof(half)));
    cuda_memory_set(d_in_fp16, 0.001f, num_elements);
    dim3 gridDim, blockDim;
    blockDim.x = BLOCK_DIM;
    gridDim.x = (num_segments + segments_per_block - 1) / segments_per_block; /* ceil-div */
    if (gridDim.x >= CUDA_MAX_GRID_SIZE) {
      state.SkipWithError(
          fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", gridDim.x)
              .c_str());
      return;
    }
    cudaEvent_t start, stop;
    PRINT_IF_ERROR(cudaEventCreate(&start));
    PRINT_IF_ERROR(cudaEventCreate(&stop));
    defer(cudaEventDestroy(start));
    defer(cudaEventDestroy(stop));
    for (auto _ : state) {
      /* bracket only the kernel with events; report that as iteration time */
      PRINT_IF_ERROR(cudaEventRecord(start));
      compute_wmma_segmented_reduction_256<SEGMENTS_PER_WARP, WARPS_PER_BLOCK, BLOCK_DIM>
          <<<gridDim, blockDim>>>(d_in_fp16, d_out, num_segments);
      PRINT_IF_ERROR(cudaEventRecord(stop));
      PRINT_IF_ERROR(cudaEventSynchronize(stop));
      state.PauseTiming();
      float msecTotal = 0.0f;
      PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop));
      state.SetIterationTime(msecTotal / 1000);
      state.ResumeTiming();
    }
    state.counters.insert({{"num_segments", num_segments},
                           {"segment_size", segment_size},
                           {"num_elements", num_segments * segment_size},
                           {"segmented_per_warp", SEGMENTS_PER_WARP},
                           {"warps_per_block", WARPS_PER_BLOCK},
                           {"flops",
                            {state.iterations() * 1.0 * num_segments * segment_size,
                             benchmark::Counter::kAvgThreadsRate}}});
#if 0
    /* NOTE(review): this disabled verification references h_in, which is
     * not declared in this function — it will not compile if re-enabled. */
    half *h_out = new half[num_segments];
    PRINT_IF_ERROR(cudaMemcpy(h_out, d_out, num_segments * sizeof(half),
                              cudaMemcpyDeviceToHost));
    int errors = 0;
    for (int j = 0; j < num_segments; j++) {
      float correct_segment_sum = 0;
      for (int i = 0; i < segment_size; i++) {
        correct_segment_sum += h_in[j * segment_size + i];
      }
      if (fabs(half_to_float(h_out[j]) - correct_segment_sum) > 0.1) {
        errors++;
        printf("Expected %f, get h_out[%d] = %f\n", correct_segment_sum, j,
               half_to_float(h_out[j]));
      }
    }
    if (errors > 0) {
      printf(
          "CUDA_WMMA_SEGMENTED_REDUCTION_256 does not agree with SEQUENTIAL! %d errors!\n",
          errors);
    } else {
      printf("Results verified: they agree.\n\n");
    }
    delete[] h_out; /* fix: was `delete` on a new[] array */
#endif
    cudaFree(d_in_fp16);
    cudaFree(d_out);
  } catch (...) {
    /* release device state before rethrowing so later benchmarks start clean */
    cudaFree(d_in_fp16);
    cudaFree(d_out);
    cudaDeviceReset();
    const auto p = std::current_exception();
    std::rethrow_exception(p);
  }
}
/* Public benchmark entry (CUDA build): reset the device for a clean start,
 * then run the try* driver, converting any exception into a benchmark
 * error so a failing configuration is skipped instead of aborting the
 * whole suite. Catch-clause order matters: std::exception, std::string,
 * then catch-all. */
template <int SEGMENTS_PER_WARP, int WARPS_PER_BLOCK>
void CUDA_WMMA_SEGMENTED_REDUCTION_256(benchmark::State &state) {
cudaDeviceReset();
try {
tryCUDA_WMMA_SEGMENTED_REDUCTION_256<SEGMENTS_PER_WARP, WARPS_PER_BLOCK>(state);
} catch (const std::exception &e) {
state.SkipWithError(e.what());
} catch (const std::string &e) {
state.SkipWithError(e.c_str());
} catch (...) {
state.SkipWithError("unknown exception");
}
}
// Instantiate one benchmark per (SEGMENTS_PER_WARP, WARPS_PER_BLOCK) pair.
// SEG_256_ARGS supplies the (num_segments, segment_size) argument grid and
// UseManualTime() makes the event-measured kernel time authoritative.
#define RUN_CUDA_WMMA0(SEGMENTS_PER_WARP, WARPS_PER_BLOCK) \
  BENCHMARK_TEMPLATE( \
      CUDA_WMMA_SEGMENTED_REDUCTION_256, SEGMENTS_PER_WARP, WARPS_PER_BLOCK) \
      ->SEG_256_ARGS() \
      ->UseManualTime();
// Sweep WARPS_PER_BLOCK over {1,2,4,8} for one SEGMENTS_PER_WARP value.
#define RUN_CUDA_WMMA(SEGMENTS_PER_WARP) \
  RUN_CUDA_WMMA0(SEGMENTS_PER_WARP, 1); \
  RUN_CUDA_WMMA0(SEGMENTS_PER_WARP, 2); \
  RUN_CUDA_WMMA0(SEGMENTS_PER_WARP, 4); \
  RUN_CUDA_WMMA0(SEGMENTS_PER_WARP, 8);
// Register the full 4x4 configuration sweep.
RUN_CUDA_WMMA(1);
RUN_CUDA_WMMA(2);
RUN_CUDA_WMMA(4);
RUN_CUDA_WMMA(8);
|
ee848be1a35fcb8d66a911c0479bdbdf6b35a29a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "half2_operator_overload.cuh"
#include "newhalf.hpp"
#define THREADS_PER_DIM 16
#define BLOCKS_PER_DIM 16
#define THREADS_PER_BLOCK THREADS_PER_DIM*THREADS_PER_DIM
#include "kmeans_hip_kernel.hip"
//#define BLOCK_DELTA_REDUCE
//#define BLOCK_CENTER_REDUCE
#define CPU_DELTA_REDUCE
#define CPU_CENTER_REDUCE
extern "C"
int setup(int argc, char** argv); /* function prototype */
// GLOBAL!!!!!
unsigned int num_threads_perdim = THREADS_PER_DIM; /* sqrt(256) -- see references for this choice */
unsigned int num_blocks_perdim = BLOCKS_PER_DIM; /* temporary */
unsigned int num_threads = num_threads_perdim*num_threads_perdim; /* number of threads */
unsigned int num_blocks = num_blocks_perdim*num_blocks_perdim; /* number of blocks */
/* _d denotes it resides on the device */
int *membership_new; /* newly assignment membership */
half2 *feature_d; /* inverted data array */
half2 *feature_flipped_d; /* original (not inverted) data array */
int *membership_d; /* membership on the device */
half *block_new_centers; /* sum of points in a cluster (per block) */
half2 *clusters_d; /* cluster centers on the device */
half2 *block_clusters_d; /* per block calculation of cluster centers */
int *block_deltas_d; /* per block calculation of deltas */
half_float::half* features_half;
half_float::half* clusters_half;
/* -------------- allocateMemory() ------------------- */
/* allocate device memory, calculate number of blocks and threads, and invert the data array */
/* Allocates all host/device buffers used by kmeansCuda() and uploads the
 * point data (converted to half precision).  Also derives a square 2-D
 * launch grid large enough to cover npoints with num_threads per block.
 * NOTE(review): feature_flipped_d / feature_d are declared half2* but are
 * allocated and passed to the kernel as arrays of `half` -- confirm the
 * intended element type before changing any size computation here. */
extern "C"
void allocateMemory(int npoints, int nfeatures, int nclusters, float **features)
{
/* one thread per point: blocks needed to cover npoints, rounded up */
num_blocks = npoints / num_threads;
if (npoints % num_threads > 0) /* defeat truncation */
num_blocks++;
/* make the grid square: smallest perdim with perdim^2 >= num_blocks */
num_blocks_perdim = sqrt((double) num_blocks);
while (num_blocks_perdim * num_blocks_perdim < num_blocks) // defeat truncation (should run once)
num_blocks_perdim++;
num_blocks = num_blocks_perdim*num_blocks_perdim;
/* allocate memory for memory_new[] and initialize to -1 (host); -1 marks
 * "not yet assigned to any cluster" */
membership_new = (int*) malloc(npoints * sizeof(int));
for(int i=0;i<npoints;i++) {
membership_new[i] = -1;
}
/* allocate memory for block_new_centers[] (host) */
block_new_centers = (half *) malloc(nclusters*nfeatures*sizeof(half));
/* allocate memory for feature_flipped_d[][], feature_d[][] (device) */
hipMalloc((void**) &feature_flipped_d, npoints*nfeatures*sizeof(half));
/* convert the float input features to half precision on the host, then upload */
features_half = (half_float::half*)malloc(npoints*nfeatures*sizeof(half));
for (int i = 0; i<npoints*nfeatures; i++){
features_half[i] = half_float::half(features[0][i]);
}
hipMemcpy(feature_flipped_d, features_half, npoints*nfeatures*sizeof(half), hipMemcpyHostToDevice);
hipMalloc((void**) &feature_d, npoints*nfeatures*sizeof(half));
/* invert the data array (kernel execution): transpose point-major layout
 * into feature-major layout for coalesced access in kmeansPoint */
hipLaunchKernelGGL(( invert_mapping), dim3(num_blocks),dim3(num_threads), 0, 0, (half*)feature_flipped_d,(half*)feature_d,npoints,nfeatures);
/* allocate memory for membership_d[] and clusters_d[][] (device) */
hipMalloc((void**) &membership_d, npoints*sizeof(int));
hipMalloc((void**) &clusters_d, nclusters*nfeatures*sizeof(half));
#ifdef BLOCK_DELTA_REDUCE
// allocate array to hold the per block deltas on the gpu side
hipMalloc((void**) &block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int));
//hipMemcpy(block_delta_d, &delta_h, sizeof(int), hipMemcpyHostToDevice);
#endif
#ifdef BLOCK_CENTER_REDUCE
// allocate memory and copy to card cluster array in which to accumulate center points for the next iteration
hipMalloc((void**) &block_clusters_d,
num_blocks_perdim * num_blocks_perdim *
nclusters * nfeatures * sizeof(half));
//hipMemcpy(new_clusters_d, new_centers[0], nclusters*nfeatures*sizeof(float), hipMemcpyHostToDevice);
#endif
}
/* -------------- allocateMemory() end ------------------- */
/* -------------- deallocateMemory() ------------------- */
/* free host and device memory */
/* Releases every buffer obtained in allocateMemory(): device arrays first,
 * then the host-side scratch buffers.  The individual frees are independent,
 * so their order does not matter. */
extern "C"
void deallocateMemory()
{
    /* device-side buffers */
    hipFree(feature_d);
    hipFree(feature_flipped_d);
    hipFree(membership_d);
    hipFree(clusters_d);
#ifdef BLOCK_CENTER_REDUCE
    hipFree(block_clusters_d);
#endif
#ifdef BLOCK_DELTA_REDUCE
    hipFree(block_deltas_d);
#endif
    /* host-side buffers */
    free(membership_new);
    free(block_new_centers);
}
/* -------------- deallocateMemory() end ------------------- */
////////////////////////////////////////////////////////////////////////////////
// Program main //
/* Program entry point: pin the computation to GPU #1, then hand control to
 * the k-means driver (setup() parses the command line and runs the loop). */
int main(int argc, char **argv)
{
    // make sure we're running on the big card
    hipSetDevice(1);
    // as done in the CUDA start/help document provided
    setup(argc, argv);
    return 0; /* was implicit; made explicit for clarity */
}
// //
////////////////////////////////////////////////////////////////////////////////
/* ------------------- kmeansCuda() ------------------------ */
/**
 * Performs one k-means iteration on the GPU plus the CPU-side reductions.
 *
 * Uploads the current cluster centers (converted to half precision), runs the
 * kmeansPoint kernel to reassign every point to its nearest center, copies the
 * new memberships back, and then accumulates per-cluster sums/counts and the
 * number of points whose membership changed.
 *
 * @return delta -- number of points that moved to a different cluster
 *         (int return: a float return value caused problems historically).
 *
 * Fix: clusters_half -- and, when the BLOCK_* reductions are enabled, the
 * per-block host staging buffers -- were allocated on every call and never
 * released, leaking host memory each iteration; they are now freed before
 * returning.  A dead empty loop over npoints*nfeatures was also removed.
 */
extern "C"
int
kmeansCuda(float **feature, /* in: [npoints][nfeatures] */
           int nfeatures, /* number of attributes for each point */
           int npoints, /* number of data points */
           int nclusters, /* number of clusters */
           int *membership, /* which cluster the point belongs to */
           float **clusters, /* coordinates of cluster centers */
           int *new_centers_len, /* number of elements in each cluster */
           float **new_centers /* sum of elements in each cluster */
)
{
    int delta = 0; /* number of points whose membership changed */
    int i, j; /* counters */

    /* convert the current centers to half precision for the device */
    clusters_half = (half_float::half*) malloc(nclusters*nfeatures*sizeof(half));
    for (int k = 0; k < nclusters*nfeatures; k++) {
        clusters_half[k] = half_float::half(clusters[0][k]);
    }

    hipSetDevice(1); /* same device selected in main() */

    /* copy membership (host to device) */
    hipMemcpy(membership_d, membership_new, npoints*sizeof(int), hipMemcpyHostToDevice);
    /* copy clusters (host to device) */
    hipMemcpy(clusters_d, clusters_half, nclusters*nfeatures*sizeof(half), hipMemcpyHostToDevice);

    /* bind the feature / cluster arrays to textures for the kernel */
    hipChannelFormatDesc chDesc0 = hipCreateChannelDesc<int>();
    t_features.filterMode = hipFilterModePoint;
    t_features.normalized = false;
    t_features.channelDesc = chDesc0;
    if(hipBindTexture(NULL, &t_features, feature_d, &chDesc0, npoints*nfeatures*sizeof(half)) != hipSuccess)
        printf("Couldn't bind features array to texture!\n");
    hipChannelFormatDesc chDesc1 = hipCreateChannelDesc<int>();
    t_features_flipped.filterMode = hipFilterModePoint;
    t_features_flipped.normalized = false;
    t_features_flipped.channelDesc = chDesc1;
    if(hipBindTexture(NULL, &t_features_flipped, feature_flipped_d, &chDesc1, npoints*nfeatures*sizeof(half)) != hipSuccess)
        printf("Couldn't bind features_flipped array to texture!\n");
    hipChannelFormatDesc chDesc2 = hipCreateChannelDesc<int>();
    t_clusters.filterMode = hipFilterModePoint;
    t_clusters.normalized = false;
    t_clusters.channelDesc = chDesc2;
    if(hipBindTexture(NULL, &t_clusters, clusters_d, &chDesc2, nclusters*nfeatures*sizeof(half)) != hipSuccess)
        printf("Couldn't bind clusters array to texture!\n");

    /* copy clusters to constant memory.
     * NOTE(review): addressing the symbol by string name is deprecated/removed
     * in recent toolkits -- confirm "c_clusters" still resolves here. */
    hipMemcpyToSymbol("c_clusters",clusters_half,nclusters*nfeatures*sizeof(half),0,hipMemcpyHostToDevice);

    /* events used to time the kernel */
    hipError_t error;
    hipEvent_t start;
    error = hipEventCreate(&start);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    hipEvent_t stop;
    error = hipEventCreate(&stop);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Record the start event
    error = hipEventRecord(start, NULL);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    /* setup execution parameters.
       changed to 2d (source code on NVIDIA CUDA Programming Guide) */
    dim3 grid( num_blocks_perdim, num_blocks_perdim );
    dim3 threads( num_threads_perdim*num_threads_perdim );

    /* execute the kernel */
    hipLaunchKernelGGL(( kmeansPoint), dim3(grid), dim3(threads) , 0, 0, feature_d,
                                                nfeatures,
                                                npoints,
                                                nclusters,
                                                membership_d,
                                                clusters_d,
                                                block_clusters_d,
                                                block_deltas_d);
    hipDeviceSynchronize();

    error = hipEventRecord(stop, NULL);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Wait for the stop event to complete
    error = hipEventSynchronize(stop);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    float msecTotal = 0.0f;
    error = hipEventElapsedTime(&msecTotal, start, stop);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    printf ("%f, ",msecTotal); /* kernel time in ms */

    /* copy back membership (device to host) */
    hipMemcpy(membership_new, membership_d, npoints*sizeof(int), hipMemcpyDeviceToHost);

#ifdef BLOCK_CENTER_REDUCE
    /*** Copy back arrays of per block sums ***
     * NOTE(review): block_clusters_d is allocated with sizeof(half) elements
     * but is read back here as floats -- confirm the intended element type. */
    float * block_clusters_h = (float *) malloc(
        num_blocks_perdim * num_blocks_perdim *
        nclusters * nfeatures * sizeof(float));
    hipMemcpy(block_clusters_h, block_clusters_d,
        num_blocks_perdim * num_blocks_perdim *
        nclusters * nfeatures * sizeof(float),
        hipMemcpyDeviceToHost);
#endif
#ifdef BLOCK_DELTA_REDUCE
    int * block_deltas_h = (int *) malloc(
        num_blocks_perdim * num_blocks_perdim * sizeof(int));
    hipMemcpy(block_deltas_h, block_deltas_d,
        num_blocks_perdim * num_blocks_perdim * sizeof(int),
        hipMemcpyDeviceToHost);
#endif

    /* for each point, sum data points in each cluster
       and see if membership has changed:
       if so, increase delta and change old membership, and update new_centers;
       otherwise, update new_centers */
    delta = 0;
    for (i = 0; i < npoints; i++)
    {
        int cluster_id = membership_new[i];
        new_centers_len[cluster_id]++;
        if (membership_new[i] != membership[i])
        {
#ifdef CPU_DELTA_REDUCE
            delta++;
#endif
            membership[i] = membership_new[i];
        }
#ifdef CPU_CENTER_REDUCE
        for (j = 0; j < nfeatures; j++)
        {
            new_centers[cluster_id][j] += feature[i][j];
        }
#endif
    }

#ifdef BLOCK_DELTA_REDUCE
    /*** calculate global sums from per block sums for delta ***/
    for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) {
        delta += block_deltas_h[i];
    }
    free(block_deltas_h); /* fix: staging buffer was leaked */
#endif

#ifdef BLOCK_CENTER_REDUCE
    /* reduce the per-block partial center sums into block_new_centers */
    for(int j = 0; j < nclusters;j++) {
        for(int k = 0; k < nfeatures;k++) {
            block_new_centers[j*nfeatures + k] = 0.f;
        }
    }
    for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) {
        for(int j = 0; j < nclusters;j++) {
            for(int k = 0; k < nfeatures;k++) {
                block_new_centers[j*nfeatures + k] += block_clusters_h[i * nclusters*nfeatures + j * nfeatures + k];
            }
        }
    }
#ifdef CPU_CENTER_REDUCE
    /* (debug comparison of CPU vs. per-block reduction removed; see history) */
#endif
#ifdef BLOCK_CENTER_REDUCE
    for(int j = 0; j < nclusters;j++) {
        for(int k = 0; k < nfeatures;k++)
            new_centers[j][k]= block_new_centers[j*nfeatures + k];
    }
#endif
    free(block_clusters_h); /* fix: staging buffer was leaked */
#endif

    free(clusters_half); /* fix: was leaked on every iteration */
    clusters_half = NULL;

    return delta;
}
/* ------------------- kmeansCuda() end ------------------------ */
| ee848be1a35fcb8d66a911c0479bdbdf6b35a29a.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <omp.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include "half2_operator_overload.cuh"
#include "newhalf.hpp"
#define THREADS_PER_DIM 16
#define BLOCKS_PER_DIM 16
#define THREADS_PER_BLOCK THREADS_PER_DIM*THREADS_PER_DIM
#include "kmeans_cuda_kernel.cu"
//#define BLOCK_DELTA_REDUCE
//#define BLOCK_CENTER_REDUCE
#define CPU_DELTA_REDUCE
#define CPU_CENTER_REDUCE
extern "C"
int setup(int argc, char** argv); /* function prototype */
// GLOBAL!!!!!
unsigned int num_threads_perdim = THREADS_PER_DIM; /* sqrt(256) -- see references for this choice */
unsigned int num_blocks_perdim = BLOCKS_PER_DIM; /* temporary */
unsigned int num_threads = num_threads_perdim*num_threads_perdim; /* number of threads */
unsigned int num_blocks = num_blocks_perdim*num_blocks_perdim; /* number of blocks */
/* _d denotes it resides on the device */
int *membership_new; /* newly assignment membership */
half2 *feature_d; /* inverted data array */
half2 *feature_flipped_d; /* original (not inverted) data array */
int *membership_d; /* membership on the device */
half *block_new_centers; /* sum of points in a cluster (per block) */
half2 *clusters_d; /* cluster centers on the device */
half2 *block_clusters_d; /* per block calculation of cluster centers */
int *block_deltas_d; /* per block calculation of deltas */
half_float::half* features_half;
half_float::half* clusters_half;
/* -------------- allocateMemory() ------------------- */
/* allocate device memory, calculate number of blocks and threads, and invert the data array */
/* Allocates all host/device buffers used by kmeansCuda() and uploads the
 * point data (converted to half precision).  Also derives a square 2-D
 * launch grid large enough to cover npoints with num_threads per block.
 * NOTE(review): feature_flipped_d / feature_d are declared half2* but are
 * allocated and passed to the kernel as arrays of `half` -- confirm the
 * intended element type before changing any size computation here. */
extern "C"
void allocateMemory(int npoints, int nfeatures, int nclusters, float **features)
{
/* one thread per point: blocks needed to cover npoints, rounded up */
num_blocks = npoints / num_threads;
if (npoints % num_threads > 0) /* defeat truncation */
num_blocks++;
/* make the grid square: smallest perdim with perdim^2 >= num_blocks */
num_blocks_perdim = sqrt((double) num_blocks);
while (num_blocks_perdim * num_blocks_perdim < num_blocks) // defeat truncation (should run once)
num_blocks_perdim++;
num_blocks = num_blocks_perdim*num_blocks_perdim;
/* allocate memory for memory_new[] and initialize to -1 (host); -1 marks
 * "not yet assigned to any cluster" */
membership_new = (int*) malloc(npoints * sizeof(int));
for(int i=0;i<npoints;i++) {
membership_new[i] = -1;
}
/* allocate memory for block_new_centers[] (host) */
block_new_centers = (half *) malloc(nclusters*nfeatures*sizeof(half));
/* allocate memory for feature_flipped_d[][], feature_d[][] (device) */
cudaMalloc((void**) &feature_flipped_d, npoints*nfeatures*sizeof(half));
/* convert the float input features to half precision on the host, then upload */
features_half = (half_float::half*)malloc(npoints*nfeatures*sizeof(half));
for (int i = 0; i<npoints*nfeatures; i++){
features_half[i] = half_float::half(features[0][i]);
}
cudaMemcpy(feature_flipped_d, features_half, npoints*nfeatures*sizeof(half), cudaMemcpyHostToDevice);
cudaMalloc((void**) &feature_d, npoints*nfeatures*sizeof(half));
/* invert the data array (kernel execution): transpose point-major layout
 * into feature-major layout for coalesced access in kmeansPoint */
invert_mapping<<<num_blocks,num_threads>>>((half*)feature_flipped_d,(half*)feature_d,npoints,nfeatures);
/* allocate memory for membership_d[] and clusters_d[][] (device) */
cudaMalloc((void**) &membership_d, npoints*sizeof(int));
cudaMalloc((void**) &clusters_d, nclusters*nfeatures*sizeof(half));
#ifdef BLOCK_DELTA_REDUCE
// allocate array to hold the per block deltas on the gpu side
cudaMalloc((void**) &block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int));
//cudaMemcpy(block_delta_d, &delta_h, sizeof(int), cudaMemcpyHostToDevice);
#endif
#ifdef BLOCK_CENTER_REDUCE
// allocate memory and copy to card cluster array in which to accumulate center points for the next iteration
cudaMalloc((void**) &block_clusters_d,
num_blocks_perdim * num_blocks_perdim *
nclusters * nfeatures * sizeof(half));
//cudaMemcpy(new_clusters_d, new_centers[0], nclusters*nfeatures*sizeof(float), cudaMemcpyHostToDevice);
#endif
}
/* -------------- allocateMemory() end ------------------- */
/* -------------- deallocateMemory() ------------------- */
/* free host and device memory */
/* Releases every buffer obtained in allocateMemory(): device arrays first,
 * then the host-side scratch buffers.  The individual frees are independent,
 * so their order does not matter. */
extern "C"
void deallocateMemory()
{
    /* device-side buffers */
    cudaFree(feature_d);
    cudaFree(feature_flipped_d);
    cudaFree(membership_d);
    cudaFree(clusters_d);
#ifdef BLOCK_CENTER_REDUCE
    cudaFree(block_clusters_d);
#endif
#ifdef BLOCK_DELTA_REDUCE
    cudaFree(block_deltas_d);
#endif
    /* host-side buffers */
    free(membership_new);
    free(block_new_centers);
}
/* -------------- deallocateMemory() end ------------------- */
////////////////////////////////////////////////////////////////////////////////
// Program main //
/* Program entry point: pin the computation to GPU #1, then hand control to
 * the k-means driver (setup() parses the command line and runs the loop). */
int main(int argc, char **argv)
{
    // make sure we're running on the big card
    cudaSetDevice(1);
    // as done in the CUDA start/help document provided
    setup(argc, argv);
    return 0; /* was implicit; made explicit for clarity */
}
// //
////////////////////////////////////////////////////////////////////////////////
/* ------------------- kmeansCuda() ------------------------ */
/**
 * Performs one k-means iteration on the GPU plus the CPU-side reductions.
 *
 * Uploads the current cluster centers (converted to half precision), runs the
 * kmeansPoint kernel to reassign every point to its nearest center, copies the
 * new memberships back, and then accumulates per-cluster sums/counts and the
 * number of points whose membership changed.
 *
 * @return delta -- number of points that moved to a different cluster
 *         (int return: a float return value caused problems historically).
 *
 * Fixes: clusters_half -- and, when the BLOCK_* reductions are enabled, the
 * per-block host staging buffers -- were allocated on every call and never
 * released; they are now freed before returning.  CUDA_SUCCESS (a driver-API
 * CUresult value) was compared against cudaError_t results; replaced with
 * cudaSuccess.  Deprecated cudaThreadSynchronize() replaced with
 * cudaDeviceSynchronize().  A dead empty loop was removed.
 */
extern "C"
int
kmeansCuda(float **feature, /* in: [npoints][nfeatures] */
           int nfeatures, /* number of attributes for each point */
           int npoints, /* number of data points */
           int nclusters, /* number of clusters */
           int *membership, /* which cluster the point belongs to */
           float **clusters, /* coordinates of cluster centers */
           int *new_centers_len, /* number of elements in each cluster */
           float **new_centers /* sum of elements in each cluster */
)
{
    int delta = 0; /* number of points whose membership changed */
    int i, j; /* counters */

    /* convert the current centers to half precision for the device */
    clusters_half = (half_float::half*) malloc(nclusters*nfeatures*sizeof(half));
    for (int k = 0; k < nclusters*nfeatures; k++) {
        clusters_half[k] = half_float::half(clusters[0][k]);
    }

    cudaSetDevice(1); /* same device selected in main() */

    /* copy membership (host to device) */
    cudaMemcpy(membership_d, membership_new, npoints*sizeof(int), cudaMemcpyHostToDevice);
    /* copy clusters (host to device) */
    cudaMemcpy(clusters_d, clusters_half, nclusters*nfeatures*sizeof(half), cudaMemcpyHostToDevice);

    /* bind the feature / cluster arrays to textures for the kernel */
    cudaChannelFormatDesc chDesc0 = cudaCreateChannelDesc<int>();
    t_features.filterMode = cudaFilterModePoint;
    t_features.normalized = false;
    t_features.channelDesc = chDesc0;
    if(cudaBindTexture(NULL, &t_features, feature_d, &chDesc0, npoints*nfeatures*sizeof(half)) != cudaSuccess)
        printf("Couldn't bind features array to texture!\n");
    cudaChannelFormatDesc chDesc1 = cudaCreateChannelDesc<int>();
    t_features_flipped.filterMode = cudaFilterModePoint;
    t_features_flipped.normalized = false;
    t_features_flipped.channelDesc = chDesc1;
    if(cudaBindTexture(NULL, &t_features_flipped, feature_flipped_d, &chDesc1, npoints*nfeatures*sizeof(half)) != cudaSuccess)
        printf("Couldn't bind features_flipped array to texture!\n");
    cudaChannelFormatDesc chDesc2 = cudaCreateChannelDesc<int>();
    t_clusters.filterMode = cudaFilterModePoint;
    t_clusters.normalized = false;
    t_clusters.channelDesc = chDesc2;
    if(cudaBindTexture(NULL, &t_clusters, clusters_d, &chDesc2, nclusters*nfeatures*sizeof(half)) != cudaSuccess)
        printf("Couldn't bind clusters array to texture!\n");

    /* copy clusters to constant memory.
     * NOTE(review): addressing the symbol by string name is deprecated/removed
     * in recent toolkits -- confirm "c_clusters" still resolves here. */
    cudaMemcpyToSymbol("c_clusters",clusters_half,nclusters*nfeatures*sizeof(half),0,cudaMemcpyHostToDevice);

    /* events used to time the kernel */
    cudaError_t error;
    cudaEvent_t start;
    error = cudaEventCreate(&start);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    cudaEvent_t stop;
    error = cudaEventCreate(&stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Record the start event
    error = cudaEventRecord(start, NULL);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    /* setup execution parameters.
       changed to 2d (source code on NVIDIA CUDA Programming Guide) */
    dim3 grid( num_blocks_perdim, num_blocks_perdim );
    dim3 threads( num_threads_perdim*num_threads_perdim );

    /* execute the kernel */
    kmeansPoint<<< grid, threads >>>( feature_d,
                                      nfeatures,
                                      npoints,
                                      nclusters,
                                      membership_d,
                                      clusters_d,
                                      block_clusters_d,
                                      block_deltas_d);
    cudaDeviceSynchronize(); /* was deprecated cudaThreadSynchronize() */

    error = cudaEventRecord(stop, NULL);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Wait for the stop event to complete
    error = cudaEventSynchronize(stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    float msecTotal = 0.0f;
    error = cudaEventElapsedTime(&msecTotal, start, stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    printf ("%f, ",msecTotal); /* kernel time in ms */

    /* copy back membership (device to host) */
    cudaMemcpy(membership_new, membership_d, npoints*sizeof(int), cudaMemcpyDeviceToHost);

#ifdef BLOCK_CENTER_REDUCE
    /*** Copy back arrays of per block sums ***
     * NOTE(review): block_clusters_d is allocated with sizeof(half) elements
     * but is read back here as floats -- confirm the intended element type. */
    float * block_clusters_h = (float *) malloc(
        num_blocks_perdim * num_blocks_perdim *
        nclusters * nfeatures * sizeof(float));
    cudaMemcpy(block_clusters_h, block_clusters_d,
        num_blocks_perdim * num_blocks_perdim *
        nclusters * nfeatures * sizeof(float),
        cudaMemcpyDeviceToHost);
#endif
#ifdef BLOCK_DELTA_REDUCE
    int * block_deltas_h = (int *) malloc(
        num_blocks_perdim * num_blocks_perdim * sizeof(int));
    cudaMemcpy(block_deltas_h, block_deltas_d,
        num_blocks_perdim * num_blocks_perdim * sizeof(int),
        cudaMemcpyDeviceToHost);
#endif

    /* for each point, sum data points in each cluster
       and see if membership has changed:
       if so, increase delta and change old membership, and update new_centers;
       otherwise, update new_centers */
    delta = 0;
    for (i = 0; i < npoints; i++)
    {
        int cluster_id = membership_new[i];
        new_centers_len[cluster_id]++;
        if (membership_new[i] != membership[i])
        {
#ifdef CPU_DELTA_REDUCE
            delta++;
#endif
            membership[i] = membership_new[i];
        }
#ifdef CPU_CENTER_REDUCE
        for (j = 0; j < nfeatures; j++)
        {
            new_centers[cluster_id][j] += feature[i][j];
        }
#endif
    }

#ifdef BLOCK_DELTA_REDUCE
    /*** calculate global sums from per block sums for delta ***/
    for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) {
        delta += block_deltas_h[i];
    }
    free(block_deltas_h); /* fix: staging buffer was leaked */
#endif

#ifdef BLOCK_CENTER_REDUCE
    /* reduce the per-block partial center sums into block_new_centers */
    for(int j = 0; j < nclusters;j++) {
        for(int k = 0; k < nfeatures;k++) {
            block_new_centers[j*nfeatures + k] = 0.f;
        }
    }
    for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) {
        for(int j = 0; j < nclusters;j++) {
            for(int k = 0; k < nfeatures;k++) {
                block_new_centers[j*nfeatures + k] += block_clusters_h[i * nclusters*nfeatures + j * nfeatures + k];
            }
        }
    }
#ifdef CPU_CENTER_REDUCE
    /* (debug comparison of CPU vs. per-block reduction removed; see history) */
#endif
#ifdef BLOCK_CENTER_REDUCE
    for(int j = 0; j < nclusters;j++) {
        for(int k = 0; k < nfeatures;k++)
            new_centers[j][k]= block_new_centers[j*nfeatures + k];
    }
#endif
    free(block_clusters_h); /* fix: staging buffer was leaked */
#endif

    free(clusters_half); /* fix: was leaked on every iteration */
    clusters_half = NULL;

    return delta;
}
/* ------------------- kmeansCuda() end ------------------------ */
|
zmergeidr.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from zidr into one.
/* -------------------------------------------------------------------------- */
/* Element-wise residual difference: dt(:,:) = drs(:,:) - dr(:,:).
 * One thread per row; each thread walks every column of its row
 * (column-major storage with leading dimension num_rows). */
__global__ void
magma_zidr_smoothing_1_kernel(
    int num_rows,
    int num_cols,
    magmaDoubleComplex *drs,
    magmaDoubleComplex *dr,
    magmaDoubleComplex *dt )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows )
        return; /* tail threads past the last row do nothing */
    for ( int col = 0; col < num_cols; col++ ) {
        int idx = row + col * num_rows; /* column-major offset */
        dt[ idx ] = drs[ idx ] - dr[ idx ];
    }
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dt = drs - dr
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
drs magmaDoubleComplex_ptr
vector
@param[in]
dr magmaDoubleComplex_ptr
vector
@param[in,out]
dt magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
/* Host-side launcher for magma_zidr_smoothing_1_kernel: computes
 * dt = drs - dr on the queue's stream, one thread per matrix row. */
extern "C"
magma_int_t
magma_zidr_smoothing_1(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex_ptr drs,
    magmaDoubleComplex_ptr dr,
    magmaDoubleComplex_ptr dt,
    magma_queue_t queue )
{
    dim3 threads( BLOCK_SIZE );
    dim3 blocks( magma_ceildiv( num_rows, BLOCK_SIZE ) ); /* cover all rows */
    hipLaunchKernelGGL(( magma_zidr_smoothing_1_kernel), dim3(blocks), dim3(threads), 0, queue->cuda_stream() , num_rows, num_cols, drs, dr, dt );
    return MAGMA_SUCCESS;
}
/* Updates the smoothed iterate per row/column (column-major, leading
 * dimension num_rows):
 *     dxs = dxs + omega * ( dxs - dx )
 * NOTE(review): the launcher's documentation states dxs = dxs - gamma*(dxs-dx);
 * this code matches that only if the caller passes omega == -gamma.  Confirm
 * the sign convention at the call site before changing anything here. */
__global__ void
magma_zidr_smoothing_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex omega,
magmaDoubleComplex *dx,
magmaDoubleComplex *dxs )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
/* each thread updates every column of its row */
for( int j=0; j<num_cols; j++ ){
dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ]
- omega * dx[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dxs = dxs - gamma*(dxs-dx)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
dx magmaDoubleComplex_ptr
vector
@param[in,out]
dxs magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
/* Host-side launcher for magma_zidr_smoothing_2_kernel: applies the IDR
 * smoothing update to dxs on the queue's stream, one thread per matrix row. */
extern "C"
magma_int_t
magma_zidr_smoothing_2(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex omega,
    magmaDoubleComplex_ptr dx,
    magmaDoubleComplex_ptr dxs,
    magma_queue_t queue )
{
    dim3 threads( BLOCK_SIZE );
    dim3 blocks( magma_ceildiv( num_rows, BLOCK_SIZE ) ); /* cover all rows */
    hipLaunchKernelGGL(( magma_zidr_smoothing_2_kernel), dim3(blocks), dim3(threads), 0, queue->cuda_stream() , num_rows, num_cols, omega, dx, dxs);
    return MAGMA_SUCCESS;
}
| zmergeidr.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from zidr into one.
/* -------------------------------------------------------------------------- */
/* Element-wise residual difference: dt(:,:) = drs(:,:) - dr(:,:).
 * One thread per row; each thread walks every column of its row
 * (column-major storage with leading dimension num_rows). */
__global__ void
magma_zidr_smoothing_1_kernel(
    int num_rows,
    int num_cols,
    magmaDoubleComplex *drs,
    magmaDoubleComplex *dr,
    magmaDoubleComplex *dt )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows )
        return; /* tail threads past the last row do nothing */
    for ( int col = 0; col < num_cols; col++ ) {
        int idx = row + col * num_rows; /* column-major offset */
        dt[ idx ] = drs[ idx ] - dr[ idx ];
    }
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dt = drs - dr
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
drs magmaDoubleComplex_ptr
vector
@param[in]
dr magmaDoubleComplex_ptr
vector
@param[in,out]
dt magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
/* Host-side launcher for magma_zidr_smoothing_1_kernel: computes
 * dt = drs - dr on the queue's stream, one thread per matrix row. */
extern "C"
magma_int_t
magma_zidr_smoothing_1(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex_ptr drs,
    magmaDoubleComplex_ptr dr,
    magmaDoubleComplex_ptr dt,
    magma_queue_t queue )
{
    dim3 threads( BLOCK_SIZE );
    dim3 blocks( magma_ceildiv( num_rows, BLOCK_SIZE ) ); /* cover all rows */
    magma_zidr_smoothing_1_kernel<<< blocks, threads, 0, queue->cuda_stream() >>>( num_rows, num_cols, drs, dr, dt );
    return MAGMA_SUCCESS;
}
/* Updates the smoothed iterate per row/column (column-major, leading
 * dimension num_rows):
 *     dxs = dxs + omega * ( dxs - dx )
 * NOTE(review): the launcher's documentation states dxs = dxs - gamma*(dxs-dx);
 * this code matches that only if the caller passes omega == -gamma.  Confirm
 * the sign convention at the call site before changing anything here. */
__global__ void
magma_zidr_smoothing_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex omega,
magmaDoubleComplex *dx,
magmaDoubleComplex *dxs )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
/* each thread updates every column of its row */
for( int j=0; j<num_cols; j++ ){
dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ]
- omega * dx[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dxs = dxs - gamma*(dxs-dx)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
dx magmaDoubleComplex_ptr
vector
@param[in,out]
dxs magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
/* Host-side launcher for magma_zidr_smoothing_2_kernel: applies the IDR
 * smoothing update to dxs on the queue's stream, one thread per matrix row. */
extern "C"
magma_int_t
magma_zidr_smoothing_2(
    magma_int_t num_rows,
    magma_int_t num_cols,
    magmaDoubleComplex omega,
    magmaDoubleComplex_ptr dx,
    magmaDoubleComplex_ptr dxs,
    magma_queue_t queue )
{
    dim3 threads( BLOCK_SIZE );
    dim3 blocks( magma_ceildiv( num_rows, BLOCK_SIZE ) ); /* cover all rows */
    magma_zidr_smoothing_2_kernel<<< blocks, threads, 0, queue->cuda_stream() >>>( num_rows, num_cols, omega, dx, dxs);
    return MAGMA_SUCCESS;
}
|
eb9af48979e00137dbaeae3bb3dff41cac117f28.hip | // !!! This is a file automatically generated by hipify!!!
/*
* BorisYeeMain.c
*
* Created on: 2016-06-15
* Author: salmon
*/
#include <stdio.h>
#include "../../sp_lite/sp_def.h"
#include "../../sp_lite/spMesh.h"
#include "../../sp_lite/spField.h"
#include "../../sp_lite/spParticle.h"
#include "Boris.h"
#include "BorisYee.h"
/**
 * Boris-Yee PIC demo driver: builds a 16^3 unit-spacing mesh, creates the
 * E/B/J/rho fields and a Boris particle population, pushes the particles for
 * a fixed number of steps, dumps the fields before and after, and tears
 * everything down.
 *
 * Fix: the final printf passed `count` without a matching format specifier;
 * the stray argument has been removed.
 */
int main(int argc, char **argv)
{
    CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
    CUDA_CHECK_RETURN(hipGetLastError());

    spMesh *mesh;
    sp_particle_type *ps = 0x0;
    sp_field_type *fE = 0x0;
    sp_field_type *fB = 0x0;
    sp_field_type *fRho = 0x0;
    sp_field_type *fJ = 0x0;

    /* 16x16x16 mesh with unit spacing */
    spCreateMesh(&mesh);
    mesh->dims[0] = 16;
    mesh->dims[1] = 16;
    mesh->dims[2] = 16;
    mesh->dx[0] = 1;
    mesh->dx[1] = 1;
    mesh->dx[2] = 1;
    spInitializeMesh(mesh);

    /* create and zero the four field containers; the int argument is
       presumably the form degree (E/J: 1, B: 2, rho: 0) -- confirm against
       spCreateField's declaration */
    spCreateField(mesh, &fE, 1);
    spCreateField(mesh, &fB, 2);
    spCreateField(mesh, &fJ, 1);
    spCreateField(mesh, &fRho, 0);
    spClearField(mesh, fE);
    spClearField(mesh, fB);
    spClearField(mesh, fJ);
    spClearField(mesh, fRho);

    int NUMBER_OF_PIC = 256; /* particle count parameter for spCreateParticle */
    spCreateParticle(mesh, &ps, sizeof(struct boris_point_s), NUMBER_OF_PIC);
    spInitializeParticle_BorisYee(mesh, ps, NUMBER_OF_PIC);

    int count = 1; /* number of time steps to run */
    Real dt = 1.0;

    /* snapshot of the initial state */
    spWriteField(mesh, fE, "/start/E", SP_NEW);
    spWriteField(mesh, fB, "/start/B", SP_NEW);
    spWriteField(mesh, fJ, "/start/J", SP_NEW);
    spWriteField(mesh, fRho, "/start/rho", SP_NEW);

    while (count > 0)
    {
        printf("====== REMINED STEP= %d ======\n", count);
        spUpdateParticle_BorisYee(mesh, dt, ps, fE, fB, fRho, fJ);
        // spUpdateField_Yee(mesh, dt, fRho, fJ, fE, fB);
        // spWriteField(mesh, fE, "/checkpoint/E", SP_RECORD);
        // spWriteField(mesh, fB, "/checkpoint/B", SP_RECORD);
        // spWriteField(mesh, fJ, "/checkpoint/J", SP_RECORD);
        // spWriteField(mesh, fRho, "/checkpoint/rho", SP_RECORD);
        --count;
    }
    /* fix: `count` was passed here with no %d in the format string */
    printf("====== The End ======\n");

    /* final dump */
    spWriteField(mesh, fE, "/dump/E", SP_NEW);
    spWriteField(mesh, fB, "/dump/B", SP_NEW);
    spWriteField(mesh, fJ, "/dump/J", SP_NEW);
    spWriteField(mesh, fRho, "/dump/rho", SP_NEW);
    // spWriteParticle(mesh, pg, "/dump/H", SP_NEW);

    spDestroyField(&fE);
    spDestroyField(&fB);
    spDestroyField(&fJ);
    spDestroyField(&fRho);
    spDestroyParticle(&ps);
    spDestroyMesh(&mesh);

    CUDA_CHECK_RETURN(hipDeviceReset());
    DONE
    return 0;
}
| eb9af48979e00137dbaeae3bb3dff41cac117f28.cu | /*
* BorisYeeMain.c
*
* Created on: 2016年6月15日
* Author: salmon
*/
#include <stdio.h>
#include "../../sp_lite/sp_def.h"
#include "../../sp_lite/spMesh.h"
#include "../../sp_lite/spField.h"
#include "../../sp_lite/spParticle.h"
#include "Boris.h"
#include "BorisYee.h"
int main(int argc, char **argv)
{
CUDA_CHECK_RETURN(cudaThreadSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(cudaGetLastError());
spMesh *mesh;
sp_particle_type *ps = 0x0;
sp_field_type *fE = 0x0;
sp_field_type *fB = 0x0;
sp_field_type *fRho = 0x0;
sp_field_type *fJ = 0x0;
spCreateMesh(&mesh);
mesh->dims[0] = 16;
mesh->dims[1] = 16;
mesh->dims[2] = 16;
mesh->dx[0] = 1;
mesh->dx[1] = 1;
mesh->dx[2] = 1;
spInitializeMesh(mesh);
spCreateField(mesh, &fE, 1);
spCreateField(mesh, &fB, 2);
spCreateField(mesh, &fJ, 1);
spCreateField(mesh, &fRho, 0);
spClearField(mesh, fE);
spClearField(mesh, fB);
spClearField(mesh, fJ);
spClearField(mesh, fRho);
int NUMBER_OF_PIC = 256;
spCreateParticle(mesh, &ps, sizeof(struct boris_point_s), NUMBER_OF_PIC);
spInitializeParticle_BorisYee(mesh, ps, NUMBER_OF_PIC);
int count = 1;
Real dt = 1.0;
spWriteField(mesh, fE, "/start/E", SP_NEW);
spWriteField(mesh, fB, "/start/B", SP_NEW);
spWriteField(mesh, fJ, "/start/J", SP_NEW);
spWriteField(mesh, fRho, "/start/rho", SP_NEW);
while (count > 0)
{
printf("====== REMINED STEP= %d ======\n", count);
spUpdateParticle_BorisYee(mesh, dt, ps, fE, fB, fRho, fJ);
// spUpdateField_Yee(mesh, dt, fRho, fJ, fE, fB);
// spWriteField(mesh, fE, "/checkpoint/E", SP_RECORD);
// spWriteField(mesh, fB, "/checkpoint/B", SP_RECORD);
// spWriteField(mesh, fJ, "/checkpoint/J", SP_RECORD);
// spWriteField(mesh, fRho, "/checkpoint/rho", SP_RECORD);
--count;
}
printf("====== The End ======\n", count);
spWriteField(mesh, fE, "/dump/E", SP_NEW);
spWriteField(mesh, fB, "/dump/B", SP_NEW);
spWriteField(mesh, fJ, "/dump/J", SP_NEW);
spWriteField(mesh, fRho, "/dump/rho", SP_NEW);
// spWriteParticle(mesh, pg, "/dump/H", SP_NEW);
spDestroyField(&fE);
spDestroyField(&fB);
spDestroyField(&fJ);
spDestroyField(&fRho);
spDestroyParticle(&ps);
spDestroyMesh(&mesh);
CUDA_CHECK_RETURN(cudaDeviceReset());
DONE
return 0;
}
|
84a05cf6227240b742e0367ad756ed69dba80cfd.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <gdf/gdf.h>
#include <gdf/cffi/functions.h>
#include <hip/hip_runtime.h>
#include <limits.h>
#include <gtest/gtest.h>
#include "utils.cuh"
gdf_valid_type * get_gdf_valid_from_device(gdf_column* column) {
gdf_valid_type * host_valid_out;
size_t n_bytes = get_number_of_bytes_for_valid(column->size);
host_valid_out = new gdf_valid_type[n_bytes];
hipMemcpy(host_valid_out,column->valid, n_bytes, hipMemcpyDeviceToHost);
return host_valid_out;
}
std::string gdf_valid_to_str(gdf_valid_type *valid, size_t column_size)
{
size_t n_bytes = get_number_of_bytes_for_valid(column_size);
std::string response;
for (int i = 0; i < n_bytes; i++)
{
int length = n_bytes != i + 1 ? GDF_VALID_BITSIZE : column_size - GDF_VALID_BITSIZE * (n_bytes - 1);
auto result = chartobin(valid[i], length);
response += std::string(result);
}
return response;
}
gdf_valid_type* gen_gdf_valid(size_t column_size, size_t init_value)
{
gdf_valid_type *valid = nullptr;
if (column_size == 0)
{
valid = new gdf_valid_type[1];
}
else
{
size_t n_bytes = get_number_of_bytes_for_valid (column_size);
valid = new gdf_valid_type[n_bytes];
int i;
for (i = 0; i < n_bytes - 1; ++i)
{
valid[i] = (init_value % 256);
}
size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1);
valid[i] = 1 << length - 1;
}
return valid;
}
void delete_gdf_column(gdf_column * column){
hipFree(column->data);
hipFree(column->valid);
}
gdf_size_type count_zero_bits(gdf_valid_type *valid, size_t column_size)
{
size_t numbits = 0;
auto bin = gdf_valid_to_str(valid, column_size);
for(int i = 0; i < bin.length(); i++) {
if ( bin [i] == '0')
numbits++;
}
return numbits;
}
std::string chartobin(gdf_valid_type c, int size/* = 8*/)
{
std::string bin;
bin.resize(size);
bin[0] = 0;
int i;
for (i = size - 1; i >= 0; i--)
{
bin[i] = (c % 2) + '0';
c /= 2;
}
return bin;
}
auto print_binary(gdf_valid_type n, int size) -> void {
std::cout << chartobin(n) << "\t sz: " << size << "\tbinary: " << chartobin(n, size) << std::endl;
}
| 84a05cf6227240b742e0367ad756ed69dba80cfd.cu |
#include <iostream>
#include <gdf/gdf.h>
#include <gdf/cffi/functions.h>
#include <cuda_runtime.h>
#include <limits.h>
#include <gtest/gtest.h>
#include "utils.cuh"
gdf_valid_type * get_gdf_valid_from_device(gdf_column* column) {
gdf_valid_type * host_valid_out;
size_t n_bytes = get_number_of_bytes_for_valid(column->size);
host_valid_out = new gdf_valid_type[n_bytes];
cudaMemcpy(host_valid_out,column->valid, n_bytes, cudaMemcpyDeviceToHost);
return host_valid_out;
}
std::string gdf_valid_to_str(gdf_valid_type *valid, size_t column_size)
{
size_t n_bytes = get_number_of_bytes_for_valid(column_size);
std::string response;
for (int i = 0; i < n_bytes; i++)
{
int length = n_bytes != i + 1 ? GDF_VALID_BITSIZE : column_size - GDF_VALID_BITSIZE * (n_bytes - 1);
auto result = chartobin(valid[i], length);
response += std::string(result);
}
return response;
}
gdf_valid_type* gen_gdf_valid(size_t column_size, size_t init_value)
{
gdf_valid_type *valid = nullptr;
if (column_size == 0)
{
valid = new gdf_valid_type[1];
}
else
{
size_t n_bytes = get_number_of_bytes_for_valid (column_size);
valid = new gdf_valid_type[n_bytes];
int i;
for (i = 0; i < n_bytes - 1; ++i)
{
valid[i] = (init_value % 256);
}
size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1);
valid[i] = 1 << length - 1;
}
return valid;
}
void delete_gdf_column(gdf_column * column){
cudaFree(column->data);
cudaFree(column->valid);
}
gdf_size_type count_zero_bits(gdf_valid_type *valid, size_t column_size)
{
size_t numbits = 0;
auto bin = gdf_valid_to_str(valid, column_size);
for(int i = 0; i < bin.length(); i++) {
if ( bin [i] == '0')
numbits++;
}
return numbits;
}
std::string chartobin(gdf_valid_type c, int size/* = 8*/)
{
std::string bin;
bin.resize(size);
bin[0] = 0;
int i;
for (i = size - 1; i >= 0; i--)
{
bin[i] = (c % 2) + '0';
c /= 2;
}
return bin;
}
auto print_binary(gdf_valid_type n, int size) -> void {
std::cout << chartobin(n) << "\t sz: " << size << "\tbinary: " << chartobin(n, size) << std::endl;
}
|
8e1f47da6a330807ac5d3143c9ff1256c7b7b369.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "uplo_swap_transp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
REAL *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
uplo_swap_transp), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
uplo_swap_transp), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
uplo_swap_transp), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 8e1f47da6a330807ac5d3143c9ff1256c7b7b369.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "uplo_swap_transp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
REAL *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
uplo_swap_transp<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
uplo_swap_transp<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
uplo_swap_transp<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
374c250d5c8759b5936ea6d58015289a3129723f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* tests.cu
*
* Created on: Nov 4, 2011
* Author: tombr
*/
#include "tests.h"
#include <tbblas/tensor.hpp>
#include <thrust/copy.h>
#include <tbblas/io.hpp>
#include <tbblas/zeros.hpp>
#include <tbblas/random.hpp>
#include <tbblas/math.hpp>
#include <tbblas/fill.hpp>
#include <iostream>
#include <boost/timer.hpp>
#include <thrust/for_each.h>
typedef tbblas::tensor<float, 2, true> matrix_t;
typedef tbblas::tensor<float, 1, true> vector_t;
typedef tbblas::random_tensor<float, 2, true, tbblas::uniform<float> > randu_t;
typedef tbblas::random_tensor<float, 2, true, tbblas::normal<float> > randn_t;
void helloworld() {
using namespace tbblas;
using namespace thrust::placeholders;
//const float values1[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
//const float values2[] = {2, 3, 5, 1, 3, 2, 6, 7, 3, 1, 23, 2};
randu_t randu(3, 4);
randn_t randn(3, 4);
matrix_t A = 4 * randu;
matrix_t B = 0.5 * randn + 2;
//thrust::copy(values1, values1 + A.count(), A.begin());
//thrust::copy(values2, values2 + B.count(), B.begin());
std::cout << "A = " << A << std::endl;
std::cout << "B = " << B << std::endl;
matrix_t C = ((2.f * A - B) * B) / 2.f;
std::cout << "A + B = " << C << std::endl;
matrix_t D = floor(10 * randu_t(3, 3));
std::cout << "D = " << D << std::endl;
matrix_t E = zeros<float>(5, 5);
E[seq(2,2), seq(2,2)] = 2, 3,
3, 5;
std::cout << "E = " << E << std::endl;
matrix_t F(3,3);
F = 1, 2, 4,
4, 3, 2,
1, 1, 3;
std::cout << "F = " << F << std::endl;
//thrust::transform(A.begin(), A.end(), B.begin(), D.begin(), ((2.f * _1 - _2) * _2) / 2.f);
// thrust::for_each(
// thrust::make_zip_iterator(thrust::make_tuple(A.begin(), B.begin(), D.begin())),
// thrust::make_zip_iterator(thrust::make_tuple(A.end(), B.end(), D.end())),
// functor()
// );
// std::cout << "A + B = " << D << std::endl;
#if 0
matrix_t A1(5000, 2000), A2(5000, 2000), A3(5000, 2000);
matrix2_t B1(5000, 2000), B2(5000, 2000), B3(5000, 2000);
hipDeviceSynchronize();
std::cout << "New interface:" << std::endl;
boost::timer timer;
for (unsigned i = 0; i < 500; ++i)
//A3 = ((2.f * A1 - A2) + A2) / 2.f;
A3 = A1 * A2;
hipDeviceSynchronize();
std::cout << "tbblas time: " << timer.elapsed() << "s" << std::endl;
timer.restart();
for (unsigned i = 0; i < 500; ++i) {
//thrust::transform(A1.begin(), A1.end(), A2.begin(), A3.begin(), ((2.f * _1 - _2) + _2) / 2.f);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(A1.begin(), A2.begin(), A3.begin())),
thrust::make_zip_iterator(thrust::make_tuple(A1.end(), A2.end(), A3.end())),
functor()
);
}
hipDeviceSynchronize();
std::cout << "thrust time: " << timer.elapsed() << "s" << std::endl;
std::cout << "\nOld interface:" << std::endl;
timer.restart();
for (unsigned i = 0; i < 500; ++i)
B3 = tbblas::copy((B3 = (B3 = 2.f * B1 - B2) + B2) / 2.f);
hipDeviceSynchronize();
std::cout << "tbblas time: " << timer.elapsed() << "s" << std::endl;
timer.restart();
for (unsigned i = 0; i < 500; ++i)
thrust::transform(B1.cbegin(), B1.cend(), B2.cbegin(), B3.begin(), ((2.f * _1 - _2) + _2) / 2.f);
hipDeviceSynchronize();
std::cout << "thrust time: " << timer.elapsed() << "s" << std::endl;
#endif
}
| 374c250d5c8759b5936ea6d58015289a3129723f.cu | /*
* tests.cu
*
* Created on: Nov 4, 2011
* Author: tombr
*/
#include "tests.h"
#include <tbblas/tensor.hpp>
#include <thrust/copy.h>
#include <tbblas/io.hpp>
#include <tbblas/zeros.hpp>
#include <tbblas/random.hpp>
#include <tbblas/math.hpp>
#include <tbblas/fill.hpp>
#include <iostream>
#include <boost/timer.hpp>
#include <thrust/for_each.h>
typedef tbblas::tensor<float, 2, true> matrix_t;
typedef tbblas::tensor<float, 1, true> vector_t;
typedef tbblas::random_tensor<float, 2, true, tbblas::uniform<float> > randu_t;
typedef tbblas::random_tensor<float, 2, true, tbblas::normal<float> > randn_t;
void helloworld() {
using namespace tbblas;
using namespace thrust::placeholders;
//const float values1[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
//const float values2[] = {2, 3, 5, 1, 3, 2, 6, 7, 3, 1, 23, 2};
randu_t randu(3, 4);
randn_t randn(3, 4);
matrix_t A = 4 * randu;
matrix_t B = 0.5 * randn + 2;
//thrust::copy(values1, values1 + A.count(), A.begin());
//thrust::copy(values2, values2 + B.count(), B.begin());
std::cout << "A = " << A << std::endl;
std::cout << "B = " << B << std::endl;
matrix_t C = ((2.f * A - B) * B) / 2.f;
std::cout << "A + B = " << C << std::endl;
matrix_t D = floor(10 * randu_t(3, 3));
std::cout << "D = " << D << std::endl;
matrix_t E = zeros<float>(5, 5);
E[seq(2,2), seq(2,2)] = 2, 3,
3, 5;
std::cout << "E = " << E << std::endl;
matrix_t F(3,3);
F = 1, 2, 4,
4, 3, 2,
1, 1, 3;
std::cout << "F = " << F << std::endl;
//thrust::transform(A.begin(), A.end(), B.begin(), D.begin(), ((2.f * _1 - _2) * _2) / 2.f);
// thrust::for_each(
// thrust::make_zip_iterator(thrust::make_tuple(A.begin(), B.begin(), D.begin())),
// thrust::make_zip_iterator(thrust::make_tuple(A.end(), B.end(), D.end())),
// functor()
// );
// std::cout << "A + B = " << D << std::endl;
#if 0
matrix_t A1(5000, 2000), A2(5000, 2000), A3(5000, 2000);
matrix2_t B1(5000, 2000), B2(5000, 2000), B3(5000, 2000);
cudaThreadSynchronize();
std::cout << "New interface:" << std::endl;
boost::timer timer;
for (unsigned i = 0; i < 500; ++i)
//A3 = ((2.f * A1 - A2) + A2) / 2.f;
A3 = A1 * A2;
cudaThreadSynchronize();
std::cout << "tbblas time: " << timer.elapsed() << "s" << std::endl;
timer.restart();
for (unsigned i = 0; i < 500; ++i) {
//thrust::transform(A1.begin(), A1.end(), A2.begin(), A3.begin(), ((2.f * _1 - _2) + _2) / 2.f);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(A1.begin(), A2.begin(), A3.begin())),
thrust::make_zip_iterator(thrust::make_tuple(A1.end(), A2.end(), A3.end())),
functor()
);
}
cudaThreadSynchronize();
std::cout << "thrust time: " << timer.elapsed() << "s" << std::endl;
std::cout << "\nOld interface:" << std::endl;
timer.restart();
for (unsigned i = 0; i < 500; ++i)
B3 = tbblas::copy((B3 = (B3 = 2.f * B1 - B2) + B2) / 2.f);
cudaThreadSynchronize();
std::cout << "tbblas time: " << timer.elapsed() << "s" << std::endl;
timer.restart();
for (unsigned i = 0; i < 500; ++i)
thrust::transform(B1.cbegin(), B1.cend(), B2.cbegin(), B3.begin(), ((2.f * _1 - _2) + _2) / 2.f);
cudaThreadSynchronize();
std::cout << "thrust time: " << timer.elapsed() << "s" << std::endl;
#endif
}
|
d8e5b582c0b794da7e732e281c26dde34450a48e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <stdio.h>
#define N 4
#define M 5
#define THREADS 32
__global__ void kernel(int* mat, int* max, int shsize)
{
extern __shared__ int kolone[];
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
int index = col + row * M; // row * total cols = prvi element u nekom redu, a + col pomeraj u tom redu
int localIndex = threadIdx.x + THREADS * threadIdx.y;
if (row >= N || col >= M) return;
//shared memoriju mapiramo na vise promenjivih
int* colsh = kolone; // velicina shsize * shsize
int* maxsh = &kolone[shsize*shsize]; // velicina shsize
// ucitaju svi u svoju shared i inicijalizuje se max
colsh[localIndex] = mat[index];
maxsh[threadIdx.x] = 0;
__syncthreads();
// kroz atomicne operacije u shared nadje se max za svaki tile
maxsh[threadIdx.y] = atomicMax(&maxsh[threadIdx.y], colsh[localIndex]);
__syncthreads();
max[blockIdx.y * (blockDim.x + gridDim.x) + col] = maxsh[threadIdx.x];
}
void host(int *A, int* max) {
}
int main()
{
int A[N][M], max[M], temp[N][M], maxh[M];
int* Ad, *maxd;
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
A[i][j] = i % 4 == 0 ? i * i - j : j * i;
}
}
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
printf("%d ", A[i][j]);
}
printf("\n");
}
hipMalloc((void**)&Ad, sizeof(int) * N * M);
hipMalloc((void**)&maxd, sizeof(int) * M * ((M + THREADS - 1) / THREADS));
hipMemcpy(Ad, A, sizeof(int) * N * M, hipMemcpyHostToDevice);
dim3 gridSize((N+THREADS-1)/THREADS,(M+THREADS-1)/THREADS);
dim3 blockSize(THREADS,THREADS);
int memSize = THREADS*(THREADS + 1);
kernel << <gridSize, blockSize, memSize * sizeof(int) >> > (Ad, maxd, THREADS);
hipMemcpy(temp, maxd, sizeof(int) * M * ((N + THREADS - 1) / THREADS), hipMemcpyDeviceToHost);
// kernel << <1, blockSize, memSize * sizeof(int) >> > (maxd, maxd, THREADS);
hipFree(Ad);
hipFree(maxd);
return 0;
} | d8e5b582c0b794da7e732e281c26dde34450a48e.cu | #include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <stdio.h>
#define N 4
#define M 5
#define THREADS 32
__global__ void kernel(int* mat, int* max, int shsize)
{
extern __shared__ int kolone[];
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
int index = col + row * M; // row * total cols = prvi element u nekom redu, a + col pomeraj u tom redu
int localIndex = threadIdx.x + THREADS * threadIdx.y;
if (row >= N || col >= M) return;
//shared memoriju mapiramo na vise promenjivih
int* colsh = kolone; // velicina shsize * shsize
int* maxsh = &kolone[shsize*shsize]; // velicina shsize
// ucitaju svi u svoju shared i inicijalizuje se max
colsh[localIndex] = mat[index];
maxsh[threadIdx.x] = 0;
__syncthreads();
// kroz atomicne operacije u shared nadje se max za svaki tile
maxsh[threadIdx.y] = atomicMax(&maxsh[threadIdx.y], colsh[localIndex]);
__syncthreads();
max[blockIdx.y * (blockDim.x + gridDim.x) + col] = maxsh[threadIdx.x];
}
void host(int *A, int* max) {
}
int main()
{
int A[N][M], max[M], temp[N][M], maxh[M];
int* Ad, *maxd;
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
A[i][j] = i % 4 == 0 ? i * i - j : j * i;
}
}
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
printf("%d ", A[i][j]);
}
printf("\n");
}
cudaMalloc((void**)&Ad, sizeof(int) * N * M);
cudaMalloc((void**)&maxd, sizeof(int) * M * ((M + THREADS - 1) / THREADS));
cudaMemcpy(Ad, A, sizeof(int) * N * M, cudaMemcpyHostToDevice);
dim3 gridSize((N+THREADS-1)/THREADS,(M+THREADS-1)/THREADS);
dim3 blockSize(THREADS,THREADS);
int memSize = THREADS*(THREADS + 1);
kernel << <gridSize, blockSize, memSize * sizeof(int) >> > (Ad, maxd, THREADS);
cudaMemcpy(temp, maxd, sizeof(int) * M * ((N + THREADS - 1) / THREADS), cudaMemcpyDeviceToHost);
// kernel << <1, blockSize, memSize * sizeof(int) >> > (maxd, maxd, THREADS);
cudaFree(Ad);
cudaFree(maxd);
return 0;
} |
fe607a3d0ddb434808eb3e33b961b7bb589bccfa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #include <torch/torch.h>
// name should be different from .cpp file!!!
#include <torch/types.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHAtomics.cuh>
#include <stdio.h>
#include <vector>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// #define THREADS_PER_BLOCK 1024
#define THREADS_PER_BLOCK 128
inline int GET_BLOCKS(const int N) {
int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
int max_block_num = 65000;
return min(optimal_block_num, max_block_num);
}
template <typename scalar_t>
__global__ void tl_forward_kernel(const int nthreads,
const scalar_t *input_ptr,
const scalar_t *guide_ptr,
scalar_t *max_ptr,
scalar_t *outptr,
const int bs, const int ch,
const int sh, const int sw
) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int dc = index % ch;
int db = index / ch;
for (int i = 0; i < sh; i++) {
for (int j = 0; j < sw; j++) {
auto x1 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto x2 = x1;
auto x3 = x1;
if (j > 0) x2 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j-1);
if (i > 0) x3 = *(outptr + db * ch * sh * sw + dc * sh * sw + (i-1) * sw + j);
auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto out = x1;
if (out < x2) out = x2;
if (out < x3) out = x3;
outptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = sigmoid * out + (1-sigmoid) * x1;
max_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = out;
}
}
}
}
template <typename scalar_t>
__global__ void tl_backward_kernel(const int nthreads,
const scalar_t *input_ptr,
const scalar_t *guide_ptr,
const scalar_t *output_ptr,
const scalar_t *maxout_ptr,
scalar_t *gradout_ptr,
scalar_t *gradin_ptr,
scalar_t *gradguide_ptr,
const int bs, const int ch,
const int sh, const int sw
) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int dc = index % ch;
int db = index / ch;
for (int i = sh-1; i >= 0; i--) {
for (int j = sw-1; j >= 0; j--) {
auto x1 = *(input_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto out = *(maxout_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto out_x2 = x1;
auto out_x3 = x1;
if (j > 0) out_x2 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j-1);
if (i > 0) out_x3 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + (i-1) * sw + j);
auto g1 = scalar_t(x1 >= out);
auto g2 = (1-g1) * scalar_t(out_x2 >= out);
auto g3 = (1-g1) * (1-g2) * scalar_t(out_x3 >= out);
auto grad_x1 = sigmoid * g1 + (1-sigmoid);
auto grad_x2 = sigmoid * g2;
auto grad_x3 = sigmoid * g3;
auto grad_sigmoid = out - x1;
// printf("(%i, %i) x1 %f out %f out_x2 %f out_x3 %f g1 %f g2 %f g3 %f gsig %f \n", i, j, x1, out, out_x2, out_x3, g1, g2, g3, grad_sigmoid);
gradguide_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
grad_sigmoid * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
gradin_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
grad_x1 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
if (j > 0) gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j-1] +=
grad_x2 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
if (i > 0) gradout_ptr[db * ch * sh * sw + dc * sh * sw + (i-1) * sw + j] +=
grad_x3 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
// grad_x3 * *(gradout_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
}
}
}
}
template <typename scalar_t>
__global__ void tr_forward_kernel(const int nthreads,
const scalar_t *input_ptr,
const scalar_t *guide_ptr,
scalar_t *max_ptr,
scalar_t *outptr,
const int bs, const int ch,
const int sh, const int sw
) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int dc = index % ch;
int db = index / ch;
for (int i = 0; i < sh; i++) {
for (int j = sw-1; j >= 0; j--) {
auto x1 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto x2 = x1;
auto x3 = x1;
if (j < sw-1) x2 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j+1);
if (i > 0) x3 = *(outptr + db * ch * sh * sw + dc * sh * sw + (i-1) * sw + j);
auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
// printf("%f %f %f %f \n", x1, x2, x3, sigmoid);
// printf("(%i, %i) x1 %f x2 %f x3 %f sigmoid %f \n", i, j, x1, x2, x3, sigmoid);
// calculate the max values
auto out = x1;
if (out < x2) out = x2;
if (out < x3) out = x3;
outptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = sigmoid * out + (1-sigmoid) * x1;
max_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = out;
}
}
}
}
template <typename scalar_t>
__global__ void tr_backward_kernel(const int nthreads,
const scalar_t *input_ptr,
const scalar_t *guide_ptr,
const scalar_t *output_ptr,
const scalar_t *maxout_ptr,
scalar_t *gradout_ptr,
scalar_t *gradin_ptr,
scalar_t *gradguide_ptr,
const int bs, const int ch,
const int sh, const int sw
) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int dc = index % ch;
int db = index / ch;
for (int i = sh-1; i >= 0; i--) {
for (int j = 0; j < sw; j++) {
auto x1 = *(input_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto out = *(maxout_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto out_x2 = x1;
auto out_x3 = x1;
if (j < sw-1) out_x2 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j+1);
if (i > 0) out_x3 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + (i-1) * sw + j);
auto g1 = scalar_t(x1 >= out);
auto g2 = (1-g1) * scalar_t(out_x2 >= out);
auto g3 = (1-g1) * (1-g2) * scalar_t(out_x3 >= out);
auto grad_x1 = sigmoid * g1 + (1-sigmoid);
auto grad_x2 = sigmoid * g2;
auto grad_x3 = sigmoid * g3;
auto grad_sigmoid = out - x1;
// printf("(%i, %i) x1 %f out %f out_x2 %f out_x3 %f g1 %f g2 %f g3 %f gsig %f \n", i, j, x1, out, out_x2, out_x3, g1, g2, g3, grad_sigmoid);
gradguide_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
grad_sigmoid * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
gradin_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
grad_x1 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
if (j < sw-1) gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j+1] +=
grad_x2 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
if (i > 0) gradout_ptr[db * ch * sh * sw + dc * sh * sw + (i-1) * sw + j] +=
grad_x3 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
}
}
}
}
template <typename scalar_t>
__global__ void bl_forward_kernel(const int nthreads,
const scalar_t *input_ptr,
const scalar_t *guide_ptr,
scalar_t *max_ptr,
scalar_t *outptr,
const int bs, const int ch,
const int sh, const int sw
) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int dc = index % ch;
int db = index / ch;
for (int i = sh-1; i >= 0; i--) {
for (int j = 0; j < sw; j++) {
auto x1 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto x2 = x1;
auto x3 = x1;
if (j > 0) x2 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j-1);
if (i < sh-1) x3 = *(outptr + db * ch * sh * sw + dc * sh * sw + (i+1) * sw + j);
auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
// calculate the max values
auto out = x1;
if (out < x2) out = x2;
if (out < x3) out = x3;
outptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = sigmoid * out + (1-sigmoid) * x1;
max_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = out;
}
}
}
}
template <typename scalar_t>
__global__ void bl_backward_kernel(const int nthreads,
const scalar_t *input_ptr,
const scalar_t *guide_ptr,
const scalar_t *output_ptr,
const scalar_t *maxout_ptr,
scalar_t *gradout_ptr,
scalar_t *gradin_ptr,
scalar_t *gradguide_ptr,
const int bs, const int ch,
const int sh, const int sw
) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int dc = index % ch;
int db = index / ch;
for (int i = 0; i < sh; i++) {
for (int j = sw-1; j >= 0; j--) {
auto x1 = *(input_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto out = *(maxout_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto out_x2 = x1;
auto out_x3 = x1;
if (j > 0) out_x2 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j-1);
if (i < sh-1) out_x3 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + (i+1) * sw + j);
auto g1 = scalar_t(x1 >= out);
auto g2 = (1-g1) * scalar_t(out_x2 >= out);
auto g3 = (1-g1) * (1-g2) * scalar_t(out_x3 >= out);
auto grad_x1 = sigmoid * g1 + (1-sigmoid);
auto grad_x2 = sigmoid * g2;
auto grad_x3 = sigmoid * g3;
auto grad_sigmoid = out - x1;
// printf("(%i, %i) x1 %f out %f out_x2 %f out_x3 %f g1 %f g2 %f g3 %f gsig %f \n", i, j, x1, out, out_x2, out_x3, g1, g2, g3, grad_sigmoid);
gradguide_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
grad_sigmoid * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
gradin_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
grad_x1 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
if (j > 0) gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j-1] +=
grad_x2 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
if (i < sh-1) gradout_ptr[db * ch * sh * sw + dc * sh * sw + (i+1) * sw + j] +=
grad_x3 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
}
}
}
}
template <typename scalar_t>
__global__ void br_forward_kernel(const int nthreads,
const scalar_t *input_ptr,
const scalar_t *guide_ptr,
scalar_t *max_ptr,
scalar_t *outptr,
const int bs, const int ch,
const int sh, const int sw
) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int dc = index % ch;
int db = index / ch;
for (int i = sh-1; i >= 0; i--) {
for (int j = sw-1; j >= 0; j--) {
auto x1 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
auto x2 = x1;
auto x3 = x1;
if (j < sw-1) x2 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j+1);
if (i < sh-1) x3 = *(outptr + db * ch * sh * sw + dc * sh * sw + (i+1) * sw + j);
auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
// calculate the max values
auto out = x1;
if (out < x2) out = x2;
if (out < x3) out = x3;
outptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = sigmoid * out + (1-sigmoid) * x1;
max_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = out;
}
}
}
}
// Backward pass for br_forward_kernel (HIP build).  Sweeps each
// (batch, channel) plane in the REVERSE of the forward order (top-left to
// bottom-right) so every cell's total upstream gradient is complete before
// it is consumed.
//
// gradout_ptr doubles as a workspace: the launcher clones grad_output into
// it, and this kernel accumulates the gradient routed to the right/below
// neighbours back into it.  gradin_ptr / gradguide_ptr receive d(input)
// and d(guide).  All buffers are contiguous NCHW (bs, ch, sh, sw).
template <typename scalar_t>
__global__ void br_backward_kernel(const int nthreads,
                        const scalar_t *input_ptr,
                        const scalar_t *guide_ptr,
                        const scalar_t *output_ptr,
                        const scalar_t *maxout_ptr,
                        scalar_t *gradout_ptr,
                        scalar_t *gradin_ptr,
                        scalar_t *gradguide_ptr,
                        const int bs, const int ch,
                        const int sh, const int sw
                        ) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    int dc = index % ch;  // channel index
    int db = index / ch;  // batch index
    for (int i = 0; i < sh; i++) {
      for (int j = 0; j < sw; j++) {
        auto x1 = *(input_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        // out = max(x1, out_x2, out_x3) as recorded by the forward pass.
        auto out = *(maxout_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto out_x2 = x1;
        auto out_x3 = x1;
        if (j < sw-1) out_x2 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j+1);
        if (i < sh-1) out_x3 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + (i+1) * sw + j);
        // One-hot argmax indicators, priority x1 > right > below; they
        // route the max-branch gradient to its source.
        auto g1 = scalar_t(x1 >= out);
        auto g2 = (1-g1) * scalar_t(out_x2 >= out);
        auto g3 = (1-g1) * (1-g2) * scalar_t(out_x3 >= out);
        // Partials of output = sigmoid*out + (1-sigmoid)*x1.
        auto grad_x1 = sigmoid * g1 + (1-sigmoid);
        auto grad_x2 = sigmoid * g2;
        auto grad_x3 = sigmoid * g3;
        auto grad_sigmoid = out - x1;  // d(output)/d(sigmoid)
        // printf("(%i, %i) x1 %f out %f out_x2 %f out_x3 %f g1 %f g2 %f g3 %f gsig %f \n", i, j, x1, out, out_x2, out_x3, g1, g2, g3, grad_sigmoid);
        gradguide_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
          grad_sigmoid * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        gradin_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
          grad_x1 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        // Scatter the neighbour shares into cells not yet visited.
        if (j < sw-1) gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j+1] +=
          grad_x2 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        if (i < sh-1) gradout_ptr[db * ch * sh * sw + dc * sh * sw + (i+1) * sw + j] +=
          grad_x3 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
      }
    }
  }
}
namespace landmarkconv {
// Host launcher for the top-left guided-pooling forward kernel (HIP).
// Returns {output, maxout}: the gated result and the raw directional
// maxima the backward pass consumes.
std::vector<at::Tensor> tl_pool_forward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  // printf("call by cuda...\n");
  hipDeviceSynchronize(); // for print (debug aid; serialises the device)
  // The kernel updates `output` in place starting from a copy of the input.
  auto output = input.clone();
  auto maxout = input.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "tl_pool_forward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      scalar_t *output_ptr = output.data_ptr<scalar_t>();
      // One logical thread per (batch, channel) plane.
      hipLaunchKernelGGL(( tl_forward_kernel<scalar_t>), dim3(GET_BLOCKS(bs*ch)), dim3(THREADS_PER_BLOCK),
        0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        bs*ch,
        input_ptr,
        guide_ptr,
        max_ptr,
        output_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(hipGetLastError());
  return {
    output,
    maxout
  };
}
// Host launcher for the top-left guided-pooling backward kernel (HIP).
// Consumes the forward's {output, maxout} plus grad_output and returns
// {grad_input, grad_guide}.
std::vector<at::Tensor> tl_pool_backward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide,
  const at::Tensor &output,
  const at::Tensor &maxout,
  const at::Tensor &grad_output
) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  AT_ASSERTM(output.is_contiguous(), "output must be contiguous.");
  AT_ASSERTM(maxout.is_contiguous(), "maxout must be contiguous.");
  AT_ASSERTM(grad_output.is_contiguous(), "grad_output must be contiguous.");
  hipDeviceSynchronize(); // for print (debug aid; serialises the device)
  auto grad_input = at::zeros_like(input);
  auto grad_guide = at::zeros_like(guide);
  // Cloned because the kernel uses it as a mutable accumulation workspace.
  auto gradout = grad_output.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "tl_pool_backward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      const scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      const scalar_t *output_ptr = output.data_ptr<scalar_t>();
      scalar_t *gradout_ptr = gradout.data_ptr<scalar_t>();
      scalar_t *gradin_ptr = grad_input.data_ptr<scalar_t>();
      scalar_t *gradguide_ptr = grad_guide.data_ptr<scalar_t>();
      hipLaunchKernelGGL(( tl_backward_kernel<scalar_t>), dim3(GET_BLOCKS(bs*ch)), dim3(THREADS_PER_BLOCK),
        0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        bs*ch,
        input_ptr,
        guide_ptr,
        output_ptr,
        max_ptr,
        gradout_ptr,
        gradin_ptr,
        gradguide_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(hipGetLastError());
  return {
    grad_input,
    grad_guide
  };
}
// Host launcher for the top-right guided-pooling forward kernel (HIP).
// Returns {output, maxout}: the gated result and the raw directional
// maxima the backward pass consumes.
std::vector<at::Tensor> tr_pool_forward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  // printf("call by cuda...\n");
  hipDeviceSynchronize(); // for print (debug aid; serialises the device)
  // The kernel updates `output` in place starting from a copy of the input.
  auto output = input.clone();
  auto maxout = input.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "tr_pool_forward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      scalar_t *output_ptr = output.data_ptr<scalar_t>();
      // One logical thread per (batch, channel) plane.
      hipLaunchKernelGGL(( tr_forward_kernel<scalar_t>), dim3(GET_BLOCKS(bs*ch)), dim3(THREADS_PER_BLOCK),
        0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        bs*ch,
        input_ptr,
        guide_ptr,
        max_ptr,
        output_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(hipGetLastError());
  return {
    output,
    maxout
  };
}
// Host launcher for the top-right guided-pooling backward kernel (HIP).
// Consumes the forward's {output, maxout} plus grad_output and returns
// {grad_input, grad_guide}.
std::vector<at::Tensor> tr_pool_backward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide,
  const at::Tensor &output,
  const at::Tensor &maxout,
  const at::Tensor &grad_output
) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  AT_ASSERTM(output.is_contiguous(), "output must be contiguous.");
  AT_ASSERTM(maxout.is_contiguous(), "maxout must be contiguous.");
  AT_ASSERTM(grad_output.is_contiguous(), "grad_output must be contiguous.");
  hipDeviceSynchronize(); // for print (debug aid; serialises the device)
  auto grad_input = at::zeros_like(input);
  auto grad_guide = at::zeros_like(guide);
  // Cloned because the kernel uses it as a mutable accumulation workspace.
  auto gradout = grad_output.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "tr_pool_backward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      const scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      const scalar_t *output_ptr = output.data_ptr<scalar_t>();
      scalar_t *gradout_ptr = gradout.data_ptr<scalar_t>();
      scalar_t *gradin_ptr = grad_input.data_ptr<scalar_t>();
      scalar_t *gradguide_ptr = grad_guide.data_ptr<scalar_t>();
      hipLaunchKernelGGL(( tr_backward_kernel<scalar_t>), dim3(GET_BLOCKS(bs*ch)), dim3(THREADS_PER_BLOCK),
        0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        bs*ch,
        input_ptr,
        guide_ptr,
        output_ptr,
        max_ptr,
        gradout_ptr,
        gradin_ptr,
        gradguide_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(hipGetLastError());
  return {
    grad_input,
    grad_guide
  };
}
// Host launcher for the bottom-left guided-pooling forward kernel (HIP).
// Returns {output, maxout}: the gated result and the raw directional
// maxima the backward pass consumes.
std::vector<at::Tensor> bl_pool_forward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  // printf("call by cuda...\n");
  hipDeviceSynchronize(); // for print (debug aid; serialises the device)
  // The kernel updates `output` in place starting from a copy of the input.
  auto output = input.clone();
  auto maxout = input.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "bl_pool_forward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      scalar_t *output_ptr = output.data_ptr<scalar_t>();
      // One logical thread per (batch, channel) plane.
      hipLaunchKernelGGL(( bl_forward_kernel<scalar_t>), dim3(GET_BLOCKS(bs*ch)), dim3(THREADS_PER_BLOCK),
        0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        bs*ch,
        input_ptr,
        guide_ptr,
        max_ptr,
        output_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(hipGetLastError());
  return {
    output,
    maxout
  };
}
// Host launcher for the bottom-left guided-pooling backward kernel (HIP).
// Consumes the forward's {output, maxout} plus grad_output and returns
// {grad_input, grad_guide}.
std::vector<at::Tensor> bl_pool_backward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide,
  const at::Tensor &output,
  const at::Tensor &maxout,
  const at::Tensor &grad_output
) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  AT_ASSERTM(output.is_contiguous(), "output must be contiguous.");
  AT_ASSERTM(maxout.is_contiguous(), "maxout must be contiguous.");
  AT_ASSERTM(grad_output.is_contiguous(), "grad_output must be contiguous.");
  hipDeviceSynchronize(); // for print (debug aid; serialises the device)
  auto grad_input = at::zeros_like(input);
  auto grad_guide = at::zeros_like(guide);
  // Cloned because the kernel uses it as a mutable accumulation workspace.
  auto gradout = grad_output.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "bl_pool_backward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      const scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      const scalar_t *output_ptr = output.data_ptr<scalar_t>();
      scalar_t *gradout_ptr = gradout.data_ptr<scalar_t>();
      scalar_t *gradin_ptr = grad_input.data_ptr<scalar_t>();
      scalar_t *gradguide_ptr = grad_guide.data_ptr<scalar_t>();
      hipLaunchKernelGGL(( bl_backward_kernel<scalar_t>), dim3(GET_BLOCKS(bs*ch)), dim3(THREADS_PER_BLOCK),
        0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        bs*ch,
        input_ptr,
        guide_ptr,
        output_ptr,
        max_ptr,
        gradout_ptr,
        gradin_ptr,
        gradguide_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(hipGetLastError());
  return {
    grad_input,
    grad_guide
  };
}
// Host launcher for the bottom-right guided-pooling forward kernel (HIP).
// Returns {output, maxout}: the gated result and the raw directional
// maxima the backward pass consumes.
std::vector<at::Tensor> br_pool_forward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  // printf("call by cuda...\n");
  hipDeviceSynchronize(); // for print (debug aid; serialises the device)
  // The kernel updates `output` in place starting from a copy of the input.
  auto output = input.clone();
  auto maxout = input.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "br_pool_forward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      scalar_t *output_ptr = output.data_ptr<scalar_t>();
      // One logical thread per (batch, channel) plane.
      hipLaunchKernelGGL(( br_forward_kernel<scalar_t>), dim3(GET_BLOCKS(bs*ch)), dim3(THREADS_PER_BLOCK),
        0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        bs*ch,
        input_ptr,
        guide_ptr,
        max_ptr,
        output_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(hipGetLastError());
  return {
    output,
    maxout
  };
}
// Host launcher for the bottom-right guided-pooling backward kernel (HIP).
// Consumes the forward's {output, maxout} plus grad_output and returns
// {grad_input, grad_guide}.
std::vector<at::Tensor> br_pool_backward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide,
  const at::Tensor &output,
  const at::Tensor &maxout,
  const at::Tensor &grad_output
) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  AT_ASSERTM(output.is_contiguous(), "output must be contiguous.");
  AT_ASSERTM(maxout.is_contiguous(), "maxout must be contiguous.");
  AT_ASSERTM(grad_output.is_contiguous(), "grad_output must be contiguous.");
  hipDeviceSynchronize(); // for print (debug aid; serialises the device)
  auto grad_input = at::zeros_like(input);
  auto grad_guide = at::zeros_like(guide);
  // Cloned because the kernel uses it as a mutable accumulation workspace.
  auto gradout = grad_output.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "br_pool_backward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      const scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      const scalar_t *output_ptr = output.data_ptr<scalar_t>();
      scalar_t *gradout_ptr = gradout.data_ptr<scalar_t>();
      scalar_t *gradin_ptr = grad_input.data_ptr<scalar_t>();
      scalar_t *gradguide_ptr = grad_guide.data_ptr<scalar_t>();
      hipLaunchKernelGGL(( br_backward_kernel<scalar_t>), dim3(GET_BLOCKS(bs*ch)), dim3(THREADS_PER_BLOCK),
        0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
        bs*ch,
        input_ptr,
        guide_ptr,
        output_ptr,
        max_ptr,
        gradout_ptr,
        gradin_ptr,
        gradguide_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(hipGetLastError());
  return {
    grad_input,
    grad_guide
  };
}
} // namespace landmarkconv
// name should be different from .cpp file!!!
#include <torch/types.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>
#include <stdio.h>
#include <vector>
// Grid-stride loop: `i` visits every index in [0, n) regardless of how
// many blocks were actually launched.
#define CUDA_1D_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
       i += blockDim.x * gridDim.x)
// #define THREADS_PER_BLOCK 1024
#define THREADS_PER_BLOCK 128
// Ceil-divide N by the block size, clamped to a safe grid-dimension cap;
// the grid-stride loop above covers any remainder.
inline int GET_BLOCKS(const int N) {
  const int kMaxBlockNum = 65000;
  int needed = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
  return needed < kMaxBlockNum ? needed : kMaxBlockNum;
}
// Forward pass of guided directional max-pooling, top-left variant.
// One thread owns one (batch, channel) plane and sweeps it serially from
// the top-left corner; `outptr` starts as a clone of the input (done by
// the host launcher) and is updated in place, so each cell blends the
// already-processed values of its left/above neighbours — the sweep order
// is load-bearing.
//
// input_ptr is unused here: the input values arrive through the outptr
// clone.  max_ptr records the raw directional maximum per cell for the
// backward kernel.  All buffers are contiguous NCHW (bs, ch, sh, sw).
// NOTE(review): this looks like CornerNet-style corner pooling with a
// learned per-cell gate — confirm against the Python wrapper.
template <typename scalar_t>
__global__ void tl_forward_kernel(const int nthreads,
                        const scalar_t *input_ptr,
                        const scalar_t *guide_ptr,
                        scalar_t *max_ptr,
                        scalar_t *outptr,
                        const int bs, const int ch,
                        const int sh, const int sw
                        ) {
  // Grid-stride loop over the bs*ch planes (nthreads == bs*ch).
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    int dc = index % ch;  // channel index
    int db = index / ch;  // batch index
    for (int i = 0; i < sh; i++) {
      for (int j = 0; j < sw; j++) {
        // x1: this cell (still the raw input value at this point);
        // x2/x3: already-blended outputs of the left / above neighbours.
        auto x1 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto x2 = x1;
        auto x3 = x1;
        if (j > 0) x2 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j-1);
        if (i > 0) x3 = *(outptr + db * ch * sh * sw + dc * sh * sw + (i-1) * sw + j);
        // Per-cell gate (presumably already in [0,1] — sigmoid applied by caller).
        auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto out = x1;
        if (out < x2) out = x2;
        if (out < x3) out = x3;
        // Gate between the directional max and the original value.
        outptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = sigmoid * out + (1-sigmoid) * x1;
        max_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = out;
      }
    }
  }
}
// Backward pass for tl_forward_kernel.  Sweeps each (batch, channel) plane
// in the REVERSE of the forward order (bottom-right to top-left) so every
// cell's total upstream gradient is complete before it is consumed.
//
// gradout_ptr doubles as a workspace: the launcher clones grad_output into
// it, and this kernel accumulates the gradient routed to the left/above
// neighbours back into it.  gradin_ptr / gradguide_ptr receive d(input)
// and d(guide).  All buffers are contiguous NCHW (bs, ch, sh, sw).
template <typename scalar_t>
__global__ void tl_backward_kernel(const int nthreads,
                        const scalar_t *input_ptr,
                        const scalar_t *guide_ptr,
                        const scalar_t *output_ptr,
                        const scalar_t *maxout_ptr,
                        scalar_t *gradout_ptr,
                        scalar_t *gradin_ptr,
                        scalar_t *gradguide_ptr,
                        const int bs, const int ch,
                        const int sh, const int sw
                        ) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    int dc = index % ch;  // channel index
    int db = index / ch;  // batch index
    for (int i = sh-1; i >= 0; i--) {
      for (int j = sw-1; j >= 0; j--) {
        auto x1 = *(input_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        // out = max(x1, out_x2, out_x3) as recorded by the forward pass.
        auto out = *(maxout_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto out_x2 = x1;
        auto out_x3 = x1;
        if (j > 0) out_x2 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j-1);
        if (i > 0) out_x3 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + (i-1) * sw + j);
        // One-hot argmax indicators, priority x1 > left > above; they
        // route the max-branch gradient to its source.
        auto g1 = scalar_t(x1 >= out);
        auto g2 = (1-g1) * scalar_t(out_x2 >= out);
        auto g3 = (1-g1) * (1-g2) * scalar_t(out_x3 >= out);
        // Partials of output = sigmoid*out + (1-sigmoid)*x1.
        auto grad_x1 = sigmoid * g1 + (1-sigmoid);
        auto grad_x2 = sigmoid * g2;
        auto grad_x3 = sigmoid * g3;
        auto grad_sigmoid = out - x1;  // d(output)/d(sigmoid)
        // printf("(%i, %i) x1 %f out %f out_x2 %f out_x3 %f g1 %f g2 %f g3 %f gsig %f \n", i, j, x1, out, out_x2, out_x3, g1, g2, g3, grad_sigmoid);
        gradguide_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
          grad_sigmoid * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        gradin_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
          grad_x1 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        // Scatter the neighbour shares into cells not yet visited.
        if (j > 0) gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j-1] +=
          grad_x2 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        if (i > 0) gradout_ptr[db * ch * sh * sw + dc * sh * sw + (i-1) * sw + j] +=
          grad_x3 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        // grad_x3 * *(gradout_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
      }
    }
  }
}
// Forward pass of guided directional max-pooling, top-right variant.
// One thread owns one (batch, channel) plane and sweeps it serially from
// the top-right corner; `outptr` starts as a clone of the input (done by
// the host launcher) and is updated in place, so each cell blends the
// already-processed values of its right/above neighbours — the sweep order
// is load-bearing.
//
// input_ptr is unused here: the input values arrive through the outptr
// clone.  max_ptr records the raw directional maximum per cell for the
// backward kernel.  All buffers are contiguous NCHW (bs, ch, sh, sw).
template <typename scalar_t>
__global__ void tr_forward_kernel(const int nthreads,
                        const scalar_t *input_ptr,
                        const scalar_t *guide_ptr,
                        scalar_t *max_ptr,
                        scalar_t *outptr,
                        const int bs, const int ch,
                        const int sh, const int sw
                        ) {
  // Grid-stride loop over the bs*ch planes (nthreads == bs*ch).
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    int dc = index % ch;  // channel index
    int db = index / ch;  // batch index
    for (int i = 0; i < sh; i++) {
      for (int j = sw-1; j >= 0; j--) {
        // x1: this cell (still the raw input value at this point);
        // x2/x3: already-blended outputs of the right / above neighbours.
        auto x1 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto x2 = x1;
        auto x3 = x1;
        if (j < sw-1) x2 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j+1);
        if (i > 0) x3 = *(outptr + db * ch * sh * sw + dc * sh * sw + (i-1) * sw + j);
        // Per-cell gate (presumably already in [0,1] — sigmoid applied by caller).
        auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        // printf("%f %f %f %f \n", x1, x2, x3, sigmoid);
        // printf("(%i, %i) x1 %f x2 %f x3 %f sigmoid %f \n", i, j, x1, x2, x3, sigmoid);
        // calculate the max values
        auto out = x1;
        if (out < x2) out = x2;
        if (out < x3) out = x3;
        // Gate between the directional max and the original value.
        outptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = sigmoid * out + (1-sigmoid) * x1;
        max_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = out;
      }
    }
  }
}
// Backward pass for tr_forward_kernel.  Sweeps each (batch, channel) plane
// in the REVERSE of the forward order (bottom-left to top-right) so every
// cell's total upstream gradient is complete before it is consumed.
//
// gradout_ptr doubles as a workspace: the launcher clones grad_output into
// it, and this kernel accumulates the gradient routed to the right/above
// neighbours back into it.  gradin_ptr / gradguide_ptr receive d(input)
// and d(guide).  All buffers are contiguous NCHW (bs, ch, sh, sw).
template <typename scalar_t>
__global__ void tr_backward_kernel(const int nthreads,
                        const scalar_t *input_ptr,
                        const scalar_t *guide_ptr,
                        const scalar_t *output_ptr,
                        const scalar_t *maxout_ptr,
                        scalar_t *gradout_ptr,
                        scalar_t *gradin_ptr,
                        scalar_t *gradguide_ptr,
                        const int bs, const int ch,
                        const int sh, const int sw
                        ) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    int dc = index % ch;  // channel index
    int db = index / ch;  // batch index
    for (int i = sh-1; i >= 0; i--) {
      for (int j = 0; j < sw; j++) {
        auto x1 = *(input_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        // out = max(x1, out_x2, out_x3) as recorded by the forward pass.
        auto out = *(maxout_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto out_x2 = x1;
        auto out_x3 = x1;
        if (j < sw-1) out_x2 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j+1);
        if (i > 0) out_x3 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + (i-1) * sw + j);
        // One-hot argmax indicators, priority x1 > right > above; they
        // route the max-branch gradient to its source.
        auto g1 = scalar_t(x1 >= out);
        auto g2 = (1-g1) * scalar_t(out_x2 >= out);
        auto g3 = (1-g1) * (1-g2) * scalar_t(out_x3 >= out);
        // Partials of output = sigmoid*out + (1-sigmoid)*x1.
        auto grad_x1 = sigmoid * g1 + (1-sigmoid);
        auto grad_x2 = sigmoid * g2;
        auto grad_x3 = sigmoid * g3;
        auto grad_sigmoid = out - x1;  // d(output)/d(sigmoid)
        // printf("(%i, %i) x1 %f out %f out_x2 %f out_x3 %f g1 %f g2 %f g3 %f gsig %f \n", i, j, x1, out, out_x2, out_x3, g1, g2, g3, grad_sigmoid);
        gradguide_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
          grad_sigmoid * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        gradin_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
          grad_x1 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        // Scatter the neighbour shares into cells not yet visited.
        if (j < sw-1) gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j+1] +=
          grad_x2 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        if (i > 0) gradout_ptr[db * ch * sh * sw + dc * sh * sw + (i-1) * sw + j] +=
          grad_x3 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
      }
    }
  }
}
// Forward pass of guided directional max-pooling, bottom-left variant.
// One thread owns one (batch, channel) plane and sweeps it serially from
// the bottom-left corner; `outptr` starts as a clone of the input (done by
// the host launcher) and is updated in place, so each cell blends the
// already-processed values of its left/below neighbours — the sweep order
// is load-bearing.
//
// input_ptr is unused here: the input values arrive through the outptr
// clone.  max_ptr records the raw directional maximum per cell for the
// backward kernel.  All buffers are contiguous NCHW (bs, ch, sh, sw).
template <typename scalar_t>
__global__ void bl_forward_kernel(const int nthreads,
                        const scalar_t *input_ptr,
                        const scalar_t *guide_ptr,
                        scalar_t *max_ptr,
                        scalar_t *outptr,
                        const int bs, const int ch,
                        const int sh, const int sw
                        ) {
  // Grid-stride loop over the bs*ch planes (nthreads == bs*ch).
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    int dc = index % ch;  // channel index
    int db = index / ch;  // batch index
    for (int i = sh-1; i >= 0; i--) {
      for (int j = 0; j < sw; j++) {
        // x1: this cell (still the raw input value at this point);
        // x2/x3: already-blended outputs of the left / below neighbours.
        auto x1 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto x2 = x1;
        auto x3 = x1;
        if (j > 0) x2 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j-1);
        if (i < sh-1) x3 = *(outptr + db * ch * sh * sw + dc * sh * sw + (i+1) * sw + j);
        // Per-cell gate (presumably already in [0,1] — sigmoid applied by caller).
        auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        // calculate the max values
        auto out = x1;
        if (out < x2) out = x2;
        if (out < x3) out = x3;
        // Gate between the directional max and the original value.
        outptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = sigmoid * out + (1-sigmoid) * x1;
        max_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = out;
      }
    }
  }
}
// Backward pass for bl_forward_kernel.  Sweeps each (batch, channel) plane
// in the REVERSE of the forward order (top-right to bottom-left) so every
// cell's total upstream gradient is complete before it is consumed.
//
// gradout_ptr doubles as a workspace: the launcher clones grad_output into
// it, and this kernel accumulates the gradient routed to the left/below
// neighbours back into it.  gradin_ptr / gradguide_ptr receive d(input)
// and d(guide).  All buffers are contiguous NCHW (bs, ch, sh, sw).
template <typename scalar_t>
__global__ void bl_backward_kernel(const int nthreads,
                        const scalar_t *input_ptr,
                        const scalar_t *guide_ptr,
                        const scalar_t *output_ptr,
                        const scalar_t *maxout_ptr,
                        scalar_t *gradout_ptr,
                        scalar_t *gradin_ptr,
                        scalar_t *gradguide_ptr,
                        const int bs, const int ch,
                        const int sh, const int sw
                        ) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    int dc = index % ch;  // channel index
    int db = index / ch;  // batch index
    for (int i = 0; i < sh; i++) {
      for (int j = sw-1; j >= 0; j--) {
        auto x1 = *(input_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        // out = max(x1, out_x2, out_x3) as recorded by the forward pass.
        auto out = *(maxout_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto out_x2 = x1;
        auto out_x3 = x1;
        if (j > 0) out_x2 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j-1);
        if (i < sh-1) out_x3 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + (i+1) * sw + j);
        // One-hot argmax indicators, priority x1 > left > below; they
        // route the max-branch gradient to its source.
        auto g1 = scalar_t(x1 >= out);
        auto g2 = (1-g1) * scalar_t(out_x2 >= out);
        auto g3 = (1-g1) * (1-g2) * scalar_t(out_x3 >= out);
        // Partials of output = sigmoid*out + (1-sigmoid)*x1.
        auto grad_x1 = sigmoid * g1 + (1-sigmoid);
        auto grad_x2 = sigmoid * g2;
        auto grad_x3 = sigmoid * g3;
        auto grad_sigmoid = out - x1;  // d(output)/d(sigmoid)
        // printf("(%i, %i) x1 %f out %f out_x2 %f out_x3 %f g1 %f g2 %f g3 %f gsig %f \n", i, j, x1, out, out_x2, out_x3, g1, g2, g3, grad_sigmoid);
        gradguide_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
          grad_sigmoid * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        gradin_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
          grad_x1 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        // Scatter the neighbour shares into cells not yet visited.
        if (j > 0) gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j-1] +=
          grad_x2 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        if (i < sh-1) gradout_ptr[db * ch * sh * sw + dc * sh * sw + (i+1) * sw + j] +=
          grad_x3 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
      }
    }
  }
}
// Forward pass of guided directional max-pooling, bottom-right variant.
// One thread owns one (batch, channel) plane and sweeps it serially from
// the bottom-right corner; `outptr` starts as a clone of the input (done
// by the host launcher) and is updated in place, so each cell blends the
// already-processed values of its right/below neighbours — the sweep order
// is load-bearing.
//
// input_ptr is unused here: the input values arrive through the outptr
// clone.  max_ptr records the raw directional maximum per cell for the
// backward kernel.  All buffers are contiguous NCHW (bs, ch, sh, sw).
template <typename scalar_t>
__global__ void br_forward_kernel(const int nthreads,
                        const scalar_t *input_ptr,
                        const scalar_t *guide_ptr,
                        scalar_t *max_ptr,
                        scalar_t *outptr,
                        const int bs, const int ch,
                        const int sh, const int sw
                        ) {
  // Grid-stride loop over the bs*ch planes (nthreads == bs*ch).
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    int dc = index % ch;  // channel index
    int db = index / ch;  // batch index
    for (int i = sh-1; i >= 0; i--) {
      for (int j = sw-1; j >= 0; j--) {
        // x1: this cell (still the raw input value at this point);
        // x2/x3: already-blended outputs of the right / below neighbours.
        auto x1 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto x2 = x1;
        auto x3 = x1;
        if (j < sw-1) x2 = *(outptr + db * ch * sh * sw + dc * sh * sw + i * sw + j+1);
        if (i < sh-1) x3 = *(outptr + db * ch * sh * sw + dc * sh * sw + (i+1) * sw + j);
        // Per-cell gate (presumably already in [0,1] — sigmoid applied by caller).
        auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        // calculate the max values
        auto out = x1;
        if (out < x2) out = x2;
        if (out < x3) out = x3;
        // Gate between the directional max and the original value.
        outptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = sigmoid * out + (1-sigmoid) * x1;
        max_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] = out;
      }
    }
  }
}
// Backward pass for br_forward_kernel.  Sweeps each (batch, channel) plane
// in the REVERSE of the forward order (top-left to bottom-right) so every
// cell's total upstream gradient is complete before it is consumed.
//
// gradout_ptr doubles as a workspace: the launcher clones grad_output into
// it, and this kernel accumulates the gradient routed to the right/below
// neighbours back into it.  gradin_ptr / gradguide_ptr receive d(input)
// and d(guide).  All buffers are contiguous NCHW (bs, ch, sh, sw).
template <typename scalar_t>
__global__ void br_backward_kernel(const int nthreads,
                        const scalar_t *input_ptr,
                        const scalar_t *guide_ptr,
                        const scalar_t *output_ptr,
                        const scalar_t *maxout_ptr,
                        scalar_t *gradout_ptr,
                        scalar_t *gradin_ptr,
                        scalar_t *gradguide_ptr,
                        const int bs, const int ch,
                        const int sh, const int sw
                        ) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    int dc = index % ch;  // channel index
    int db = index / ch;  // batch index
    for (int i = 0; i < sh; i++) {
      for (int j = 0; j < sw; j++) {
        auto x1 = *(input_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        // out = max(x1, out_x2, out_x3) as recorded by the forward pass.
        auto out = *(maxout_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto sigmoid = *(guide_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j);
        auto out_x2 = x1;
        auto out_x3 = x1;
        if (j < sw-1) out_x2 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + i * sw + j+1);
        if (i < sh-1) out_x3 = *(output_ptr + db * ch * sh * sw + dc * sh * sw + (i+1) * sw + j);
        // One-hot argmax indicators, priority x1 > right > below; they
        // route the max-branch gradient to its source.
        auto g1 = scalar_t(x1 >= out);
        auto g2 = (1-g1) * scalar_t(out_x2 >= out);
        auto g3 = (1-g1) * (1-g2) * scalar_t(out_x3 >= out);
        // Partials of output = sigmoid*out + (1-sigmoid)*x1.
        auto grad_x1 = sigmoid * g1 + (1-sigmoid);
        auto grad_x2 = sigmoid * g2;
        auto grad_x3 = sigmoid * g3;
        auto grad_sigmoid = out - x1;  // d(output)/d(sigmoid)
        // printf("(%i, %i) x1 %f out %f out_x2 %f out_x3 %f g1 %f g2 %f g3 %f gsig %f \n", i, j, x1, out, out_x2, out_x3, g1, g2, g3, grad_sigmoid);
        gradguide_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
          grad_sigmoid * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        gradin_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j] =
          grad_x1 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        // Scatter the neighbour shares into cells not yet visited.
        if (j < sw-1) gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j+1] +=
          grad_x2 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
        if (i < sh-1) gradout_ptr[db * ch * sh * sw + dc * sh * sw + (i+1) * sw + j] +=
          grad_x3 * gradout_ptr[db * ch * sh * sw + dc * sh * sw + i * sw + j];
      }
    }
  }
}
namespace landmarkconv {
// Host launcher for the top-left guided-pooling forward kernel (CUDA).
// Returns {output, maxout}: the gated result and the raw directional
// maxima the backward pass consumes.
std::vector<at::Tensor> tl_pool_forward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  // printf("call by cuda...\n");
  cudaDeviceSynchronize(); // for print (debug aid; serialises the device)
  // The kernel updates `output` in place starting from a copy of the input.
  auto output = input.clone();
  auto maxout = input.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "tl_pool_forward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      scalar_t *output_ptr = output.data_ptr<scalar_t>();
      // One logical thread per (batch, channel) plane.
      tl_forward_kernel<scalar_t><<<GET_BLOCKS(bs*ch), THREADS_PER_BLOCK,
        0, at::cuda::getCurrentCUDAStream()>>>(
        bs*ch,
        input_ptr,
        guide_ptr,
        max_ptr,
        output_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(cudaGetLastError());
  return {
    output,
    maxout
  };
}
// Host launcher for the top-left guided-pooling backward kernel (CUDA).
// Consumes the forward's {output, maxout} plus grad_output and returns
// {grad_input, grad_guide}.
std::vector<at::Tensor> tl_pool_backward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide,
  const at::Tensor &output,
  const at::Tensor &maxout,
  const at::Tensor &grad_output
) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  AT_ASSERTM(output.is_contiguous(), "output must be contiguous.");
  AT_ASSERTM(maxout.is_contiguous(), "maxout must be contiguous.");
  AT_ASSERTM(grad_output.is_contiguous(), "grad_output must be contiguous.");
  cudaDeviceSynchronize(); // for print (debug aid; serialises the device)
  auto grad_input = at::zeros_like(input);
  auto grad_guide = at::zeros_like(guide);
  // Cloned because the kernel uses it as a mutable accumulation workspace.
  auto gradout = grad_output.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "tl_pool_backward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      const scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      const scalar_t *output_ptr = output.data_ptr<scalar_t>();
      scalar_t *gradout_ptr = gradout.data_ptr<scalar_t>();
      scalar_t *gradin_ptr = grad_input.data_ptr<scalar_t>();
      scalar_t *gradguide_ptr = grad_guide.data_ptr<scalar_t>();
      tl_backward_kernel<scalar_t><<<GET_BLOCKS(bs*ch), THREADS_PER_BLOCK,
        0, at::cuda::getCurrentCUDAStream()>>>(
        bs*ch,
        input_ptr,
        guide_ptr,
        output_ptr,
        max_ptr,
        gradout_ptr,
        gradin_ptr,
        gradguide_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(cudaGetLastError());
  return {
    grad_input,
    grad_guide
  };
}
// Host launcher for the top-right guided-pooling forward kernel (CUDA).
// Returns {output, maxout}: the gated result and the raw directional
// maxima the backward pass consumes.
std::vector<at::Tensor> tr_pool_forward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  // printf("call by cuda...\n");
  cudaDeviceSynchronize(); // for print (debug aid; serialises the device)
  // The kernel updates `output` in place starting from a copy of the input.
  auto output = input.clone();
  auto maxout = input.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "tr_pool_forward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      scalar_t *output_ptr = output.data_ptr<scalar_t>();
      // One logical thread per (batch, channel) plane.
      tr_forward_kernel<scalar_t><<<GET_BLOCKS(bs*ch), THREADS_PER_BLOCK,
        0, at::cuda::getCurrentCUDAStream()>>>(
        bs*ch,
        input_ptr,
        guide_ptr,
        max_ptr,
        output_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(cudaGetLastError());
  return {
    output,
    maxout
  };
}
// Host launcher for the top-right guided-pooling backward kernel (CUDA).
// Consumes the forward's {output, maxout} plus grad_output and returns
// {grad_input, grad_guide}.
std::vector<at::Tensor> tr_pool_backward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide,
  const at::Tensor &output,
  const at::Tensor &maxout,
  const at::Tensor &grad_output
) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  AT_ASSERTM(output.is_contiguous(), "output must be contiguous.");
  AT_ASSERTM(maxout.is_contiguous(), "maxout must be contiguous.");
  AT_ASSERTM(grad_output.is_contiguous(), "grad_output must be contiguous.");
  cudaDeviceSynchronize(); // for print (debug aid; serialises the device)
  auto grad_input = at::zeros_like(input);
  auto grad_guide = at::zeros_like(guide);
  // Cloned because the kernel uses it as a mutable accumulation workspace.
  auto gradout = grad_output.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "tr_pool_backward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      const scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      const scalar_t *output_ptr = output.data_ptr<scalar_t>();
      scalar_t *gradout_ptr = gradout.data_ptr<scalar_t>();
      scalar_t *gradin_ptr = grad_input.data_ptr<scalar_t>();
      scalar_t *gradguide_ptr = grad_guide.data_ptr<scalar_t>();
      tr_backward_kernel<scalar_t><<<GET_BLOCKS(bs*ch), THREADS_PER_BLOCK,
        0, at::cuda::getCurrentCUDAStream()>>>(
        bs*ch,
        input_ptr,
        guide_ptr,
        output_ptr,
        max_ptr,
        gradout_ptr,
        gradin_ptr,
        gradguide_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(cudaGetLastError());
  return {
    grad_input,
    grad_guide
  };
}
// Host launcher for the bottom-left guided-pooling forward kernel (CUDA).
// Returns {output, maxout}: the gated result and the raw directional
// maxima the backward pass consumes.
std::vector<at::Tensor> bl_pool_forward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  // printf("call by cuda...\n");
  cudaDeviceSynchronize(); // for print (debug aid; serialises the device)
  // The kernel updates `output` in place starting from a copy of the input.
  auto output = input.clone();
  auto maxout = input.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "bl_pool_forward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      scalar_t *output_ptr = output.data_ptr<scalar_t>();
      // One logical thread per (batch, channel) plane.
      bl_forward_kernel<scalar_t><<<GET_BLOCKS(bs*ch), THREADS_PER_BLOCK,
        0, at::cuda::getCurrentCUDAStream()>>>(
        bs*ch,
        input_ptr,
        guide_ptr,
        max_ptr,
        output_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(cudaGetLastError());
  return {
    output,
    maxout
  };
}
// Host launcher for the bottom-left guided-pooling backward kernel (CUDA).
// Consumes the forward's {output, maxout} plus grad_output and returns
// {grad_input, grad_guide}.
std::vector<at::Tensor> bl_pool_backward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide,
  const at::Tensor &output,
  const at::Tensor &maxout,
  const at::Tensor &grad_output
) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  AT_ASSERTM(output.is_contiguous(), "output must be contiguous.");
  AT_ASSERTM(maxout.is_contiguous(), "maxout must be contiguous.");
  AT_ASSERTM(grad_output.is_contiguous(), "grad_output must be contiguous.");
  cudaDeviceSynchronize(); // for print (debug aid; serialises the device)
  auto grad_input = at::zeros_like(input);
  auto grad_guide = at::zeros_like(guide);
  // Cloned because the kernel uses it as a mutable accumulation workspace.
  auto gradout = grad_output.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "bl_pool_backward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      const scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      const scalar_t *output_ptr = output.data_ptr<scalar_t>();
      scalar_t *gradout_ptr = gradout.data_ptr<scalar_t>();
      scalar_t *gradin_ptr = grad_input.data_ptr<scalar_t>();
      scalar_t *gradguide_ptr = grad_guide.data_ptr<scalar_t>();
      bl_backward_kernel<scalar_t><<<GET_BLOCKS(bs*ch), THREADS_PER_BLOCK,
        0, at::cuda::getCurrentCUDAStream()>>>(
        bs*ch,
        input_ptr,
        guide_ptr,
        output_ptr,
        max_ptr,
        gradout_ptr,
        gradin_ptr,
        gradguide_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(cudaGetLastError());
  return {
    grad_input,
    grad_guide
  };
}
// Host launcher for the bottom-right guided-pooling forward kernel (CUDA).
// Returns {output, maxout}: the gated result and the raw directional
// maxima the backward pass consumes.
std::vector<at::Tensor> br_pool_forward_laucher(
  const at::Tensor &input,
  const at::Tensor &guide) {
  // Ensure CUDA uses the input tensor device.
  at::DeviceGuard guard(input.device());
  // Fixed: message previously said "map" for the guide tensor; also use
  // Tensor::is_cuda() instead of the deprecated type().is_cuda().
  AT_ASSERTM(guide.is_cuda(), "guide must be a CUDA tensor.");
  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor.");
  // The kernel indexes raw pointers with NCHW arithmetic.
  AT_ASSERTM(input.is_contiguous(), "input must be contiguous.");
  AT_ASSERTM(guide.is_contiguous(), "guide must be contiguous.");
  // printf("call by cuda...\n");
  cudaDeviceSynchronize(); // for print (debug aid; serialises the device)
  // The kernel updates `output` in place starting from a copy of the input.
  auto output = input.clone();
  auto maxout = input.clone();
  int bs = input.size(0);
  int ch = input.size(1);
  int sh = input.size(2);
  int sw = input.size(3);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    input.scalar_type(), "br_pool_forward_laucher", ([&] {
      const scalar_t *input_ptr = input.data_ptr<scalar_t>();
      const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
      scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
      scalar_t *output_ptr = output.data_ptr<scalar_t>();
      // One logical thread per (batch, channel) plane.
      br_forward_kernel<scalar_t><<<GET_BLOCKS(bs*ch), THREADS_PER_BLOCK,
        0, at::cuda::getCurrentCUDAStream()>>>(
        bs*ch,
        input_ptr,
        guide_ptr,
        max_ptr,
        output_ptr,
        bs, ch, sh, sw
      );
    }
    )
  );
  THCudaCheck(cudaGetLastError());
  return {
    output,
    maxout
  };
}
// Backward-pass launcher for the "br" (bottom-right) guided pooling op.
// Mirrors bl_pool_backward_laucher but dispatches br_backward_kernel.
// Returns {grad_input, grad_guide}.
// NOTE(review): cudaDeviceSynchronize() is debug-only ("for print") and
// serializes the stream -- consider removing for release builds.
std::vector<at::Tensor> br_pool_backward_laucher(
    const at::Tensor &input,
    const at::Tensor &guide,
    const at::Tensor &output,
    const at::Tensor &maxout,
    const at::Tensor &grad_output
) {
    // Ensure CUDA uses the input tensor device.
    at::DeviceGuard guard(input.device());
    AT_ASSERTM(guide.type().is_cuda(), "map must be a CUDA tensor.");
    AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor.");
    cudaDeviceSynchronize(); // for print
    // Gradients filled in by the kernel; start from zero.
    auto grad_input = at::zeros_like(input);
    auto grad_guide = at::zeros_like(guide);
    // Clone so the kernel can take a non-const pointer to the upstream grad.
    auto gradout = grad_output.clone();
    // NCHW dimensions; one kernel thread handles one (batch, channel) plane.
    int bs = input.size(0);
    int ch = input.size(1);
    int sh = input.size(2);
    int sw = input.size(3);
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(
        input.scalar_type(), "br_pool_backward_laucher", ([&] {
            const scalar_t *input_ptr = input.data_ptr<scalar_t>();
            const scalar_t *guide_ptr = guide.data_ptr<scalar_t>();
            const scalar_t *max_ptr = maxout.data_ptr<scalar_t>();
            const scalar_t *output_ptr = output.data_ptr<scalar_t>();
            scalar_t *gradout_ptr = gradout.data_ptr<scalar_t>();
            scalar_t *gradin_ptr = grad_input.data_ptr<scalar_t>();
            scalar_t *gradguide_ptr = grad_guide.data_ptr<scalar_t>();
            // Launch on the current ATen stream for correct ordering.
            br_backward_kernel<scalar_t><<<GET_BLOCKS(bs*ch), THREADS_PER_BLOCK,
                0, at::cuda::getCurrentCUDAStream()>>>(
                bs*ch,
                input_ptr,
                guide_ptr,
                output_ptr,
                max_ptr,
                gradout_ptr,
                gradin_ptr,
                gradguide_ptr,
                bs, ch, sh, sw
            );
            }
        )
    );
    // Surface launch-configuration errors immediately.
    THCudaCheck(cudaGetLastError());
    return {
        grad_input,
        grad_guide
    };
}
} // namespace landmarkconv2 |
8e0a5fb4e1248b6dca0d049a7fb2c4b0f2058110.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2014 by Joern Dinkla, www.dinkla.com, All rights reserved.
*
* See the LICENSE file in the root directory.
*/
#include <algorithm> // CUDA 6.5 requires this for std::min
#include "while_find.h"
#include "CudaExecConfig.h"
#include <thrust/sequence.h>
#include "ThrustUtilities.h"
// Kernel: each thread performs one iterative binary-search-tree lookup.
// n points to the tree root (device memory); elems holds numElems keys to
// search for; found[x] is set to true iff elems[x] is present in the tree.
// Fix: the tree is only read, so walk it through a pointer-to-const
// instead of stripping constness with const_cast.
__global__
void while_find_kernel(const node* n, const int numElems,
                       const int* elems, bool* found)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x < numElems)
    {
        const node* m = n;
        const int elem = elems[x];
        while (m != 0 && m->n != elem)
        {
            m = (elem < m->n) ? m->left : m->right;
        }
        found[x] = (m != 0);
    }
}
/// Launches while_find_kernel with the supplied execution configuration.
/// @param cnf    grid/block configuration used for the launch
/// @param n      device pointer to the root of the search tree
/// @param elems  keys to look up (device memory)
/// @param found  per-key result flags (device memory, same size as elems)
/// Fixes: stray double semicolon removed; pointers obtained via data()
/// (well-defined even for empty vectors, unlike &elems[0]); the
/// size_t -> int narrowing for the kernel parameter is made explicit.
void while_find(
    CudaExecConfig cnf,
    const node* n,
    thrust::device_vector<int>& elems,
    thrust::device_vector<bool>& found
    )
{
    const int numElems = static_cast<int>(elems.size());
    dim3 g = cnf.get_grid();
    dim3 b = cnf.get_block();
    const int* ePtr = thrust::raw_pointer_cast(elems.data());
    bool* fPtr = thrust::raw_pointer_cast(found.data());
    hipLaunchKernelGGL(( while_find_kernel), dim3(g), dim3(b), 0, 0, n, numElems, ePtr, fPtr);
}
void cuda_while_example_7()
{
node* n1 = new node(1);
node* n3 = new node(3);
node* n2 = new node(2, n1, n3);
node* n5 = new node(5);
node* n7 = new node(7);
node* n6 = new node(6, n5, n7);
node* n4 = new node(4, n2, n6);
thrust::device_vector<int> d(7);
thrust::sequence(d.begin(), d.end(), 1);
thrust::device_vector<bool> found(7, false);
hipLaunchKernelGGL(( while_find_kernel), dim3(1), dim3(128), 0, 0, n4, 7, raw_pointer_cast(&d[0]), raw_pointer_cast(&found[0]));
show(found);
}
| 8e0a5fb4e1248b6dca0d049a7fb2c4b0f2058110.cu | /*
* Copyright (c) 2014 by Joern Dinkla, www.dinkla.com, All rights reserved.
*
* See the LICENSE file in the root directory.
*/
#include <algorithm> // CUDA 6.5 requires this for std::min
#include "while_find.h"
#include "CudaExecConfig.h"
#include <thrust/sequence.h>
#include "ThrustUtilities.h"
// Kernel: each thread performs one iterative binary-search-tree lookup.
// n points to the tree root (device memory); elems holds numElems keys to
// search for; found[x] is set to true iff elems[x] is present in the tree.
// Fix: the tree is only read, so walk it through a pointer-to-const
// instead of stripping constness with const_cast.
__global__
void while_find_kernel(const node* n, const int numElems,
                       const int* elems, bool* found)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x < numElems)
    {
        const node* m = n;
        const int elem = elems[x];
        while (m != 0 && m->n != elem)
        {
            m = (elem < m->n) ? m->left : m->right;
        }
        found[x] = (m != 0);
    }
}
/// Launches while_find_kernel with the supplied execution configuration.
/// @param cnf    grid/block configuration used for the launch
/// @param n      device pointer to the root of the search tree
/// @param elems  keys to look up (device memory)
/// @param found  per-key result flags (device memory, same size as elems)
/// Fixes: stray double semicolon removed; pointers obtained via data()
/// (well-defined even for empty vectors, unlike &elems[0]); the
/// size_t -> int narrowing for the kernel parameter is made explicit.
void while_find(
    CudaExecConfig cnf,
    const node* n,
    thrust::device_vector<int>& elems,
    thrust::device_vector<bool>& found
    )
{
    const int numElems = static_cast<int>(elems.size());
    dim3 g = cnf.get_grid();
    dim3 b = cnf.get_block();
    const int* ePtr = thrust::raw_pointer_cast(elems.data());
    bool* fPtr = thrust::raw_pointer_cast(found.data());
    while_find_kernel<<<g, b>>>(n, numElems, ePtr, fPtr);
}
/// Example: build a 7-node balanced BST, upload it to the device, and look
/// up the keys 1..7 (all present).
/// Bug fix: the original allocated the nodes with host-side `new` and
/// passed those host pointers to the kernel; dereferencing host heap
/// pointers on the device is an illegal memory access, so every lookup
/// failed. The tree is now staged on the host with child pointers that
/// already refer to a device buffer, copied over, and freed afterwards
/// (also fixing the original's memory leak).
void cuda_while_example_7()
{
    node* d_nodes = 0;
    cudaMalloc((void**)&d_nodes, 7 * sizeof(node));
    // Layout: [0]=1 [1]=3 [2]=2 [3]=5 [4]=7 [5]=6 [6]=4 (root).
    // NOTE(review): assumes node is trivially copyable (plain data + raw
    // pointers) -- confirm against while_find.h.
    node h_nodes[7] = {
        node(1),
        node(3),
        node(2, d_nodes + 0, d_nodes + 1),
        node(5),
        node(7),
        node(6, d_nodes + 3, d_nodes + 4),
        node(4, d_nodes + 2, d_nodes + 5)
    };
    cudaMemcpy(d_nodes, h_nodes, sizeof(h_nodes), cudaMemcpyHostToDevice);
    thrust::device_vector<int> d(7);
    thrust::sequence(d.begin(), d.end(), 1);
    thrust::device_vector<bool> found(7, false);
    while_find_kernel<<<1, 128>>>(d_nodes + 6, 7,
        thrust::raw_pointer_cast(d.data()), thrust::raw_pointer_cast(found.data()));
    show(found);
    cudaFree(d_nodes);
}
|
d93e9968034686c94b53b616fc4cc739409ed679.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2018~2023 by XGBoost contributors
*/
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <xgboost/logging.h>
#include <cstddef> // for size_t
#include <memory>
#include <mutex>
#include <utility>
#include <vector>
#include "categorical.h"
#include "device_helpers_hip.cuh"
#include "hist_util_hip.cuh"
#include "hist_util.h"
#include "math.h" // NOLINT
#include "quantile.h"
#include "xgboost/host_device_vector.h"
namespace xgboost {
namespace common {
constexpr float SketchContainer::kFactor;
namespace detail {
// Upper bound on the number of sketch cuts needed for one feature column,
// derived from the weighted-quantile-sketch size bound for `max_bins` bins.
// Capped at num_rows: a column can never yield more cuts than it has rows.
// NOTE(review): `::min` (global namespace) is a hipify artifact of
// `std::min` -- relies on the HIP headers providing a global min(); the
// CUDA original uses std::min.
size_t RequiredSampleCutsPerColumn(int max_bins, size_t num_rows) {
  double eps = 1.0 / (WQSketch::kFactor * max_bins);
  size_t dummy_nlevel;
  size_t num_cuts;
  WQuantileSketch<bst_float, bst_float>::LimitSizeLevel(
      num_rows, eps, &dummy_nlevel, &num_cuts);
  return ::min(num_cuts, num_rows);
}
// Total number of sample cuts across all columns, capped by nnz: a sparse
// matrix cannot contribute more sketch samples than it has stored entries.
size_t RequiredSampleCuts(bst_row_t num_rows, bst_feature_t num_columns,
                          size_t max_bins, size_t nnz) {
  auto per_column = RequiredSampleCutsPerColumn(max_bins, num_rows);
  auto if_dense = num_columns * per_column;
  auto result = ::min(nnz, if_dense);
  return result;
}
// Estimates peak device-memory consumption (in bytes) of one sketching
// pass by replaying its allocations/deallocations in order and tracking
// the running maximum. Used to size batches in SketchBatchNumElements.
size_t RequiredMemory(bst_row_t num_rows, bst_feature_t num_columns, size_t nnz,
                      size_t num_bins, bool with_weights) {
  size_t peak = 0;
  // 0. Allocate cut pointer in quantile container by increasing: n_columns + 1
  size_t total = (num_columns + 1) * sizeof(SketchContainer::OffsetT);
  // 1. Copy and sort: 2 * bytes_per_element * shape
  total += BytesPerElement(with_weights) * num_rows * num_columns;
  peak = ::max(peak, total);
  // 2. Deallocate bytes_per_element * shape due to reusing memory in sort.
  total -= BytesPerElement(with_weights) * num_rows * num_columns / 2;
  // 3. Allocate colomn size scan by increasing: n_columns + 1
  total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
  // 4. Allocate cut pointer by increasing: n_columns + 1
  total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
  // 5. Allocate cuts: assuming rows is greater than bins: n_columns * limit_size
  // NOTE(review): num_bins is passed where RequiredSampleCuts expects
  // num_columns -- matches the "n_columns * limit_size" intent only if
  // deliberate upstream; verify against xgboost's hist_util.cu.
  total += RequiredSampleCuts(num_rows, num_bins, num_bins, nnz) * sizeof(SketchEntry);
  // 6. Deallocate copied entries by reducing: bytes_per_element * shape.
  peak = ::max(peak, total);
  total -= (BytesPerElement(with_weights) * num_rows * num_columns) / 2;
  // 7. Deallocate column size scan.
  peak = ::max(peak, total);
  total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
  // 8. Deallocate cut size scan.
  total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
  // 9. Allocate final cut values, min values, cut ptrs: ::min(rows, bins + 1) *
  // n_columns + n_columns + n_columns + 1
  total += ::min(num_rows, num_bins) * num_columns * sizeof(float);
  total += num_columns *
           sizeof(std::remove_reference_t<decltype(
                      std::declval<HistogramCuts>().MinValues())>::value_type);
  total += (num_columns + 1) *
           sizeof(std::remove_reference_t<decltype(
                      std::declval<HistogramCuts>().Ptrs())>::value_type);
  peak = ::max(peak, total);
  return peak;
}
// Chooses how many matrix elements to sketch per batch. A caller-supplied
// non-zero value wins; otherwise the batch is sized so the estimated peak
// memory fits into 80% of the free device memory, falling back to the
// whole matrix when it fits.
size_t SketchBatchNumElements(size_t sketch_batch_num_elements,
                              bst_row_t num_rows, bst_feature_t columns,
                              size_t nnz, int device,
                              size_t num_cuts, bool has_weight) {
#if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
  // device available memory is not accurate when rmm is used.
  return nnz;
#endif  // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
  if (sketch_batch_num_elements == 0) {
    auto required_memory = RequiredMemory(num_rows, columns, nnz, num_cuts, has_weight);
    // use up to 80% of available space
    auto avail = dh::AvailableMemory(device) * 0.8;
    if (required_memory > avail) {
      sketch_batch_num_elements = avail / BytesPerElement(has_weight);
    } else {
      sketch_batch_num_elements = ::min(num_rows * static_cast<size_t>(columns), nnz);
    }
  }
  return sketch_batch_num_elements;
}
// Sorts entries (with their weights kept in lockstep) by EntryCompareOp,
// then replaces each weight by a running per-column prefix sum: the
// inclusive scan is keyed on Entry::index, so it restarts at every
// feature-column boundary.
void SortByWeight(dh::device_vector<float>* weights,
                  dh::device_vector<Entry>* sorted_entries) {
  // Sort both entries and weights.
  dh::XGBDeviceAllocator<char> alloc;
  thrust::sort_by_key(thrust::hip::par(alloc), sorted_entries->begin(),
                      sorted_entries->end(), weights->begin(),
                      detail::EntryCompareOp());
  // Scan weights
  dh::XGBCachingDeviceAllocator<char> caching;
  thrust::inclusive_scan_by_key(thrust::hip::par(caching),
                                sorted_entries->begin(), sorted_entries->end(),
                                weights->begin(), weights->begin(),
                                [=] __device__(const Entry& a, const Entry& b) {
                                  return a.index == b.index;
                                });
}
// Collapses duplicate (feature, category) pairs in the sorted entry list
// for categorical features, then rebuilds the per-column size scan and the
// cut-pointer array in place: categorical columns get one cut per distinct
// category, numerical columns keep their previous cut counts.
// Precondition: feature_types must be non-empty (checked below).
void RemoveDuplicatedCategories(
    int32_t device, MetaInfo const &info, Span<bst_row_t> d_cuts_ptr,
    dh::device_vector<Entry> *p_sorted_entries,
    dh::caching_device_vector<size_t> *p_column_sizes_scan) {
  info.feature_types.SetDevice(device);
  auto d_feature_types = info.feature_types.ConstDeviceSpan();
  CHECK(!d_feature_types.empty());
  auto &column_sizes_scan = *p_column_sizes_scan;
  auto &sorted_entries = *p_sorted_entries;
  // Removing duplicated entries in categorical features.
  dh::caching_device_vector<size_t> new_column_scan(column_sizes_scan.size());
  dh::SegmentedUnique(column_sizes_scan.data().get(),
                      column_sizes_scan.data().get() + column_sizes_scan.size(),
                      sorted_entries.begin(), sorted_entries.end(),
                      new_column_scan.data().get(), sorted_entries.begin(),
                      [=] __device__(Entry const &l, Entry const &r) {
                        // Entries are "equal" (dedupable) only when they sit
                        // in the same categorical column with the same value.
                        if (l.index == r.index) {
                          if (IsCat(d_feature_types, l.index)) {
                            return l.fvalue == r.fvalue;
                          }
                        }
                        return false;
                      });
  // Renew the column scan and cut scan based on categorical data.
  auto d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan);
  dh::caching_device_vector<SketchContainer::OffsetT> new_cuts_size(
      info.num_col_ + 1);
  CHECK_EQ(new_column_scan.size(), new_cuts_size.size())
  dh::LaunchN(
      new_column_scan.size(),
      [=, d_new_cuts_size = dh::ToSpan(new_cuts_size),
       d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan),
       d_new_columns_ptr = dh::ToSpan(new_column_scan)] __device__(size_t idx) {
        d_old_column_sizes_scan[idx] = d_new_columns_ptr[idx];
        if (idx == d_new_columns_ptr.size() - 1) {
          return;
        }
        if (IsCat(d_feature_types, idx)) {
          // Cut size is the same as number of categories in input.
          d_new_cuts_size[idx] =
              d_new_columns_ptr[idx + 1] - d_new_columns_ptr[idx];
        } else {
          d_new_cuts_size[idx] = d_cuts_ptr[idx + 1] - d_cuts_ptr[idx];
        }
      });
  // Turn size into ptr.
  thrust::exclusive_scan(thrust::device, new_cuts_size.cbegin(),
                         new_cuts_size.cend(), d_cuts_ptr.data());
}
} // namespace detail
// Sketches one unweighted slice [begin, end) of a SparsePage: copies the
// entries to device (directly from device memory if already resident),
// sorts them per column, computes per-column sizes and cut pointers,
// de-duplicates categorical values if present, and pushes the result into
// the sketch container. Frees the working buffer before returning.
void ProcessBatch(int device, MetaInfo const &info, const SparsePage &page,
                  size_t begin, size_t end, SketchContainer *sketch_container,
                  int num_cuts_per_feature, size_t num_columns) {
  dh::XGBCachingDeviceAllocator<char> alloc;
  dh::device_vector<Entry> sorted_entries;
  if (page.data.DeviceCanRead()) {
    // Already on device: device-to-device copy avoids a host round trip.
    const auto& device_data = page.data.ConstDevicePointer();
    sorted_entries = dh::device_vector<Entry>(device_data + begin, device_data + end);
  } else {
    const auto& host_data = page.data.ConstHostVector();
    sorted_entries = dh::device_vector<Entry>(host_data.begin() + begin,
                                              host_data.begin() + end);
  }
  thrust::sort(thrust::hip::par(alloc), sorted_entries.begin(),
               sorted_entries.end(), detail::EntryCompareOp());
  HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
  dh::caching_device_vector<size_t> column_sizes_scan;
  data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
  auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
      sorted_entries.data().get(),
      [] __device__(Entry const &e) -> data::COOTuple {
        return {0, e.index, e.fvalue};  // row_idx is not needed for scanning column size.
      });
  detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
                             batch_it, dummy_is_valid,
                             0, sorted_entries.size(),
                             &cuts_ptr, &column_sizes_scan);
  auto d_cuts_ptr = cuts_ptr.DeviceSpan();
  if (sketch_container->HasCategorical()) {
    detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr,
                                       &sorted_entries, &column_sizes_scan);
  }
  auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
  CHECK_EQ(d_cuts_ptr.size(), column_sizes_scan.size());
  // add cuts into sketches
  sketch_container->Push(dh::ToSpan(sorted_entries), dh::ToSpan(column_sizes_scan),
                         d_cuts_ptr, h_cuts_ptr.back());
  // Release the working buffer eagerly to lower peak memory.
  sorted_entries.clear();
  sorted_entries.shrink_to_fit();
  CHECK_EQ(sorted_entries.capacity(), 0);
  CHECK_NE(cuts_ptr.Size(), 0);
}
// Weighted counterpart of ProcessBatch: additionally assigns a weight to
// every matrix element -- the owning row's weight, or the owning group's
// weight when `is_ranking` -- before sorting/scanning and pushing into the
// sketch container with those weights.
void ProcessWeightedBatch(int device, const SparsePage& page,
                          MetaInfo const& info, size_t begin, size_t end,
                          SketchContainer* sketch_container, int num_cuts_per_feature,
                          size_t num_columns,
                          bool is_ranking, Span<bst_group_t const> d_group_ptr) {
  auto weights = info.weights_.ConstDeviceSpan();
  dh::XGBCachingDeviceAllocator<char> alloc;
  const auto& host_data = page.data.ConstHostVector();
  dh::device_vector<Entry> sorted_entries(host_data.begin() + begin,
                                          host_data.begin() + end);
  // Binary search to assign weights to each element
  dh::device_vector<float> temp_weights(sorted_entries.size());
  auto d_temp_weights = temp_weights.data().get();
  page.offset.SetDevice(device);
  auto row_ptrs = page.offset.ConstDeviceSpan();
  size_t base_rowid = page.base_rowid;
  if (is_ranking) {
    CHECK_GE(d_group_ptr.size(), 2)
        << "Must have at least 1 group for ranking.";
    CHECK_EQ(weights.size(), d_group_ptr.size() - 1)
        << "Weight size should equal to number of groups.";
    // element -> row (via row_ptrs) -> group (via d_group_ptr) -> weight.
    dh::LaunchN(temp_weights.size(), [=] __device__(size_t idx) {
      size_t element_idx = idx + begin;
      size_t ridx = dh::SegmentId(row_ptrs, element_idx);
      bst_group_t group_idx = dh::SegmentId(d_group_ptr, ridx + base_rowid);
      d_temp_weights[idx] = weights[group_idx];
    });
  } else {
    // element -> row (via row_ptrs) -> per-row weight.
    dh::LaunchN(temp_weights.size(), [=] __device__(size_t idx) {
      size_t element_idx = idx + begin;
      size_t ridx = dh::SegmentId(row_ptrs, element_idx);
      d_temp_weights[idx] = weights[ridx + base_rowid];
    });
  }
  detail::SortByWeight(&temp_weights, &sorted_entries);
  HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
  dh::caching_device_vector<size_t> column_sizes_scan;
  data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
  auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
      sorted_entries.data().get(),
      [] __device__(Entry const &e) -> data::COOTuple {
        return {0, e.index, e.fvalue};  // row_idx is not needed for scanning column size.
      });
  detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
                             batch_it, dummy_is_valid,
                             0, sorted_entries.size(),
                             &cuts_ptr, &column_sizes_scan);
  auto d_cuts_ptr = cuts_ptr.DeviceSpan();
  if (sketch_container->HasCategorical()) {
    detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr,
                                       &sorted_entries, &column_sizes_scan);
  }
  auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
  // Extract cuts
  sketch_container->Push(dh::ToSpan(sorted_entries),
                         dh::ToSpan(column_sizes_scan), d_cuts_ptr,
                         h_cuts_ptr.back(), dh::ToSpan(temp_weights));
  // Release the working buffer eagerly to lower peak memory.
  sorted_entries.clear();
  sorted_entries.shrink_to_fit();
}
// Computes histogram cuts for a DMatrix on the GPU. Sizes each sketching
// batch from available memory (unless sketch_batch_num_elements is given),
// streams SparsePage batches through the weighted or unweighted path, and
// finalizes the sketches into a HistogramCuts object.
HistogramCuts DeviceSketch(int device, DMatrix* dmat, int max_bins,
                           size_t sketch_batch_num_elements) {
  dmat->Info().feature_types.SetDevice(device);
  dmat->Info().feature_types.ConstDevicePointer();  // pull to device early
  // Configure batch size based on available memory
  bool has_weights = dmat->Info().weights_.Size() > 0;
  size_t num_cuts_per_feature =
      detail::RequiredSampleCutsPerColumn(max_bins, dmat->Info().num_row_);
  sketch_batch_num_elements = detail::SketchBatchNumElements(
      sketch_batch_num_elements,
      dmat->Info().num_row_,
      dmat->Info().num_col_,
      dmat->Info().num_nonzero_,
      device, num_cuts_per_feature, has_weights);
  HistogramCuts cuts;
  SketchContainer sketch_container(dmat->Info().feature_types, max_bins, dmat->Info().num_col_,
                                   dmat->Info().num_row_, device);
  dmat->Info().weights_.SetDevice(device);
  for (const auto& batch : dmat->GetBatches<SparsePage>()) {
    size_t batch_nnz = batch.data.Size();
    auto const& info = dmat->Info();
    // Slice each page into memory-bounded chunks.
    for (auto begin = 0ull; begin < batch_nnz; begin += sketch_batch_num_elements) {
      size_t end = ::min(batch_nnz, static_cast<std::size_t>(begin + sketch_batch_num_elements));
      if (has_weights) {
        bool is_ranking = HostSketchContainer::UseGroup(dmat->Info());
        dh::caching_device_vector<uint32_t> groups(info.group_ptr_.cbegin(),
                                                   info.group_ptr_.cend());
        ProcessWeightedBatch(
            device, batch, dmat->Info(), begin, end,
            &sketch_container,
            num_cuts_per_feature,
            dmat->Info().num_col_,
            is_ranking, dh::ToSpan(groups));
      } else {
        ProcessBatch(device, dmat->Info(), batch, begin, end, &sketch_container,
                     num_cuts_per_feature, dmat->Info().num_col_);
      }
    }
  }
  sketch_container.MakeCuts(&cuts);
  return cuts;
}
} // namespace common
} // namespace xgboost
| d93e9968034686c94b53b616fc4cc739409ed679.cu | /**
* Copyright 2018~2023 by XGBoost contributors
*/
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <xgboost/logging.h>
#include <cstddef> // for size_t
#include <memory>
#include <mutex>
#include <utility>
#include <vector>
#include "categorical.h"
#include "device_helpers.cuh"
#include "hist_util.cuh"
#include "hist_util.h"
#include "math.h" // NOLINT
#include "quantile.h"
#include "xgboost/host_device_vector.h"
namespace xgboost {
namespace common {
constexpr float SketchContainer::kFactor;
namespace detail {
size_t RequiredSampleCutsPerColumn(int max_bins, size_t num_rows) {
double eps = 1.0 / (WQSketch::kFactor * max_bins);
size_t dummy_nlevel;
size_t num_cuts;
WQuantileSketch<bst_float, bst_float>::LimitSizeLevel(
num_rows, eps, &dummy_nlevel, &num_cuts);
return std::min(num_cuts, num_rows);
}
size_t RequiredSampleCuts(bst_row_t num_rows, bst_feature_t num_columns,
size_t max_bins, size_t nnz) {
auto per_column = RequiredSampleCutsPerColumn(max_bins, num_rows);
auto if_dense = num_columns * per_column;
auto result = std::min(nnz, if_dense);
return result;
}
size_t RequiredMemory(bst_row_t num_rows, bst_feature_t num_columns, size_t nnz,
size_t num_bins, bool with_weights) {
size_t peak = 0;
// 0. Allocate cut pointer in quantile container by increasing: n_columns + 1
size_t total = (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 1. Copy and sort: 2 * bytes_per_element * shape
total += BytesPerElement(with_weights) * num_rows * num_columns;
peak = std::max(peak, total);
// 2. Deallocate bytes_per_element * shape due to reusing memory in sort.
total -= BytesPerElement(with_weights) * num_rows * num_columns / 2;
// 3. Allocate colomn size scan by increasing: n_columns + 1
total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 4. Allocate cut pointer by increasing: n_columns + 1
total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 5. Allocate cuts: assuming rows is greater than bins: n_columns * limit_size
total += RequiredSampleCuts(num_rows, num_bins, num_bins, nnz) * sizeof(SketchEntry);
// 6. Deallocate copied entries by reducing: bytes_per_element * shape.
peak = std::max(peak, total);
total -= (BytesPerElement(with_weights) * num_rows * num_columns) / 2;
// 7. Deallocate column size scan.
peak = std::max(peak, total);
total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 8. Deallocate cut size scan.
total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 9. Allocate final cut values, min values, cut ptrs: std::min(rows, bins + 1) *
// n_columns + n_columns + n_columns + 1
total += std::min(num_rows, num_bins) * num_columns * sizeof(float);
total += num_columns *
sizeof(std::remove_reference_t<decltype(
std::declval<HistogramCuts>().MinValues())>::value_type);
total += (num_columns + 1) *
sizeof(std::remove_reference_t<decltype(
std::declval<HistogramCuts>().Ptrs())>::value_type);
peak = std::max(peak, total);
return peak;
}
size_t SketchBatchNumElements(size_t sketch_batch_num_elements,
bst_row_t num_rows, bst_feature_t columns,
size_t nnz, int device,
size_t num_cuts, bool has_weight) {
#if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
// device available memory is not accurate when rmm is used.
return nnz;
#endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
if (sketch_batch_num_elements == 0) {
auto required_memory = RequiredMemory(num_rows, columns, nnz, num_cuts, has_weight);
// use up to 80% of available space
auto avail = dh::AvailableMemory(device) * 0.8;
if (required_memory > avail) {
sketch_batch_num_elements = avail / BytesPerElement(has_weight);
} else {
sketch_batch_num_elements = std::min(num_rows * static_cast<size_t>(columns), nnz);
}
}
return sketch_batch_num_elements;
}
void SortByWeight(dh::device_vector<float>* weights,
dh::device_vector<Entry>* sorted_entries) {
// Sort both entries and wegihts.
dh::XGBDeviceAllocator<char> alloc;
thrust::sort_by_key(thrust::cuda::par(alloc), sorted_entries->begin(),
sorted_entries->end(), weights->begin(),
detail::EntryCompareOp());
// Scan weights
dh::XGBCachingDeviceAllocator<char> caching;
thrust::inclusive_scan_by_key(thrust::cuda::par(caching),
sorted_entries->begin(), sorted_entries->end(),
weights->begin(), weights->begin(),
[=] __device__(const Entry& a, const Entry& b) {
return a.index == b.index;
});
}
void RemoveDuplicatedCategories(
int32_t device, MetaInfo const &info, Span<bst_row_t> d_cuts_ptr,
dh::device_vector<Entry> *p_sorted_entries,
dh::caching_device_vector<size_t> *p_column_sizes_scan) {
info.feature_types.SetDevice(device);
auto d_feature_types = info.feature_types.ConstDeviceSpan();
CHECK(!d_feature_types.empty());
auto &column_sizes_scan = *p_column_sizes_scan;
auto &sorted_entries = *p_sorted_entries;
// Removing duplicated entries in categorical features.
dh::caching_device_vector<size_t> new_column_scan(column_sizes_scan.size());
dh::SegmentedUnique(column_sizes_scan.data().get(),
column_sizes_scan.data().get() + column_sizes_scan.size(),
sorted_entries.begin(), sorted_entries.end(),
new_column_scan.data().get(), sorted_entries.begin(),
[=] __device__(Entry const &l, Entry const &r) {
if (l.index == r.index) {
if (IsCat(d_feature_types, l.index)) {
return l.fvalue == r.fvalue;
}
}
return false;
});
// Renew the column scan and cut scan based on categorical data.
auto d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan);
dh::caching_device_vector<SketchContainer::OffsetT> new_cuts_size(
info.num_col_ + 1);
CHECK_EQ(new_column_scan.size(), new_cuts_size.size());
dh::LaunchN(
new_column_scan.size(),
[=, d_new_cuts_size = dh::ToSpan(new_cuts_size),
d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan),
d_new_columns_ptr = dh::ToSpan(new_column_scan)] __device__(size_t idx) {
d_old_column_sizes_scan[idx] = d_new_columns_ptr[idx];
if (idx == d_new_columns_ptr.size() - 1) {
return;
}
if (IsCat(d_feature_types, idx)) {
// Cut size is the same as number of categories in input.
d_new_cuts_size[idx] =
d_new_columns_ptr[idx + 1] - d_new_columns_ptr[idx];
} else {
d_new_cuts_size[idx] = d_cuts_ptr[idx + 1] - d_cuts_ptr[idx];
}
});
// Turn size into ptr.
thrust::exclusive_scan(thrust::device, new_cuts_size.cbegin(),
new_cuts_size.cend(), d_cuts_ptr.data());
}
} // namespace detail
void ProcessBatch(int device, MetaInfo const &info, const SparsePage &page,
size_t begin, size_t end, SketchContainer *sketch_container,
int num_cuts_per_feature, size_t num_columns) {
dh::XGBCachingDeviceAllocator<char> alloc;
dh::device_vector<Entry> sorted_entries;
if (page.data.DeviceCanRead()) {
const auto& device_data = page.data.ConstDevicePointer();
sorted_entries = dh::device_vector<Entry>(device_data + begin, device_data + end);
} else {
const auto& host_data = page.data.ConstHostVector();
sorted_entries = dh::device_vector<Entry>(host_data.begin() + begin,
host_data.begin() + end);
}
thrust::sort(thrust::cuda::par(alloc), sorted_entries.begin(),
sorted_entries.end(), detail::EntryCompareOp());
HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
dh::caching_device_vector<size_t> column_sizes_scan;
data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
sorted_entries.data().get(),
[] __device__(Entry const &e) -> data::COOTuple {
return {0, e.index, e.fvalue}; // row_idx is not needed for scanning column size.
});
detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
batch_it, dummy_is_valid,
0, sorted_entries.size(),
&cuts_ptr, &column_sizes_scan);
auto d_cuts_ptr = cuts_ptr.DeviceSpan();
if (sketch_container->HasCategorical()) {
detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr,
&sorted_entries, &column_sizes_scan);
}
auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
CHECK_EQ(d_cuts_ptr.size(), column_sizes_scan.size());
// add cuts into sketches
sketch_container->Push(dh::ToSpan(sorted_entries), dh::ToSpan(column_sizes_scan),
d_cuts_ptr, h_cuts_ptr.back());
sorted_entries.clear();
sorted_entries.shrink_to_fit();
CHECK_EQ(sorted_entries.capacity(), 0);
CHECK_NE(cuts_ptr.Size(), 0);
}
void ProcessWeightedBatch(int device, const SparsePage& page,
MetaInfo const& info, size_t begin, size_t end,
SketchContainer* sketch_container, int num_cuts_per_feature,
size_t num_columns,
bool is_ranking, Span<bst_group_t const> d_group_ptr) {
auto weights = info.weights_.ConstDeviceSpan();
dh::XGBCachingDeviceAllocator<char> alloc;
const auto& host_data = page.data.ConstHostVector();
dh::device_vector<Entry> sorted_entries(host_data.begin() + begin,
host_data.begin() + end);
// Binary search to assign weights to each element
dh::device_vector<float> temp_weights(sorted_entries.size());
auto d_temp_weights = temp_weights.data().get();
page.offset.SetDevice(device);
auto row_ptrs = page.offset.ConstDeviceSpan();
size_t base_rowid = page.base_rowid;
if (is_ranking) {
CHECK_GE(d_group_ptr.size(), 2)
<< "Must have at least 1 group for ranking.";
CHECK_EQ(weights.size(), d_group_ptr.size() - 1)
<< "Weight size should equal to number of groups.";
dh::LaunchN(temp_weights.size(), [=] __device__(size_t idx) {
size_t element_idx = idx + begin;
size_t ridx = dh::SegmentId(row_ptrs, element_idx);
bst_group_t group_idx = dh::SegmentId(d_group_ptr, ridx + base_rowid);
d_temp_weights[idx] = weights[group_idx];
});
} else {
dh::LaunchN(temp_weights.size(), [=] __device__(size_t idx) {
size_t element_idx = idx + begin;
size_t ridx = dh::SegmentId(row_ptrs, element_idx);
d_temp_weights[idx] = weights[ridx + base_rowid];
});
}
detail::SortByWeight(&temp_weights, &sorted_entries);
HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
dh::caching_device_vector<size_t> column_sizes_scan;
data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
sorted_entries.data().get(),
[] __device__(Entry const &e) -> data::COOTuple {
return {0, e.index, e.fvalue}; // row_idx is not needed for scaning column size.
});
detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
batch_it, dummy_is_valid,
0, sorted_entries.size(),
&cuts_ptr, &column_sizes_scan);
auto d_cuts_ptr = cuts_ptr.DeviceSpan();
if (sketch_container->HasCategorical()) {
detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr,
&sorted_entries, &column_sizes_scan);
}
auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
// Extract cuts
sketch_container->Push(dh::ToSpan(sorted_entries),
dh::ToSpan(column_sizes_scan), d_cuts_ptr,
h_cuts_ptr.back(), dh::ToSpan(temp_weights));
sorted_entries.clear();
sorted_entries.shrink_to_fit();
}
HistogramCuts DeviceSketch(int device, DMatrix* dmat, int max_bins,
size_t sketch_batch_num_elements) {
dmat->Info().feature_types.SetDevice(device);
dmat->Info().feature_types.ConstDevicePointer(); // pull to device early
// Configure batch size based on available memory
bool has_weights = dmat->Info().weights_.Size() > 0;
size_t num_cuts_per_feature =
detail::RequiredSampleCutsPerColumn(max_bins, dmat->Info().num_row_);
sketch_batch_num_elements = detail::SketchBatchNumElements(
sketch_batch_num_elements,
dmat->Info().num_row_,
dmat->Info().num_col_,
dmat->Info().num_nonzero_,
device, num_cuts_per_feature, has_weights);
HistogramCuts cuts;
SketchContainer sketch_container(dmat->Info().feature_types, max_bins, dmat->Info().num_col_,
dmat->Info().num_row_, device);
dmat->Info().weights_.SetDevice(device);
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
size_t batch_nnz = batch.data.Size();
auto const& info = dmat->Info();
for (auto begin = 0ull; begin < batch_nnz; begin += sketch_batch_num_elements) {
size_t end = std::min(batch_nnz, static_cast<std::size_t>(begin + sketch_batch_num_elements));
if (has_weights) {
bool is_ranking = HostSketchContainer::UseGroup(dmat->Info());
dh::caching_device_vector<uint32_t> groups(info.group_ptr_.cbegin(),
info.group_ptr_.cend());
ProcessWeightedBatch(
device, batch, dmat->Info(), begin, end,
&sketch_container,
num_cuts_per_feature,
dmat->Info().num_col_,
is_ranking, dh::ToSpan(groups));
} else {
ProcessBatch(device, dmat->Info(), batch, begin, end, &sketch_container,
num_cuts_per_feature, dmat->Info().num_col_);
}
}
}
sketch_container.MakeCuts(&cuts);
return cuts;
}
} // namespace common
} // namespace xgboost
|
6fff548fa6d9a5399a44680db3781bf93d9eb5ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****************************************************************************
*
* cuda-reduce0.cu - Reduction with CUDA
*
* Written in 2017 by Moreno Marzolla <moreno.marzolla(at)unibo.it>
*
* To the extent possible under law, the author(s) have dedicated all
* copyright and related and neighboring rights to this software to the
* public domain worldwide. This software is distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication
* along with this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*
* ---------------------------------------------------------------------------
*
* This program realizes a simple sum-reduction on the GPU. This
* implementation is not the most efficient one: each thread block
* copies a portion of the array in shared memory; thread 0 of each
* block computes a partial sum of the local data. The final reduction
* must then be completed on the CPU.
*
* Compile with:
* nvcc cuda-reduce0.cu -o cuda-reduce0
*
* Run with:
* ./cuda-reduce0
*
****************************************************************************/
#include <stdio.h>
#include <assert.h>
#define BLKSIZE 512
#define N_OF_BLOCKS 1024
/* N must be an integer multiple of BLKSIZE */
#define N ((N_OF_BLOCKS)*(BLKSIZE))
/* d_sums is an array of N_OF_BLOCKS integers that reside in device
memory; therefore, there is no need to hipMalloc'ate it */
__device__ int d_sums[N_OF_BLOCKS];
int h_sums[N_OF_BLOCKS];
/* This kernel copies a portion of array a[] of n elements into
thread-local shared memory. Thread 0 computes the sum of the local
data, and stores the computed value on the appropriate entry of
d_sums[]. Different thread blocks access different elements of
d_sums[], so no race condition is possible. */
/* Sum-reduction kernel: each thread block copies its slice of a[] into
   shared memory; thread 0 of the block then serially accumulates the
   slice and stores the partial sum into d_sums[blockIdx.x]. Different
   blocks write different entries of d_sums[], so no race is possible.
   Generalized: elements past the end of the array (gindex >= n) now
   contribute 0, so n no longer has to be a multiple of the block size. */
__global__ void sum( int *a, int n )
{
    __shared__ int temp[BLKSIZE];
    int lindex = threadIdx.x; /* local idx (index of the thread within the block) */
    int bindex = blockIdx.x;  /* block idx (index of the block within the grid) */
    int gindex = threadIdx.x + blockIdx.x * blockDim.x; /* global idx (index of the array element handled by this thread) */
    /* zero-pad past the end of the array so the serial sum below stays correct */
    temp[lindex] = (gindex < n) ? a[gindex] : 0;
    __syncthreads(); /* wait for all threads to finish the copy operation */
    /* only thread 0 computes the local sum */
    if ( 0 == lindex ) {
        int i, my_sum = 0;
        for (i=0; i<blockDim.x; i++) {
            my_sum += temp[i];
        }
        d_sums[bindex] = my_sum;
    }
}
/* Check a HIP runtime call; on failure print the error and leave main()
   with a failure status. Only valid inside a function returning int. */
#define HIP_CHECK(call)                                                  \
    do {                                                                 \
        const hipError_t err_ = (call);                                  \
        if (err_ != hipSuccess) {                                        \
            fprintf(stderr, "HIP error %s:%d: %s\n", __FILE__, __LINE__, \
                    hipGetErrorString(err_));                            \
            return -1;                                                   \
        }                                                                \
    } while (0)

int main( void )
{
    int *h_a;
    int *d_a;
    int i, s=0;

    assert( 0 == N % BLKSIZE );

    /* Allocate space for the device copy of the array */
    HIP_CHECK( hipMalloc((void **)&d_a, N*sizeof(int)) );

    /* Allocate space for host copy of the array */
    h_a = (int*)malloc(N * sizeof(int));
    if ( NULL == h_a ) {
        fprintf(stderr, "malloc of %lu bytes failed\n",
                (unsigned long)(N * sizeof(int)));
        return -1;
    }

    /* Set all elements of vector h_a to 2, so that we know that the
       result of the sum must be 2*N */
    for (i=0; i<N; i++) {
        h_a[i] = 2;
    }

    /* Copy inputs to device */
    HIP_CHECK( hipMemcpy(d_a, h_a, N*sizeof(int), hipMemcpyHostToDevice) );

    /* Launch sum() kernel on the GPU */
    hipLaunchKernelGGL(( sum), dim3(N_OF_BLOCKS), dim3(BLKSIZE), 0, 0, d_a, N);
    HIP_CHECK( hipGetLastError() ); /* catch launch-configuration errors */

    /* Copy the d_sums[] array from device memory to host memory h_sums[];
       this blocking copy also surfaces asynchronous kernel errors. */
    HIP_CHECK( hipMemcpyFromSymbol(h_sums, d_sums, N_OF_BLOCKS*sizeof(h_sums[0])) );

    /* Perform the final reduction on the CPU */
    s = 0;
    for (i=0; i<N_OF_BLOCKS; i++) {
        s += h_sums[i];
    }

    /* Check result */
    if ( s != 2*N ) {
        printf("Check failed: Expected %d, got %d\n", 2*N, s);
        return -1;
    }
    printf("Check OK: computed sum = %d\n", s);

    /* Cleanup */
    free(h_a);
    HIP_CHECK( hipFree(d_a) );
    return 0;
}
| 6fff548fa6d9a5399a44680db3781bf93d9eb5ab.cu | /****************************************************************************
*
* cuda-reduce0.cu - Reduction with CUDA
*
* Written in 2017 by Moreno Marzolla <moreno.marzolla(at)unibo.it>
*
* To the extent possible under law, the author(s) have dedicated all
* copyright and related and neighboring rights to this software to the
* public domain worldwide. This software is distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication
* along with this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*
* ---------------------------------------------------------------------------
*
* This program realizes a simple sum-reduction on the GPU. This
* implementation is not the most efficient one: each thread block
* copies a portion of the array in shared memory; thread 0 of each
* block computes a partial sum of the local data. The final reduction
* must then be completed on the CPU.
*
* Compile with:
* nvcc cuda-reduce0.cu -o cuda-reduce0
*
* Run with:
* ./cuda-reduce0
*
****************************************************************************/
#include <stdio.h>
#include <assert.h>
#define BLKSIZE 512
#define N_OF_BLOCKS 1024
/* N must be an integer multiple of BLKSIZE */
#define N ((N_OF_BLOCKS)*(BLKSIZE))
/* d_sums is an array of N_OF_BLOCKS integers that reside in device
memory; therefore, there is no need to cudaMalloc'ate it */
__device__ int d_sums[N_OF_BLOCKS];
int h_sums[N_OF_BLOCKS];
/* This kernel copies a portion of array a[] of n elements into
thread-local shared memory. Thread 0 computes the sum of the local
data, and stores the computed value on the appropriate entry of
d_sums[]. Different thread blocks access different elements of
d_sums[], so no race condition is possible. */
/* Sum-reduction kernel: each thread block copies its slice of a[] into
   shared memory; thread 0 of the block then serially accumulates the
   slice and stores the partial sum into d_sums[blockIdx.x]. Different
   blocks write different entries of d_sums[], so no race is possible.
   Generalized: elements past the end of the array (gindex >= n) now
   contribute 0, so n no longer has to be a multiple of the block size. */
__global__ void sum( int *a, int n )
{
    __shared__ int temp[BLKSIZE];
    int lindex = threadIdx.x; /* local idx (index of the thread within the block) */
    int bindex = blockIdx.x;  /* block idx (index of the block within the grid) */
    int gindex = threadIdx.x + blockIdx.x * blockDim.x; /* global idx (index of the array element handled by this thread) */
    /* zero-pad past the end of the array so the serial sum below stays correct */
    temp[lindex] = (gindex < n) ? a[gindex] : 0;
    __syncthreads(); /* wait for all threads to finish the copy operation */
    /* only thread 0 computes the local sum */
    if ( 0 == lindex ) {
        int i, my_sum = 0;
        for (i=0; i<blockDim.x; i++) {
            my_sum += temp[i];
        }
        d_sums[bindex] = my_sum;
    }
}
/* Check a CUDA runtime call; on failure print the error and leave main()
   with a failure status. Only valid inside a function returning int. */
#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        const cudaError_t err_ = (call);                                  \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
                    cudaGetErrorString(err_));                            \
            return -1;                                                    \
        }                                                                 \
    } while (0)

int main( void )
{
    int *h_a;
    int *d_a;
    int i, s=0;

    assert( 0 == N % BLKSIZE );

    /* Allocate space for the device copy of the array */
    CUDA_CHECK( cudaMalloc((void **)&d_a, N*sizeof(int)) );

    /* Allocate space for host copy of the array */
    h_a = (int*)malloc(N * sizeof(int));
    if ( NULL == h_a ) {
        fprintf(stderr, "malloc of %lu bytes failed\n",
                (unsigned long)(N * sizeof(int)));
        return -1;
    }

    /* Set all elements of vector h_a to 2, so that we know that the
       result of the sum must be 2*N */
    for (i=0; i<N; i++) {
        h_a[i] = 2;
    }

    /* Copy inputs to device */
    CUDA_CHECK( cudaMemcpy(d_a, h_a, N*sizeof(int), cudaMemcpyHostToDevice) );

    /* Launch sum() kernel on the GPU */
    sum<<<N_OF_BLOCKS, BLKSIZE>>>(d_a, N);
    CUDA_CHECK( cudaGetLastError() ); /* catch launch-configuration errors */

    /* Copy the d_sums[] array from device memory to host memory h_sums[];
       this blocking copy also surfaces asynchronous kernel errors. */
    CUDA_CHECK( cudaMemcpyFromSymbol(h_sums, d_sums, N_OF_BLOCKS*sizeof(h_sums[0])) );

    /* Perform the final reduction on the CPU */
    s = 0;
    for (i=0; i<N_OF_BLOCKS; i++) {
        s += h_sums[i];
    }

    /* Check result */
    if ( s != 2*N ) {
        printf("Check failed: Expected %d, got %d\n", 2*N, s);
        return -1;
    }
    printf("Check OK: computed sum = %d\n", s);

    /* Cleanup */
    free(h_a);
    CUDA_CHECK( cudaFree(d_a) );
    return 0;
}
|
b01ef68f0a03d37045af07f6076fb3228288aff7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2022 Huawei Technologies Co., Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ============================================================================
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "./interpolate_gpu.h"
#include "./cuda_utils.h"
// For each "unknown" query point, brute-force scan the m "known" points of
// the same batch element and record the three smallest squared Euclidean
// distances, in ascending order, together with the indices of those points.
// Launch: grid (ceil(n / blockDim.x), b); one thread per query point.
__global__ void three_nn_kernel_fast(int b, int n, int m,
                                     const float* __restrict__ unknown,
                                     const float* __restrict__ known,
                                     float* __restrict__ dist2,
                                     int* __restrict__ idx) {
  // unknown: (B, N, 3)
  // known: (B, M, 3)
  // output:
  //      dist2: (B, N, 3)
  //      idx: (B, N, 3)

  int bs_idx = blockIdx.y;                             // batch element
  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;  // query point
  if (bs_idx >= b || pt_idx >= n) return;

  // Rebase the pointers onto this thread's (batch, point) slot.
  unknown += bs_idx * n * 3 + pt_idx * 3;
  known += bs_idx * m * 3;
  dist2 += bs_idx * n * 3 + pt_idx * 3;
  idx += bs_idx * n * 3 + pt_idx * 3;

  float ux = unknown[0];
  float uy = unknown[1];
  float uz = unknown[2];

  // 1e40 sentinels (kept as double, since 1e40 overflows float) guarantee the
  // first real candidates displace them. Invariant: best1 <= best2 <= best3.
  double best1 = 1e40, best2 = 1e40, best3 = 1e40;
  int besti1 = 0, besti2 = 0, besti3 = 0;
  for (int k = 0; k < m; ++k) {
    float x = known[k * 3 + 0];
    float y = known[k * 3 + 1];
    float z = known[k * 3 + 2];
    float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
    if (d < best1) {
      // New overall minimum: shift the previous top two down one slot.
      best3 = best2;
      besti3 = besti2;
      best2 = best1;
      besti2 = besti1;
      best1 = d;
      besti1 = k;
    } else if (d < best2) {
      best3 = best2;
      besti3 = besti2;
      best2 = d;
      besti2 = k;
    } else if (d < best3) {
      best3 = d;
      besti3 = k;
    }
  }
  // Store the squared distances (narrowed double -> float) and indices.
  dist2[0] = best1;
  dist2[1] = best2;
  dist2[2] = best3;
  idx[0] = besti1;
  idx[1] = besti2;
  idx[2] = besti3;
}
// Host launcher for three_nn_kernel_fast.
// unknown: (B, N, 3)   known: (B, M, 3)
// output:  dist2: (B, N, 3)   idx: (B, N, 3)
// One thread per query point; grid is (ceil(n / THREADS_PER_BLOCK), b).
void three_nn_kernel_launcher_fast(int b, int n, int m, const float* unknown,
                                   const float* known, float* dist2, int* idx,
                                   hipStream_t stream) {
  hipError_t err;
  dim3 blocks(DIVUP(n, THREADS_PER_BLOCK),
              b);  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK);

  hipLaunchKernelGGL(( three_nn_kernel_fast), dim3(blocks), dim3(threads), 0, stream, b, n, m, unknown, known,
                                                       dist2, idx);

  err = hipGetLastError();
  if (hipSuccess != err) {
    // Fix: report what went wrong instead of exiting silently.
    fprintf(stderr, "HIP kernel failed : %s\n", hipGetErrorString(err));
    exit(-1);
  }
}
// Interpolate each output point as the weighted sum of its three nearest
// source points (indices/weights as produced by the three-NN step).
//   points: (B, C, M)   idx: (B, N, 3)   weight: (B, N, 3)
//   out:    (B, C, N)
// Launch: grid (ceil(n / blockDim.x), C, B); one thread per (batch, channel, point).
__global__ void three_interpolate_kernel_fast(int b, int c, int m, int n,
                                              const float* __restrict__ points,
                                              const int* __restrict__ idx,
                                              const float* __restrict__ weight,
                                              float* __restrict__ out) {
  const int batch = blockIdx.z;
  const int channel = blockIdx.y;
  const int pt = blockIdx.x * blockDim.x + threadIdx.x;
  if (batch >= b || channel >= c || pt >= n) return;

  const int triple = batch * n * 3 + pt * 3;    // offset of this point's 3 neighbours
  const int row = batch * c * m + channel * m;  // start of this (batch, channel) row in points
  const float* w = weight + triple;
  const int* nb = idx + triple;

  out[batch * c * n + channel * n + pt] =
      w[0] * points[row + nb[0]] + w[1] * points[row + nb[1]] +
      w[2] * points[row + nb[2]];
}
// Host launcher for three_interpolate_kernel_fast.
// points: (B, C, M)   idx: (B, N, 3)   weight: (B, N, 3)
// output: out: (B, C, N)
void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n,
                                            const float* points, const int* idx,
                                            const float* weight, float* out,
                                            hipStream_t stream) {
  hipError_t err;
  dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c,
              b);  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK);

  hipLaunchKernelGGL(( three_interpolate_kernel_fast), dim3(blocks), dim3(threads), 0, stream,
      b, c, m, n, points, idx, weight, out);

  err = hipGetLastError();
  if (hipSuccess != err) {
    // Fix: report what went wrong instead of exiting silently.
    fprintf(stderr, "HIP kernel failed : %s\n", hipGetErrorString(err));
    exit(-1);
  }
}
// Backward pass of three-point interpolation: scatter each output gradient
// back onto the three source points it was interpolated from, scaled by the
// same weights used in the forward pass.
//   grad_out: (B, C, N)   idx: (B, N, 3)   weight: (B, N, 3)
//   grad_points: (B, C, M), accumulated in place
__global__ void three_interpolate_grad_kernel_fast(
    int b, int c, int n, int m, const float* __restrict__ grad_out,
    const int* __restrict__ idx, const float* __restrict__ weight,
    float* __restrict__ grad_points) {
  const int batch = blockIdx.z;
  const int channel = blockIdx.y;
  const int pt = blockIdx.x * blockDim.x + threadIdx.x;
  if (batch >= b || channel >= c || pt >= n) return;

  const float g = grad_out[batch * c * n + channel * n + pt];
  const int triple = batch * n * 3 + pt * 3;
  const float* w = weight + triple;
  const int* nb = idx + triple;
  float* dst = grad_points + batch * c * m + channel * m;

  // Several output points may share a neighbour, hence the atomic adds.
  atomicAdd(dst + nb[0], g * w[0]);
  atomicAdd(dst + nb[1], g * w[1]);
  atomicAdd(dst + nb[2], g * w[2]);
}
// Host launcher for three_interpolate_grad_kernel_fast.
// grad_out: (B, C, N)   idx: (B, N, 3)   weight: (B, N, 3)
// output: grad_points: (B, C, M), accumulated in place
void three_interpolate_grad_kernel_launcher_fast(
    int b, int c, int n, int m, const float* grad_out, const int* idx,
    const float* weight, float* grad_points, hipStream_t stream) {
  hipError_t err;
  dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c,
              b);  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK);
  hipLaunchKernelGGL(( three_interpolate_grad_kernel_fast), dim3(blocks), dim3(threads), 0, stream,
      b, c, n, m, grad_out, idx, weight, grad_points);

  err = hipGetLastError();
  if (hipSuccess != err) {
    // Fix: report what went wrong instead of exiting silently.
    fprintf(stderr, "HIP kernel failed : %s\n", hipGetErrorString(err));
    exit(-1);
  }
}
| b01ef68f0a03d37045af07f6076fb3228288aff7.cu | // Copyright 2022 Huawei Technologies Co., Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ============================================================================
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "./interpolate_gpu.h"
#include "./cuda_utils.h"
// For each "unknown" query point, brute-force scan the m "known" points of
// the same batch element and record the three smallest squared Euclidean
// distances, in ascending order, together with the indices of those points.
// Launch: grid (ceil(n / blockDim.x), b); one thread per query point.
__global__ void three_nn_kernel_fast(int b, int n, int m,
                                     const float* __restrict__ unknown,
                                     const float* __restrict__ known,
                                     float* __restrict__ dist2,
                                     int* __restrict__ idx) {
  // unknown: (B, N, 3)
  // known: (B, M, 3)
  // output:
  //      dist2: (B, N, 3)
  //      idx: (B, N, 3)

  int bs_idx = blockIdx.y;                             // batch element
  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;  // query point
  if (bs_idx >= b || pt_idx >= n) return;

  // Rebase the pointers onto this thread's (batch, point) slot.
  unknown += bs_idx * n * 3 + pt_idx * 3;
  known += bs_idx * m * 3;
  dist2 += bs_idx * n * 3 + pt_idx * 3;
  idx += bs_idx * n * 3 + pt_idx * 3;

  float ux = unknown[0];
  float uy = unknown[1];
  float uz = unknown[2];

  // 1e40 sentinels (kept as double, since 1e40 overflows float) guarantee the
  // first real candidates displace them. Invariant: best1 <= best2 <= best3.
  double best1 = 1e40, best2 = 1e40, best3 = 1e40;
  int besti1 = 0, besti2 = 0, besti3 = 0;
  for (int k = 0; k < m; ++k) {
    float x = known[k * 3 + 0];
    float y = known[k * 3 + 1];
    float z = known[k * 3 + 2];
    float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
    if (d < best1) {
      // New overall minimum: shift the previous top two down one slot.
      best3 = best2;
      besti3 = besti2;
      best2 = best1;
      besti2 = besti1;
      best1 = d;
      besti1 = k;
    } else if (d < best2) {
      best3 = best2;
      besti3 = besti2;
      best2 = d;
      besti2 = k;
    } else if (d < best3) {
      best3 = d;
      besti3 = k;
    }
  }
  // Store the squared distances (narrowed double -> float) and indices.
  dist2[0] = best1;
  dist2[1] = best2;
  dist2[2] = best3;
  idx[0] = besti1;
  idx[1] = besti2;
  idx[2] = besti3;
}
// Host launcher for three_nn_kernel_fast.
// unknown: (B, N, 3)   known: (B, M, 3)
// output:  dist2: (B, N, 3)   idx: (B, N, 3)
// One thread per query point; grid is (ceil(n / THREADS_PER_BLOCK), b).
void three_nn_kernel_launcher_fast(int b, int n, int m, const float* unknown,
                                   const float* known, float* dist2, int* idx,
                                   cudaStream_t stream) {
  cudaError_t err;
  dim3 blocks(DIVUP(n, THREADS_PER_BLOCK),
              b);  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK);

  three_nn_kernel_fast<<<blocks, threads, 0, stream>>>(b, n, m, unknown, known,
                                                       dist2, idx);

  err = cudaGetLastError();
  if (cudaSuccess != err) {
    // Fix: report what went wrong instead of exiting silently.
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
// Interpolate each output point as the weighted sum of its three nearest
// source points (indices/weights as produced by the three-NN step).
//   points: (B, C, M)   idx: (B, N, 3)   weight: (B, N, 3)
//   out:    (B, C, N)
// Launch: grid (ceil(n / blockDim.x), C, B); one thread per (batch, channel, point).
__global__ void three_interpolate_kernel_fast(int b, int c, int m, int n,
                                              const float* __restrict__ points,
                                              const int* __restrict__ idx,
                                              const float* __restrict__ weight,
                                              float* __restrict__ out) {
  const int batch = blockIdx.z;
  const int channel = blockIdx.y;
  const int pt = blockIdx.x * blockDim.x + threadIdx.x;
  if (batch >= b || channel >= c || pt >= n) return;

  const int triple = batch * n * 3 + pt * 3;    // offset of this point's 3 neighbours
  const int row = batch * c * m + channel * m;  // start of this (batch, channel) row in points
  const float* w = weight + triple;
  const int* nb = idx + triple;

  out[batch * c * n + channel * n + pt] =
      w[0] * points[row + nb[0]] + w[1] * points[row + nb[1]] +
      w[2] * points[row + nb[2]];
}
// Host launcher for three_interpolate_kernel_fast.
// points: (B, C, M)   idx: (B, N, 3)   weight: (B, N, 3)
// output: out: (B, C, N)
void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n,
                                            const float* points, const int* idx,
                                            const float* weight, float* out,
                                            cudaStream_t stream) {
  cudaError_t err;
  dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c,
              b);  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK);

  three_interpolate_kernel_fast<<<blocks, threads, 0, stream>>>(
      b, c, m, n, points, idx, weight, out);

  err = cudaGetLastError();
  if (cudaSuccess != err) {
    // Fix: report what went wrong instead of exiting silently.
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
// Backward pass of three-point interpolation: scatter each output gradient
// back onto the three source points it was interpolated from, scaled by the
// same weights used in the forward pass.
//   grad_out: (B, C, N)   idx: (B, N, 3)   weight: (B, N, 3)
//   grad_points: (B, C, M), accumulated in place
__global__ void three_interpolate_grad_kernel_fast(
    int b, int c, int n, int m, const float* __restrict__ grad_out,
    const int* __restrict__ idx, const float* __restrict__ weight,
    float* __restrict__ grad_points) {
  const int batch = blockIdx.z;
  const int channel = blockIdx.y;
  const int pt = blockIdx.x * blockDim.x + threadIdx.x;
  if (batch >= b || channel >= c || pt >= n) return;

  const float g = grad_out[batch * c * n + channel * n + pt];
  const int triple = batch * n * 3 + pt * 3;
  const float* w = weight + triple;
  const int* nb = idx + triple;
  float* dst = grad_points + batch * c * m + channel * m;

  // Several output points may share a neighbour, hence the atomic adds.
  atomicAdd(dst + nb[0], g * w[0]);
  atomicAdd(dst + nb[1], g * w[1]);
  atomicAdd(dst + nb[2], g * w[2]);
}
// Host launcher for three_interpolate_grad_kernel_fast.
// grad_out: (B, C, N)   idx: (B, N, 3)   weight: (B, N, 3)
// output: grad_points: (B, C, M), accumulated in place
void three_interpolate_grad_kernel_launcher_fast(
    int b, int c, int n, int m, const float* grad_out, const int* idx,
    const float* weight, float* grad_points, cudaStream_t stream) {
  cudaError_t err;
  dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c,
              b);  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK);
  three_interpolate_grad_kernel_fast<<<blocks, threads, 0, stream>>>(
      b, c, n, m, grad_out, idx, weight, grad_points);

  err = cudaGetLastError();
  if (cudaSuccess != err) {
    // Fix: report what went wrong instead of exiting silently.
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
|
cbdc46b9504d87bfdff24fda0814945e96d53dcc.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include "functions.h"
#include "reduction_cuda.h"
#include "reduction_seq.h"
#define MATRIX_ORDER 3
/* Generate 'count' matrices into 'filename', then time the CUDA reduction
   followed by the sequential reduction over the same file and print the
   comparison. Factored out of main() to remove six copies of the same
   given/when/then sequence. */
static void run_benchmark_case(const char* description, const char* filename,
                               int count)
{
    struct timeval t0, t1, t2;

    /* given: write the input data set */
    write_matrix_list(count, filename, MATRIX_ORDER);

    /* when: run both implementations, timestamping each phase */
    gettimeofday(&t0, NULL);
    reduction_cuda(filename, MATRIX_ORDER);
    gettimeofday(&t1, NULL);
    reduction_seq(filename, MATRIX_ORDER);
    gettimeofday(&t2, NULL);

    /* then: report GPU time vs. sequential time */
    print_performance_test_result(description, time_elapsed(t0, t1),
                                  time_elapsed(t1, t2));
}

int main(int argc, char *argv[])
{
    /* One benchmark case per input size: 500k up to 16M matrices. */
    run_benchmark_case("Reduo de 500k matrizes", "data/teste_500k.txt", 500000);
    run_benchmark_case("Reduo de 1M matrizes", "data/teste_1M.txt", 1000000);
    run_benchmark_case("Reduo de 2M matrizes", "data/teste_2M.txt", 2000000);
    run_benchmark_case("Reduo de 4M matrizes", "data/teste_4M.txt", 4000000);
    run_benchmark_case("Reduo de 8M matrizes", "data/teste_8M.txt", 8000000);
    run_benchmark_case("Reduo de 16M matrizes", "data/teste_16M.txt", 16000000);

    return 0;
}
| cbdc46b9504d87bfdff24fda0814945e96d53dcc.cu | #include <cuda.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include "functions.h"
#include "reduction_cuda.h"
#include "reduction_seq.h"
#define MATRIX_ORDER 3
/* Generate 'count' matrices into 'filename', then time the CUDA reduction
   followed by the sequential reduction over the same file and print the
   comparison. Factored out of main() to remove six copies of the same
   given/when/then sequence. */
static void run_reduction_case(const char* description, const char* filename,
                               int count)
{
    struct timeval t0, t1, t2;

    /* given: write the input data set */
    write_matrix_list(count, filename, MATRIX_ORDER);

    /* when: run both implementations, timestamping each phase */
    gettimeofday(&t0, NULL);
    reduction_cuda(filename, MATRIX_ORDER);
    gettimeofday(&t1, NULL);
    reduction_seq(filename, MATRIX_ORDER);
    gettimeofday(&t2, NULL);

    /* then: report GPU time vs. sequential time */
    print_performance_test_result(description, time_elapsed(t0, t1),
                                  time_elapsed(t1, t2));
}

int main(int argc, char *argv[])
{
    /* One benchmark case per input size: 500k up to 16M matrices. */
    run_reduction_case("Redução de 500k matrizes", "data/teste_500k.txt", 500000);
    run_reduction_case("Redução de 1M matrizes", "data/teste_1M.txt", 1000000);
    run_reduction_case("Redução de 2M matrizes", "data/teste_2M.txt", 2000000);
    run_reduction_case("Redução de 4M matrizes", "data/teste_4M.txt", 4000000);
    run_reduction_case("Redução de 8M matrizes", "data/teste_8M.txt", 8000000);
    run_reduction_case("Redução de 16M matrizes", "data/teste_16M.txt", 16000000);

    return 0;
}
|
b656e29e3098d39acd5e0850d38de93e50698986.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/kernels/kernel.h"
#include "common/kernels/saturate.h"
#include "hip/hip_fp16.h"
#include <array>
namespace nvinfer1
{
namespace plugin
{
// overloading exp for half type
// Scalar arithmetic helpers overloaded for __half and float so the templated
// decode kernel below compiles for both T_BBOX = float and T_BBOX = __half.
// On devices with native fp16 arithmetic (SM >= 5.3) the hardware ops are
// used; older devices fall back to computing in float and converting back.
inline __device__ __half exp(__half a)
{
#if __CUDA_ARCH__ >= 530
    return hexp(a);
#else
    return exp(float(a));
#endif
}

// a + b for __half (float fallback below SM53)
inline __device__ __half add_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
    return a + b;
#else
    return __float2half(__half2float(a) + __half2float(b));
#endif
}

// a + b for float
inline __device__ float add_fb(const float & a, const float & b) {
    return a + b;
}

// a - b for __half (float fallback below SM53)
inline __device__ __half minus_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
    return a - b;
#else
    return __float2half(__half2float(a) - __half2float(b));
#endif
}

// a - b for float
inline __device__ float minus_fb(const float & a, const float & b) {
    return a - b;
}

// a * b for __half (float fallback below SM53)
inline __device__ __half mul_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
    return a * b;
#else
    return __float2half(__half2float(a) * __half2float(b));
#endif
}

// a * b with a float right-hand operand; the result stays __half
inline __device__ __half mul_fb(const __half & a, const float & b) {
#if __CUDA_ARCH__ >= 530
    return a * __float2half(b);
#else
    return __float2half(__half2float(a) * b);
#endif
}

// a * b for float
inline __device__ float mul_fb(const float & a, const float & b) {
    return a * b;
}

// a / b for __half (float fallback below SM53)
inline __device__ __half div_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
    return a / b;
#else
    return __float2half(__half2float(a) / __half2float(b));
#endif
}

// a / b for float
inline __device__ float div_fb(const float & a, const float & b) {
    return a / b;
}
/* Decode raw SSD location predictions (loc_data) into absolute corner-form
   boxes (bbox_data), using the anchor/prior boxes and, optionally, their
   encoded variances stored in prior_data. One thread handles one box
   coordinate, i.e. nthreads = batch * num_priors * num_loc_classes * 4;
   the grid-stride loop covers any remainder.
   NOTE(review): the `return` on the background-class branch also abandons
   this thread's later grid-stride iterations; harmless with the launcher
   below (its grid covers all nthreads in a single pass), but worth
   confirming if the launch configuration ever changes. */
template <typename T_BBOX, unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta)
    __global__ void decodeBBoxes_kernel(
        const int nthreads,
        const CodeTypeSSD code_type,
        const bool variance_encoded_in_target,
        const int num_priors,
        const bool share_location,
        const int num_loc_classes,
        const int background_label_id,
        const bool clip_bbox,
        const T_BBOX* loc_data,
        const T_BBOX* prior_data,
        T_BBOX* bbox_data,
        const bool batch_agnostic)
{
    for (int index = blockIdx.x * nthds_per_cta + threadIdx.x;
         index < nthreads;
         index += nthds_per_cta * gridDim.x)
    {
        // Bounding box coordinate index {0, 1, 2, 3}
        // loc_data: (N, R, C, 4)
        // prior_data: (N, 2, R, 4)
        const int i = index % 4;
        // Bounding box class index
        const int c = (index / 4) % num_loc_classes;
        // Prior box id corresponding to the bounding box
        const int d = (index / 4 / num_loc_classes) % num_priors;
        // batch dim
        const int batch = index / (4 * num_loc_classes * num_priors);
        // If bounding box was not shared among all the classes and the bounding box is corresponding to the background class
        if (!share_location && c == background_label_id)
        {
            // Ignore background class if not share_location.
            return;
        }
        // Index to the right anchor box corresponding to the current bounding box
        // do not assume each images' anchor boxes are identical
        // e.g., in FasterRCNN, priors are ROIs from proposal layer and are different
        // for each image.
        const int pi = batch_agnostic ? d * 4 : (batch * 2 * num_priors + d) * 4;
        // Index to the right variances corresponding to the current bounding box
        const int vi = pi + num_priors * 4;
        // Encoding method: CodeTypeSSD::CORNER
        //if (code_type == PriorBoxParameter_CodeType_CORNER){
        if (code_type == CodeTypeSSD::CORNER)
        {
            // Do not want to use variances to adjust the bounding box decoding
            if (variance_encoded_in_target)
            {
                // variance is encoded in target, we simply need to add the offset
                // predictions.
                // prior_data[pi + i]: prior box coordinates corresponding to the current bounding box coordinate
                bbox_data[index] = add_fb(prior_data[pi + i], loc_data[index]);
            }
            else
            {
                // variance is encoded in bbox, we need to scale the offset accordingly.
                // prior_data[vi + i]: variance corresponding to the current bounding box coordinate
                bbox_data[index] = add_fb(prior_data[pi + i], mul_fb(loc_data[index], prior_data[vi + i]));
            }
            //} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
        }
        // Encoding method: CodeTypeSSD::CENTER_SIZE
        else if (code_type == CodeTypeSSD::CENTER_SIZE)
        {
            // Get prior box coordinates
            const T_BBOX p_xmin = prior_data[pi];
            const T_BBOX p_ymin = prior_data[pi + 1];
            const T_BBOX p_xmax = prior_data[pi + 2];
            const T_BBOX p_ymax = prior_data[pi + 3];
            // Calculate prior box center, height, and width
            const T_BBOX prior_width = minus_fb(p_xmax, p_xmin);
            const T_BBOX prior_height = minus_fb(p_ymax, p_ymin);
            const T_BBOX prior_center_x = div_fb(add_fb(p_xmin, p_xmax), T_BBOX(2));
            const T_BBOX prior_center_y = div_fb(add_fb(p_ymin, p_ymax), T_BBOX(2));
            // Get the current bounding box coordinates
            // (index - i) is the start of this box's 4-tuple, so all four raw
            // offsets of the box are re-read by every one of its 4 threads.
            const T_BBOX xmin = loc_data[index - i];
            const T_BBOX ymin = loc_data[index - i + 1];
            const T_BBOX xmax = loc_data[index - i + 2];
            const T_BBOX ymax = loc_data[index - i + 3];
            // Declare decoded bounding box coordinates
            T_BBOX decode_bbox_center_x, decode_bbox_center_y;
            T_BBOX decode_bbox_width, decode_bbox_height;
            // Do not want to use variances to adjust the bounding box decoding
            if (variance_encoded_in_target)
            {
                // variance is encoded in target, we simply need to retore the offset
                // predictions.
                decode_bbox_center_x = add_fb(mul_fb(xmin, prior_width), prior_center_x);
                decode_bbox_center_y = add_fb(mul_fb(ymin, prior_height), prior_center_y);
                decode_bbox_width = mul_fb(exp(xmax), prior_width);
                decode_bbox_height = mul_fb(exp(ymax), prior_height);
            }
            else
            {
                // variance is encoded in bbox, we need to scale the offset accordingly.
                decode_bbox_center_x = add_fb(mul_fb(mul_fb(prior_data[vi], xmin), prior_width), prior_center_x);
                decode_bbox_center_y = add_fb(mul_fb(mul_fb(prior_data[vi + 1], ymin), prior_height), prior_center_y);
                decode_bbox_width = mul_fb(exp(mul_fb(prior_data[vi + 2], xmax)), prior_width);
                decode_bbox_height = mul_fb(exp(mul_fb(prior_data[vi + 3], ymax)), prior_height);
            }
            // Use [x_topleft, y_topleft, x_bottomright, y_bottomright] as coordinates for final decoded bounding box output
            switch (i)
            {
            case 0:
                bbox_data[index] = minus_fb(decode_bbox_center_x, div_fb(decode_bbox_width, T_BBOX(2)));
                break;
            case 1:
                bbox_data[index] = minus_fb(decode_bbox_center_y, div_fb(decode_bbox_height, T_BBOX(2)));
                break;
            case 2:
                bbox_data[index] = add_fb(decode_bbox_center_x, div_fb(decode_bbox_width, T_BBOX(2)));
                break;
            case 3:
                bbox_data[index] = add_fb(decode_bbox_center_y, div_fb(decode_bbox_height, T_BBOX(2)));
                break;
            }
            //} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
        }
        // Encoding method: CodeTypeSSD::CORNER_SIZE
        else if (code_type == CodeTypeSSD::CORNER_SIZE)
        {
            // Get prior box coordinates
            const T_BBOX p_xmin = prior_data[pi];
            const T_BBOX p_ymin = prior_data[pi + 1];
            const T_BBOX p_xmax = prior_data[pi + 2];
            const T_BBOX p_ymax = prior_data[pi + 3];
            // Get prior box width and height
            const T_BBOX prior_width = minus_fb(p_xmax, p_xmin);
            const T_BBOX prior_height = minus_fb(p_ymax, p_ymin);
            T_BBOX p_size;
            // x-coordinates (i == 0, 2) scale with the prior width,
            // y-coordinates (i == 1, 3) with its height.
            if (i == 0 || i == 2)
            {
                p_size = prior_width;
            }
            else
            {
                p_size = prior_height;
            }
            // Do not want to use variances to adjust the bounding box decoding
            if (variance_encoded_in_target)
            {
                // variance is encoded in target, we simply need to add the offset
                // predictions.
                bbox_data[index] = add_fb(prior_data[pi + i], mul_fb(loc_data[index], p_size));
            }
            else
            {
                // variance is encoded in bbox, we need to scale the offset accordingly.
                bbox_data[index] = add_fb(prior_data[pi + i], mul_fb(mul_fb(loc_data[index], prior_data[vi + i]), p_size));
            }
        }
        // Exactly the same to CodeTypeSSD::CENTER_SIZE with using variance to adjust the bounding box decoding
        // NOTE(review): TF_CENTER reads the raw offsets in (y, x, y, x) slot
        // order (see the variable assignments below), unlike CENTER_SIZE's
        // (x, y, x, y) -- presumably to match TensorFlow's box encoding.
        else if (code_type == CodeTypeSSD::TF_CENTER)
        {
            const T_BBOX pXmin = prior_data[pi];
            const T_BBOX pYmin = prior_data[pi + 1];
            const T_BBOX pXmax = prior_data[pi + 2];
            const T_BBOX pYmax = prior_data[pi + 3];
            const T_BBOX priorWidth = minus_fb(pXmax, pXmin);
            const T_BBOX priorHeight = minus_fb(pYmax, pYmin);
            const T_BBOX priorCenterX = div_fb(add_fb(pXmin, pXmax), T_BBOX(2));
            const T_BBOX priorCenterY = div_fb(add_fb(pYmin, pYmax), T_BBOX(2));
            const T_BBOX ymin = loc_data[index - i];
            const T_BBOX xmin = loc_data[index - i + 1];
            const T_BBOX ymax = loc_data[index - i + 2];
            const T_BBOX xmax = loc_data[index - i + 3];
            T_BBOX bboxCenterX, bboxCenterY;
            T_BBOX bboxWidth, bboxHeight;
            bboxCenterX = add_fb(mul_fb(mul_fb(prior_data[vi], xmin), priorWidth), priorCenterX);
            bboxCenterY = add_fb(mul_fb(mul_fb(prior_data[vi + 1], ymin), priorHeight), priorCenterY);
            bboxWidth = mul_fb(exp(mul_fb(prior_data[vi + 2], xmax)), priorWidth);
            bboxHeight = mul_fb(exp(mul_fb(prior_data[vi + 3], ymax)), priorHeight);
            switch (i)
            {
            case 0:
                bbox_data[index] = minus_fb(bboxCenterX, div_fb(bboxWidth, T_BBOX(2)));
                break;
            case 1:
                bbox_data[index] = minus_fb(bboxCenterY, div_fb(bboxHeight, T_BBOX(2)));
                break;
            case 2:
                bbox_data[index] = add_fb(bboxCenterX, div_fb(bboxWidth, T_BBOX(2)));
                break;
            case 3:
                bbox_data[index] = add_fb(bboxCenterY, div_fb(bboxHeight, T_BBOX(2)));
                break;
            }
        }
        else
        {
            // Unknown code type.
            assert(!"Unknown Box decode code type");
        }
        // Clip bounding box or not
        if (clip_bbox)
        {
            // saturate(): clamp to the valid range (presumably [0, 1] -- see
            // common/kernels/saturate.h; TODO confirm).
            bbox_data[index] = saturate(bbox_data[index]);
        }
    }
}
/* Host-side launcher for decodeBBoxes_kernel: spawns one thread per box
   coordinate (nthreads = batch * num_loc_classes * num_priors * 4) on the
   given stream and maps the type-erased buffers to T_BBOX. Returns
   STATUS_FAILURE via CSC() if the launch was rejected. */
template <typename T_BBOX>
pluginStatus_t decodeBBoxes_gpu(
    hipStream_t stream,
    const int nthreads,
    const CodeTypeSSD code_type,
    const bool variance_encoded_in_target,
    const int num_priors,
    const bool share_location,
    const int num_loc_classes,
    const int background_label_id,
    const bool clip_bbox,
    const void* loc_data,
    const void* prior_data,
    void* bbox_data,
    const bool batch_agnostic)
{
    constexpr int kBlockSize = 512;
    const int gridSize = (nthreads + kBlockSize - 1) / kBlockSize; // ceil-div
    hipLaunchKernelGGL(( decodeBBoxes_kernel<T_BBOX, kBlockSize>), dim3(gridSize), dim3(kBlockSize), 0, stream,
        nthreads, code_type, variance_encoded_in_target,
        num_priors, share_location, num_loc_classes,
        background_label_id, clip_bbox,
        static_cast<const T_BBOX*>(loc_data),
        static_cast<const T_BBOX*>(prior_data),
        static_cast<T_BBOX*>(bbox_data), batch_agnostic);
    CSC(hipGetLastError(), STATUS_FAILURE);
    return STATUS_SUCCESS;
}
// decodeBBoxes LAUNCH CONFIG
typedef pluginStatus_t (*dbbFunc)(hipStream_t,
const int,
const CodeTypeSSD,
const bool,
const int,
const bool,
const int,
const int,
const bool,
const void*,
const void*,
void*,
const bool);
struct dbbLaunchConfig
{
DataType t_bbox;
dbbFunc function;
dbbLaunchConfig(DataType t_bbox)
: t_bbox(t_bbox)
, function(nullptr)
{
}
dbbLaunchConfig(DataType t_bbox, dbbFunc function)
: t_bbox(t_bbox)
, function(function)
{
}
bool operator==(const dbbLaunchConfig& other)
{
return t_bbox == other.t_bbox;
}
};
static std::array<dbbLaunchConfig, 2> dbbLCOptions = {
dbbLaunchConfig(DataType::kFLOAT, decodeBBoxes_gpu<float>),
dbbLaunchConfig(DataType::kHALF, decodeBBoxes_gpu<__half>)
};
pluginStatus_t decodeBBoxes(
hipStream_t stream,
const int nthreads,
const CodeTypeSSD code_type,
const bool variance_encoded_in_target,
const int num_priors,
const bool share_location,
const int num_loc_classes,
const int background_label_id,
const bool clip_bbox,
const DataType DT_BBOX,
const void* loc_data,
const void* prior_data,
void* bbox_data,
const bool batch_agnostic)
{
dbbLaunchConfig lc = dbbLaunchConfig(DT_BBOX);
for (unsigned i = 0; i < dbbLCOptions.size(); ++i)
{
if (lc == dbbLCOptions[i])
{
DEBUG_PRINTF("decodeBBox kernel %d\n", i);
return dbbLCOptions[i].function(stream,
nthreads,
code_type,
variance_encoded_in_target,
num_priors,
share_location,
num_loc_classes,
background_label_id,
clip_bbox,
loc_data,
prior_data,
bbox_data,
batch_agnostic);
}
}
return STATUS_BAD_PARAM;
}
} // namespace plugin
} // namespace nvinfer1
| b656e29e3098d39acd5e0850d38de93e50698986.cu | /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/kernels/kernel.h"
#include "common/kernels/saturate.h"
#include "cuda_fp16.h"
#include <array>
namespace nvinfer1
{
namespace plugin
{
// overloading exp for half type
inline __device__ __half exp(__half a)
{
#if __CUDA_ARCH__ >= 530
return hexp(a);
#else
return exp(float(a));
#endif
}
inline __device__ __half add_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
return a + b;
#else
return __float2half(__half2float(a) + __half2float(b));
#endif
}
inline __device__ float add_fb(const float & a, const float & b) {
return a + b;
}
inline __device__ __half minus_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
return a - b;
#else
return __float2half(__half2float(a) - __half2float(b));
#endif
}
inline __device__ float minus_fb(const float & a, const float & b) {
return a - b;
}
inline __device__ __half mul_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
return a * b;
#else
return __float2half(__half2float(a) * __half2float(b));
#endif
}
inline __device__ __half mul_fb(const __half & a, const float & b) {
#if __CUDA_ARCH__ >= 530
return a * __float2half(b);
#else
return __float2half(__half2float(a) * b);
#endif
}
inline __device__ float mul_fb(const float & a, const float & b) {
return a * b;
}
inline __device__ __half div_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
return a / b;
#else
return __float2half(__half2float(a) / __half2float(b));
#endif
}
inline __device__ float div_fb(const float & a, const float & b) {
return a / b;
}
template <typename T_BBOX, unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta)
__global__ void decodeBBoxes_kernel(
const int nthreads,
const CodeTypeSSD code_type,
const bool variance_encoded_in_target,
const int num_priors,
const bool share_location,
const int num_loc_classes,
const int background_label_id,
const bool clip_bbox,
const T_BBOX* loc_data,
const T_BBOX* prior_data,
T_BBOX* bbox_data,
const bool batch_agnostic)
{
for (int index = blockIdx.x * nthds_per_cta + threadIdx.x;
index < nthreads;
index += nthds_per_cta * gridDim.x)
{
// Bounding box coordinate index {0, 1, 2, 3}
// loc_data: (N, R, C, 4)
// prior_data: (N, 2, R, 4)
const int i = index % 4;
// Bounding box class index
const int c = (index / 4) % num_loc_classes;
// Prior box id corresponding to the bounding box
const int d = (index / 4 / num_loc_classes) % num_priors;
// batch dim
const int batch = index / (4 * num_loc_classes * num_priors);
// If bounding box was not shared among all the classes and the bounding box is corresponding to the background class
if (!share_location && c == background_label_id)
{
// Ignore background class if not share_location.
return;
}
// Index to the right anchor box corresponding to the current bounding box
// do not assume each images' anchor boxes are identical
// e.g., in FasterRCNN, priors are ROIs from proposal layer and are different
// for each image.
const int pi = batch_agnostic ? d * 4 : (batch * 2 * num_priors + d) * 4;
// Index to the right variances corresponding to the current bounding box
const int vi = pi + num_priors * 4;
// Encoding method: CodeTypeSSD::CORNER
//if (code_type == PriorBoxParameter_CodeType_CORNER){
if (code_type == CodeTypeSSD::CORNER)
{
// Do not want to use variances to adjust the bounding box decoding
if (variance_encoded_in_target)
{
// variance is encoded in target, we simply need to add the offset
// predictions.
// prior_data[pi + i]: prior box coordinates corresponding to the current bounding box coordinate
bbox_data[index] = add_fb(prior_data[pi + i], loc_data[index]);
}
else
{
// variance is encoded in bbox, we need to scale the offset accordingly.
// prior_data[vi + i]: variance corresponding to the current bounding box coordinate
bbox_data[index] = add_fb(prior_data[pi + i], mul_fb(loc_data[index], prior_data[vi + i]));
}
//} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
}
// Encoding method: CodeTypeSSD::CENTER_SIZE
else if (code_type == CodeTypeSSD::CENTER_SIZE)
{
// Get prior box coordinates
const T_BBOX p_xmin = prior_data[pi];
const T_BBOX p_ymin = prior_data[pi + 1];
const T_BBOX p_xmax = prior_data[pi + 2];
const T_BBOX p_ymax = prior_data[pi + 3];
// Calculate prior box center, height, and width
const T_BBOX prior_width = minus_fb(p_xmax, p_xmin);
const T_BBOX prior_height = minus_fb(p_ymax, p_ymin);
const T_BBOX prior_center_x = div_fb(add_fb(p_xmin, p_xmax), T_BBOX(2));
const T_BBOX prior_center_y = div_fb(add_fb(p_ymin, p_ymax), T_BBOX(2));
// Get the current bounding box coordinates
const T_BBOX xmin = loc_data[index - i];
const T_BBOX ymin = loc_data[index - i + 1];
const T_BBOX xmax = loc_data[index - i + 2];
const T_BBOX ymax = loc_data[index - i + 3];
// Declare decoded bounding box coordinates
T_BBOX decode_bbox_center_x, decode_bbox_center_y;
T_BBOX decode_bbox_width, decode_bbox_height;
// Do not want to use variances to adjust the bounding box decoding
if (variance_encoded_in_target)
{
// variance is encoded in target, we simply need to retore the offset
// predictions.
decode_bbox_center_x = add_fb(mul_fb(xmin, prior_width), prior_center_x);
decode_bbox_center_y = add_fb(mul_fb(ymin, prior_height), prior_center_y);
decode_bbox_width = mul_fb(exp(xmax), prior_width);
decode_bbox_height = mul_fb(exp(ymax), prior_height);
}
else
{
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x = add_fb(mul_fb(mul_fb(prior_data[vi], xmin), prior_width), prior_center_x);
decode_bbox_center_y = add_fb(mul_fb(mul_fb(prior_data[vi + 1], ymin), prior_height), prior_center_y);
decode_bbox_width = mul_fb(exp(mul_fb(prior_data[vi + 2], xmax)), prior_width);
decode_bbox_height = mul_fb(exp(mul_fb(prior_data[vi + 3], ymax)), prior_height);
}
// Use [x_topleft, y_topleft, x_bottomright, y_bottomright] as coordinates for final decoded bounding box output
switch (i)
{
case 0:
bbox_data[index] = minus_fb(decode_bbox_center_x, div_fb(decode_bbox_width, T_BBOX(2)));
break;
case 1:
bbox_data[index] = minus_fb(decode_bbox_center_y, div_fb(decode_bbox_height, T_BBOX(2)));
break;
case 2:
bbox_data[index] = add_fb(decode_bbox_center_x, div_fb(decode_bbox_width, T_BBOX(2)));
break;
case 3:
bbox_data[index] = add_fb(decode_bbox_center_y, div_fb(decode_bbox_height, T_BBOX(2)));
break;
}
//} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
}
// Encoding method: CodeTypeSSD::CORNER_SIZE
else if (code_type == CodeTypeSSD::CORNER_SIZE)
{
// Get prior box coordinates
const T_BBOX p_xmin = prior_data[pi];
const T_BBOX p_ymin = prior_data[pi + 1];
const T_BBOX p_xmax = prior_data[pi + 2];
const T_BBOX p_ymax = prior_data[pi + 3];
// Get prior box width and height
const T_BBOX prior_width = minus_fb(p_xmax, p_xmin);
const T_BBOX prior_height = minus_fb(p_ymax, p_ymin);
T_BBOX p_size;
if (i == 0 || i == 2)
{
p_size = prior_width;
}
else
{
p_size = prior_height;
}
// Do not want to use variances to adjust the bounding box decoding
if (variance_encoded_in_target)
{
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = add_fb(prior_data[pi + i], mul_fb(loc_data[index], p_size));
}
else
{
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] = add_fb(prior_data[pi + i], mul_fb(mul_fb(loc_data[index], prior_data[vi + i]), p_size));
}
}
// Exactly the same to CodeTypeSSD::CENTER_SIZE with using variance to adjust the bounding box decoding
else if (code_type == CodeTypeSSD::TF_CENTER)
{
const T_BBOX pXmin = prior_data[pi];
const T_BBOX pYmin = prior_data[pi + 1];
const T_BBOX pXmax = prior_data[pi + 2];
const T_BBOX pYmax = prior_data[pi + 3];
const T_BBOX priorWidth = minus_fb(pXmax, pXmin);
const T_BBOX priorHeight = minus_fb(pYmax, pYmin);
const T_BBOX priorCenterX = div_fb(add_fb(pXmin, pXmax), T_BBOX(2));
const T_BBOX priorCenterY = div_fb(add_fb(pYmin, pYmax), T_BBOX(2));
const T_BBOX ymin = loc_data[index - i];
const T_BBOX xmin = loc_data[index - i + 1];
const T_BBOX ymax = loc_data[index - i + 2];
const T_BBOX xmax = loc_data[index - i + 3];
T_BBOX bboxCenterX, bboxCenterY;
T_BBOX bboxWidth, bboxHeight;
bboxCenterX = add_fb(mul_fb(mul_fb(prior_data[vi], xmin), priorWidth), priorCenterX);
bboxCenterY = add_fb(mul_fb(mul_fb(prior_data[vi + 1], ymin), priorHeight), priorCenterY);
bboxWidth = mul_fb(exp(mul_fb(prior_data[vi + 2], xmax)), priorWidth);
bboxHeight = mul_fb(exp(mul_fb(prior_data[vi + 3], ymax)), priorHeight);
switch (i)
{
case 0:
bbox_data[index] = minus_fb(bboxCenterX, div_fb(bboxWidth, T_BBOX(2)));
break;
case 1:
bbox_data[index] = minus_fb(bboxCenterY, div_fb(bboxHeight, T_BBOX(2)));
break;
case 2:
bbox_data[index] = add_fb(bboxCenterX, div_fb(bboxWidth, T_BBOX(2)));
break;
case 3:
bbox_data[index] = add_fb(bboxCenterY, div_fb(bboxHeight, T_BBOX(2)));
break;
}
}
else
{
// Unknown code type.
assert(!"Unknown Box decode code type");
}
// Clip bounding box or not
if (clip_bbox)
{
bbox_data[index] = saturate(bbox_data[index]);
}
}
}
template <typename T_BBOX>
pluginStatus_t decodeBBoxes_gpu(
cudaStream_t stream,
const int nthreads,
const CodeTypeSSD code_type,
const bool variance_encoded_in_target,
const int num_priors,
const bool share_location,
const int num_loc_classes,
const int background_label_id,
const bool clip_bbox,
const void* loc_data,
const void* prior_data,
void* bbox_data,
const bool batch_agnostic)
{
const int BS = 512;
const int GS = (nthreads + BS - 1) / BS;
decodeBBoxes_kernel<T_BBOX, BS><<<GS, BS, 0, stream>>>(nthreads, code_type, variance_encoded_in_target,
num_priors, share_location, num_loc_classes,
background_label_id, clip_bbox,
(const T_BBOX*) loc_data, (const T_BBOX*) prior_data,
(T_BBOX*) bbox_data, batch_agnostic);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// decodeBBoxes LAUNCH CONFIG
typedef pluginStatus_t (*dbbFunc)(cudaStream_t,
const int,
const CodeTypeSSD,
const bool,
const int,
const bool,
const int,
const int,
const bool,
const void*,
const void*,
void*,
const bool);
struct dbbLaunchConfig
{
DataType t_bbox;
dbbFunc function;
dbbLaunchConfig(DataType t_bbox)
: t_bbox(t_bbox)
, function(nullptr)
{
}
dbbLaunchConfig(DataType t_bbox, dbbFunc function)
: t_bbox(t_bbox)
, function(function)
{
}
bool operator==(const dbbLaunchConfig& other)
{
return t_bbox == other.t_bbox;
}
};
static std::array<dbbLaunchConfig, 2> dbbLCOptions = {
dbbLaunchConfig(DataType::kFLOAT, decodeBBoxes_gpu<float>),
dbbLaunchConfig(DataType::kHALF, decodeBBoxes_gpu<__half>)
};
pluginStatus_t decodeBBoxes(
cudaStream_t stream,
const int nthreads,
const CodeTypeSSD code_type,
const bool variance_encoded_in_target,
const int num_priors,
const bool share_location,
const int num_loc_classes,
const int background_label_id,
const bool clip_bbox,
const DataType DT_BBOX,
const void* loc_data,
const void* prior_data,
void* bbox_data,
const bool batch_agnostic)
{
dbbLaunchConfig lc = dbbLaunchConfig(DT_BBOX);
for (unsigned i = 0; i < dbbLCOptions.size(); ++i)
{
if (lc == dbbLCOptions[i])
{
DEBUG_PRINTF("decodeBBox kernel %d\n", i);
return dbbLCOptions[i].function(stream,
nthreads,
code_type,
variance_encoded_in_target,
num_priors,
share_location,
num_loc_classes,
background_label_id,
clip_bbox,
loc_data,
prior_data,
bbox_data,
batch_agnostic);
}
}
return STATUS_BAD_PARAM;
}
} // namespace plugin
} // namespace nvinfer1
|
pcl_losses_kernel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <stdio.h>
#include <vector>
#include <math.h>
#include <float.h>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
template <typename scalar_t>
__global__ void PCLLossesForward(const int nthreads, const scalar_t* bottom_data,
const scalar_t* labels, const scalar_t* cls_loss_weights, const scalar_t* pc_labels,
const scalar_t* pc_probs, const scalar_t* img_cls_loss_weights,
const scalar_t* im_labels, const int batch_size, const int num_positive, scalar_t* top_data)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
top_data[index] = 0;
if (im_labels[index] != 0) {
if (index == 0) {
for (int i = 0; i < batch_size; i++) {
if (labels[i] == 0) {
top_data[index] -= cls_loss_weights[i] * log(bottom_data[i * nthreads + index]);
}
}
}
else {
for (int i = 0; i < num_positive; i++) {
if (pc_labels[i] == index) {
top_data[index] -= img_cls_loss_weights[i] * log(pc_probs[i]);
}
}
}
}
}
}
int PCLLossesForwardLaucher(
const at::Tensor bottom_data, const at::Tensor labels, const at::Tensor cls_loss_weights,
const at::Tensor pc_labels, const at::Tensor pc_probs, const at::Tensor img_cls_loss_weights,
const at::Tensor im_labels, const int batch_size, const int channels,
const int num_positive, at::Tensor top_data)
{
const int kThreadsPerBlock = 4;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(bottom_data.scalar_type(), "PCLLosses_forward", (
[&] {
hipLaunchKernelGGL(( PCLLossesForward<scalar_t>)
, dim3((channels + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, 0,
channels,
bottom_data.contiguous().data<scalar_t>(),
labels.contiguous().data<scalar_t>(),
cls_loss_weights.contiguous().data<scalar_t>(),
pc_labels.contiguous().data<scalar_t>(),
pc_probs.contiguous().data<scalar_t>(),
img_cls_loss_weights.contiguous().data<scalar_t>(),
im_labels.contiguous().data<scalar_t>(),
batch_size,
num_positive,
top_data.contiguous().data<scalar_t>());
}
));
THCudaCheck(hipGetLastError());
return 1;
}
template <typename scalar_t>
__global__ void PCLLossesBackward(const int nthreads, const scalar_t* prob_data,
const scalar_t* labels, const scalar_t* cls_loss_weights, const scalar_t* gt_assignment,
const scalar_t* pc_labels, const scalar_t* pc_probs, const scalar_t* pc_count,
const scalar_t* img_cls_loss_weights, const scalar_t* im_labels, const int channels,
scalar_t* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
int i = index / channels;
int c = index % channels;
bottom_diff[index] = 0;
if (im_labels[c] != 0) {
if (c == 0) {
if (labels[i] == 0) {
bottom_diff[index] = -cls_loss_weights[i] / prob_data[index];
}
}
else {
if (labels[i] == c) {
int pc_index = gt_assignment[i];
if (c != pc_labels[pc_index]) {
printf("labels mismatch.\n");
}
bottom_diff[index] = -img_cls_loss_weights[pc_index]
/ (pc_count[pc_index] * pc_probs[pc_index]);
}
}
}
}
}
int PCLLossesBackwardLaucher(const at::Tensor top_diff, const at::Tensor prob_data,
const at::Tensor labels, const at::Tensor cls_loss_weights, const at::Tensor gt_assignment,
const at::Tensor pc_labels, const at::Tensor pc_probs, const at::Tensor pc_count,
const at::Tensor img_cls_loss_weights, const at::Tensor im_labels, const int batch_size,
const int channels, at::Tensor bottom_diff)
{
const int kThreadsPerBlock = 16;
auto output_size = batch_size * channels;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(top_diff.scalar_type(), "ROIPool_backward", (
[&]{
hipLaunchKernelGGL(( PCLLossesBackward<scalar_t>)
, dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, 0,
output_size,
prob_data.contiguous().data<scalar_t>(),
labels.contiguous().data<scalar_t>(),
cls_loss_weights.contiguous().data<scalar_t>(),
gt_assignment.contiguous().data<scalar_t>(),
pc_labels.contiguous().data<scalar_t>(),
pc_probs.contiguous().data<scalar_t>(),
pc_count.contiguous().data<scalar_t>(),
img_cls_loss_weights.contiguous().data<scalar_t>(),
im_labels.contiguous().data<scalar_t>(),
channels,
bottom_diff.contiguous().data<scalar_t>());
}
));
THCudaCheck(hipGetLastError());
return 1;
} | pcl_losses_kernel.cu | #include<ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <stdio.h>
#include <vector>
#include <math.h>
#include <float.h>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
template <typename scalar_t>
__global__ void PCLLossesForward(const int nthreads, const scalar_t* bottom_data,
const scalar_t* labels, const scalar_t* cls_loss_weights, const scalar_t* pc_labels,
const scalar_t* pc_probs, const scalar_t* img_cls_loss_weights,
const scalar_t* im_labels, const int batch_size, const int num_positive, scalar_t* top_data)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
top_data[index] = 0;
if (im_labels[index] != 0) {
if (index == 0) {
for (int i = 0; i < batch_size; i++) {
if (labels[i] == 0) {
top_data[index] -= cls_loss_weights[i] * log(bottom_data[i * nthreads + index]);
}
}
}
else {
for (int i = 0; i < num_positive; i++) {
if (pc_labels[i] == index) {
top_data[index] -= img_cls_loss_weights[i] * log(pc_probs[i]);
}
}
}
}
}
}
int PCLLossesForwardLaucher(
const at::Tensor bottom_data, const at::Tensor labels, const at::Tensor cls_loss_weights,
const at::Tensor pc_labels, const at::Tensor pc_probs, const at::Tensor img_cls_loss_weights,
const at::Tensor im_labels, const int batch_size, const int channels,
const int num_positive, at::Tensor top_data)
{
const int kThreadsPerBlock = 4;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(bottom_data.scalar_type(), "PCLLosses_forward", (
[&] {
PCLLossesForward<scalar_t>
<<<(channels + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>(
channels,
bottom_data.contiguous().data<scalar_t>(),
labels.contiguous().data<scalar_t>(),
cls_loss_weights.contiguous().data<scalar_t>(),
pc_labels.contiguous().data<scalar_t>(),
pc_probs.contiguous().data<scalar_t>(),
img_cls_loss_weights.contiguous().data<scalar_t>(),
im_labels.contiguous().data<scalar_t>(),
batch_size,
num_positive,
top_data.contiguous().data<scalar_t>());
}
));
THCudaCheck(cudaGetLastError());
return 1;
}
template <typename scalar_t>
__global__ void PCLLossesBackward(const int nthreads, const scalar_t* prob_data,
const scalar_t* labels, const scalar_t* cls_loss_weights, const scalar_t* gt_assignment,
const scalar_t* pc_labels, const scalar_t* pc_probs, const scalar_t* pc_count,
const scalar_t* img_cls_loss_weights, const scalar_t* im_labels, const int channels,
scalar_t* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
int i = index / channels;
int c = index % channels;
bottom_diff[index] = 0;
if (im_labels[c] != 0) {
if (c == 0) {
if (labels[i] == 0) {
bottom_diff[index] = -cls_loss_weights[i] / prob_data[index];
}
}
else {
if (labels[i] == c) {
int pc_index = gt_assignment[i];
if (c != pc_labels[pc_index]) {
printf("labels mismatch.\n");
}
bottom_diff[index] = -img_cls_loss_weights[pc_index]
/ (pc_count[pc_index] * pc_probs[pc_index]);
}
}
}
}
}
int PCLLossesBackwardLaucher(const at::Tensor top_diff, const at::Tensor prob_data,
const at::Tensor labels, const at::Tensor cls_loss_weights, const at::Tensor gt_assignment,
const at::Tensor pc_labels, const at::Tensor pc_probs, const at::Tensor pc_count,
const at::Tensor img_cls_loss_weights, const at::Tensor im_labels, const int batch_size,
const int channels, at::Tensor bottom_diff)
{
const int kThreadsPerBlock = 16;
auto output_size = batch_size * channels;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(top_diff.scalar_type(), "ROIPool_backward", (
[&]{
PCLLossesBackward<scalar_t>
<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>(
output_size,
prob_data.contiguous().data<scalar_t>(),
labels.contiguous().data<scalar_t>(),
cls_loss_weights.contiguous().data<scalar_t>(),
gt_assignment.contiguous().data<scalar_t>(),
pc_labels.contiguous().data<scalar_t>(),
pc_probs.contiguous().data<scalar_t>(),
pc_count.contiguous().data<scalar_t>(),
img_cls_loss_weights.contiguous().data<scalar_t>(),
im_labels.contiguous().data<scalar_t>(),
channels,
bottom_diff.contiguous().data<scalar_t>());
}
));
THCudaCheck(cudaGetLastError());
return 1;
} |
38888b0475ab89fe612e6b4bd57636fbdf41b450.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <blas_quda.h>
#include <tune_quda.h>
#include <float_vector.h>
#include <register_traits.h>
// For kernels with precision conversion built in
#define checkSpinorLength(a, b) \
{ \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
if (a.GammaBasis() != b.GammaBasis()) \
errorQuda("gamma basis does not match: %d %d", a.GammaBasis(), b.GammaBasis()); \
}
namespace quda {
namespace blas {
hipStream_t* getStream();
namespace copy_ns {
#include <texture.h>
static struct {
const char *vol_str;
const char *aux_str;
} blasStrings;
template <typename FloatN, int N, typename Output, typename Input>
__global__ void copyKernel(Output Y, Input X, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int parity = blockIdx.y;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
FloatN x[N];
X.load(x, i, parity);
Y.save(x, i, parity);
i += gridSize;
}
}
template <typename FloatN, int N, typename Output, typename Input>
class CopyCuda : public Tunable {
private:
Input &X;
Output &Y;
const int length;
const int nParity;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
virtual bool advanceSharedBytes(TuneParam ¶m) const
{
TuneParam next(param);
advanceBlockDim(next); // to get next blockDim
int nthreads = next.block.x * next.block.y * next.block.z;
param.shared_bytes = sharedBytesPerThread()*nthreads > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*nthreads : sharedBytesPerBlock(param);
return false;
}
public:
CopyCuda(Output &Y, Input &X, int length, int nParity)
: X(X), Y(Y), length(length/nParity), nParity(nParity) { }
virtual ~CopyCuda() { ; }
inline TuneKey tuneKey() const {
return TuneKey(blasStrings.vol_str, "copyKernel", blasStrings.aux_str);
}
inline void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( copyKernel<FloatN, N>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, Y, X, length);
}
void preTune() { ; } // no need to save state for copy kernels
void postTune() { ; } // no need to restore state for copy kernels
void initTuneParam(TuneParam ¶m) const {
Tunable::initTuneParam(param);
param.grid.y = nParity;
}
void defaultTuneParam(TuneParam ¶m) const {
Tunable::defaultTuneParam(param);
param.grid.y = nParity;
}
long long flops() const { return 0; }
long long bytes() const {
const int Ninternal = (sizeof(FloatN)/sizeof(((FloatN*)0)->x))*N;
size_t bytes = (X.Precision() + Y.Precision())*Ninternal;
if (X.Precision() == QUDA_HALF_PRECISION || X.Precision() == QUDA_QUARTER_PRECISION) bytes += sizeof(float);
if (Y.Precision() == QUDA_HALF_PRECISION || Y.Precision() == QUDA_QUARTER_PRECISION) bytes += sizeof(float);
return bytes*length*nParity;
}
int tuningIter() const { return 3; }
};
void copy(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
if (&src == &dst) return; // aliasing fields
if (src.SiteSubset() != dst.SiteSubset())
errorQuda("Spinor fields do not have matching subsets dst=%d src=%d\n", src.SiteSubset(), dst.SiteSubset());
checkSpinorLength(dst, src);
blasStrings.vol_str = src.VolString();
char tmp[256];
strcpy(tmp, "dst=");
strcat(tmp, dst.AuxString());
strcat(tmp, ",src=");
strcat(tmp, src.AuxString());
blasStrings.aux_str = tmp;
if (dst.Nspin() != src.Nspin())
errorQuda("Spins (%d,%d) do not match", dst.Nspin(), src.Nspin());
// For a given dst precision, there are two non-trivial possibilities for the
// src precision.
blas::bytes += (unsigned long long)src.RealLength()*(src.Precision() + dst.Precision());
int partitions = (src.IsComposite() ? src.CompositeDim() : 1) * (src.SiteSubset());
if (dst.Precision() == src.Precision()) {
if (src.Bytes() != dst.Bytes()) errorQuda("Precisions match, but bytes do not");
qudaMemcpyAsync(dst.V(), src.V(), dst.Bytes(), hipMemcpyDeviceToDevice, *blas::getStream());
if (dst.Precision() == QUDA_HALF_PRECISION || dst.Precision() == QUDA_QUARTER_PRECISION) {
qudaMemcpyAsync(dst.Norm(), src.Norm(), dst.NormBytes(), hipMemcpyDeviceToDevice, *blas::getStream());
blas::bytes += 2*(unsigned long long)dst.RealLength()*sizeof(float);
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
if (src.Nspin() == 4){
Spinor<float4, float4, 6, 0> src_tex(src);
Spinor<float4, double2, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 2) {
if (src.Length() != src.RealLength() || dst.Length() != dst.RealLength())
errorQuda("Non-zero stride not supported"); // we need to know how many colors to set "M" (requires JIT)
Spinor<float2, float2, 1, 0> src_tex(src);
Spinor<float2, double2, 1, 1> dst_spinor(dst);
CopyCuda<float2, 1, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Length() / 2,
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, float2, 3, 0> src_tex(src);
Spinor<float2, double2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
if (src.Nspin() == 4){
Spinor<float4, double2, 6, 0> src_tex(src);
Spinor<float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 2) {
if (src.Length() != src.RealLength() || dst.Length() != dst.RealLength())
errorQuda("Non-zero stride not supported"); // we need to know how many colors to set "M" (requires JIT)
Spinor<float2, double2, 1, 0> src_tex(src);
Spinor<float2, float2, 1, 1> dst_spinor(dst);
CopyCuda<float2, 1, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Length() / 2,
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, double2, 3, 0> src_tex(src);
Spinor<float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, short4, 6, 0> src_tex(src);
Spinor<float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, short2, 3, 0> src_tex(src);
Spinor<float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, float4, 6, 0> src_tex(src);
Spinor<float4, short4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, float2, 3, 0> src_tex(src);
Spinor<float2, short2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, short4, 12, 0> src_tex(src);
Spinor<double2, double2, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<double2, short2, 3, 0> src_tex(src);
Spinor<double2, double2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, double2, 12, 0> src_tex(src);
Spinor<double2, short4, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<double2, double2, 3, 0> src_tex(src);
Spinor<double2, short2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_QUARTER_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float)*2;
if (src.Nspin() == 4){
Spinor<float4, char4, 6, 0> src_tex(src);
Spinor<float4, short4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, char2, 3, 0> src_tex(src);
Spinor<float2, short2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_QUARTER_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float)*2;
if (src.Nspin() == 4){
Spinor<float4, short4, 6, 0> src_tex(src);
Spinor<float4, char4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, short2, 3, 0> src_tex(src);
Spinor<float2, char2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_QUARTER_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, char4, 6, 0> src_tex(src);
Spinor<float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, char2, 3, 0> src_tex(src);
Spinor<float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_QUARTER_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, float4, 6, 0> src_tex(src);
Spinor<float4, char4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, float2, 3, 0> src_tex(src);
Spinor<float2, char2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_QUARTER_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, char4, 12, 0> src_tex(src);
Spinor<double2, double2, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<double2, char2, 3, 0> src_tex(src);
Spinor<double2, double2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_QUARTER_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, double2, 12, 0> src_tex(src);
Spinor<double2, char4, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<double2, double2, 3, 0> src_tex(src);
Spinor<double2, char2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else {
errorQuda("Invalid precision combination dst=%d and src=%d", dst.Precision(), src.Precision());
}
checkCudaError();
}
} // namespace copy_nw
void copy(ColorSpinorField &dst, const ColorSpinorField &src) {
if (dst.Location() == QUDA_CUDA_FIELD_LOCATION &&
src.Location() == QUDA_CUDA_FIELD_LOCATION) {
copy_ns::copy(static_cast<cudaColorSpinorField&>(dst),
static_cast<const cudaColorSpinorField&>(src));
} else {
dst = src;
}
}
} // namespace blas
} // namespace quda
| 38888b0475ab89fe612e6b4bd57636fbdf41b450.cu | #include <blas_quda.h>
#include <tune_quda.h>
#include <float_vector.h>
#include <register_traits.h>
// For kernels with precision conversion built in
#define checkSpinorLength(a, b) \
{ \
if (a.Length() != b.Length()) \
errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length()); \
if (a.Stride() != b.Stride()) \
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \
if (a.GammaBasis() != b.GammaBasis()) \
errorQuda("gamma basis does not match: %d %d", a.GammaBasis(), b.GammaBasis()); \
}
namespace quda {
namespace blas {
cudaStream_t* getStream();
namespace copy_ns {
#include <texture.h>
static struct {
const char *vol_str;
const char *aux_str;
} blasStrings;
template <typename FloatN, int N, typename Output, typename Input>
__global__ void copyKernel(Output Y, Input X, int length) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int parity = blockIdx.y;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < length) {
FloatN x[N];
X.load(x, i, parity);
Y.save(x, i, parity);
i += gridSize;
}
}
template <typename FloatN, int N, typename Output, typename Input>
class CopyCuda : public Tunable {
private:
Input &X;
Output &Y;
const int length;
const int nParity;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
virtual bool advanceSharedBytes(TuneParam ¶m) const
{
TuneParam next(param);
advanceBlockDim(next); // to get next blockDim
int nthreads = next.block.x * next.block.y * next.block.z;
param.shared_bytes = sharedBytesPerThread()*nthreads > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*nthreads : sharedBytesPerBlock(param);
return false;
}
public:
CopyCuda(Output &Y, Input &X, int length, int nParity)
: X(X), Y(Y), length(length/nParity), nParity(nParity) { }
virtual ~CopyCuda() { ; }
inline TuneKey tuneKey() const {
return TuneKey(blasStrings.vol_str, "copyKernel", blasStrings.aux_str);
}
inline void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
copyKernel<FloatN, N><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(Y, X, length);
}
void preTune() { ; } // no need to save state for copy kernels
void postTune() { ; } // no need to restore state for copy kernels
void initTuneParam(TuneParam ¶m) const {
Tunable::initTuneParam(param);
param.grid.y = nParity;
}
void defaultTuneParam(TuneParam ¶m) const {
Tunable::defaultTuneParam(param);
param.grid.y = nParity;
}
long long flops() const { return 0; }
long long bytes() const {
const int Ninternal = (sizeof(FloatN)/sizeof(((FloatN*)0)->x))*N;
size_t bytes = (X.Precision() + Y.Precision())*Ninternal;
if (X.Precision() == QUDA_HALF_PRECISION || X.Precision() == QUDA_QUARTER_PRECISION) bytes += sizeof(float);
if (Y.Precision() == QUDA_HALF_PRECISION || Y.Precision() == QUDA_QUARTER_PRECISION) bytes += sizeof(float);
return bytes*length*nParity;
}
int tuningIter() const { return 3; }
};
void copy(cudaColorSpinorField &dst, const cudaColorSpinorField &src) {
if (&src == &dst) return; // aliasing fields
if (src.SiteSubset() != dst.SiteSubset())
errorQuda("Spinor fields do not have matching subsets dst=%d src=%d\n", src.SiteSubset(), dst.SiteSubset());
checkSpinorLength(dst, src);
blasStrings.vol_str = src.VolString();
char tmp[256];
strcpy(tmp, "dst=");
strcat(tmp, dst.AuxString());
strcat(tmp, ",src=");
strcat(tmp, src.AuxString());
blasStrings.aux_str = tmp;
if (dst.Nspin() != src.Nspin())
errorQuda("Spins (%d,%d) do not match", dst.Nspin(), src.Nspin());
// For a given dst precision, there are two non-trivial possibilities for the
// src precision.
blas::bytes += (unsigned long long)src.RealLength()*(src.Precision() + dst.Precision());
int partitions = (src.IsComposite() ? src.CompositeDim() : 1) * (src.SiteSubset());
if (dst.Precision() == src.Precision()) {
if (src.Bytes() != dst.Bytes()) errorQuda("Precisions match, but bytes do not");
qudaMemcpyAsync(dst.V(), src.V(), dst.Bytes(), cudaMemcpyDeviceToDevice, *blas::getStream());
if (dst.Precision() == QUDA_HALF_PRECISION || dst.Precision() == QUDA_QUARTER_PRECISION) {
qudaMemcpyAsync(dst.Norm(), src.Norm(), dst.NormBytes(), cudaMemcpyDeviceToDevice, *blas::getStream());
blas::bytes += 2*(unsigned long long)dst.RealLength()*sizeof(float);
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
if (src.Nspin() == 4){
Spinor<float4, float4, 6, 0> src_tex(src);
Spinor<float4, double2, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 2) {
if (src.Length() != src.RealLength() || dst.Length() != dst.RealLength())
errorQuda("Non-zero stride not supported"); // we need to know how many colors to set "M" (requires JIT)
Spinor<float2, float2, 1, 0> src_tex(src);
Spinor<float2, double2, 1, 1> dst_spinor(dst);
CopyCuda<float2, 1, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Length() / 2,
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, float2, 3, 0> src_tex(src);
Spinor<float2, double2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
if (src.Nspin() == 4){
Spinor<float4, double2, 6, 0> src_tex(src);
Spinor<float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 2) {
if (src.Length() != src.RealLength() || dst.Length() != dst.RealLength())
errorQuda("Non-zero stride not supported"); // we need to know how many colors to set "M" (requires JIT)
Spinor<float2, double2, 1, 0> src_tex(src);
Spinor<float2, float2, 1, 1> dst_spinor(dst);
CopyCuda<float2, 1, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Length() / 2,
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, double2, 3, 0> src_tex(src);
Spinor<float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, short4, 6, 0> src_tex(src);
Spinor<float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, short2, 3, 0> src_tex(src);
Spinor<float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, float4, 6, 0> src_tex(src);
Spinor<float4, short4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, float2, 3, 0> src_tex(src);
Spinor<float2, short2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, short4, 12, 0> src_tex(src);
Spinor<double2, double2, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<double2, short2, 3, 0> src_tex(src);
Spinor<double2, double2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, double2, 12, 0> src_tex(src);
Spinor<double2, short4, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<double2, double2, 3, 0> src_tex(src);
Spinor<double2, short2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(),
partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_QUARTER_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float)*2;
if (src.Nspin() == 4){
Spinor<float4, char4, 6, 0> src_tex(src);
Spinor<float4, short4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, char2, 3, 0> src_tex(src);
Spinor<float2, short2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_QUARTER_PRECISION && src.Precision() == QUDA_HALF_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float)*2;
if (src.Nspin() == 4){
Spinor<float4, short4, 6, 0> src_tex(src);
Spinor<float4, char4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, short2, 3, 0> src_tex(src);
Spinor<float2, char2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_QUARTER_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, char4, 6, 0> src_tex(src);
Spinor<float4, float4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, char2, 3, 0> src_tex(src);
Spinor<float2, float2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_QUARTER_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<float4, float4, 6, 0> src_tex(src);
Spinor<float4, char4, 6, 1> dst_spinor(dst);
CopyCuda<float4, 6, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<float2, float2, 3, 0> src_tex(src);
Spinor<float2, char2, 3, 1> dst_spinor(dst);
CopyCuda<float2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_QUARTER_PRECISION) {
blas::bytes += (unsigned long long)src.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, char4, 12, 0> src_tex(src);
Spinor<double2, double2, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<double2, char2, 3, 0> src_tex(src);
Spinor<double2, double2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else if (dst.Precision() == QUDA_QUARTER_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) {
blas::bytes += (unsigned long long)dst.Volume()*sizeof(float);
if (src.Nspin() == 4){
Spinor<double2, double2, 12, 0> src_tex(src);
Spinor<double2, char4, 12, 1> dst_spinor(dst);
CopyCuda<double2, 12, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else if (src.Nspin() == 1) {
Spinor<double2, double2, 3, 0> src_tex(src);
Spinor<double2, char2, 3, 1> dst_spinor(dst);
CopyCuda<double2, 3, decltype(dst_spinor), decltype(src_tex)> copy(dst_spinor, src_tex, src.Volume(), partitions);
copy.apply(*blas::getStream());
} else {
errorQuda("Nspin(%d) is not supported", src.Nspin());
}
} else {
errorQuda("Invalid precision combination dst=%d and src=%d", dst.Precision(), src.Precision());
}
checkCudaError();
}
} // namespace copy_nw
void copy(ColorSpinorField &dst, const ColorSpinorField &src) {
if (dst.Location() == QUDA_CUDA_FIELD_LOCATION &&
src.Location() == QUDA_CUDA_FIELD_LOCATION) {
copy_ns::copy(static_cast<cudaColorSpinorField&>(dst),
static_cast<const cudaColorSpinorField&>(src));
} else {
dst = src;
}
}
} // namespace blas
} // namespace quda
|
3bb67c47a68be72e7e5e5242d68c03e827258bd7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Conv2D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_image = NULL;
hipMalloc(&d_image, XSIZE*YSIZE);
float *kernel = NULL;
hipMalloc(&kernel, XSIZE*YSIZE);
float *d_result = NULL;
hipMalloc(&d_result, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int kernelSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
Conv2D), dim3(gridBlock),dim3(threadBlock), 0, 0, d_image,kernel,d_result,width,height,kernelSize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
Conv2D), dim3(gridBlock),dim3(threadBlock), 0, 0, d_image,kernel,d_result,width,height,kernelSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
Conv2D), dim3(gridBlock),dim3(threadBlock), 0, 0, d_image,kernel,d_result,width,height,kernelSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3bb67c47a68be72e7e5e5242d68c03e827258bd7.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Conv2D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_image = NULL;
cudaMalloc(&d_image, XSIZE*YSIZE);
float *kernel = NULL;
cudaMalloc(&kernel, XSIZE*YSIZE);
float *d_result = NULL;
cudaMalloc(&d_result, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int kernelSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Conv2D<<<gridBlock,threadBlock>>>(d_image,kernel,d_result,width,height,kernelSize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Conv2D<<<gridBlock,threadBlock>>>(d_image,kernel,d_result,width,height,kernelSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Conv2D<<<gridBlock,threadBlock>>>(d_image,kernel,d_result,width,height,kernelSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
53a66c0fdc5d8a6141de3f2d7d974161471d6d2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Copyright (c) 2018 ETH Zurich, Lukas Cavigelli
#include <cstdio>
#include <cmath>
extern "C" {
__global__ void changeDetectionFG_kernel(const float* __restrict__ input,
const float* __restrict__ prevInput,
float* __restrict__ diffs,
char* __restrict__ changeMap,
const int numVals, const float threshold) {
int valIdx = blockIdx.x*blockDim.x + threadIdx.x;
if(valIdx >= numVals)
return;
float d = input[valIdx] - prevInput[valIdx];
bool pred = fabs(d) > threshold;
changeMap[valIdx] = pred;
if(pred)
diffs[valIdx] = d;
}
void changeDetectionFG(const float* input,
const float* prevInput,
float* diffs,
char* changeMap,
const int numVals, const float threshold) {
const int blockSize = 128;
dim3 grid((numVals - 1)/blockSize + 1);
dim3 block(blockSize);
hipLaunchKernelGGL(( changeDetectionFG_kernel), dim3(grid), dim3(block), 0, 0, input, prevInput, diffs, changeMap, numVals, threshold);
}
__global__ void updateOutputFG_kernel(const float* __restrict__ diffs,
const float* __restrict__ weight,
float* __restrict__ output,
const long* __restrict__ changeCoords,
const int numOut, const int numIn, const int height, const int width,
const int kH, const int kW, const int numChanges) {
int chngIdx = blockIdx.x*blockDim.x + threadIdx.x;
if(chngIdx >= numChanges)
return;
int pos = (int) changeCoords[chngIdx];
int ci = (int) pos/(height*width);
int y = (int) (pos / width) % height;
int x = (int) pos % width;
float d = diffs[(ci*height + y)*width + x];
for(int co = 0; co < numOut; co++) {
for(int iky = 0; iky < kH; iky++) {
for(int ikx = 0; ikx < kW; ikx++) {
int ytot = y - iky + kH/2, xtot = x - ikx + kW/2;
float wght = weight[((co*numIn + ci)*kH + iky)*kW + ikx];
if(0 <= ytot && ytot < height && 0 <= xtot && xtot < width){
atomicAdd(output + (co*height + ytot)*width + xtot, wght*d);
}
}
}
}
}
// Host wrapper: forwards a caller-chosen grid/block geometry to
// updateOutputFG_kernel on the default (null) stream.
void updateOutputFG(int gridz, int gridy, int gridx, int blockz, int blocky, int blockx,
          const float* diffs, const float* weight, float* output, const long* changeCoords,
          const int numOut, const int numIn, const int height, const int width,
          const int kH, const int kW, const int numChanges) {
  dim3 grid(gridx, gridy, gridz);
  dim3 block(blockx, blocky, blockz);
  // Asynchronous, unchecked launch (no hipGetLastError() afterwards).
  hipLaunchKernelGGL(( updateOutputFG_kernel), dim3(grid), dim3(block), 0, 0, diffs, weight, output, changeCoords,
                    numOut, numIn, height, width,
                    kH, kW, numChanges);
}
// CPU reference for the change-based 2-D convolution update.
// For every input location whose |input - prevInput| is at least `threshold`,
// the signed difference is scattered into `output` through the weights with a
// centered (kh/2, kw/2) offset ("same" padding).
//
// input/prevInput: [ni][h][w]; output: [no][h][w] (accumulated into -- the
// caller must pre-initialize it); weight: [no][ni][kh][kw].
//
// Bug fix: the OpenMP loop parallelizes over input channels `ci`, but every
// channel scatters into the *same* output planes, so the concurrent
// `output[oidx] +=` was a data race producing nondeterministic results. The
// accumulation is now guarded with `#pragma omp atomic` (a no-op when the
// file is compiled without OpenMP).
void conv2d_fg_cpu(const float* __restrict__ input, const float* __restrict__ prevInput, float* __restrict__ output,
          const float* __restrict__ weight, const float threshold,
          const int no, const int ni, const int h, const int w, const int kh, const int kw) {
  const int khhalf = kh/2, kwhalf = kw/2;
  #pragma omp parallel
  #pragma omp for
  for(int ci = 0; ci < ni; ci++) {
    for(int y = 0; y < h; y++) {
      for(int x = 0; x < w; x++) {
        int iidx = (ci*h + y)*w + x;
        float diff = input[iidx] - prevInput[iidx];
        // Skip locations whose change is below the detection threshold.
        if(fabs(diff) < threshold)
          continue;
        for(int co = 0; co < no; co++) {
          for(int iky = 0; iky < kh; iky++){
            int oy = y - iky + khhalf;
            if(oy < 0 || oy >= h)
              continue;
            for(int ikx = 0; ikx < kw; ikx++){
              int ox = x - ikx + kwhalf;
              if(ox < 0 || ox >= w)
                continue;
              int oidx = (co*h + oy)*w + ox;
              // Different ci iterations (running on different threads) can
              // target the same oidx -- make the read-modify-write atomic.
              #pragma omp atomic
              output[oidx] += diff * weight[((co*ni + ci)*kh + iky)*kw + ikx];
            }
          }
        }
      }
    }
  }
}
}
| 53a66c0fdc5d8a6141de3f2d7d974161471d6d2c.cu | //Copyright (c) 2018 ETH Zurich, Lukas Cavigelli
#include <cstdio>
#include <cmath>
extern "C" {
// Element-wise change detection: thread i computes d = input[i] - prevInput[i],
// stores (|d| > threshold) as 0/1 in changeMap[i], and writes d into diffs[i]
// only for changed elements (unchanged entries of diffs keep their old value).
__global__ void changeDetectionFG_kernel(const float* __restrict__ input,
                    const float* __restrict__ prevInput,
                    float* __restrict__ diffs,
                    char* __restrict__ changeMap,
                    const int numVals, const float threshold) {
  // One thread per element, with a tail guard for the last block.
  int valIdx = blockIdx.x*blockDim.x + threadIdx.x;
  if(valIdx >= numVals)
    return;
  float d = input[valIdx] - prevInput[valIdx];
  bool pred = fabs(d) > threshold;
  changeMap[valIdx] = pred;
  if(pred)
    diffs[valIdx] = d;
}
// Launch helper for changeDetectionFG_kernel: one thread per element on the
// default stream, with a fixed block size of 128 threads.
void changeDetectionFG(const float* input,
            const float* prevInput,
            float* diffs,
            char* changeMap,
            const int numVals, const float threshold) {
  const int threadsPerBlock = 128;
  // Ceiling division (launches one block even for numVals == 0, matching the
  // original behavior; the kernel's bounds guard handles idle threads).
  const int numBlocks = (numVals - 1) / threadsPerBlock + 1;
  changeDetectionFG_kernel<<<numBlocks, threadsPerBlock>>>(input, prevInput, diffs, changeMap, numVals, threshold);
}
// Change-based convolution update: each thread processes one changed input
// location (from changeCoords), decodes its (channel, row, col) position, and
// scatters weight*diff into the surrounding window of every output channel.
// atomicAdd is required because windows of nearby change locations overlap.
//
// diffs:        [numIn][height][width]; read only at changed locations.
// weight:       [numOut][numIn][kH][kW].
// output:       [numOut][height][width]; accumulated into atomically.
// changeCoords: flat indices (ci*height + y)*width + x.
__global__ void updateOutputFG_kernel(const float* __restrict__ diffs,
                  const float* __restrict__ weight,
                  float* __restrict__ output,
                  const long* __restrict__ changeCoords,
                  const int numOut, const int numIn, const int height, const int width,
                  const int kH, const int kW, const int numChanges) {
  int chngIdx = blockIdx.x*blockDim.x + threadIdx.x;
  if(chngIdx >= numChanges)
    return;
  // NOTE(review): long -> int truncation assumes < 2^31 tensor elements.
  int pos = (int) changeCoords[chngIdx];
  int ci = (int) pos/(height*width);
  int y = (int) (pos / width) % height;
  int x = (int) pos % width;
  float d = diffs[(ci*height + y)*width + x];
  for(int co = 0; co < numOut; co++) {
    for(int iky = 0; iky < kH; iky++) {
      for(int ikx = 0; ikx < kW; ikx++) {
        // Target output cell for this weight tap, centered at (kH/2, kW/2).
        int ytot = y - iky + kH/2, xtot = x - ikx + kW/2;
        float wght = weight[((co*numIn + ci)*kH + iky)*kW + ikx];
        if(0 <= ytot && ytot < height && 0 <= xtot && xtot < width){
          atomicAdd(output + (co*height + ytot)*width + xtot, wght*d);
        }
      }
    }
  }
}
// Thin launch wrapper: forwards the caller-supplied grid/block geometry to
// updateOutputFG_kernel on the default stream.
void updateOutputFG(int gridz, int gridy, int gridx, int blockz, int blocky, int blockx,
          const float* diffs, const float* weight, float* output, const long* changeCoords,
          const int numOut, const int numIn, const int height, const int width,
          const int kH, const int kW, const int numChanges) {
  const dim3 gridDims(gridx, gridy, gridz);
  const dim3 blockDims(blockx, blocky, blockz);
  updateOutputFG_kernel<<<gridDims, blockDims>>>(diffs, weight, output, changeCoords,
                        numOut, numIn, height, width,
                        kH, kW, numChanges);
}
// CPU reference for the change-based 2-D convolution update.
// For every input location whose |input - prevInput| is at least `threshold`,
// the signed difference is scattered into `output` through the weights with a
// centered (kh/2, kw/2) offset ("same" padding).
//
// input/prevInput: [ni][h][w]; output: [no][h][w] (accumulated into -- the
// caller must pre-initialize it); weight: [no][ni][kh][kw].
//
// Bug fix: the OpenMP loop parallelizes over input channels `ci`, but every
// channel scatters into the *same* output planes, so the concurrent
// `output[oidx] +=` was a data race producing nondeterministic results. The
// accumulation is now guarded with `#pragma omp atomic` (a no-op when the
// file is compiled without OpenMP).
void conv2d_fg_cpu(const float* __restrict__ input, const float* __restrict__ prevInput, float* __restrict__ output,
          const float* __restrict__ weight, const float threshold,
          const int no, const int ni, const int h, const int w, const int kh, const int kw) {
  const int khhalf = kh/2, kwhalf = kw/2;
  #pragma omp parallel
  #pragma omp for
  for(int ci = 0; ci < ni; ci++) {
    for(int y = 0; y < h; y++) {
      for(int x = 0; x < w; x++) {
        int iidx = (ci*h + y)*w + x;
        float diff = input[iidx] - prevInput[iidx];
        // Skip locations whose change is below the detection threshold.
        if(fabs(diff) < threshold)
          continue;
        for(int co = 0; co < no; co++) {
          for(int iky = 0; iky < kh; iky++){
            int oy = y - iky + khhalf;
            if(oy < 0 || oy >= h)
              continue;
            for(int ikx = 0; ikx < kw; ikx++){
              int ox = x - ikx + kwhalf;
              if(ox < 0 || ox >= w)
                continue;
              int oidx = (co*h + oy)*w + ox;
              // Different ci iterations (running on different threads) can
              // target the same oidx -- make the read-modify-write atomic.
              #pragma omp atomic
              output[oidx] += diff * weight[((co*ni + ci)*kh + iky)*kw + ikx];
            }
          }
        }
      }
    }
  }
}
}
|
933cf7d431f8bba970cece673c875ae5c3ff43f1.hip | // !!! This is a file automatically generated by hipify!!!
// RUN: %run_test hipify "%s" "%t" %cuda_args
#include <iostream>
// CHECK: #include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define TOKEN_PASTE(X, Y) X ## Y
#define ARG_LIST_AS_MACRO a, device_x, device_y
#define KERNEL_CALL_AS_MACROhipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(kDataLen), 0, 0,
#define KERNEL_NAME_MACRO( axpy<float>)
// CHECK: #define COMPLETE_LAUNCH hipLaunchKernelGGLaxpy, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y)
#define COMPLETE_LAUNCHhipLaunchKernelGGL(( axpy), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y)
// Kernel used as the launch target in this hipify test: each thread scales
// one element (y[i] = a * x[i]; despite the "axpy" name, y is overwritten,
// not accumulated). Code left untouched -- this file is a test fixture.
template<typename T>
__global__ void axpy(T a, T *x, T *y) {
  y[threadIdx.x] = a * x[threadIdx.x];
}
int main(int argc, char* argv[]) {
  // Hipify-translated driver: allocates device buffers, runs the same kernel
  // through several macro-obfuscated launch forms, and prints the result.
  // NOTE(review): the hipified #define lines above (KERNEL_CALL_AS_MACRO,
  // KERNEL_NAME_MACRO) appear malformed in this auto-generated output, so the
  // macro-based launches below likely do not compile as-is -- verify against
  // the original .cu side of the test.
  const int kDataLen = 4;
  float a = 2.0f;
  float host_x[kDataLen] = {1.0f, 2.0f, 3.0f, 4.0f};
  float host_y[kDataLen];
  // Copy input data to device.
  float* device_x;
  float* device_y;
  // CHECK: hipMalloc(&device_x, kDataLen * sizeof(float));
  hipMalloc(&device_x, kDataLen * sizeof(float));
  #ifdef HERRING
  // CHECK: hipMalloc(&device_y, kDataLen * sizeof(float));
  hipMalloc(&device_y, kDataLen * sizeof(float));
  #else
  // CHECK: hipMalloc(&device_y, kDataLen * sizeof(double));
  hipMalloc(&device_y, kDataLen * sizeof(double));
  #endif
  // CHECK: hipMemcpy(device_x, host_x, kDataLen * sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(device_x, host_x, kDataLen * sizeof(float), hipMemcpyHostToDevice);
  // Launch the kernel in numerous different strange ways to exercise the preprocessor.
  // CHECK: hipLaunchKernelGGL(axpy, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
  hipLaunchKernelGGL(( axpy), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
  // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
  hipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
  // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, a, TOKEN_PASTE(device, _x), device_y);
  hipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, TOKEN_PASTE(device, _x), device_y);
  // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
  hipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
  // CHECK: hipLaunchKernelGGL(KERNEL_NAME_MACRO, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
  hipLaunchKernelGGL(( KERNEL_NAME_MACRO), dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
  // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
  KERNEL_CALL_AS_MACRO(ARG_LIST_AS_MACRO);
  // CHECK: COMPLETE_LAUNCH;
  COMPLETE_LAUNCH;
  // Copy output data to host.
  // CHECK: hipDeviceSynchronize();
  hipDeviceSynchronize();
  // CHECK: hipMemcpy(host_y, device_y, kDataLen * sizeof(float), hipMemcpyDeviceToHost);
  hipMemcpy(host_y, device_y, kDataLen * sizeof(float), hipMemcpyDeviceToHost);
  // Print the results.
  for (int i = 0; i < kDataLen; ++i) {
    std::cout << "y[" << i << "] = " << host_y[i] << "\n";
  }
  // CHECK: hipDeviceReset();
  hipDeviceReset();
  return 0;
}
| 933cf7d431f8bba970cece673c875ae5c3ff43f1.cu | // RUN: %run_test hipify "%s" "%t" %cuda_args
#include <iostream>
// CHECK: #include <hip/hip_runtime.h>
#include <cuda.h>
#define TOKEN_PASTE(X, Y) X ## Y
#define ARG_LIST_AS_MACRO a, device_x, device_y
#define KERNEL_CALL_AS_MACRO axpy<float><<<1, kDataLen>>>
#define KERNEL_NAME_MACRO axpy<float>
// CHECK: #define COMPLETE_LAUNCH hipLaunchKernelGGL(axpy, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y)
#define COMPLETE_LAUNCH axpy<<<1, kDataLen>>>(a, device_x, device_y)
// Kernel used as the launch target in this hipify test: each thread scales
// one element (y[i] = a * x[i]; despite the "axpy" name, y is overwritten,
// not accumulated). Code left untouched -- this file is a test fixture.
template<typename T>
__global__ void axpy(T a, T *x, T *y) {
  y[threadIdx.x] = a * x[threadIdx.x];
}
int main(int argc, char* argv[]) {
  // Hipify FileCheck fixture: the CHECK comments encode the expected HIP
  // translation of each CUDA call/launch below; do not reorder these lines.
  const int kDataLen = 4;
  float a = 2.0f;
  float host_x[kDataLen] = {1.0f, 2.0f, 3.0f, 4.0f};
  float host_y[kDataLen];
  // Copy input data to device.
  float* device_x;
  float* device_y;
  // CHECK: hipMalloc(&device_x, kDataLen * sizeof(float));
  cudaMalloc(&device_x, kDataLen * sizeof(float));
  #ifdef HERRING
  // CHECK: hipMalloc(&device_y, kDataLen * sizeof(float));
  cudaMalloc(&device_y, kDataLen * sizeof(float));
  #else
  // CHECK: hipMalloc(&device_y, kDataLen * sizeof(double));
  cudaMalloc(&device_y, kDataLen * sizeof(double));
  #endif
  // CHECK: hipMemcpy(device_x, host_x, kDataLen * sizeof(float), hipMemcpyHostToDevice);
  cudaMemcpy(device_x, host_x, kDataLen * sizeof(float), cudaMemcpyHostToDevice);
  // Launch the kernel in numerous different strange ways to exercise the preprocessor.
  // CHECK: hipLaunchKernelGGL(axpy, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
  axpy<<<1, kDataLen>>>(a, device_x, device_y);
  // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
  axpy<float><<<1, kDataLen>>>(a, device_x, device_y);
  // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, a, TOKEN_PASTE(device, _x), device_y);
  axpy<float><<<1, kDataLen>>>(a, TOKEN_PASTE(device, _x), device_y);
  // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
  axpy<float><<<1, kDataLen>>>(ARG_LIST_AS_MACRO);
  // CHECK: hipLaunchKernelGGL(KERNEL_NAME_MACRO, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
  KERNEL_NAME_MACRO<<<1, kDataLen>>>(ARG_LIST_AS_MACRO);
  // CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
  KERNEL_CALL_AS_MACRO(ARG_LIST_AS_MACRO);
  // CHECK: COMPLETE_LAUNCH;
  COMPLETE_LAUNCH;
  // Copy output data to host.
  // CHECK: hipDeviceSynchronize();
  cudaDeviceSynchronize();
  // CHECK: hipMemcpy(host_y, device_y, kDataLen * sizeof(float), hipMemcpyDeviceToHost);
  cudaMemcpy(host_y, device_y, kDataLen * sizeof(float), cudaMemcpyDeviceToHost);
  // Print the results.
  for (int i = 0; i < kDataLen; ++i) {
    std::cout << "y[" << i << "] = " << host_y[i] << "\n";
  }
  // CHECK: hipDeviceReset();
  cudaDeviceReset();
  return 0;
}
|
557e5572d89f0aa73d3521c042539fd91c493b72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "hiprand/hiprand_kernel.h"
#include "device_launch_parameters.h"
#include "Color.h"
#include "Vector3D.h"
#include "Ray.h"
#include "Utils.h"
#include "Hittable.h"
#include "Sphere.h"
#include "Camera.h"
#include "stb_image_write.h"
#include <vector>
#include <stdio.h>
#include <random>
#include <float.h>
#include <cstdlib>
#include <Windows.h>
#include <winbase.h>
// Image/output settings, populated from config.ini by
// InitializeFromConfigFile().
struct ImageRenderingAttributes {
  float aspectRatio;             // width / height
  int width;                     // image width in pixels
  int height;                    // derived as width / aspectRatio
  int samplesPerPixel;           // rays averaged per pixel
  int maxRayBounceLimit;         // maximum scatter depth per ray
  float gammaCorrectionExponent; // exponent applied per color channel
};
// Launch geometry and device-side buffers shared by all kernels
// (filled in InitializeCUDA()).
struct CUDAAttributes {
  int blockSize;                       // number of blocks in the 1-D grid
  int threadSize;                      // threads per block
  int numRays;                         // width * height * samplesPerPixel
  int numResultPixelColors;            // numRays / samplesPerPixel (= pixel count)
  Camera* deviceCameraMemory;          // device copy of the camera
  Ray* deviceRayMemory;                // one primary ray per sample
  Color* deviceSamplePixelColorMemory; // per-sample traced colors
  Color* deviceResultPixelColorMemory; // reduced per-pixel colors
  Sphere* deviceWorldMemory;           // device copy of the sphere scene
  hiprandState_t* deviceRandomState;   // one RNG state per launched thread
};
// Host-side globals shared by the pipeline functions below.
ImageRenderingAttributes imageRenderingAttributes;
CUDAAttributes cudaAttributes;
// Scene: 1 ground sphere + up to 22*22 random small spheres + 3 feature
// spheres. sceneSize is an upper bound; SetupScene may fill fewer slots,
// leaving trailing entries default-constructed.
const size_t sceneSize = 4 + 22 * 22;
Sphere sphereScene[sceneSize];
// Camera
Camera camera;
// Host functions
__host__ void InitializeFromConfigFile();
__host__ void SetupScene();
__host__ void InitializeCUDA();
__host__ void LaunchCUDAKernels();
__host__ void WriteResultsToFile();
__host__ void FreeCUDAMemory();
// CUDA kernels
__global__ void InitializeCuRand(hiprandState_t* states);
__global__ void CreateRays(int imageWidth, int imageHeight, int samplesPerPixel, Camera* camera, Ray* rays, int numRays, hiprandState_t* curandStates);
__global__ void DoRaytracing(hiprandState_t* curandStates, Ray* rays, int numRays, Color* pixelColors, Sphere* world, size_t worldSize, int maxRayBounceLimit);
__global__ void ReducePixelColorSamplesToFinalValue(Color* pixelColors, int samplesPerPixel, Color* reducedPixelColors, int numReducedPixelColors);
__global__ void DoGammaCorrection(Color* resultPixelColors, int numResultPixelColors, float gammaExponent);
/// <summary>
/// Main Function
/// </summary>
int main() {
  // Load image/camera settings, build the random sphere scene, then set up
  // device buffers and launch the GPU rendering pipeline.
  InitializeFromConfigFile();
  SetupScene();
  InitializeCUDA();
  LaunchCUDAKernels();
  // Wait for GPU computation to finish
  hipDeviceSynchronize();
  // Error check (kernels launch asynchronously; errors surface here).
  hipError_t error = hipGetLastError();
  if (error != hipSuccess) {
    printf("CUDA error: %s\n", hipGetErrorString(error));
    exit(-1);
  }
  printf("Raytracing finished!\n");
  WriteResultsToFile();
  FreeCUDAMemory();
  return 0;
}
// Reads one float entry from an INI [section]/key pair via the Win32 profile
// API; a missing key yields 0.0f (atof of the empty default string).
// Extracted helper: the original repeated this buffer/GetPrivateProfileString/
// atof sequence nine times.
static float ReadConfigFloat(const char* section, const char* key, const char* configFilePath) {
  char buffer[10];
  GetPrivateProfileString(section, key, "", buffer, 10, configFilePath);
  return (float)atof(buffer);
}

// Populates the global imageRenderingAttributes and camera from .\config.ini.
// Image height is derived from width and aspect ratio. When UseDepthOfField
// is 0 the camera is forced to a pinhole (aperture 0, focus distance 1).
__host__ void InitializeFromConfigFile() {
  const char* configFilePath = ".\\config.ini";
  // Image Settings
  float aspectRatio = ReadConfigFloat("ImageSettings", "AspectRatio", configFilePath);
  int imageWidth = GetPrivateProfileInt("ImageSettings", "ImageWidth", -1, configFilePath);
  float gammaCorrectionExponent = ReadConfigFloat("ImageSettings", "GammaCorrectionExponent", configFilePath);
  int samplesPerPixel = GetPrivateProfileInt("ImageSettings", "SamplesPerPixel", -1, configFilePath);
  int maxRayBounceLimit = GetPrivateProfileInt("ImageSettings", "MaxRayBounceLimit", -1, configFilePath);
  imageRenderingAttributes.aspectRatio = aspectRatio;
  imageRenderingAttributes.width = imageWidth;
  imageRenderingAttributes.height = (int)(imageWidth / aspectRatio);
  imageRenderingAttributes.gammaCorrectionExponent = gammaCorrectionExponent;
  imageRenderingAttributes.samplesPerPixel = samplesPerPixel;
  imageRenderingAttributes.maxRayBounceLimit = maxRayBounceLimit;
  // Camera Settings
  float lookFromX = ReadConfigFloat("CameraSettings", "LookFromX", configFilePath);
  float lookFromY = ReadConfigFloat("CameraSettings", "LookFromY", configFilePath);
  float lookFromZ = ReadConfigFloat("CameraSettings", "LookFromZ", configFilePath);
  float lookAtX = ReadConfigFloat("CameraSettings", "LookAtX", configFilePath);
  float lookAtY = ReadConfigFloat("CameraSettings", "LookAtY", configFilePath);
  float lookAtZ = ReadConfigFloat("CameraSettings", "LookAtZ", configFilePath);
  float verticalFieldOfView = ReadConfigFloat("CameraSettings", "VerticalFieldOfView", configFilePath);
  int useDepthOfField = GetPrivateProfileInt("CameraSettings", "UseDepthOfField", -1, configFilePath);
  float distanceToFocus = ReadConfigFloat("CameraSettings", "DistanceToFocus", configFilePath);
  float apertureSize = ReadConfigFloat("CameraSettings", "ApertureSize", configFilePath);
  Vector3D lookFrom(lookFromX, lookFromY, lookFromZ);
  Vector3D lookAt(lookAtX, lookAtY, lookAtZ);
  Vector3D up(0.0f, 1.0f, 0.0f);
  if (useDepthOfField == 0) {
    // Depth of field disabled: zero aperture makes focus distance irrelevant.
    distanceToFocus = 1.0f;
    apertureSize = 0.0f;
  }
  camera.Initialize(lookFrom, lookAt, up, verticalFieldOfView, aspectRatio, apertureSize, distanceToFocus);
}
// Builds the sphere scene in the global sphereScene array: a large ground
// sphere, a 22x22 grid of randomly placed/colored small spheres (material
// chosen randomly: ~40% diffuse, ~50% metal, ~10% glass), and three large
// feature spheres (glass, metal, diffuse).
// NOTE(review): random spheres too close to (4, 0.2, 0) are skipped, so
// `index` can end below sceneSize, leaving trailing array slots as
// default-constructed Spheres -- confirm Sphere's default state is benign
// for the Hit() test.
__host__ void SetupScene() {
  int index = 0;
  // Ground: huge matte sphere below the scene.
  Material groundMaterial;
  groundMaterial.albedoColor = Color(0.5f, 0.5f, 0.5f);
  groundMaterial.isMetal = false;
  groundMaterial.isDielectric = false;
  groundMaterial.refractionIndex = 0.0f;
  groundMaterial.metalFuzziness = 0.0f;
  Sphere groundSphere(Vector3D(0.0f, -1000.0f, 0.0f), 1000.0f, groundMaterial);
  sphereScene[index++] = groundSphere;
  // Random small spheres on a jittered 22x22 grid.
  for (int a = -11; a < 11; a++) {
    for (int b = -11; b < 11; b++) {
      float randomNumberForMaterialSelection = MathUtils::GetRandomFloatInRangeZeroToOne();
      Vector3D center(a + 0.9f * MathUtils::GetRandomFloatInRangeZeroToOne(), 0.2f, b + 0.9f * MathUtils::GetRandomFloatInRangeZeroToOne());
      // Skip positions that would intersect the big sphere at (4, 1, 0).
      if ((center - Vector3D(4.0f, 0.2f, 0.0f)).Size() > 0.9f) {
        if (randomNumberForMaterialSelection < 0.4f) {
          // Diffuse Material
          Color albedo = GetRandomColor() * GetRandomColor();
          Material sphereMaterial;
          sphereMaterial.albedoColor = albedo;
          sphereMaterial.isMetal = false;
          sphereMaterial.isDielectric = false;
          sphereMaterial.refractionIndex = 0.0f;
          sphereMaterial.metalFuzziness = 0.0f;
          Sphere sphere(center, 0.2f, sphereMaterial);
          sphereScene[index++] = sphere;
        }
        else if (randomNumberForMaterialSelection < 0.9f) {
          // Metal Material
          Color albedo = GetRandomColor(0.5f, 1.0f);
          float fuzziness = MathUtils::GetRandomFloatInRange(0.0f, 0.2f);
          Material sphereMaterial;
          sphereMaterial.albedoColor = albedo;
          sphereMaterial.isMetal = true;
          sphereMaterial.isDielectric = false;
          sphereMaterial.refractionIndex = 0.0f;
          sphereMaterial.metalFuzziness = fuzziness;
          Sphere sphere(center, 0.2f, sphereMaterial);
          sphereScene[index++] = sphere;
        }
        else {
          // Glass Material
          Material sphereMaterial;
          sphereMaterial.albedoColor = Color(1.0f, 1.0f, 1.0f);
          sphereMaterial.isMetal = false;
          sphereMaterial.isDielectric = true;
          sphereMaterial.refractionIndex = 1.5f;
          sphereMaterial.metalFuzziness = 0.0f;
          Sphere sphere(center, 0.2f, sphereMaterial);
          sphereScene[index++] = sphere;
        }
      }
    }
  }
  // Three large feature spheres: glass (center), metal (left), diffuse (right).
  Material material1;
  material1.albedoColor = Color(1.0f, 1.0f, 1.0f);
  material1.isMetal = false;
  material1.isDielectric = true;
  material1.refractionIndex = 1.5f;
  material1.metalFuzziness = 0.0f;
  Material material2;
  material2.albedoColor = Color(0.4f, 0.2f, 0.1f);
  material2.isMetal = false;
  material2.isDielectric = false;
  material2.refractionIndex = 0.0f;
  material2.metalFuzziness = 0.0f;
  Material material3;
  material3.albedoColor = Color(0.7f, 0.6f, 0.5f);
  material3.isMetal = true;
  material3.isDielectric = false;
  material3.refractionIndex = 0.0f;
  material3.metalFuzziness = 0.0f;
  Sphere sphere1(Vector3D(0.0f, 1.0f, 0.0f), 1.0f, material1);
  Sphere sphere2(Vector3D(-4.0f, 1.0f, 0.0f), 1.0f, material3);
  Sphere sphere3(Vector3D(4.0f, 1.0f, 0.0f), 1.0f, material2);
  sphereScene[index++] = sphere1;
  sphereScene[index++] = sphere2;
  sphereScene[index++] = sphere3;
}
// Selects the GPU, sizes the 1-D launch configuration (one thread per ray
// sample), allocates all device buffers, and uploads the camera and scene.
// NOTE(review): none of the hipMalloc/hipMemcpy return codes are checked --
// on a device with too little VRAM the later kernels will fault silently.
__host__ void InitializeCUDA() {
  // Set GPU
  hipSetDevice(0);
  // Determine how many rays have to be generated and how many result pixels there are
  cudaAttributes.numRays = imageRenderingAttributes.width * imageRenderingAttributes.height * imageRenderingAttributes.samplesPerPixel;
  cudaAttributes.numResultPixelColors = cudaAttributes.numRays / imageRenderingAttributes.samplesPerPixel;
  // Allocate GPU memory
  hipMalloc(&cudaAttributes.deviceRayMemory, cudaAttributes.numRays * sizeof(Ray));
  hipMalloc(&cudaAttributes.deviceCameraMemory, sizeof(Camera));
  hipMalloc(&cudaAttributes.deviceSamplePixelColorMemory, cudaAttributes.numRays * sizeof(Color));
  hipMalloc(&cudaAttributes.deviceResultPixelColorMemory, cudaAttributes.numResultPixelColors * sizeof(Color));
  hipMalloc(&cudaAttributes.deviceWorldMemory, sceneSize * sizeof(Sphere));
  // Copy data to GPU memory
  hipMemcpy(cudaAttributes.deviceCameraMemory, &camera, sizeof(Camera), hipMemcpyHostToDevice); // copy camera
  hipMemcpy(cudaAttributes.deviceWorldMemory, &sphereScene[0], sceneSize * sizeof(Sphere), hipMemcpyHostToDevice); // copy world
  // Set thread Size and determine block size
  cudaAttributes.threadSize = 1024;
  if (cudaAttributes.numRays % cudaAttributes.threadSize == 0) {
    cudaAttributes.blockSize = cudaAttributes.numRays / cudaAttributes.threadSize;
  }
  else {
    // Add one more block if needed. There will be idle threads in the last block, since the number of threads exceed the number of generated rays
    cudaAttributes.blockSize = (int)(floor(cudaAttributes.numRays / cudaAttributes.threadSize)) + 1;
  }
  // Allocate memory for random generators for every thread
  hipMalloc(&cudaAttributes.deviceRandomState, cudaAttributes.blockSize * cudaAttributes.threadSize * sizeof(hiprandState_t));
  // Calculate memory usage in VRAM and print it for information
  // NOTE(review): informational estimate only -- it counts the per-sample
  // color buffer as a second Ray array (sizeof(Ray) vs. sizeof(Color)), so
  // the printed figure does not exactly match the bytes allocated above.
  unsigned long long int VRAMmemoryInBytes = 2 * cudaAttributes.numRays * sizeof(Ray) +
    sizeof(Camera) + cudaAttributes.numResultPixelColors * sizeof(Color) +
    sceneSize * sizeof(Sphere) +
    cudaAttributes.blockSize * cudaAttributes.threadSize * sizeof(hiprandState_t);
  float VRAMmemoryInGigabytes = VRAMmemoryInBytes / 1024.0f / 1024.0f / 1024.0f;
  printf("GPU VRAM: Allocated Gigabytes (GB): %f\n", VRAMmemoryInGigabytes);
}
// Runs the full GPU pipeline on the default stream: RNG init -> primary ray
// generation -> path tracing -> per-pixel sample reduction -> gamma
// correction. All launches are asynchronous; main() synchronizes and checks
// for errors afterwards.
__host__ void LaunchCUDAKernels() {
  // Setup the random generators, one per each Thread
  InitializeCuRand << <cudaAttributes.blockSize, cudaAttributes.threadSize >> > (cudaAttributes.deviceRandomState);
  // Create one ray per thread
  CreateRays << <cudaAttributes.blockSize, cudaAttributes.threadSize >> > (
    imageRenderingAttributes.width,
    imageRenderingAttributes.height,
    imageRenderingAttributes.samplesPerPixel,
    cudaAttributes.deviceCameraMemory,
    cudaAttributes.deviceRayMemory,
    cudaAttributes.numRays,
    cudaAttributes.deviceRandomState);
  // Render the scene
  DoRaytracing << <cudaAttributes.blockSize, cudaAttributes.threadSize >> > (
    cudaAttributes.deviceRandomState,
    cudaAttributes.deviceRayMemory,
    cudaAttributes.numRays,
    cudaAttributes.deviceSamplePixelColorMemory,
    cudaAttributes.deviceWorldMemory,
    sceneSize,
    imageRenderingAttributes.maxRayBounceLimit);
  // The pixel color samples of one pixel need to be reduced to one final pixel color, if there are more than two samples per pixel
  if (imageRenderingAttributes.samplesPerPixel >= 2) {
    ReducePixelColorSamplesToFinalValue << <cudaAttributes.blockSize, cudaAttributes.threadSize >> > (
      cudaAttributes.deviceSamplePixelColorMemory,
      imageRenderingAttributes.samplesPerPixel,
      cudaAttributes.deviceResultPixelColorMemory,
      cudaAttributes.numResultPixelColors);
    DoGammaCorrection << <cudaAttributes.blockSize, cudaAttributes.threadSize >> > (
      cudaAttributes.deviceResultPixelColorMemory,
      cudaAttributes.numResultPixelColors,
      imageRenderingAttributes.gammaCorrectionExponent);
  }
  else {
    // Single sample per pixel: skip the reduction and gamma-correct the
    // sample buffer in place (WriteResultsToFile reads it directly then).
    DoGammaCorrection << <cudaAttributes.blockSize, cudaAttributes.threadSize >> > (
      cudaAttributes.deviceSamplePixelColorMemory,
      cudaAttributes.numResultPixelColors,
      imageRenderingAttributes.gammaCorrectionExponent);
  }
}
// Initializes one hiprand generator state per thread. Both the seed and the
// subsequence are the global thread index, so every thread gets its own
// deterministic random stream.
__global__ void InitializeCuRand(hiprandState_t* states) {
  const int threadId = threadIdx.x + blockDim.x * blockIdx.x;
  hiprand_init(threadId, threadId, 0, &states[threadId]);
}
// Generates one camera ray per thread (one thread per pixel sample).
// Sample s of pixel p is produced by thread p*samplesPerPixel + s; pixels are
// traversed left-to-right, top-to-bottom.
__global__ void CreateRays(int imageWidth, int imageHeight, int samplesPerPixel, Camera* camera, Ray* rays, int numRays, hiprandState_t* curandStates) {
  int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
  if (threadId >= numRays) {
    return;
  }
  int pixelX = (int)(threadId / samplesPerPixel) % imageWidth; // sample the image from left to right row-wise. One pixel is sampled by "samplesPerPixel"-many consecutive threads
  int pixelY = (imageHeight - 1) - (int)((int)(threadId / samplesPerPixel) / imageWidth); // sample the image from top to bottom with the same sample-logic as above
  // Jittered normalized screen coordinates for anti-aliasing.
  // NOTE(review): the jitter helper name suggests a value in [-1, 1) rather
  // than the usual [0, 1) sub-pixel offset -- confirm this matches the
  // intended anti-aliasing filter width.
  float u = (float)(pixelX + CudaUtils::GetRandomFloatInRangeMinusOneToOne(curandStates, threadId)) / (float)(imageWidth - 1);
  float v = (float)(pixelY + CudaUtils::GetRandomFloatInRangeMinusOneToOne(curandStates, threadId)) / (float)(imageHeight - 1);
  Ray ray = camera->GetRay(u, v, curandStates, threadId);
  rays[threadId] = ray;
}
// Path-traces one ray per thread through the sphere scene. The ray is bounced
// until it misses everything or the bounce budget is spent; each hit
// attenuates the accumulated color via the hit material's scatter function.
// Finally the color is attenuated by a vertical white-to-blue sky gradient
// (the scene's only light source) and written to pixelColors[threadId].
//
// Fixes: the scene-loop index previously shadowed the bounce-loop index `i`,
// and the bounce loop compared a size_t counter against the int bounce limit
// (signed/unsigned mismatch; a negative limit would loop "forever").
__global__ void DoRaytracing(hiprandState_t* curandStates, Ray* rays, int numRays, Color* pixelColors, Sphere* world, size_t worldSize, int maxRayBounceLimit) {
  int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
  if (threadId >= numRays) {
    return;
  }
  Ray rayForCurrentThread = rays[threadId];
  Color rayColor(1.0f, 1.0f, 1.0f);
  // Shoot ray to scene, check collisions and determine the ray color with the
  // hit materials as long as the bounce limit of a ray is not reached or
  // nothing in the scene is hit.
  for (int bounce = 0; bounce < maxRayBounceLimit; ++bounce) {
    bool hasHitAnything = false;
    HitRecord closestHit;
    float closestDistance = FLT_MAX;
    for (size_t objectIdx = 0; objectIdx < worldSize; ++objectIdx) {
      HitRecord tmpRecord;
      bool hit = world[objectIdx].Hit(rayForCurrentThread, tmpRecord);
      if (!hit) {
        continue;
      }
      hasHitAnything = true;
      // Keep the hit record of the nearest object along the ray.
      if (tmpRecord.t < closestDistance) {
        closestHit = tmpRecord;
        closestDistance = tmpRecord.t;
      }
    }
    // Ray escaped the scene: stop bouncing, sky shading happens below.
    if (!hasHitAnything) {
      break;
    }
    // Scatter according to the material of the nearest hit object.
    Color attenuation;
    Ray scatteredRay;
    if (closestHit.material.isMetal) {
      ScatterMetal(rayForCurrentThread, closestHit, attenuation, scatteredRay, closestHit.material, curandStates, threadId);
    }
    else if (closestHit.material.isDielectric) {
      ScatterDielectric(rayForCurrentThread, closestHit, attenuation, scatteredRay, closestHit.material, curandStates, threadId);
    }
    else {
      ScatterLambertian(rayForCurrentThread, closestHit, attenuation, scatteredRay, closestHit.material, curandStates, threadId);
    }
    rayForCurrentThread = scatteredRay; // Ray for next iteration is the scattered ray
    rayColor *= attenuation; // Attenuate the RayColor with the shading result
  }
  // Attenuate the ray with a blue-white gradient sky color as a light source
  // (t blends by the vertical component of the final ray direction).
  Vector3D unitDirection = MakeUnitVector(rayForCurrentThread.direction);
  float t = 0.5f * (unitDirection.Y() + 1.0f);
  Color brightWhite(1.0f, 1.0f, 1.0f);
  Color blueish(0.5f, 0.7f, 1.0f);
  Color skyColor = Lerp(brightWhite, blueish, t);
  rayColor *= skyColor;
  pixelColors[threadId] = rayColor;
}
// Averages the samplesPerPixel color samples of each pixel into one output
// color. Thread i reduces the sample range [i*spp, i*spp + spp).
//
// Bug fix: the loop previously ran `i < endIndex` with
// endIndex = startIndex + samplesPerPixel - 1, silently dropping the last
// sample of every pixel while still dividing by samplesPerPixel (darkening
// the image). The loop bound is now exclusive over the full sample range.
__global__ void ReducePixelColorSamplesToFinalValue(Color* pixelColors, int samplesPerPixel, Color* reducedPixelColors, int numReducedPixelColors) {
  int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
  if (threadId >= numReducedPixelColors) {
    return;
  }
  // One thread accumulates every sample of its pixel.
  int startIndex = threadId * samplesPerPixel;
  int endIndex = startIndex + samplesPerPixel; // exclusive
  Color resultColor(0.0f, 0.0f, 0.0f);
  for (int i = startIndex; i < endIndex; ++i) {
    resultColor += pixelColors[i];
  }
  // The final pixel color is the average over all samples.
  reducedPixelColors[threadId] = resultColor / samplesPerPixel;
}
// Applies per-channel gamma correction in place: c' = c^gammaExponent for
// each of the three color channels of every pixel (one thread per pixel).
__global__ void DoGammaCorrection(Color* resultPixelColors, int numResultPixelColors, float gammaExponent) {
  const int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
  if (threadId >= numResultPixelColors) {
    return;
  }
  Color color = resultPixelColors[threadId];
  for (int channel = 0; channel < 3; ++channel) {
    color[channel] = pow(color[channel], gammaExponent);
  }
  resultPixelColors[threadId] = color;
}
// Downloads the final per-pixel colors from the device, converts them to
// clamped 8-bit RGB, writes raytraced_image.png via stb_image_write, and
// opens it (Windows "start" shell command).
__host__ void WriteResultsToFile() {
  // Copy results from device memory to host memory
  // (the blocking hipMemcpy also synchronizes with the preceding kernels).
  Color* hostColorMemory = new Color[cudaAttributes.numResultPixelColors];
  if (imageRenderingAttributes.samplesPerPixel >= 2) {
    hipMemcpy(hostColorMemory, cudaAttributes.deviceResultPixelColorMemory, cudaAttributes.numResultPixelColors * sizeof(Color), hipMemcpyDeviceToHost);
  }
  else {
    // Single-sample renders skipped the reduction step, so the (already
    // gamma-corrected) sample buffer holds the final colors.
    hipMemcpy(hostColorMemory, cudaAttributes.deviceSamplePixelColorMemory, cudaAttributes.numResultPixelColors * sizeof(Color), hipMemcpyDeviceToHost);
  }
  unsigned char* pixelData = new unsigned char[imageRenderingAttributes.width * imageRenderingAttributes.height * 3]; // there are 3 RGB values for each pixel, so the size is multiplied by 3
  size_t colorIndex = 0;
  for (size_t i = 0; i < cudaAttributes.numResultPixelColors; ++i) {
    Color resultColor = hostColorMemory[i];
    // Clamp to [0,1] and scale to the 8-bit range.
    pixelData[colorIndex++] = static_cast<int>(255.0f * MathUtils::Clamp(resultColor.R(), 0.0f, 1.0f));
    pixelData[colorIndex++] = static_cast<int>(255.0f * MathUtils::Clamp(resultColor.G(), 0.0f, 1.0f));
    pixelData[colorIndex++] = static_cast<int>(255.0f * MathUtils::Clamp(resultColor.B(), 0.0f, 1.0f));
  }
  // Write PNG image and open it immediately (stride 0 = tightly packed rows).
  stbi_write_png("raytraced_image.png", imageRenderingAttributes.width, imageRenderingAttributes.height, 3, pixelData, 0);
  std::system("start raytraced_image.png");
  delete[] pixelData;
  delete[] hostColorMemory;
}
// Releases every device allocation made in InitializeCUDA().
// Fix: deviceRandomState (the per-thread hiprand states) was allocated but
// never freed -- it is now released as well.
__host__ void FreeCUDAMemory() {
  hipFree(cudaAttributes.deviceCameraMemory);
  hipFree(cudaAttributes.deviceRayMemory);
  hipFree(cudaAttributes.deviceSamplePixelColorMemory);
  hipFree(cudaAttributes.deviceResultPixelColorMemory);
  hipFree(cudaAttributes.deviceWorldMemory);
  hipFree(cudaAttributes.deviceRandomState);
} | 557e5572d89f0aa73d3521c042539fd91c493b72.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "curand_kernel.h"
#include "device_launch_parameters.h"
#include "Color.h"
#include "Vector3D.h"
#include "Ray.h"
#include "Utils.h"
#include "Hittable.h"
#include "Sphere.h"
#include "Camera.h"
#include "stb_image_write.h"
#include <vector>
#include <stdio.h>
#include <random>
#include <float.h>
#include <cstdlib>
#include <Windows.h>
#include <winbase.h>
// Image/output settings, populated from config.ini by
// InitializeFromConfigFile().
struct ImageRenderingAttributes {
  float aspectRatio;             // width / height
  int width;                     // image width in pixels
  int height;                    // derived as width / aspectRatio
  int samplesPerPixel;           // rays averaged per pixel
  int maxRayBounceLimit;         // maximum scatter depth per ray
  float gammaCorrectionExponent; // exponent applied per color channel
};
// Launch geometry and device-side buffers shared by all kernels.
struct CUDAAttributes {
  int blockSize;                       // number of blocks in the 1-D grid
  int threadSize;                      // threads per block
  int numRays;                         // width * height * samplesPerPixel
  int numResultPixelColors;            // numRays / samplesPerPixel (= pixel count)
  Camera* deviceCameraMemory;          // device copy of the camera
  Ray* deviceRayMemory;                // one primary ray per sample
  Color* deviceSamplePixelColorMemory; // per-sample traced colors
  Color* deviceResultPixelColorMemory; // reduced per-pixel colors
  Sphere* deviceWorldMemory;           // device copy of the sphere scene
  curandState* deviceRandomState;      // one RNG state per launched thread
};
// Host-side globals shared by the pipeline functions below.
ImageRenderingAttributes imageRenderingAttributes;
CUDAAttributes cudaAttributes;
// Scene: 1 ground sphere + up to 22*22 random small spheres + 3 feature
// spheres. sceneSize is an upper bound; SetupScene may fill fewer slots,
// leaving trailing entries default-constructed.
const size_t sceneSize = 4 + 22 * 22;
Sphere sphereScene[sceneSize];
// Camera
Camera camera;
// Host functions
__host__ void InitializeFromConfigFile();
__host__ void SetupScene();
__host__ void InitializeCUDA();
__host__ void LaunchCUDAKernels();
__host__ void WriteResultsToFile();
__host__ void FreeCUDAMemory();
// CUDA kernels
__global__ void InitializeCuRand(curandState* states);
__global__ void CreateRays(int imageWidth, int imageHeight, int samplesPerPixel, Camera* camera, Ray* rays, int numRays, curandState* curandStates);
__global__ void DoRaytracing(curandState* curandStates, Ray* rays, int numRays, Color* pixelColors, Sphere* world, size_t worldSize, int maxRayBounceLimit);
__global__ void ReducePixelColorSamplesToFinalValue(Color* pixelColors, int samplesPerPixel, Color* reducedPixelColors, int numReducedPixelColors);
__global__ void DoGammaCorrection(Color* resultPixelColors, int numResultPixelColors, float gammaExponent);
/// <summary>
/// Main Function
/// </summary>
int main() {
  // Load image/camera settings, build the random sphere scene, then set up
  // device buffers and launch the GPU rendering pipeline.
  InitializeFromConfigFile();
  SetupScene();
  InitializeCUDA();
  LaunchCUDAKernels();
  // Wait for GPU computation to finish
  cudaDeviceSynchronize();
  // Error check (kernels launch asynchronously; errors surface here).
  cudaError_t error = cudaGetLastError();
  if (error != cudaSuccess) {
    printf("CUDA error: %s\n", cudaGetErrorString(error));
    exit(-1);
  }
  printf("Raytracing finished!\n");
  WriteResultsToFile();
  FreeCUDAMemory();
  return 0;
}
/// Reads one float-valued key from an INI section via the Win32 profile API.
/// Returns 0.0f when the key is missing or not numeric (atof semantics).
static float ReadConfigFloat(const char* section, const char* key, const char* configFilePath) {
    char buffer[10];
    GetPrivateProfileString(section, key, "", buffer, 10, configFilePath);
    return (float)atof(buffer);
}

/// Loads image and camera settings from ".\config.ini" into the global
/// imageRenderingAttributes struct and initializes the global camera.
/// Integer keys default to -1 when missing; float keys default to 0.0f.
__host__ void InitializeFromConfigFile() {
    const char* configFilePath = ".\\config.ini";
    // Image Settings
    float aspectRatio = ReadConfigFloat("ImageSettings", "AspectRatio", configFilePath);
    int imageWidth = GetPrivateProfileInt("ImageSettings", "ImageWidth", -1, configFilePath);
    float gammaCorrectionExponent = ReadConfigFloat("ImageSettings", "GammaCorrectionExponent", configFilePath);
    int samplesPerPixel = GetPrivateProfileInt("ImageSettings", "SamplesPerPixel", -1, configFilePath);
    int maxRayBounceLimit = GetPrivateProfileInt("ImageSettings", "MaxRayBounceLimit", -1, configFilePath);
    imageRenderingAttributes.aspectRatio = aspectRatio;
    imageRenderingAttributes.width = imageWidth;
    // Image height is derived from the configured width and aspect ratio
    imageRenderingAttributes.height = (int)(imageWidth / aspectRatio);
    imageRenderingAttributes.gammaCorrectionExponent = gammaCorrectionExponent;
    imageRenderingAttributes.samplesPerPixel = samplesPerPixel;
    imageRenderingAttributes.maxRayBounceLimit = maxRayBounceLimit;
    // Camera Settings
    float lookFromX = ReadConfigFloat("CameraSettings", "LookFromX", configFilePath);
    float lookFromY = ReadConfigFloat("CameraSettings", "LookFromY", configFilePath);
    float lookFromZ = ReadConfigFloat("CameraSettings", "LookFromZ", configFilePath);
    float lookAtX = ReadConfigFloat("CameraSettings", "LookAtX", configFilePath);
    float lookAtY = ReadConfigFloat("CameraSettings", "LookAtY", configFilePath);
    float lookAtZ = ReadConfigFloat("CameraSettings", "LookAtZ", configFilePath);
    float verticalFieldOfView = ReadConfigFloat("CameraSettings", "VerticalFieldOfView", configFilePath);
    int useDepthOfField = GetPrivateProfileInt("CameraSettings", "UseDepthOfField", -1, configFilePath);
    float distanceToFocus = ReadConfigFloat("CameraSettings", "DistanceToFocus", configFilePath);
    float apertureSize = ReadConfigFloat("CameraSettings", "ApertureSize", configFilePath);
    Vector3D lookFrom(lookFromX, lookFromY, lookFromZ);
    Vector3D lookAt(lookAtX, lookAtY, lookAtZ);
    Vector3D up(0.0f, 1.0f, 0.0f); // world "up" direction
    // Depth of field disabled: neutral focus distance and a point aperture
    if (useDepthOfField == 0) {
        distanceToFocus = 1.0f;
        apertureSize = 0.0f;
    }
    camera.Initialize(lookFrom, lookAt, up, verticalFieldOfView, aspectRatio, apertureSize, distanceToFocus);
}
/// Builds a matte (Lambertian) material with the given albedo.
static Material MakeDiffuseMaterial(const Color& albedo) {
    Material material;
    material.albedoColor = albedo;
    material.isMetal = false;
    material.isDielectric = false;
    material.refractionIndex = 0.0f;
    material.metalFuzziness = 0.0f;
    return material;
}

/// Builds a reflective metal material with the given albedo and fuzziness.
static Material MakeMetalMaterial(const Color& albedo, float fuzziness) {
    Material material;
    material.albedoColor = albedo;
    material.isMetal = true;
    material.isDielectric = false;
    material.refractionIndex = 0.0f;
    material.metalFuzziness = fuzziness;
    return material;
}

/// Builds a clear dielectric (glass) material with the given refraction index.
static Material MakeGlassMaterial(float refractionIndex) {
    Material material;
    material.albedoColor = Color(1.0f, 1.0f, 1.0f);
    material.isMetal = false;
    material.isDielectric = true;
    material.refractionIndex = refractionIndex;
    material.metalFuzziness = 0.0f;
    return material;
}

/// Populates the global sphereScene: a large matte ground sphere, a 22x22
/// grid of small randomly placed/textured spheres, and three large showcase
/// spheres (glass, metal, diffuse).
__host__ void SetupScene() {
    int index = 0;
    // Huge matte sphere acting as the ground plane
    sphereScene[index++] = Sphere(Vector3D(0.0f, -1000.0f, 0.0f), 1000.0f, MakeDiffuseMaterial(Color(0.5f, 0.5f, 0.5f)));
    for (int a = -11; a < 11; a++) {
        for (int b = -11; b < 11; b++) {
            // Draw the material selector first, then the jittered position,
            // preserving the RNG call order of the original implementation.
            float randomNumberForMaterialSelection = MathUtils::GetRandomFloatInRangeZeroToOne();
            Vector3D center(a + 0.9f * MathUtils::GetRandomFloatInRangeZeroToOne(), 0.2f, b + 0.9f * MathUtils::GetRandomFloatInRangeZeroToOne());
            // Only keep spheres far enough from the front showcase position
            if ((center - Vector3D(4.0f, 0.2f, 0.0f)).Size() > 0.9f) {
                if (randomNumberForMaterialSelection < 0.4f) {
                    // Diffuse Material
                    Color albedo = GetRandomColor() * GetRandomColor();
                    sphereScene[index++] = Sphere(center, 0.2f, MakeDiffuseMaterial(albedo));
                }
                else if (randomNumberForMaterialSelection < 0.9f) {
                    // Metal Material
                    Color albedo = GetRandomColor(0.5f, 1.0f);
                    float fuzziness = MathUtils::GetRandomFloatInRange(0.0f, 0.2f);
                    sphereScene[index++] = Sphere(center, 0.2f, MakeMetalMaterial(albedo, fuzziness));
                }
                else {
                    // Glass Material
                    sphereScene[index++] = Sphere(center, 0.2f, MakeGlassMaterial(1.5f));
                }
            }
        }
    }
    // Three large showcase spheres: glass (center), metal (left), diffuse (right)
    sphereScene[index++] = Sphere(Vector3D(0.0f, 1.0f, 0.0f), 1.0f, MakeGlassMaterial(1.5f));
    sphereScene[index++] = Sphere(Vector3D(-4.0f, 1.0f, 0.0f), 1.0f, MakeMetalMaterial(Color(0.7f, 0.6f, 0.5f), 0.0f));
    sphereScene[index++] = Sphere(Vector3D(4.0f, 1.0f, 0.0f), 1.0f, MakeDiffuseMaterial(Color(0.4f, 0.2f, 0.1f)));
}
/// Allocates all device buffers, uploads the camera and the scene, and
/// derives the kernel launch configuration (one thread per ray sample).
__host__ void InitializeCUDA() {
    // Set GPU
    cudaSetDevice(0);
    // One ray (and one color sample) per pixel sample; one result per pixel
    cudaAttributes.numRays = imageRenderingAttributes.width * imageRenderingAttributes.height * imageRenderingAttributes.samplesPerPixel;
    cudaAttributes.numResultPixelColors = cudaAttributes.numRays / imageRenderingAttributes.samplesPerPixel;
    // Allocate GPU memory
    cudaMalloc(&cudaAttributes.deviceRayMemory, cudaAttributes.numRays * sizeof(Ray));
    cudaMalloc(&cudaAttributes.deviceCameraMemory, sizeof(Camera));
    cudaMalloc(&cudaAttributes.deviceSamplePixelColorMemory, cudaAttributes.numRays * sizeof(Color));
    cudaMalloc(&cudaAttributes.deviceResultPixelColorMemory, cudaAttributes.numResultPixelColors * sizeof(Color));
    cudaMalloc(&cudaAttributes.deviceWorldMemory, sceneSize * sizeof(Sphere));
    // Copy data to GPU memory
    cudaMemcpy(cudaAttributes.deviceCameraMemory, &camera, sizeof(Camera), cudaMemcpyHostToDevice); // copy camera
    cudaMemcpy(cudaAttributes.deviceWorldMemory, &sphereScene[0], sceneSize * sizeof(Sphere), cudaMemcpyHostToDevice); // copy world
    // Fixed thread count per block; round the block count up (ceil division)
    // so every ray gets a thread. Idle threads in a partially filled last
    // block are guarded inside the kernels. This replaces the previous
    // branch with a redundant floor() of an already-integer division.
    cudaAttributes.threadSize = 1024;
    cudaAttributes.blockSize = (cudaAttributes.numRays + cudaAttributes.threadSize - 1) / cudaAttributes.threadSize;
    // Allocate memory for random generators, one for every launched thread
    cudaMalloc(&cudaAttributes.deviceRandomState, cudaAttributes.blockSize * cudaAttributes.threadSize * sizeof(curandState));
    // Calculate memory usage in VRAM and print it for information
    unsigned long long int VRAMmemoryInBytes = 2 * cudaAttributes.numRays * sizeof(Ray) +
        sizeof(Camera) + cudaAttributes.numResultPixelColors * sizeof(Color) +
        sceneSize * sizeof(Sphere) +
        cudaAttributes.blockSize * cudaAttributes.threadSize * sizeof(curandState);
    float VRAMmemoryInGigabytes = VRAMmemoryInBytes / 1024.0f / 1024.0f / 1024.0f;
    printf("GPU VRAM: Allocated Gigabytes (GB): %f\n", VRAMmemoryInGigabytes);
    // NOTE(review): cudaMalloc/cudaMemcpy results are not checked here;
    // failures only surface at the cudaGetLastError() in main - TODO confirm.
}
/// Runs the full rendering pipeline on the GPU: curand setup, primary-ray
/// generation, raytracing, per-pixel sample reduction and gamma correction.
/// All launches use the grid/block sizes computed in InitializeCUDA() and
/// run on the default stream, so they execute in issue order.
__host__ void LaunchCUDAKernels() {
// Setup the random generators, one per each Thread
InitializeCuRand << <cudaAttributes.blockSize, cudaAttributes.threadSize >> > (cudaAttributes.deviceRandomState);
// Create one ray per thread
CreateRays << <cudaAttributes.blockSize, cudaAttributes.threadSize >> > (
imageRenderingAttributes.width,
imageRenderingAttributes.height,
imageRenderingAttributes.samplesPerPixel,
cudaAttributes.deviceCameraMemory,
cudaAttributes.deviceRayMemory,
cudaAttributes.numRays,
cudaAttributes.deviceRandomState);
// Render the scene
DoRaytracing << <cudaAttributes.blockSize, cudaAttributes.threadSize >> > (
cudaAttributes.deviceRandomState,
cudaAttributes.deviceRayMemory,
cudaAttributes.numRays,
cudaAttributes.deviceSamplePixelColorMemory,
cudaAttributes.deviceWorldMemory,
sceneSize,
imageRenderingAttributes.maxRayBounceLimit);
// The pixel color samples of one pixel need to be reduced to one final pixel color, if there are more than two samples per pixel
if (imageRenderingAttributes.samplesPerPixel >= 2) {
ReducePixelColorSamplesToFinalValue << <cudaAttributes.blockSize, cudaAttributes.threadSize >> > (
cudaAttributes.deviceSamplePixelColorMemory,
imageRenderingAttributes.samplesPerPixel,
cudaAttributes.deviceResultPixelColorMemory,
cudaAttributes.numResultPixelColors);
DoGammaCorrection << <cudaAttributes.blockSize, cudaAttributes.threadSize >> > (
cudaAttributes.deviceResultPixelColorMemory,
cudaAttributes.numResultPixelColors,
imageRenderingAttributes.gammaCorrectionExponent);
}
else {
// Single sample per pixel: no reduction needed, gamma-correct the sample
// buffer in place (WriteResultsToFile reads that buffer in this case).
DoGammaCorrection << <cudaAttributes.blockSize, cudaAttributes.threadSize >> > (
cudaAttributes.deviceSamplePixelColorMemory,
cudaAttributes.numResultPixelColors,
imageRenderingAttributes.gammaCorrectionExponent);
}
// NOTE(review): no cudaGetLastError() between launches; launch-config errors
// only surface at the cudaDeviceSynchronize()/cudaGetLastError() in main -
// TODO confirm this is intended.
}
/// Seeds one curand generator per launched thread. Each thread uses its own
/// global index as both seed and sequence number so every generator starts
/// from a distinct state.
__global__ void InitializeCuRand(curandState* states) {
    int threadId = threadIdx.x + blockDim.x * blockIdx.x;
    // seed = thread id, sequence = thread id, offset = 0
    curand_init(threadId, threadId, 0, &states[threadId]);
}
/// Generates one primary camera ray per thread. Thread i serves sample
/// (i % samplesPerPixel) of pixel (i / samplesPerPixel); pixels are laid out
/// row-major with the top row first. Expected launch: >= numRays threads in
/// a 1D grid of 1D blocks.
__global__ void CreateRays(int imageWidth, int imageHeight, int samplesPerPixel, Camera* camera, Ray* rays, int numRays, curandState* curandStates) {
int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
// Guard against the idle threads of the last (partially filled) block
if (threadId >= numRays) {
return;
}
int pixelX = (int)(threadId / samplesPerPixel) % imageWidth; // sample the image from left to right row-wise. One pixel is sampled by "samplesPerPixel"-many consecutive threads
int pixelY = (imageHeight - 1) - (int)((int)(threadId / samplesPerPixel) / imageWidth); // sample the image from top to bottom with the same sample-logic as above
// Jitter the sample position for anti-aliasing, then map to normalized
// image coordinates (u, v).
// NOTE(review): the jitter spans [-1,1) (MinusOneToOne), i.e. a two-pixel
// wide footprint - TODO confirm a [0,1) offset was not intended instead.
float u = (float)(pixelX + CudaUtils::GetRandomFloatInRangeMinusOneToOne(curandStates, threadId)) / (float)(imageWidth - 1);
float v = (float)(pixelY + CudaUtils::GetRandomFloatInRangeMinusOneToOne(curandStates, threadId)) / (float)(imageHeight - 1);
Ray ray = camera->GetRay(u, v, curandStates, threadId);
rays[threadId] = ray;
}
/// Traces one ray per thread through the sphere scene for up to
/// maxRayBounceLimit bounces, accumulating material attenuation, then
/// multiplies in the sky gradient and writes the sample color.
/// Expected launch: >= numRays threads in a 1D grid of 1D blocks.
__global__ void DoRaytracing(curandState* curandStates, Ray* rays, int numRays, Color* pixelColors, Sphere* world, size_t worldSize, int maxRayBounceLimit) {
    int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
    // Guard against the idle threads of the last (partially filled) block
    if (threadId >= numRays) {
        return;
    }
    Ray rayForCurrentThread = rays[threadId];
    Color rayColor(1.0f, 1.0f, 1.0f);
    // Bounce the ray until the limit is reached or it escapes the scene.
    // Fix: the bounce counter is an int (matching maxRayBounceLimit) and no
    // longer shares the name of the inner loop variable it used to shadow.
    for (int bounce = 0; bounce < maxRayBounceLimit; ++bounce) {
        bool hasHitAnything = false;
        HitRecord closestHit;
        float tmpDistance = FLT_MAX;
        // Linear search for the nearest intersected sphere
        for (size_t i = 0; i < worldSize; ++i) {
            HitRecord tmpRecord;
            bool hit = world[i].Hit(rayForCurrentThread, tmpRecord);
            if (!hit) {
                continue;
            }
            hasHitAnything = true;
            // Keep only the closest hit along the ray
            if (tmpRecord.t < tmpDistance) {
                closestHit = tmpRecord;
                tmpDistance = tmpRecord.t;
            }
        }
        // Ray escaped the scene: stop bouncing
        if (!hasHitAnything) {
            break;
        }
        // Scatter the ray according to the material of the nearest hit
        Color attenuation;
        Ray scatteredRay;
        if (closestHit.material.isMetal) {
            ScatterMetal(rayForCurrentThread, closestHit, attenuation, scatteredRay, closestHit.material, curandStates, threadId);
        }
        else if (closestHit.material.isDielectric) {
            ScatterDielectric(rayForCurrentThread, closestHit, attenuation, scatteredRay, closestHit.material, curandStates, threadId);
        }
        else {
            ScatterLambertian(rayForCurrentThread, closestHit, attenuation, scatteredRay, closestHit.material, curandStates, threadId);
        }
        rayForCurrentThread = scatteredRay; // continue with the scattered ray
        rayColor *= attenuation; // accumulate the material attenuation
    }
    // Light the ray with a vertical white-to-blue sky gradient
    Vector3D unitDirection = MakeUnitVector(rayForCurrentThread.direction);
    float t = 0.5f * (unitDirection.Y() + 1.0f);
    Color brightWhite(1.0f, 1.0f, 1.0f);
    Color blueish(0.5f, 0.7f, 1.0f);
    Color skyColor = Lerp(brightWhite, blueish, t);
    rayColor *= skyColor;
    pixelColors[threadId] = rayColor;
}
/// Averages the samplesPerPixel color samples of each pixel into one final
/// color; one thread reduces all samples of one pixel.
/// Fix: the original loop used an inclusive endIndex (start + samples - 1)
/// with an exclusive '<' comparison, silently dropping the last sample of
/// every pixel while still dividing by samplesPerPixel.
__global__ void ReducePixelColorSamplesToFinalValue(Color* pixelColors, int samplesPerPixel, Color* reducedPixelColors, int numReducedPixelColors) {
    int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
    if (threadId >= numReducedPixelColors) {
        return;
    }
    // Samples of pixel threadId occupy the contiguous index range
    // [startIndex, startIndex + samplesPerPixel)
    int startIndex = threadId * samplesPerPixel;
    int endIndex = startIndex + samplesPerPixel; // exclusive upper bound
    Color resultColor(0.0f, 0.0f, 0.0f);
    for (int i = startIndex; i < endIndex; ++i) {
        resultColor += pixelColors[i];
    }
    // Average over all samples of this pixel
    reducedPixelColors[threadId] = resultColor / samplesPerPixel;
}
/// Applies gamma correction in place by raising each RGB channel of every
/// result pixel to gammaExponent (typically 1/gamma).
__global__ void DoGammaCorrection(Color* resultPixelColors, int numResultPixelColors, float gammaExponent/*, unsigned char* resultColorValues*/) {
    int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
    if (threadId >= numResultPixelColors) {
        return;
    }
    Color color = resultPixelColors[threadId];
    // Use the explicitly single-precision powf so the computation cannot
    // silently promote to double-precision arithmetic on the device.
    color[0] = powf(color[0], gammaExponent);
    color[1] = powf(color[1], gammaExponent);
    color[2] = powf(color[2], gammaExponent);
    resultPixelColors[threadId] = color;
}
/// Copies the final pixel colors back to the host, converts them to 8-bit
/// RGB, writes "raytraced_image.png" and opens it with the system viewer.
__host__ void WriteResultsToFile() {
// Copy results from device memory to host memory. With >= 2 samples per
// pixel the reduced buffer holds the final colors; otherwise the sample
// buffer itself was gamma-corrected in place (see LaunchCUDAKernels).
Color* hostColorMemory = new Color[cudaAttributes.numResultPixelColors];
if (imageRenderingAttributes.samplesPerPixel >= 2) {
cudaMemcpy(hostColorMemory, cudaAttributes.deviceResultPixelColorMemory, cudaAttributes.numResultPixelColors * sizeof(Color), cudaMemcpyDeviceToHost);
}
else {
cudaMemcpy(hostColorMemory, cudaAttributes.deviceSamplePixelColorMemory, cudaAttributes.numResultPixelColors * sizeof(Color), cudaMemcpyDeviceToHost);
}
unsigned char* pixelData = new unsigned char[imageRenderingAttributes.width * imageRenderingAttributes.height * 3]; // there are 3 RGB values for each pixel, so the size is multiplied by 3
size_t colorIndex = 0;
// Clamp each channel to [0,1] and quantize to a byte
for (size_t i = 0; i < cudaAttributes.numResultPixelColors; ++i) {
Color resultColor = hostColorMemory[i];
pixelData[colorIndex++] = static_cast<int>(255.0f * MathUtils::Clamp(resultColor.R(), 0.0f, 1.0f));
pixelData[colorIndex++] = static_cast<int>(255.0f * MathUtils::Clamp(resultColor.G(), 0.0f, 1.0f));
pixelData[colorIndex++] = static_cast<int>(255.0f * MathUtils::Clamp(resultColor.B(), 0.0f, 1.0f));
}
// Write PNG image and open it immediately ("start" is Windows-specific)
stbi_write_png("raytraced_image.png", imageRenderingAttributes.width, imageRenderingAttributes.height, 3, pixelData, 0);
std::system("start raytraced_image.png");
delete[] pixelData;
delete[] hostColorMemory;
}
/// Releases every device allocation made in InitializeCUDA().
__host__ void FreeCUDAMemory() {
    cudaFree(cudaAttributes.deviceCameraMemory);
    cudaFree(cudaAttributes.deviceRayMemory);
    cudaFree(cudaAttributes.deviceSamplePixelColorMemory);
    cudaFree(cudaAttributes.deviceResultPixelColorMemory);
    // Fix: the curand state buffer allocated in InitializeCUDA was never
    // released; free it as well to avoid leaking device memory.
    cudaFree(cudaAttributes.deviceRandomState);
    cudaFree(cudaAttributes.deviceWorldMemory);
} |
4cb69279c885d977eaebd846854c6e4e6427cdc6.hip | // !!! This is a file automatically generated by hipify!!!
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include "utils.h"
#include "pcg/pcg_basic.h"
// Allocates and seeds n independent PCG32 generators on the host.
// Every generator receives a distinct seed (wall-clock base plus its index)
// and a distinct stream selector (its own address). Caller owns the array.
pcg32_random_t *seed(int n) {
    pcg32_random_t *rng = (pcg32_random_t *) malloc(n*sizeof(pcg32_random_t));
    if (rng == NULL) {
        fprintf(stderr, "%s\n", "\rERROR allocating memory:\tseed()");
        exit(1);
    }
    long base_seed = time(NULL);
    // Seed each PRNG with different seed
    for (int i = 0; i < n; i++) {
        pcg32_srandom_r(&rng[i], base_seed + i, (intptr_t) &rng[i]);
    }
    return rng;
}
// Allocates n PRNG states on the device; prints the pending HIP error via
// gpuErrchk and exits the process on failure.
void allocate_rng_d(pcg32_random_t **rng, int n) {
if (hipMalloc(rng, n*sizeof(pcg32_random_t)) != hipSuccess) {
gpuErrchk(hipPeekAtLastError());
exit(1);
}
}
// Generate 32-bit unsigned random integer using the PCG algorithm
// (PCG-XSH-RR: a 64-bit LCG state advance followed by an xorshift and a
// data-dependent rotation of the high bits). Advances rng->state in place.
__host__ __device__ uint32_t rand_pcg(pcg32_random_t *rng) {
uint64_t oldstate = rng->state;
// LCG step: fixed PCG multiplier plus the generator's stream increment
rng->state = oldstate * 6364136223846793005ULL + rng->inc;
// Output function: xorshift-high, then rotate right by the top 5 state bits
uint32_t xorshifted = ((oldstate >> 18u) ^ oldstate) >> 27u;
uint32_t rot = oldstate >> 59u;
return (xorshifted >> rot) | (xorshifted << ((-rot) & 31));
}
// Returns a uniformly distributed integer in [0, lim) via rejection sampling.
__host__ __device__ uint32_t rand_pcg_bound(pcg32_random_t *rng, uint32_t lim) {
// threshold == 2^32 mod lim (computed as (-lim) % lim in 32-bit unsigned
// arithmetic); rejecting raw draws below it removes the modulo bias of
// the final r % lim.
uint32_t threshold = -lim % lim;
for (;;) {
uint32_t r = rand_pcg(rng);
if (r >= threshold) return r % lim;
}
}
// Returns a float in the range [0,1) from PRNG with index idx
__host__ __device__ float rand_f(pcg32_random_t *rng_all, int idx) {
pcg32_random_t *rng = &rng_all[idx];
// ldexpf(r, -32) == r / 2^32, mapping the 32-bit draw into [0,1)
return ldexpf(rand_pcg(rng), -32);
}
//Returns integer x in the range 0 <= x < lim from PRNG with index idx
// (delegates to the bias-free bounded draw in rand_pcg_bound)
__host__ __device__ int rand_int(int lim, pcg32_random_t *rng_all, int idx) {
pcg32_random_t *rng = &rng_all[idx];
return rand_pcg_bound(rng, lim);
}
// Allocate memory in the host
// Allocates an n*n*n-byte int8 lattice on the host; exits on failure.
// Caller owns the returned buffer.
int8_t *malloc_int_h(unsigned long n) {
int8_t *array = (int8_t *) malloc(n*n*n);
if (array == NULL) {
fprintf(
stderr,
"%s\n", "\rERROR allocating memory in the host:\tmalloc_int_h()"
);
exit(1);
}
return array;
}
// Allocates an n*n*n-element int lattice on the host; exits on failure.
// Caller owns the returned buffer.
int *malloc_integer_h(unsigned long n) {
    int *array = (int *) malloc(n*n*n*sizeof(int));
    if (array == NULL) {
        fprintf(
            stderr,
            // Fix: the message previously blamed malloc_int_h()
            "%s\n", "\rERROR allocating memory in the host:\tmalloc_integer_h()"
        );
        exit(1);
    }
    return array;
}
// Allocate memory in the device
// Allocates an n*n*n-byte int8 lattice on the device; prints the pending
// HIP error and exits on failure.
void malloc_int_d(int8_t **array, unsigned long n) {
if (hipMalloc(array, n*n*n) != hipSuccess) {
gpuErrchk(hipPeekAtLastError());
exit(1);
}
}
// Allocates an n*n*n-element int lattice on the device; prints the pending
// HIP error and exits on failure.
void malloc_integer_d(int **array, unsigned long n) {
if (hipMalloc(array, n*n*n*sizeof(int)) != hipSuccess) {
gpuErrchk(hipPeekAtLastError());
exit(1);
}
}
// Allocates an n-element float array on the host; exits on failure.
// Caller owns the returned buffer.
float *malloc_float_h(unsigned long n) {
float *array = (float *) malloc(n*sizeof(float));
if (array == NULL) {
fprintf(
stderr,
"%s\n", "\rERROR allocating memory in the host:\tmalloc_float_h()"
);
exit(1);
}
return array;
}
// Allocates an n-element float array on the device; prints the pending HIP
// error and exits on failure.
void malloc_float_d(float **array, unsigned long n) {
    // Fix: compare against hipSuccess explicitly, consistent with the other
    // device-allocation helpers in this file (behavior unchanged, but the
    // intent no longer relies on the enum's implicit truthiness).
    if (hipMalloc(array, n*sizeof(float)) != hipSuccess) {
        gpuErrchk(hipPeekAtLastError());
        exit(1);
    }
}
//converts 3D index to the actual 1D index
// Maps (i, j, k) of an n x n x n cube to its row-major linear offset.
__device__ int index(int i, int j, int k, int n) {
return i*n*n + j*n + k;
}
// Returns a malloc'd array of n floats evenly spaced over [min, max],
// endpoints included. Caller owns the buffer.
float *linspace(float min, float max, int n) {
    float *values = malloc_float_h(n);
    for (int i = 0; i < n; i++)
        values[i] = min + i*(max-min)/(n-1);
    return values;
}
// Returns a malloc'd array of the integers sizes_min, sizes_min+step, ...
// up to the largest value not exceeding sizes_max. Exits on an invalid
// range or allocation failure. Caller owns the buffer.
int *linspace_int(int sizes_min, int sizes_max, int step) {
    if (sizes_min > sizes_max) {
        fprintf(stderr, "%s\n", "Invalid number of lattices");
        exit(1);
    }
    int count = (sizes_max - sizes_min) / step + 1;
    int *array = (int *) malloc(count * sizeof(int));
    if (array == NULL) {
        fprintf(stderr, "%s\n", "\rERROR allocating memory");
        exit(1);
    }
    for (int i = 0; i < count; i++) {
        array[i] = sizes_min + i * step;
    }
    return array;
}
// Reports a HIP API error with its source location on stderr.
// NOTE(review): unlike common gpuErrchk variants this does not abort on
// error; callers continue after a failure - TODO confirm that is intended.
void gpuAssert(hipError_t code, const char *file, int line) {
if (code != hipSuccess) {
fprintf(
stderr,
"\rGPUassert: %s %s %d\n", hipGetErrorString(code), file, line
);
}
}
| 4cb69279c885d977eaebd846854c6e4e6427cdc6.cu | #include <time.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include "utils.h"
#include "pcg/pcg_basic.h"
// Seeding n PRNGs on the host
pcg32_random_t *seed(int n) {
long base_seed;
base_seed = time(NULL);
// Allocate the memory and check \rERRORs
pcg32_random_t *rng = (pcg32_random_t *) malloc(n*sizeof(pcg32_random_t));
if (rng == NULL) {
fprintf(stderr, "%s\n", "\rERROR allocating memory:\tseed()");
exit(1);
}
// Seed each PRNG with different seed
for (int i = 0; i<n; i++) {
pcg32_srandom_r(&rng[i], base_seed+i, (intptr_t) &rng[i]);
}
return rng;
}
void allocate_rng_d(pcg32_random_t **rng, int n) {
if (cudaMalloc(rng, n*sizeof(pcg32_random_t)) != cudaSuccess) {
gpuErrchk(cudaPeekAtLastError());
exit(1);
}
}
// Generate 32-bit unsigned random integer using the PCG algorithm
__host__ __device__ uint32_t rand_pcg(pcg32_random_t *rng) {
uint64_t oldstate = rng->state;
rng->state = oldstate * 6364136223846793005ULL + rng->inc;
uint32_t xorshifted = ((oldstate >> 18u) ^ oldstate) >> 27u;
uint32_t rot = oldstate >> 59u;
return (xorshifted >> rot) | (xorshifted << ((-rot) & 31));
}
__host__ __device__ uint32_t rand_pcg_bound(pcg32_random_t *rng, uint32_t lim) {
uint32_t threshold = -lim % lim;
for (;;) {
uint32_t r = rand_pcg(rng);
if (r >= threshold) return r % lim;
}
}
// Returns a float in the range [0,1) from PRNG with index idx
__host__ __device__ float rand_f(pcg32_random_t *rng_all, int idx) {
pcg32_random_t *rng = &rng_all[idx];
return ldexpf(rand_pcg(rng), -32);
}
//Returns integer x in the range 0 <= x < lim from PRNG with index idx
__host__ __device__ int rand_int(int lim, pcg32_random_t *rng_all, int idx) {
pcg32_random_t *rng = &rng_all[idx];
return rand_pcg_bound(rng, lim);
}
// Allocate memory in the host
int8_t *malloc_int_h(unsigned long n) {
int8_t *array = (int8_t *) malloc(n*n*n);
if (array == NULL) {
fprintf(
stderr,
"%s\n", "\rERROR allocating memory in the host:\tmalloc_int_h()"
);
exit(1);
}
return array;
}
// Allocates an n*n*n-element int lattice on the host; exits on failure.
// Caller owns the returned buffer.
int *malloc_integer_h(unsigned long n) {
    int *array = (int *) malloc(n*n*n*sizeof(int));
    if (array == NULL) {
        fprintf(
            stderr,
            // Fix: the message previously blamed malloc_int_h()
            "%s\n", "\rERROR allocating memory in the host:\tmalloc_integer_h()"
        );
        exit(1);
    }
    return array;
}
// Allocate memory in the device
void malloc_int_d(int8_t **array, unsigned long n) {
if (cudaMalloc(array, n*n*n) != cudaSuccess) {
gpuErrchk(cudaPeekAtLastError());
exit(1);
}
}
void malloc_integer_d(int **array, unsigned long n) {
if (cudaMalloc(array, n*n*n*sizeof(int)) != cudaSuccess) {
gpuErrchk(cudaPeekAtLastError());
exit(1);
}
}
float *malloc_float_h(unsigned long n) {
float *array = (float *) malloc(n*sizeof(float));
if (array == NULL) {
fprintf(
stderr,
"%s\n", "\rERROR allocating memory in the host:\tmalloc_float_h()"
);
exit(1);
}
return array;
}
// Allocates an n-element float array on the device; prints the pending CUDA
// error and exits on failure.
void malloc_float_d(float **array, unsigned long n) {
    // Fix: compare against cudaSuccess explicitly, consistent with the other
    // device-allocation helpers in this file (behavior unchanged, but the
    // intent no longer relies on the enum's implicit truthiness).
    if (cudaMalloc(array, n*sizeof(float)) != cudaSuccess) {
        gpuErrchk(cudaPeekAtLastError());
        exit(1);
    }
}
//converts 3D index to the actual 1D index
__device__ int index(int i, int j, int k, int n) {
return i*n*n + j*n + k;
}
float *linspace(float min, float max, int n) {
float *ret = malloc_float_h(n);
for (int i = 0; i < n; i++){
ret[i] = min + i*(max-min)/(n-1);
}
return ret;
}
// Returns a malloc'd array of the integers sizes_min, sizes_min+step, ...
// up to the largest value not exceeding sizes_max. Exits on an invalid
// range or allocation failure. Caller owns the buffer.
int *linspace_int(int sizes_min, int sizes_max, int step) {
    if (sizes_min > sizes_max) {
        fprintf(stderr, "%s\n", "Invalid number of lattices");
        exit(1);
    }
    int count = (sizes_max - sizes_min) / step + 1;
    int *array = (int *) malloc(count * sizeof(int));
    if (array == NULL) {
        fprintf(stderr, "%s\n", "\rERROR allocating memory");
        exit(1);
    }
    for (int i = 0; i < count; i++) {
        array[i] = sizes_min + i * step;
    }
    return array;
}
// Reports a CUDA API error with its source location on stderr.
// NOTE(review): unlike common gpuErrchk variants this does not abort on
// error; callers continue after a failure - TODO confirm that is intended.
void gpuAssert(cudaError_t code, const char *file, int line) {
if (code != cudaSuccess) {
fprintf(
stderr,
"\rGPUassert: %s %s %d\n", cudaGetErrorString(code), file, line
);
}
}
|
b035fb25c74ea34f514c24cfc6f95fd6bf03177c.hip | // !!! This is a file automatically generated by hipify!!!
//This code is a modification of L1 cache benchmark from
//"Dissecting the NVIDIA Volta GPU Architecture via Microbenchmarking": https://arxiv.org/pdf/1804.06826.pdf
//This benchmark measures the latency of L1 cache
//This code have been tested on Volta V100 architecture
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define THREADS_NUM 1 //Launch only one thread to calcaulte the latency using a pointer-chasing array technique
#define WARP_SIZE 32
#define ITERS 32768 //iterate over the array ITERS times
#define ARRAY_SIZE 4096 //size of the array
// GPU error check
// Wraps a HIP call so failures are reported with their call site.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Prints the HIP error with file/line; exits with the error code unless
// abort is explicitly disabled by the caller.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true){
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//TO DO: @Jason, please change the code to be similar to the L2/DRAM latency format
//Measure latency of ITERS reads.
// Pointer-chasing microbenchmark: thread 0 first links posArray into a
// circular chain (each slot stores the address of the next slot); the timed
// loop then performs ITERS serially dependent loads, so each load's address
// comes from the previous load and (total cycles)/ITERS approximates the
// per-load latency. Loads use the ld.global.ca modifier (cache at all
// levels, i.e. through L1). dsink consumes the final pointer so the chase
// cannot be optimized away. Expected launch: one block of THREADS_NUM threads.
__global__ void l1_lat(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
// thread index
uint32_t tid = threadIdx.x;
// one thread to initialize the pointer-chasing array
if (tid == 0){
for (uint32_t i=0; i<(ARRAY_SIZE-1); i++)
posArray[i] = (uint64_t)(posArray + i + 1);
posArray[ARRAY_SIZE-1] = (uint64_t)posArray;
}
if(tid < THREADS_NUM){
// a register to avoid compiler optimization
uint64_t *ptr = posArray + tid;
uint64_t ptr1, ptr0;
// initialize the thread pointer with the start address of the array
// use ca modifier to cache the in L1
asm volatile ("{\t\n"
"ld.global.ca.u64 %0, [%1];\n\t"
"}" : "=l"(ptr1) : "l"(ptr) : "memory"
);
// synchronize all threads
asm volatile ("bar.sync 0;");
// start timing (raw per-SM %clock counter)
uint32_t start = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
// pointer-chasing ITERS times
// use ca modifier to cache the load in L1
for(uint32_t i=0; i<ITERS; ++i) {
asm volatile ("{\t\n"
"ld.global.ca.u64 %0, [%1];\n\t"
"}" : "=l"(ptr0) : "l"((uint64_t*)ptr1) : "memory"
);
ptr1 = ptr0; //swap the register for the next load
}
// stop timing
uint32_t stop = 0;
asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[tid] = start;
stopClk[tid] = stop;
dsink[tid] = ptr1;
}
}
/// Host driver: allocates timing buffers, runs the l1_lat kernel with a
/// single block of THREADS_NUM thread(s), copies the clock readings back and
/// prints the average per-load L1 latency in cycles.
int main(){
    uint32_t *startClk = (uint32_t*) malloc(THREADS_NUM*sizeof(uint32_t));
    uint32_t *stopClk = (uint32_t*) malloc(THREADS_NUM*sizeof(uint32_t));
    uint64_t *dsink = (uint64_t*) malloc(THREADS_NUM*sizeof(uint64_t));
    uint32_t *startClk_g;
    uint32_t *stopClk_g;
    uint64_t *posArray_g;
    uint64_t *dsink_g;
    gpuErrchk( hipMalloc(&startClk_g, THREADS_NUM*sizeof(uint32_t)) );
    gpuErrchk( hipMalloc(&stopClk_g, THREADS_NUM*sizeof(uint32_t)) );
    gpuErrchk( hipMalloc(&posArray_g, ARRAY_SIZE*sizeof(uint64_t)) );
    gpuErrchk( hipMalloc(&dsink_g, THREADS_NUM*sizeof(uint64_t)) );
    hipLaunchKernelGGL(( l1_lat), dim3(1),dim3(THREADS_NUM), 0, 0, startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( hipPeekAtLastError() );
    // The blocking copies below also synchronize with the kernel
    gpuErrchk( hipMemcpy(startClk, startClk_g, THREADS_NUM*sizeof(uint32_t), hipMemcpyDeviceToHost) );
    gpuErrchk( hipMemcpy(stopClk, stopClk_g, THREADS_NUM*sizeof(uint32_t), hipMemcpyDeviceToHost) );
    gpuErrchk( hipMemcpy(dsink, dsink_g, THREADS_NUM*sizeof(uint64_t), hipMemcpyDeviceToHost) );
    printf("L1 Latency = %12.4f cycles\n", (float)(stopClk[0]-startClk[0])/ITERS);
    printf("Total Clk number = %u \n", stopClk[0]-startClk[0]);
    // Fix: release device and host buffers (previously leaked at exit)
    gpuErrchk( hipFree(startClk_g) );
    gpuErrchk( hipFree(stopClk_g) );
    gpuErrchk( hipFree(posArray_g) );
    gpuErrchk( hipFree(dsink_g) );
    free(startClk);
    free(stopClk);
    free(dsink);
    return 0;
}
| b035fb25c74ea34f514c24cfc6f95fd6bf03177c.cu | //This code is a modification of L1 cache benchmark from
//"Dissecting the NVIDIA Volta GPU Architecture via Microbenchmarking": https://arxiv.org/pdf/1804.06826.pdf
//This benchmark measures the latency of L1 cache
//This code have been tested on Volta V100 architecture
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define THREADS_NUM 1 //Launch only one thread to calcaulte the latency using a pointer-chasing array technique
#define WARP_SIZE 32
#define ITERS 32768 //iterate over the array ITERS times
#define ARRAY_SIZE 4096 //size of the array
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//TO DO: @Jason, please change the code to be similar to the L2/DRAM latency format
//Measure latency of ITERS reads.
__global__ void l1_lat(uint32_t *startClk, uint32_t *stopClk, uint64_t *posArray, uint64_t *dsink){
// thread index
uint32_t tid = threadIdx.x;
// one thread to initialize the pointer-chasing array
if (tid == 0){
for (uint32_t i=0; i<(ARRAY_SIZE-1); i++)
posArray[i] = (uint64_t)(posArray + i + 1);
posArray[ARRAY_SIZE-1] = (uint64_t)posArray;
}
if(tid < THREADS_NUM){
// a register to avoid compiler optimization
uint64_t *ptr = posArray + tid;
uint64_t ptr1, ptr0;
// initialize the thread pointer with the start address of the array
// use ca modifier to cache the in L1
asm volatile ("{\t\n"
"ld.global.ca.u64 %0, [%1];\n\t"
"}" : "=l"(ptr1) : "l"(ptr) : "memory"
);
// synchronize all threads
asm volatile ("bar.sync 0;");
// start timing
uint32_t start = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
// pointer-chasing ITERS times
// use ca modifier to cache the load in L1
for(uint32_t i=0; i<ITERS; ++i) {
asm volatile ("{\t\n"
"ld.global.ca.u64 %0, [%1];\n\t"
"}" : "=l"(ptr0) : "l"((uint64_t*)ptr1) : "memory"
);
ptr1 = ptr0; //swap the register for the next load
}
// stop timing
uint32_t stop = 0;
asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[tid] = start;
stopClk[tid] = stop;
dsink[tid] = ptr1;
}
}
/// Host driver: allocates timing buffers, runs the l1_lat kernel with a
/// single block of THREADS_NUM thread(s), copies the clock readings back and
/// prints the average per-load L1 latency in cycles.
int main(){
    uint32_t *startClk = (uint32_t*) malloc(THREADS_NUM*sizeof(uint32_t));
    uint32_t *stopClk = (uint32_t*) malloc(THREADS_NUM*sizeof(uint32_t));
    uint64_t *dsink = (uint64_t*) malloc(THREADS_NUM*sizeof(uint64_t));
    uint32_t *startClk_g;
    uint32_t *stopClk_g;
    uint64_t *posArray_g;
    uint64_t *dsink_g;
    gpuErrchk( cudaMalloc(&startClk_g, THREADS_NUM*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&stopClk_g, THREADS_NUM*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&posArray_g, ARRAY_SIZE*sizeof(uint64_t)) );
    gpuErrchk( cudaMalloc(&dsink_g, THREADS_NUM*sizeof(uint64_t)) );
    l1_lat<<<1,THREADS_NUM>>>(startClk_g, stopClk_g, posArray_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    // The blocking copies below also synchronize with the kernel
    gpuErrchk( cudaMemcpy(startClk, startClk_g, THREADS_NUM*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(stopClk, stopClk_g, THREADS_NUM*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(dsink, dsink_g, THREADS_NUM*sizeof(uint64_t), cudaMemcpyDeviceToHost) );
    printf("L1 Latency = %12.4f cycles\n", (float)(stopClk[0]-startClk[0])/ITERS);
    printf("Total Clk number = %u \n", stopClk[0]-startClk[0]);
    // Fix: release device and host buffers (previously leaked at exit)
    gpuErrchk( cudaFree(startClk_g) );
    gpuErrchk( cudaFree(stopClk_g) );
    gpuErrchk( cudaFree(posArray_g) );
    gpuErrchk( cudaFree(dsink_g) );
    free(startClk);
    free(stopClk);
    free(dsink);
    return 0;
}
|
625967a038b76801c7634b88bc6e144d670ec2eb.hip | // !!! This is a file automatically generated by hipify!!!
/*
* @brief: this file contains the definition of svm trainer class
* Created on: May 24, 2012
* Author: Zeyi Wen
*/
#include "svmTrainer.h"
#include "storageManager.h"
#include <sys/time.h>
/*
 * @brief: set the data involved in training; in n-fold cross validation the
 *         training data may consist of two continuous pieces
 * @param: nStart1/nEnd1: first continuous segment [nStart1, nEnd1]
 * @param: nStart2/nEnd2: second continuous segment [nStart2, nEnd2]
 *         (the last element of each segment is included)
 * @return: true when the solver accepted the subset
 */
bool CSVMTrainer::SetInvolveTrainingData(int nStart1, int nEnd1, int nStart2, int nEnd2)
{
    // Forward both segments to the SMO solver, which records the active subset
    return m_pSMOSolver->SetInvolveData(nStart1, nEnd1, nStart2, nEnd2);
}
/**
 * @brief: training preparation: allocates the SMO solver buffers, sizes and
 * initializes the GPU kernel-row cache, allocates the hessian-row read
 * buffer, and seeds the solver's labels/gradient values from the device
 * subset. Also accumulates the preparation time into nTimeOfPrep and resets
 * the alpha buffer on the device to zero.
 */
__host__ void CSVMTrainer::TrainStarting(int nNumofInstance, int nNumofTrainingExample,
float_point *pfDevYiFValueSubset, float_point *pfDevAlphaSubset, int *pnDevLabelSubset)
{
timespec timePrepS, timePrepE;
clock_gettime(CLOCK_REALTIME, &timePrepS);
//allocate memory for SMO slover
bool bPreperation = m_pSMOSolver->SMOSolverPreparation(nNumofTrainingExample);
assert(bPreperation != false);
// Ask the storage manager how many kernel-matrix rows fit in GPU memory
StorageManager *manager = StorageManager::getManager();
int nSizeofCache = manager->RowInGPUCache(nNumofTrainingExample, nNumofInstance);
timeval tInit1, tInit2;
float_point InitCacheElapsedTime;
gettimeofday(&tInit1, NULL);
//initialize cache
cout << "cache size v.s. ins is " << nSizeofCache << " v.s. " << nNumofInstance << endl;
bool bInitCache = m_pSMOSolver->InitCache(nSizeofCache, nNumofInstance);
assert(bInitCache != false);
cout << "cache initialized" << endl;
gettimeofday(&tInit2, NULL);
// Cache-initialization wall time in milliseconds (informational)
InitCacheElapsedTime = (tInit2.tv_sec - tInit1.tv_sec) * 1000.0;
InitCacheElapsedTime += (tInit2.tv_usec - tInit1.tv_usec) / 1000.0;
cout << "initCache time: " << InitCacheElapsedTime << " ms.\n";
clock_gettime(CLOCK_REALTIME, &timePrepE);
// Accumulate preparation time (nanoseconds); guard against clock skew
long lTempPrep = ((timePrepE.tv_sec - timePrepS.tv_sec) * 1e9 + (timePrepE.tv_nsec - timePrepS.tv_nsec));
if(lTempPrep > 0)
nTimeOfPrep += lTempPrep;
else
cout << "preparation timer error" << endl;
//allocate memory for reading hessian row
m_pSMOSolver->m_pHessianReader->AllocateBuffer(1);
// Seed solver state from the device-side training subset; alphas start at 0
checkCudaErrors(hipMemcpy(m_pSMOSolver->m_pnLabel, pnDevLabelSubset,
sizeof(int) * nNumofTrainingExample, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(m_pSMOSolver->m_pfGValue, pfDevYiFValueSubset,
sizeof(float_point) * nNumofTrainingExample, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemset(pfDevAlphaSubset, 0, sizeof(float_point) * nNumofTrainingExample));
}
/*
 * @brief: clean up when training terminates and populate the svm_model:
 *         copies labels/alphas/gradients back from the GPU, collects the
 *         support vectors, and computes the model bias (rho).
 * @param nIter: # of SMO iterations performed (reported only)
 * @param pfP: linear term of the objective; when non-NULL, used to compute
 *             and print the objective value
 */
void CSVMTrainer::TrainEnding(int nIter, int nNumofTrainingExample, int nNumofInstance, svm_model &model,
int *pnDevLabelSubset, float_point *pfDevAlphaSubset, float_point *pfDevYiFValueSubset,
float_point *pfP)
{
// hipStreamDestroy(m_pSMOSolver->m_stream1_Hessian_row);//destroy stream
//release buffer for reading hessian row
m_pSMOSolver->m_pHessianReader->ReleaseBuffer();
//m_pSMOSolver->m_pCache->PrintCache();
cout << "# of iteration: " << nIter << endl;
//store classification result in SVM Model
// host-side copies of the solver state; freed at the end of this function
int *pnLabel = new int[nNumofTrainingExample];
float_point *pfAlpha = new float_point[nNumofTrainingExample];
float_point *pfYiFValue = new float_point[nNumofTrainingExample];
memset(pnLabel, 0, sizeof(int) * nNumofTrainingExample);
memset(pfAlpha, 0, sizeof(float_point) * nNumofTrainingExample);
memset(pfYiFValue, 0, sizeof(float_point) * nNumofTrainingExample);
checkCudaErrors(hipMemcpy(pnLabel, pnDevLabelSubset, sizeof(int) * nNumofTrainingExample, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(pfAlpha, pfDevAlphaSubset, sizeof(float_point) * nNumofTrainingExample, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(pfYiFValue, pfDevYiFValueSubset, sizeof(float_point) * nNumofTrainingExample, hipMemcpyDeviceToHost));
//compute the # of support vectors, and get bias of the model
// scratch arrays sized for the worst case (every example is a SV)
float_point *pfYiAlphaTemp = new float_point[nNumofTrainingExample];
float_point *pfPositiveAlphaTemp = new float_point[nNumofTrainingExample];
float_point *pfNegativeAlphaTemp = new float_point[nNumofTrainingExample];
int *pnIndexofSVTemp = new int[nNumofTrainingExample];
float_point fYGsumofFreeAlpha = 0;
int nNumofFreeAlpha = 0;
int nNumofSVs = 0;
int nNumofPSV = 0, nNumofNSV = 0;
// calculate objective value: 0.5 * sum_i alpha_i*y_i*(F_i + p_i)
if(pfP != NULL)
{
float v = 0;
for(int i=0;i<nNumofTrainingExample;i++)
v += pfAlpha[i] * pnLabel[i]* (pfYiFValue[i] + pfP[i]);
cout << "obj =" << v/2 << endl;
}
float sum_alpha = 0;
// single pass over the instances: gather SVs, split by label sign,
// and accumulate the free-alpha statistics used for the bias estimate
for(int i = 0; i < nNumofInstance; i++)
{
//pfAlpha[i] = pfAlpha[i] - pfAlpha[i+nNumofInstance]; //this line is for regression
sum_alpha += fabs(pfAlpha[i]);
/*if(pfAlpha[i] == 0)
{
cout << "alpha=0: " << m_pSMOSolver->m_pfGValue[i] << endl;
if(i % 1000 == 0)sleep(1);
}
if(pfAlpha[i] == gfPCost)
{
cout << "alpha=c: " << m_pSMOSolver->m_pfGValue[i] << endl;
if(i % 1000 == 0)sleep(1);
}*/
if(pfAlpha[i] != 0)
{
//keep support vector in temp memory (coefficient is y_i * alpha_i)
pfYiAlphaTemp[nNumofSVs] = pnLabel[i] * pfAlpha[i];
pnIndexofSVTemp[nNumofSVs] = i;
//convert index to global index in Hessian matrix
m_pSMOSolver->MapIndexToHessian(pnIndexofSVTemp[nNumofSVs]);
//count the # of support vectors
nNumofSVs++;
if(pnLabel[i] > 0)
{
pfPositiveAlphaTemp[nNumofPSV] = pfAlpha[i];
nNumofPSV++;
}
else
{
pfNegativeAlphaTemp[nNumofNSV] = pfAlpha[i];
nNumofNSV++;
}
//for getting bias: a "free" SV has alpha strictly below its box bound
if((pfAlpha[i] < gfPCost && pnLabel[i] > 0) || (pfAlpha[i] < gfNCost && pnLabel[i] < 0))
{
fYGsumofFreeAlpha += pfYiFValue[i];//(pnLabel[i] * pfYiFValue[i]);
nNumofFreeAlpha++;
}
}
}
cout << "nu=" << sum_alpha/nNumofInstance << endl;
cout << "# free SV " << nNumofFreeAlpha << "; # of SV " << nNumofSVs << endl;
/**************** store result to SVM model ***********************/
model.label = new int[2];
model.nSV = new int[2];
model.label[0] = 1;
model.label[1] = -1;
model.nSV[0] = nNumofPSV;
model.nSV[1] = nNumofNSV;
model.pnIndexofSV = new int[nNumofSVs];
model.SV = new svm_node*[nNumofSVs];
//sv_coef is a second level pointer, which is for multiclasses SVM
model.sv_coef = new float_point*[3];
model.sv_coef[0] = new float_point[nNumofSVs]; //this coef is for GPU computation convenience
model.sv_coef[1] = new float_point[nNumofPSV]; //this coef is for saving model (positive support vectors)
model.sv_coef[2] = new float_point[nNumofNSV]; //this coef is for saving model (negative support vectors)
memcpy(model.sv_coef[0], pfYiAlphaTemp, sizeof(float_point) * nNumofSVs);
memcpy(model.sv_coef[1], pfPositiveAlphaTemp, sizeof(float_point) * nNumofPSV);
memcpy(model.sv_coef[2], pfNegativeAlphaTemp, sizeof(float_point) * nNumofNSV);
memcpy(model.pnIndexofSV, pnIndexofSVTemp, sizeof(int) * nNumofSVs);
//compute bias as the midpoint of the optimality gap (-up + low) / 2
model.rho = new float_point[1];
/*if(nNumofFreeAlpha > 0)
{
model.rho[0] = (fYGsumofFreeAlpha / nNumofFreeAlpha);
}
else
{*/
model.rho[0] = (-m_pSMOSolver->upValue + m_pSMOSolver->m_fLowValue) / 2;
//}
// release all host-side scratch buffers
delete[] pnLabel;
delete[] pfAlpha;
delete[] pfYiAlphaTemp;
delete[] pfPositiveAlphaTemp;
delete[] pfNegativeAlphaTemp;
delete[] pnIndexofSVTemp;
delete[] pfYiFValue;
cout << m_pSMOSolver->upValue << " v.s. " << m_pSMOSolver->m_fLowValue << endl;
cout << "bias=" << model.rho[0] << endl;
//#####
m_pSMOSolver->SMOSolverEnd();
m_pSMOSolver->CleanCache();
}
/*
 * @brief: save SVM model to file (LIBSVM-style text format) and, as a side
 *         effect, materialise model->SV[i] as (-1)-terminated svm_node arrays.
 * @param strFileName: path of the model file to write
 * @param model: trained model; model->l and model->SV are filled in here
 * @param v_vTrainingExample: dense training data indexed by global instance id
 * @return: true on success; false if the file could not be opened
 */
bool CSVMTrainer::SaveModel(string strFileName, svm_model *model, vector<vector<float_point> >& v_vTrainingExample)
{
    bool bReturn = false;
    ofstream writeOut;
    writeOut.open(strFileName.c_str(), ios::out);
    if(!writeOut.is_open())
    {
        return bReturn;
    }
    bReturn = true;
    //these two outputs may need to be improved (svm/kernel type are hard-coded)
    writeOut << "svm_type c_svc" << endl;
    writeOut << "kernel_type rbf" << endl;
    //stable output
    writeOut << "gamma " << gfGamma << endl;
    writeOut << "nr_class " << 2 << endl;
    writeOut << "total_sv " << model->nSV[0] + model->nSV[1] << endl;
    writeOut << "rho " << model->rho[0] << endl;
    //have potential risk (assumes exactly two classes)
    writeOut << "label " << model->label[0] << " " << model->label[1] << endl;
    writeOut << "nr_sv " << model->nSV[0] << " " << model->nSV[1] << endl;
    //data of support vectors
    writeOut << "SV" << endl;
    const float_point * const *sv_coef = model->sv_coef;
    int *pnIndexofSV = model->pnIndexofSV;
    int nNumofSVs = model->nSV[0] + model->nSV[1];//Better to use as a function (similar to GetNumSV as in svmPredictor.cu)
    model->l = nNumofSVs;
    for(int i = 0; i < nNumofSVs; i++)
    {
        writeOut << sv_coef[0][i] << " ";
        //first pass: emit the sparse "dim:value" pairs and count non-zero dims
        int nonzeroDimCounter = 0;
        for(int j = 0; j < (int)v_vTrainingExample[0].size(); j++)//for each dimension
        {
            if(v_vTrainingExample[pnIndexofSV[i]][j] > 0)//for the support vector i
            {
                writeOut << j << ":" << v_vTrainingExample[pnIndexofSV[i]][j] << " ";
                nonzeroDimCounter++;
            }
        }
        //second pass: store the same sparse nodes in the model
        model->SV[i] = new svm_node[nonzeroDimCounter + 1];//one extra node terminates the SV
        int curDimCounter = 0;
        for(int j = 0; j < (int)v_vTrainingExample[0].size(); j++)//for each dimension
        {
            if(v_vTrainingExample[pnIndexofSV[i]][j] > 0)//for the support vector i
            {
                //BUG FIX: curDimCounter was previously incremented by BOTH of
                //the following statements, so .index and .value landed in
                //different nodes and the writes ran past the allocated array
                //(heap overflow for any SV with >= 1 non-zero dimension).
                model->SV[i][curDimCounter].index = j;
                model->SV[i][curDimCounter].value = v_vTrainingExample[pnIndexofSV[i]][j];
                curDimCounter++;
            }
        }
        //sentinel node marks the end of this support vector
        model->SV[i][curDimCounter].index = -1;
        model->SV[i][curDimCounter].value = -1;
        writeOut << endl;
    }
    writeOut.close();
    return bReturn;
}
| 625967a038b76801c7634b88bc6e144d670ec2eb.cu | /*
* @brief: this file contains the definition of svm trainer class
* Created on: May 24, 2012
* Author: Zeyi Wen
*/
#include "svmTrainer.h"
#include "storageManager.h"
#include <sys/time.h>
/*
 * @brief: register the two training-data ranges used in this round.
 *         Two ranges are needed because n-fold cross validation may leave the
 *         held-out fold as a hole in the middle of the data.
 * @param: nStart1/nEnd1: first contiguous piece of data (inclusive bounds)
 * @param: nStart2/nEnd2: second contiguous piece of data (inclusive bounds)
 * @return: true if the SMO solver accepted the range update
 */
bool CSVMTrainer::SetInvolveTrainingData(int nStart1, int nEnd1, int nStart2, int nEnd2)
{
    return m_pSMOSolver->SetInvolveData(nStart1, nEnd1, nStart2, nEnd2);
}
/**
 * @brief: training preparation: allocates SMO solver resources, sizes and
 *         initialises the GPU kernel-row cache, and copies the initial
 *         labels / gradient values from device memory into the solver's
 *         host-side buffers. Also zeroes the alpha array on the device.
 * @param nNumofInstance: # of instances in this sub-problem
 * @param nNumofTrainingExample: total # of examples involved in training
 * @param pfDevYiFValueSubset: device array of initial y_i*F_i values
 * @param pfDevAlphaSubset: device array of alpha values (zeroed here)
 * @param pnDevLabelSubset: device array of instance labels
 */
void CSVMTrainer::TrainStarting(int nNumofInstance, int nNumofTrainingExample,
float_point *pfDevYiFValueSubset, float_point *pfDevAlphaSubset, int *pnDevLabelSubset)
{
// wall-clock timer for the whole preparation phase (accumulated in nTimeOfPrep)
timespec timePrepS, timePrepE;
clock_gettime(CLOCK_REALTIME, &timePrepS);
//allocate memory for SMO solver
bool bPreperation = m_pSMOSolver->SMOSolverPreparation(nNumofTrainingExample);
assert(bPreperation != false);
// ask the storage manager how many Hessian rows fit in the GPU cache
StorageManager *manager = StorageManager::getManager();
int nSizeofCache = manager->RowInGPUCache(nNumofTrainingExample, nNumofInstance);
// separate timer just for cache initialisation (reported in ms)
timeval tInit1, tInit2;
float_point InitCacheElapsedTime;
gettimeofday(&tInit1, NULL);
//initialize cache
cout << "cache size v.s. ins is " << nSizeofCache << " v.s. " << nNumofInstance << endl;
bool bInitCache = m_pSMOSolver->InitCache(nSizeofCache, nNumofInstance);
assert(bInitCache != false);
cout << "cache initialized" << endl;
gettimeofday(&tInit2, NULL);
InitCacheElapsedTime = (tInit2.tv_sec - tInit1.tv_sec) * 1000.0;
InitCacheElapsedTime += (tInit2.tv_usec - tInit1.tv_usec) / 1000.0;
cout << "initCache time: " << InitCacheElapsedTime << " ms.\n";
clock_gettime(CLOCK_REALTIME, &timePrepE);
// preparation time in nanoseconds; negative values indicate a timer glitch
long lTempPrep = ((timePrepE.tv_sec - timePrepS.tv_sec) * 1e9 + (timePrepE.tv_nsec - timePrepS.tv_nsec));
if(lTempPrep > 0)
nTimeOfPrep += lTempPrep;
else
cout << "preparation timer error" << endl;
//allocate memory for reading hessian row (one row buffer)
m_pSMOSolver->m_pHessianReader->AllocateBuffer(1);
// pull the initial labels and gradient values to the solver's host buffers
checkCudaErrors(cudaMemcpy(m_pSMOSolver->m_pnLabel, pnDevLabelSubset,
sizeof(int) * nNumofTrainingExample, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(m_pSMOSolver->m_pfGValue, pfDevYiFValueSubset,
sizeof(float_point) * nNumofTrainingExample, cudaMemcpyDeviceToHost));
// alphas start at zero for a fresh training run
checkCudaErrors(cudaMemset(pfDevAlphaSubset, 0, sizeof(float_point) * nNumofTrainingExample));
}
/*
 * @brief: clean up when training terminates and populate the svm_model:
 *         copies labels/alphas/gradients back from the GPU, collects the
 *         support vectors, and computes the model bias (rho).
 * @param nIter: # of SMO iterations performed (reported only)
 * @param pfP: linear term of the objective; when non-NULL, used to compute
 *             and print the objective value
 */
void CSVMTrainer::TrainEnding(int nIter, int nNumofTrainingExample, int nNumofInstance, svm_model &model,
int *pnDevLabelSubset, float_point *pfDevAlphaSubset, float_point *pfDevYiFValueSubset,
float_point *pfP)
{
// cudaStreamDestroy(m_pSMOSolver->m_stream1_Hessian_row);//destroy stream
//release buffer for reading hessian row
m_pSMOSolver->m_pHessianReader->ReleaseBuffer();
//m_pSMOSolver->m_pCache->PrintCache();
cout << "# of iteration: " << nIter << endl;
//store classification result in SVM Model
// host-side copies of the solver state; freed at the end of this function
int *pnLabel = new int[nNumofTrainingExample];
float_point *pfAlpha = new float_point[nNumofTrainingExample];
float_point *pfYiFValue = new float_point[nNumofTrainingExample];
memset(pnLabel, 0, sizeof(int) * nNumofTrainingExample);
memset(pfAlpha, 0, sizeof(float_point) * nNumofTrainingExample);
memset(pfYiFValue, 0, sizeof(float_point) * nNumofTrainingExample);
checkCudaErrors(cudaMemcpy(pnLabel, pnDevLabelSubset, sizeof(int) * nNumofTrainingExample, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(pfAlpha, pfDevAlphaSubset, sizeof(float_point) * nNumofTrainingExample, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(pfYiFValue, pfDevYiFValueSubset, sizeof(float_point) * nNumofTrainingExample, cudaMemcpyDeviceToHost));
//compute the # of support vectors, and get bias of the model
// scratch arrays sized for the worst case (every example is a SV)
float_point *pfYiAlphaTemp = new float_point[nNumofTrainingExample];
float_point *pfPositiveAlphaTemp = new float_point[nNumofTrainingExample];
float_point *pfNegativeAlphaTemp = new float_point[nNumofTrainingExample];
int *pnIndexofSVTemp = new int[nNumofTrainingExample];
float_point fYGsumofFreeAlpha = 0;
int nNumofFreeAlpha = 0;
int nNumofSVs = 0;
int nNumofPSV = 0, nNumofNSV = 0;
// calculate objective value: 0.5 * sum_i alpha_i*y_i*(F_i + p_i)
if(pfP != NULL)
{
float v = 0;
for(int i=0;i<nNumofTrainingExample;i++)
v += pfAlpha[i] * pnLabel[i]* (pfYiFValue[i] + pfP[i]);
cout << "obj =" << v/2 << endl;
}
float sum_alpha = 0;
// single pass over the instances: gather SVs, split by label sign,
// and accumulate the free-alpha statistics used for the bias estimate
for(int i = 0; i < nNumofInstance; i++)
{
//pfAlpha[i] = pfAlpha[i] - pfAlpha[i+nNumofInstance]; //this line is for regression
sum_alpha += fabs(pfAlpha[i]);
/*if(pfAlpha[i] == 0)
{
cout << "alpha=0: " << m_pSMOSolver->m_pfGValue[i] << endl;
if(i % 1000 == 0)sleep(1);
}
if(pfAlpha[i] == gfPCost)
{
cout << "alpha=c: " << m_pSMOSolver->m_pfGValue[i] << endl;
if(i % 1000 == 0)sleep(1);
}*/
if(pfAlpha[i] != 0)
{
//keep support vector in temp memory (coefficient is y_i * alpha_i)
pfYiAlphaTemp[nNumofSVs] = pnLabel[i] * pfAlpha[i];
pnIndexofSVTemp[nNumofSVs] = i;
//convert index to global index in Hessian matrix
m_pSMOSolver->MapIndexToHessian(pnIndexofSVTemp[nNumofSVs]);
//count the # of support vectors
nNumofSVs++;
if(pnLabel[i] > 0)
{
pfPositiveAlphaTemp[nNumofPSV] = pfAlpha[i];
nNumofPSV++;
}
else
{
pfNegativeAlphaTemp[nNumofNSV] = pfAlpha[i];
nNumofNSV++;
}
//for getting bias: a "free" SV has alpha strictly below its box bound
if((pfAlpha[i] < gfPCost && pnLabel[i] > 0) || (pfAlpha[i] < gfNCost && pnLabel[i] < 0))
{
fYGsumofFreeAlpha += pfYiFValue[i];//(pnLabel[i] * pfYiFValue[i]);
nNumofFreeAlpha++;
}
}
}
cout << "nu=" << sum_alpha/nNumofInstance << endl;
cout << "# free SV " << nNumofFreeAlpha << "; # of SV " << nNumofSVs << endl;
/**************** store result to SVM model ***********************/
model.label = new int[2];
model.nSV = new int[2];
model.label[0] = 1;
model.label[1] = -1;
model.nSV[0] = nNumofPSV;
model.nSV[1] = nNumofNSV;
model.pnIndexofSV = new int[nNumofSVs];
model.SV = new svm_node*[nNumofSVs];
//sv_coef is a second level pointer, which is for multiclasses SVM
model.sv_coef = new float_point*[3];
model.sv_coef[0] = new float_point[nNumofSVs]; //this coef is for GPU computation convenience
model.sv_coef[1] = new float_point[nNumofPSV]; //this coef is for saving model (positive support vectors)
model.sv_coef[2] = new float_point[nNumofNSV]; //this coef is for saving model (negative support vectors)
memcpy(model.sv_coef[0], pfYiAlphaTemp, sizeof(float_point) * nNumofSVs);
memcpy(model.sv_coef[1], pfPositiveAlphaTemp, sizeof(float_point) * nNumofPSV);
memcpy(model.sv_coef[2], pfNegativeAlphaTemp, sizeof(float_point) * nNumofNSV);
memcpy(model.pnIndexofSV, pnIndexofSVTemp, sizeof(int) * nNumofSVs);
//compute bias as the midpoint of the optimality gap (-up + low) / 2
model.rho = new float_point[1];
/*if(nNumofFreeAlpha > 0)
{
model.rho[0] = (fYGsumofFreeAlpha / nNumofFreeAlpha);
}
else
{*/
model.rho[0] = (-m_pSMOSolver->upValue + m_pSMOSolver->m_fLowValue) / 2;
//}
// release all host-side scratch buffers
delete[] pnLabel;
delete[] pfAlpha;
delete[] pfYiAlphaTemp;
delete[] pfPositiveAlphaTemp;
delete[] pfNegativeAlphaTemp;
delete[] pnIndexofSVTemp;
delete[] pfYiFValue;
cout << m_pSMOSolver->upValue << " v.s. " << m_pSMOSolver->m_fLowValue << endl;
cout << "bias=" << model.rho[0] << endl;
//#####
m_pSMOSolver->SMOSolverEnd();
m_pSMOSolver->CleanCache();
}
/*
 * @brief: save SVM model to file (LIBSVM-style text format) and, as a side
 *         effect, materialise model->SV[i] as (-1)-terminated svm_node arrays.
 * @param strFileName: path of the model file to write
 * @param model: trained model; model->l and model->SV are filled in here
 * @param v_vTrainingExample: dense training data indexed by global instance id
 * @return: true on success; false if the file could not be opened
 */
bool CSVMTrainer::SaveModel(string strFileName, svm_model *model, vector<vector<float_point> >& v_vTrainingExample)
{
    bool bReturn = false;
    ofstream writeOut;
    writeOut.open(strFileName.c_str(), ios::out);
    if(!writeOut.is_open())
    {
        return bReturn;
    }
    bReturn = true;
    //these two outputs may need to be improved (svm/kernel type are hard-coded)
    writeOut << "svm_type c_svc" << endl;
    writeOut << "kernel_type rbf" << endl;
    //stable output
    writeOut << "gamma " << gfGamma << endl;
    writeOut << "nr_class " << 2 << endl;
    writeOut << "total_sv " << model->nSV[0] + model->nSV[1] << endl;
    writeOut << "rho " << model->rho[0] << endl;
    //have potential risk (assumes exactly two classes)
    writeOut << "label " << model->label[0] << " " << model->label[1] << endl;
    writeOut << "nr_sv " << model->nSV[0] << " " << model->nSV[1] << endl;
    //data of support vectors
    writeOut << "SV" << endl;
    const float_point * const *sv_coef = model->sv_coef;
    int *pnIndexofSV = model->pnIndexofSV;
    int nNumofSVs = model->nSV[0] + model->nSV[1];//Better to use as a function (similar to GetNumSV as in svmPredictor.cu)
    model->l = nNumofSVs;
    for(int i = 0; i < nNumofSVs; i++)
    {
        writeOut << sv_coef[0][i] << " ";
        //first pass: emit the sparse "dim:value" pairs and count non-zero dims
        int nonzeroDimCounter = 0;
        for(int j = 0; j < (int)v_vTrainingExample[0].size(); j++)//for each dimension
        {
            if(v_vTrainingExample[pnIndexofSV[i]][j] > 0)//for the support vector i
            {
                writeOut << j << ":" << v_vTrainingExample[pnIndexofSV[i]][j] << " ";
                nonzeroDimCounter++;
            }
        }
        //second pass: store the same sparse nodes in the model
        model->SV[i] = new svm_node[nonzeroDimCounter + 1];//one extra node terminates the SV
        int curDimCounter = 0;
        for(int j = 0; j < (int)v_vTrainingExample[0].size(); j++)//for each dimension
        {
            if(v_vTrainingExample[pnIndexofSV[i]][j] > 0)//for the support vector i
            {
                //BUG FIX: curDimCounter was previously incremented by BOTH of
                //the following statements, so .index and .value landed in
                //different nodes and the writes ran past the allocated array
                //(heap overflow for any SV with >= 1 non-zero dimension).
                model->SV[i][curDimCounter].index = j;
                model->SV[i][curDimCounter].value = v_vTrainingExample[pnIndexofSV[i]][j];
                curDimCounter++;
            }
        }
        //sentinel node marks the end of this support vector
        model->SV[i][curDimCounter].index = -1;
        model->SV[i][curDimCounter].value = -1;
        writeOut << endl;
    }
    writeOut.close();
    return bReturn;
}
|
174157b7b11c152d54f29d2fbfec85a6da81bd37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef UTIL_CU_
#define UTIL_CU_
#include "util.h"
// Copy the expanded round keys from global memory (rk) into shared memory (s),
// one 32-bit word per thread. Assumes blockDim.x >= MAX_RK_SIZE so the whole
// key schedule is covered; the barrier publishes the keys to the whole block.
__device__ void load_roundkey(uint *s, uint *rk) {
int tid = threadIdx.x;
if (tid < MAX_RK_SIZE)
s[tid] = rk[tid];
__syncthreads();
}
// Copy a 256-entry byte S-box from global memory (gmem) into shared memory
// (smem), one byte per thread. Assumes blockDim.x >= 256; the barrier makes
// the table visible block-wide before any lookup.
__device__ void load_smem_sbox(uchar *smem, uchar *gmem) {
int tid = threadIdx.x;
if (tid < 256)
smem[tid] = gmem[tid];
__syncthreads();
}
#if NUM_THREADS == 1024
// NUM_THREADS == 1024 variant: copy four 256-word lookup tables from global
// to shared memory. Each 256-thread group handles one table, one 32-bit word
// per thread; byte pointers are reinterpreted as uint* for word-wide copies.
__device__ void load_smem(uchar *st0, uchar *gt0, uchar *st1, uchar *gt1, uchar *st2, uchar *gt2, uchar *st3, uchar *gt3) {
int tid = threadIdx.x;
uint *s, *g;
if (tid < 256) {
s = (uint *)st0; g = (uint *)gt0;
s[tid] = g[tid];
} else if (tid < 512) {
tid -= 256;
s = (uint *)st1; g = (uint *)gt1;
s[tid] = g[tid];
} else if (tid < 768) {
tid -= 512;
s = (uint *)st2; g = (uint *)gt2;
s[tid] = g[tid];
} else {
tid -= 768;
s = (uint *)st3; g = (uint *)gt3;
s[tid] = g[tid];
}
__syncthreads();
}
// Same copy for tables already typed as uint: one word per thread,
// one table per 256-thread group.
__device__ void load_smem(uint *st0, uint *gt0, uint *st1, uint *gt1, uint *st2, uint *gt2, uint *st3, uint *gt3) {
int tid = threadIdx.x;
if (tid < 256) {
st0[tid] = gt0[tid];
} else if (tid < 512) {
tid -= 256;
st1[tid] = gt1[tid];
} else if (tid < 768) {
tid -= 512;
st2[tid] = gt2[tid];
} else {
tid -= 768;
st3[tid] = gt3[tid];
}
__syncthreads();
}
#elif NUM_THREADS == 512
// NUM_THREADS == 512 variant: each 256-thread half copies one word of two
// tables (threads 0-255 -> tables 0 and 2, threads 256-511 -> tables 1 and 3).
__device__ void load_smem(uchar *st0, uchar *gt0, uchar *st1, uchar *gt1, uchar *st2, uchar *gt2, uchar *st3, uchar *gt3) {
int tid = threadIdx.x;
uint *s, *g;
if (tid < 256) {
s = (uint *)st0; g = (uint *)gt0;
s[tid] = g[tid];
s = (uint *)st2; g = (uint *)gt2;
s[tid] = g[tid];
} else {
tid -= 256;
s = (uint *)st1; g = (uint *)gt1;
s[tid] = g[tid];
s = (uint *)st3; g = (uint *)gt3;
s[tid] = g[tid];
}
__syncthreads();
}
// uint-typed overload of the 512-thread copy above.
__device__ void load_smem(uint *st0, uint *gt0, uint *st1, uint *gt1, uint *st2, uint *gt2, uint *st3, uint *gt3) {
int tid = threadIdx.x;
if (tid < 256) {
st0[tid] = gt0[tid];
st2[tid] = gt2[tid];
} else {
tid -= 256;
st1[tid] = gt1[tid];
st3[tid] = gt3[tid];
}
__syncthreads();
}
#elif NUM_THREADS == 256
// NUM_THREADS == 256 variant: every thread copies one 32-bit word of each of
// the four 256-word tables.
__device__ void load_smem(uchar *st0, uchar *gt0, uchar *st1, uchar *gt1, uchar *st2, uchar *gt2, uchar *st3, uchar *gt3) {
int tid = threadIdx.x;
uint *s, *g;
s = (uint *)st0; g = (uint *)gt0;
s[tid] = g[tid];
s = (uint *)st1; g = (uint *)gt1;
s[tid] = g[tid];
s = (uint *)st2; g = (uint *)gt2;
s[tid] = g[tid];
s = (uint *)st3; g = (uint *)gt3;
s[tid] = g[tid];
__syncthreads();
}
// uint-typed overload of the 256-thread copy above.
__device__ void load_smem(uint *st0, uint *gt0, uint *st1, uint *gt1, uint *st2, uint *gt2, uint *st3, uint *gt3) {
int tid = threadIdx.x;
st0[tid] = gt0[tid];
st1[tid] = gt1[tid];
st2[tid] = gt2[tid];
st3[tid] = gt3[tid];
__syncthreads();
}
#elif NUM_THREADS == 128
#define GET_TWO( tid, ptr_a, ptr_g ) {\
*( ((uint*)(ptr_a) ) + ( 2 * (tid) ) ) = *( ( (uint*)(ptr_g) ) + ( 2 * (tid) ) );\
*( ((uint*)(ptr_a) ) + ( ( 2 * (tid) ) + 1 ) ) = *( ( (uint*)(ptr_g) ) + ( ( 2 * (tid) ) + 1 ) ); \
}
// NUM_THREADS == 128 variant: each thread copies two consecutive 32-bit words
// of each of the four 256-word tables via the GET_TWO macro.
// Fix: removed the unused 'uint *s, *g;' locals (left over from the other
// NUM_THREADS variants; GET_TWO does its own pointer casts).
__device__ void load_smem(uchar *st0, uchar *gt0, uchar *st1, uchar *gt1, uchar *st2, uchar *gt2, uchar *st3, uchar *gt3) {
    int tid = threadIdx.x;
    GET_TWO( tid, st0, gt0 );
    GET_TWO( tid, st1, gt1 );
    GET_TWO( tid, st2, gt2 );
    GET_TWO( tid, st3, gt3 );
    __syncthreads();
}
// uint-typed overload of the 128-thread copy: two words of each table per
// thread via GET_TWO.
__device__ void load_smem(uint *st0, uint *gt0, uint *st1, uint *gt1, uint *st2, uint *gt2, uint *st3, uint *gt3) {
int tid = threadIdx.x;
GET_TWO( tid, st0, gt0 );
GET_TWO( tid, st1, gt1 );
GET_TWO( tid, st2, gt2 );
GET_TWO( tid, st3, gt3 );
__syncthreads();
}
#else
#error NUM_THREADS must be 128, 256, 512 or 1024
#endif // NUM_THREADS
#endif
| 174157b7b11c152d54f29d2fbfec85a6da81bd37.cu | #ifndef UTIL_CU_
#define UTIL_CU_
#include "util.h"
// Copy the expanded round keys from global memory (rk) into shared memory (s),
// one 32-bit word per thread. Assumes blockDim.x >= MAX_RK_SIZE so the whole
// key schedule is covered; the barrier publishes the keys to the whole block.
__device__ void load_roundkey(uint *s, uint *rk) {
int tid = threadIdx.x;
if (tid < MAX_RK_SIZE)
s[tid] = rk[tid];
__syncthreads();
}
// Copy a 256-entry byte S-box from global memory (gmem) into shared memory
// (smem), one byte per thread. Assumes blockDim.x >= 256; the barrier makes
// the table visible block-wide before any lookup.
__device__ void load_smem_sbox(uchar *smem, uchar *gmem) {
int tid = threadIdx.x;
if (tid < 256)
smem[tid] = gmem[tid];
__syncthreads();
}
#if NUM_THREADS == 1024
// NUM_THREADS == 1024 variant: copy four 256-word lookup tables from global
// to shared memory. Each 256-thread group handles one table, one 32-bit word
// per thread; byte pointers are reinterpreted as uint* for word-wide copies.
__device__ void load_smem(uchar *st0, uchar *gt0, uchar *st1, uchar *gt1, uchar *st2, uchar *gt2, uchar *st3, uchar *gt3) {
int tid = threadIdx.x;
uint *s, *g;
if (tid < 256) {
s = (uint *)st0; g = (uint *)gt0;
s[tid] = g[tid];
} else if (tid < 512) {
tid -= 256;
s = (uint *)st1; g = (uint *)gt1;
s[tid] = g[tid];
} else if (tid < 768) {
tid -= 512;
s = (uint *)st2; g = (uint *)gt2;
s[tid] = g[tid];
} else {
tid -= 768;
s = (uint *)st3; g = (uint *)gt3;
s[tid] = g[tid];
}
__syncthreads();
}
// Same copy for tables already typed as uint: one word per thread,
// one table per 256-thread group.
__device__ void load_smem(uint *st0, uint *gt0, uint *st1, uint *gt1, uint *st2, uint *gt2, uint *st3, uint *gt3) {
int tid = threadIdx.x;
if (tid < 256) {
st0[tid] = gt0[tid];
} else if (tid < 512) {
tid -= 256;
st1[tid] = gt1[tid];
} else if (tid < 768) {
tid -= 512;
st2[tid] = gt2[tid];
} else {
tid -= 768;
st3[tid] = gt3[tid];
}
__syncthreads();
}
#elif NUM_THREADS == 512
// NUM_THREADS == 512 variant: each 256-thread half copies one word of two
// tables (threads 0-255 -> tables 0 and 2, threads 256-511 -> tables 1 and 3).
__device__ void load_smem(uchar *st0, uchar *gt0, uchar *st1, uchar *gt1, uchar *st2, uchar *gt2, uchar *st3, uchar *gt3) {
int tid = threadIdx.x;
uint *s, *g;
if (tid < 256) {
s = (uint *)st0; g = (uint *)gt0;
s[tid] = g[tid];
s = (uint *)st2; g = (uint *)gt2;
s[tid] = g[tid];
} else {
tid -= 256;
s = (uint *)st1; g = (uint *)gt1;
s[tid] = g[tid];
s = (uint *)st3; g = (uint *)gt3;
s[tid] = g[tid];
}
__syncthreads();
}
// uint-typed overload of the 512-thread copy above.
__device__ void load_smem(uint *st0, uint *gt0, uint *st1, uint *gt1, uint *st2, uint *gt2, uint *st3, uint *gt3) {
int tid = threadIdx.x;
if (tid < 256) {
st0[tid] = gt0[tid];
st2[tid] = gt2[tid];
} else {
tid -= 256;
st1[tid] = gt1[tid];
st3[tid] = gt3[tid];
}
__syncthreads();
}
#elif NUM_THREADS == 256
// NUM_THREADS == 256 variant: every thread copies one 32-bit word of each of
// the four 256-word tables.
__device__ void load_smem(uchar *st0, uchar *gt0, uchar *st1, uchar *gt1, uchar *st2, uchar *gt2, uchar *st3, uchar *gt3) {
int tid = threadIdx.x;
uint *s, *g;
s = (uint *)st0; g = (uint *)gt0;
s[tid] = g[tid];
s = (uint *)st1; g = (uint *)gt1;
s[tid] = g[tid];
s = (uint *)st2; g = (uint *)gt2;
s[tid] = g[tid];
s = (uint *)st3; g = (uint *)gt3;
s[tid] = g[tid];
__syncthreads();
}
// uint-typed overload of the 256-thread copy above.
__device__ void load_smem(uint *st0, uint *gt0, uint *st1, uint *gt1, uint *st2, uint *gt2, uint *st3, uint *gt3) {
int tid = threadIdx.x;
st0[tid] = gt0[tid];
st1[tid] = gt1[tid];
st2[tid] = gt2[tid];
st3[tid] = gt3[tid];
__syncthreads();
}
#elif NUM_THREADS == 128
#define GET_TWO( tid, ptr_a, ptr_g ) {\
*( ((uint*)(ptr_a) ) + ( 2 * (tid) ) ) = *( ( (uint*)(ptr_g) ) + ( 2 * (tid) ) );\
*( ((uint*)(ptr_a) ) + ( ( 2 * (tid) ) + 1 ) ) = *( ( (uint*)(ptr_g) ) + ( ( 2 * (tid) ) + 1 ) ); \
}
// NUM_THREADS == 128 variant: each thread copies two consecutive 32-bit words
// of each of the four 256-word tables via the GET_TWO macro.
// Fix: removed the unused 'uint *s, *g;' locals (left over from the other
// NUM_THREADS variants; GET_TWO does its own pointer casts).
__device__ void load_smem(uchar *st0, uchar *gt0, uchar *st1, uchar *gt1, uchar *st2, uchar *gt2, uchar *st3, uchar *gt3) {
    int tid = threadIdx.x;
    GET_TWO( tid, st0, gt0 );
    GET_TWO( tid, st1, gt1 );
    GET_TWO( tid, st2, gt2 );
    GET_TWO( tid, st3, gt3 );
    __syncthreads();
}
// uint-typed overload of the 128-thread copy: two words of each table per
// thread via GET_TWO.
__device__ void load_smem(uint *st0, uint *gt0, uint *st1, uint *gt1, uint *st2, uint *gt2, uint *st3, uint *gt3) {
int tid = threadIdx.x;
GET_TWO( tid, st0, gt0 );
GET_TWO( tid, st1, gt1 );
GET_TWO( tid, st2, gt2 );
GET_TWO( tid, st3, gt3 );
__syncthreads();
}
#else
#error NUM_THREADS must be 128, 256, 512 or 1024
#endif // NUM_THREADS
#endif
|
da76b433e9f74eb52ce8e48dbd088a522a800a22.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 30-May-2011 21:57:40
//
// user function
__device__
#include "update.h"
// CUDA kernel function
// CUDA kernel function (auto-generated by the OP2 translator).
// Grid-stride loop over the set elements; each thread keeps private
// accumulators for the two global reductions (arg3: sum, arg4: max) and
// folds them into the per-block slots arg3/arg4[blockIdx.x] at the end
// via the OP2 op_reduction helper.
__global__ void op_cuda_update(
float *arg0,
float *arg1,
float *arg2,
float *arg3,
float *arg4,
int offset_s,
int set_size ) {
// per-thread partial for the INC (sum) reduction, started at zero
float arg3_l[1];
for (int d=0; d<1; d++) arg3_l[d]=ZERO_float;
// per-thread partial for the MAX reduction, seeded from the block's slot
float arg4_l[1];
for (int d=0; d<1; d++) arg4_l[d]=arg4[d+blockIdx.x*1];
// process set elements
for (int n=threadIdx.x+blockIdx.x*blockDim.x;
n<set_size; n+=blockDim.x*gridDim.x) {
// user-supplied kernel call
update( arg0+n,
arg1+n,
arg2+n,
arg3_l,
arg4_l );
}
// global reductions: combine the thread partials into the block's slots
for(int d=0; d<1; d++)
op_reduction<OP_INC>(&arg3[d+blockIdx.x*1],arg3_l[d]);
for(int d=0; d<1; d++)
op_reduction<OP_MAX>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
// host stub function
// Host stub (auto-generated by the OP2 translator) for the direct loop
// "update": stages the per-block reduction buffers on the GPU, launches
// op_cuda_update, then combines the per-block results back into the host
// scalars (arg3: sum, arg4: max) and records timing/transfer statistics.
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4 ){
float *arg3h = (float *)arg3.data;
float *arg4h = (float *)arg4.data;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update \n");
}
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
// int nthread = OP_block_size;
int nthread = 128;
#endif
int nblocks = 200;
// transfer global reduction data to GPU: one slot per block per argument
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
reduct_size = MAX(reduct_size,sizeof(float));
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
reduct_size = MAX(reduct_size,sizeof(float));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
// arg3 (sum) slots are zero-initialised
arg3.data = OP_reduct_h + reduct_bytes;
arg3.data_d = OP_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
((float *)arg3.data)[d+b*1] = ZERO_float;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
// arg4 (max) slots are seeded with the current host value
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
((float *)arg4.data)[d+b*1] = arg4h[d];
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
mvReductArraysToDevice(reduct_bytes);
// work out shared memory requirements per element
int nshared = 0;
// execute plan: shared memory must at least hold the reduction staging
int offset_s = nshared*OP_WARPSIZE;
nshared = MAX(nshared*nthread,reduct_size*nthread);
hipLaunchKernelGGL(( op_cuda_update), dim3(nblocks),dim3(nthread),nshared, 0, (float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
(float *) arg3.data_d,
(float *) arg4.data_d,
offset_s,
set->size );
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_update execution failed\n");
// transfer global reduction data back to CPU and fold the per-block slots
mvReductArraysToHost(reduct_bytes);
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
arg3h[d] = arg3h[d] + ((float *)arg3.data)[d+b*1];
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
arg4h[d] = MAX(arg4h[d],((float *)arg4.data)[d+b*1]);
// update kernel record (index 1 identifies this loop in OP_kernels)
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
OP_kernels[1].time += wall_t2 - wall_t1;
OP_kernels[1].transfer += (float)set->size * arg0.size;
OP_kernels[1].transfer += (float)set->size * arg1.size * 2.0f;
OP_kernels[1].transfer += (float)set->size * arg2.size * 2.0f;
}
| da76b433e9f74eb52ce8e48dbd088a522a800a22.cu | //
// auto-generated by op2.m on 30-May-2011 21:57:40
//
// user function
__device__
#include "update.h"
// CUDA kernel function
// CUDA kernel function (auto-generated by the OP2 translator).
// Grid-stride loop over the set elements; each thread keeps private
// accumulators for the two global reductions (arg3: sum, arg4: max) and
// folds them into the per-block slots arg3/arg4[blockIdx.x] at the end
// via the OP2 op_reduction helper.
__global__ void op_cuda_update(
float *arg0,
float *arg1,
float *arg2,
float *arg3,
float *arg4,
int offset_s,
int set_size ) {
// per-thread partial for the INC (sum) reduction, started at zero
float arg3_l[1];
for (int d=0; d<1; d++) arg3_l[d]=ZERO_float;
// per-thread partial for the MAX reduction, seeded from the block's slot
float arg4_l[1];
for (int d=0; d<1; d++) arg4_l[d]=arg4[d+blockIdx.x*1];
// process set elements
for (int n=threadIdx.x+blockIdx.x*blockDim.x;
n<set_size; n+=blockDim.x*gridDim.x) {
// user-supplied kernel call
update( arg0+n,
arg1+n,
arg2+n,
arg3_l,
arg4_l );
}
// global reductions: combine the thread partials into the block's slots
for(int d=0; d<1; d++)
op_reduction<OP_INC>(&arg3[d+blockIdx.x*1],arg3_l[d]);
for(int d=0; d<1; d++)
op_reduction<OP_MAX>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
// host stub function
// Host stub (auto-generated by the OP2 translator) for the direct loop
// "update": stages the per-block reduction buffers on the GPU, launches
// op_cuda_update, then combines the per-block results back into the host
// scalars (arg3: sum, arg4: max) and records timing/transfer statistics.
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4 ){
float *arg3h = (float *)arg3.data;
float *arg4h = (float *)arg4.data;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update \n");
}
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
// int nthread = OP_block_size;
int nthread = 128;
#endif
int nblocks = 200;
// transfer global reduction data to GPU: one slot per block per argument
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
reduct_size = MAX(reduct_size,sizeof(float));
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
reduct_size = MAX(reduct_size,sizeof(float));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
// arg3 (sum) slots are zero-initialised
arg3.data = OP_reduct_h + reduct_bytes;
arg3.data_d = OP_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
((float *)arg3.data)[d+b*1] = ZERO_float;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
// arg4 (max) slots are seeded with the current host value
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
((float *)arg4.data)[d+b*1] = arg4h[d];
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
mvReductArraysToDevice(reduct_bytes);
// work out shared memory requirements per element
int nshared = 0;
// execute plan: shared memory must at least hold the reduction staging
int offset_s = nshared*OP_WARPSIZE;
nshared = MAX(nshared*nthread,reduct_size*nthread);
op_cuda_update<<<nblocks,nthread,nshared>>>( (float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
(float *) arg3.data_d,
(float *) arg4.data_d,
offset_s,
set->size );
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("op_cuda_update execution failed\n");
// transfer global reduction data back to CPU and fold the per-block slots
mvReductArraysToHost(reduct_bytes);
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
arg3h[d] = arg3h[d] + ((float *)arg3.data)[d+b*1];
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
arg4h[d] = MAX(arg4h[d],((float *)arg4.data)[d+b*1]);
// update kernel record (index 1 identifies this loop in OP_kernels)
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
OP_kernels[1].time += wall_t2 - wall_t1;
OP_kernels[1].transfer += (float)set->size * arg0.size;
OP_kernels[1].transfer += (float)set->size * arg1.size * 2.0f;
OP_kernels[1].transfer += (float)set->size * arg2.size * 2.0f;
}
|
1433767d158c49068bbb3505b9e8fb0bc573afd6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bias_op.hpp"
namespace Shadow {
namespace Vision {
#if defined(USE_ROCM)
// Elementwise bias add: out[i] = in[i] + bias[(i / inner_dim) % bias_dim].
// The index maps a flat NCHW-style layout onto the bias channel; grid-stride
// iteration over 'count' elements via CUDA_KERNEL_LOOP.
template <typename T>
__global__ void KernelBias(const T *in_data, int count, const T *bias_data,
int bias_dim, int inner_dim, T *out_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int index = (globalid / inner_dim) % bias_dim;
out_data[globalid] = in_data[globalid] + bias_data[index];
}
}
// Host launcher for KernelBias: grid sized by GetBlocks(count), fixed block
// size NumThreads, default stream; launch errors are surfaced via
// hipPeekAtLastError. All pointers are device pointers.
template <typename T>
void Bias(const T *in_data, int count, const T *bias_data, int bias_dim,
int inner_dim, T *out_data) {
hipLaunchKernelGGL(( KernelBias<T>), dim3(GetBlocks(count)), dim3(NumThreads), 0, 0,
in_data, count, bias_data, bias_dim, inner_dim, out_data);
CUDA_CHECK(hipPeekAtLastError());
}
template void Bias(const float *in_data, int count, const float *bias_data,
int bias_dim, int inner_dim, float *out_data);
#endif
} // namespace Vision
} // namespace Shadow | 1433767d158c49068bbb3505b9e8fb0bc573afd6.cu | #include "bias_op.hpp"
namespace Shadow {
namespace Vision {
#if defined(USE_CUDA)
// Element-wise bias addition: out[i] = in[i] + bias[(i / inner_dim) % bias_dim].
// The flat index arithmetic assumes the tensor is laid out as
// (outer, bias_dim, inner_dim) flattened; `count` is the total element count.
template <typename T>
__global__ void KernelBias(const T *in_data, int count, const T *bias_data,
                           int bias_dim, int inner_dim, T *out_data) {
  CUDA_KERNEL_LOOP(globalid, count) {
    // bias channel of this flat element index
    int index = (globalid / inner_dim) % bias_dim;
    out_data[globalid] = in_data[globalid] + bias_data[index];
  }
}
// Host-side launcher: applies KernelBias over `count` elements on the
// default CUDA stream.  See KernelBias for the index layout.
template <typename T>
void Bias(const T *in_data, int count, const T *bias_data, int bias_dim,
          int inner_dim, T *out_data) {
  const int num_blocks = GetBlocks(count);
  KernelBias<T><<<num_blocks, NumThreads>>>(in_data, count, bias_data,
                                            bias_dim, inner_dim, out_data);
  // Report launch errors without clearing the sticky error state.
  CUDA_CHECK(cudaPeekAtLastError());
}
template void Bias(const float *in_data, int count, const float *bias_data,
int bias_dim, int inner_dim, float *out_data);
#endif
} // namespace Vision
} // namespace Shadow |
375474ac9cf0cb10f697154cc414482a6e3760a4.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file : main.cpp
* @brief : main driver file for 2-dim. Ising in CUDA C++11/14,
* @details :
*
* @author : Ernest Yeung <ernestyalumni@gmail.com>
* @date : 20180103
* @ref : M. Hjorth-Jensen, Computational Physics, University of Oslo (2015)
* https://github.com/CompPhysics/ComputationalPhysics/blob/master/doc/Programs/LecturePrograms/programs/StatPhys/cpp/ising_2dim.cpp
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc main.cu ./grid2d/grid2d.cu ./grid2d/sysparam.cu ./dynam/XORMRGgens.cu ./dynam/metropolis.cu ./common/gridsetup.cu -o main
*
* */
#include "./grid2d/grid2d.h" // Spins2d (struct)
#include "./grid2d/sysparam.h" // Sysparam, Avg, TransProb, Sysparam_ptr, Avg_ptr, TransProb_ptr, constTransProb
#include "./dynam/metropolis.h" // initialize_allup, metropolis
#include "./common/gridsetup.h" // get_maxGridSize()
#include "./FileIO/output.h" // process_avgs
#include <iostream>
#include <chrono>
/**
 * Driver: sweeps the temperature from initial_temp to final_temp, runs the
 * Metropolis algorithm for a 2-dim. Ising grid of Lx x Ly spins on the GPU
 * at each temperature, and appends the averaged observables to a binary
 * output file.  Command-line arguments are currently unused.
 */
int main(int argc, char* argv[])
{
  // number of spins, related to 2-dim. grid size Lx x Ly
  std::array<size_t, 2> L_is { 1<<10, 1<<10 }; // 1<<10 = 1024
  Spins2d spins = {L_is};
  std::cout << " L : " << spins.L_is[0]*spins.L_is[1] << std::endl;
  // number of trials or number of times to run the Metropolis algorithm
  constexpr const unsigned int trials = 50000;
  // file name (relative path; ./data/ must already exist)
  std::string filename = "./data/IsingMetroGPU.bin";
  /* ***** (thread) grid,block dims ***** */
  /* min of N_x, number of (thread) blocks on grid in x-direction, and MAX_BLOCKS allowed is
   * determined here */
  size_t MAXGRIDSIZE = get_maxGridSize();
  auto MAXGRIDSIZES = get_maxGridSizes();
  std::cout << " MAXGRIDSIZES : " << MAXGRIDSIZES[0] << std::endl;
  // (thread) block dims., remember max. no. threads per block is 1024, as of compute capability 5.2
  dim3 M_is { 1<<5, 1<<5 };
  // (thread) grid, block dims for curandstates and other 1-dim. arrays
  unsigned int M_x = 1<<8; // 2^8 = 256
  unsigned long MAX_BLOCKS = (MAXGRIDSIZE + M_x - 1)/ M_x;
  // ceil-div of total spin count by block size, clamped to MAX_BLOCKS
  unsigned int N_x = min( MAX_BLOCKS, (( spins.L_is[0]*spins.L_is[1] + M_x - 1)/ M_x));
  /* ***** END of (thread) grid,block dims ***** */
  // physical parameters on the host CPU, used for File I/O
  Sysparam h_sysparams_out;
  Avg h_avgs_out;
  constexpr const float initial_temp = 1.f; // typically 1.
  constexpr const float final_temp = 3.f; // typically 3.
  constexpr const float tempstep = 0.005f; // typically 0.05
  /* Sysparam_ptr sysparams_ptr = { initial_temp } ;
  TransProb_ptr transprob_ptr = { initial_temp , 1.f } ;
  Avg_ptr avgs_ptr; */
  /* ERROR warning: if you move this line for devStatesXOR "earlier", bus error obtained.
   *
   * */
  // since hiprand_init calls are slow, do it once for the grid from the host main code
  // devStatesXOR devstatesXOR = { spins.L_is[0]*spins.L_is[1], N_x,M_x };
  // following 1 line is for timing code purposes only
  auto start = std::chrono::steady_clock::now();
  // NOTE(review): accumulating a float loop counter (temp += tempstep)
  // drifts slightly; the number of iterations may be off by one -- confirm.
  for (float temp=initial_temp; temp<=final_temp; temp+=tempstep) {
    // following 1 line is for timing code purposes only
    auto start1 = std::chrono::steady_clock::now();
    Sysparam_ptr sysparams_ptr = { temp } ;
    TransProb_ptr transprob_ptr = { temp , 1.f } ;
    Avg_ptr avgs_ptr;
    /* ERROR warning: if you move this line for devStatesXOR "earlier", bus error obtained.
     *
     * */
    // since hiprand_init calls are slow, do it once for the grid from the host main code
    devStatesXOR devstatesXOR = { spins.L_is[0]*spins.L_is[1], N_x,M_x };
    initialize_allup(spins,sysparams_ptr, MAXGRIDSIZES, M_is);
    metropolis(spins,sysparams_ptr,avgs_ptr,transprob_ptr,MAXGRIDSIZES,M_is,devstatesXOR,trials);
    // following 3 line is for timing code purposes only
    auto end1 = std::chrono::steady_clock::now();
    auto diff1 = end1-start1;
    std::cout << std::chrono::duration<double,std::milli>(diff1).count() << " ms" << std::endl;
    // copy the accumulated system parameters and averages back to the host
    hipMemcpy(&h_sysparams_out,sysparams_ptr.d_sysparams.get(),1*sizeof(Sysparam),hipMemcpyDeviceToHost);
    hipMemcpy(&h_avgs_out,avgs_ptr.d_avgs.get(),1*sizeof(Avg),hipMemcpyDeviceToHost);
    // NOTE(review): mixes spins.L_is[0] with the local L_is[1]; same value
    // here, but spins.L_is[0]*spins.L_is[1] would be more consistent.
    process_avgs(trials, spins.L_is[0]*L_is[1], filename, h_sysparams_out, h_avgs_out) ;
    // following 3 line is for timing code purposes only
    auto end2 = std::chrono::steady_clock::now();
    auto diff2 = end2-start1;
    std::cout << std::chrono::duration<double,std::milli>(diff2).count() << " ms" << std::endl;
  }
  // following 3 line is for timing code purposes only
  auto end = std::chrono::steady_clock::now();
  auto diff = end-start;
  std::cout << std::chrono::duration<double,std::milli>(diff).count() << " ms" << std::endl;
}
/* sanity check */
/* Sysparam h_sysparams_out ;
hipMemcpy(&h_sysparams_out, sysparams_ptr.d_sysparams.get(), 1*sizeof(Sysparam), hipMemcpyDeviceToHost); // possible error have to be of same type
std::cout << " h_sysparams_out : " << h_sysparams_out.E << " " << h_sysparams_out.M << " "
<< h_sysparams_out.T << std::endl;
*/
| 375474ac9cf0cb10f697154cc414482a6e3760a4.cu | /**
* @file : main.cpp
* @brief : main driver file for 2-dim. Ising in CUDA C++11/14,
* @details :
*
* @author : Ernest Yeung <ernestyalumni@gmail.com>
* @date : 20180103
* @ref : M. Hjorth-Jensen, Computational Physics, University of Oslo (2015)
* https://github.com/CompPhysics/ComputationalPhysics/blob/master/doc/Programs/LecturePrograms/programs/StatPhys/cpp/ising_2dim.cpp
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc main.cu ./grid2d/grid2d.cu ./grid2d/sysparam.cu ./dynam/XORMRGgens.cu ./dynam/metropolis.cu ./common/gridsetup.cu -o main
*
* */
#include "./grid2d/grid2d.h" // Spins2d (struct)
#include "./grid2d/sysparam.h" // Sysparam, Avg, TransProb, Sysparam_ptr, Avg_ptr, TransProb_ptr, constTransProb
#include "./dynam/metropolis.h" // initialize_allup, metropolis
#include "./common/gridsetup.h" // get_maxGridSize()
#include "./FileIO/output.h" // process_avgs
#include <iostream>
#include <chrono>
/**
 * Driver: sweeps the temperature from initial_temp to final_temp, runs the
 * Metropolis algorithm for a 2-dim. Ising grid of Lx x Ly spins on the GPU
 * at each temperature, and appends the averaged observables to a binary
 * output file.  Command-line arguments are currently unused.
 */
int main(int argc, char* argv[])
{
  // number of spins, related to 2-dim. grid size Lx x Ly
  std::array<size_t, 2> L_is { 1<<10, 1<<10 }; // 1<<10 = 1024
  Spins2d spins = {L_is};
  std::cout << " L : " << spins.L_is[0]*spins.L_is[1] << std::endl;
  // number of trials or number of times to run the Metropolis algorithm
  constexpr const unsigned int trials = 50000;
  // file name (relative path; ./data/ must already exist)
  std::string filename = "./data/IsingMetroGPU.bin";
  /* ***** (thread) grid,block dims ***** */
  /* min of N_x, number of (thread) blocks on grid in x-direction, and MAX_BLOCKS allowed is
   * determined here */
  size_t MAXGRIDSIZE = get_maxGridSize();
  auto MAXGRIDSIZES = get_maxGridSizes();
  std::cout << " MAXGRIDSIZES : " << MAXGRIDSIZES[0] << std::endl;
  // (thread) block dims., remember max. no. threads per block is 1024, as of compute capability 5.2
  dim3 M_is { 1<<5, 1<<5 };
  // (thread) grid, block dims for curandstates and other 1-dim. arrays
  unsigned int M_x = 1<<8; // 2^8 = 256
  unsigned long MAX_BLOCKS = (MAXGRIDSIZE + M_x - 1)/ M_x;
  // ceil-div of total spin count by block size, clamped to MAX_BLOCKS
  unsigned int N_x = min( MAX_BLOCKS, (( spins.L_is[0]*spins.L_is[1] + M_x - 1)/ M_x));
  /* ***** END of (thread) grid,block dims ***** */
  // physical parameters on the host CPU, used for File I/O
  Sysparam h_sysparams_out;
  Avg h_avgs_out;
  constexpr const float initial_temp = 1.f; // typically 1.
  constexpr const float final_temp = 3.f; // typically 3.
  constexpr const float tempstep = 0.005f; // typically 0.05
  /* Sysparam_ptr sysparams_ptr = { initial_temp } ;
  TransProb_ptr transprob_ptr = { initial_temp , 1.f } ;
  Avg_ptr avgs_ptr; */
  /* ERROR warning: if you move this line for devStatesXOR "earlier", bus error obtained.
   *
   * */
  // since curand_init calls are slow, do it once for the grid from the host main code
  // devStatesXOR devstatesXOR = { spins.L_is[0]*spins.L_is[1], N_x,M_x };
  // following 1 line is for timing code purposes only
  auto start = std::chrono::steady_clock::now();
  // NOTE(review): accumulating a float loop counter (temp += tempstep)
  // drifts slightly; the number of iterations may be off by one -- confirm.
  for (float temp=initial_temp; temp<=final_temp; temp+=tempstep) {
    // following 1 line is for timing code purposes only
    auto start1 = std::chrono::steady_clock::now();
    Sysparam_ptr sysparams_ptr = { temp } ;
    TransProb_ptr transprob_ptr = { temp , 1.f } ;
    Avg_ptr avgs_ptr;
    /* ERROR warning: if you move this line for devStatesXOR "earlier", bus error obtained.
     *
     * */
    // since curand_init calls are slow, do it once for the grid from the host main code
    devStatesXOR devstatesXOR = { spins.L_is[0]*spins.L_is[1], N_x,M_x };
    initialize_allup(spins,sysparams_ptr, MAXGRIDSIZES, M_is);
    metropolis(spins,sysparams_ptr,avgs_ptr,transprob_ptr,MAXGRIDSIZES,M_is,devstatesXOR,trials);
    // following 3 line is for timing code purposes only
    auto end1 = std::chrono::steady_clock::now();
    auto diff1 = end1-start1;
    std::cout << std::chrono::duration<double,std::milli>(diff1).count() << " ms" << std::endl;
    // copy the accumulated system parameters and averages back to the host
    cudaMemcpy(&h_sysparams_out,sysparams_ptr.d_sysparams.get(),1*sizeof(Sysparam),cudaMemcpyDeviceToHost);
    cudaMemcpy(&h_avgs_out,avgs_ptr.d_avgs.get(),1*sizeof(Avg),cudaMemcpyDeviceToHost);
    // NOTE(review): mixes spins.L_is[0] with the local L_is[1]; same value
    // here, but spins.L_is[0]*spins.L_is[1] would be more consistent.
    process_avgs(trials, spins.L_is[0]*L_is[1], filename, h_sysparams_out, h_avgs_out) ;
    // following 3 line is for timing code purposes only
    auto end2 = std::chrono::steady_clock::now();
    auto diff2 = end2-start1;
    std::cout << std::chrono::duration<double,std::milli>(diff2).count() << " ms" << std::endl;
  }
  // following 3 line is for timing code purposes only
  auto end = std::chrono::steady_clock::now();
  auto diff = end-start;
  std::cout << std::chrono::duration<double,std::milli>(diff).count() << " ms" << std::endl;
}
/* sanity check */
/* Sysparam h_sysparams_out ;
cudaMemcpy(&h_sysparams_out, sysparams_ptr.d_sysparams.get(), 1*sizeof(Sysparam), cudaMemcpyDeviceToHost); // possible error have to be of same type
std::cout << " h_sysparams_out : " << h_sysparams_out.E << " " << h_sysparams_out.M << " "
<< h_sysparams_out.T << std::endl;
*/
|
bc56826eca03b1488af0c84bca65a4d66466f9ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/indexing_multi_axis_vec/kern_gen_offset_base.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./kern.cuh"
#include "megdnn/internal/defs.h"
#include "src/cuda/query_blocksize.cuh"
using namespace megdnn;
using namespace cuda;
using namespace indexing_multi_axis_vec;
namespace {
// Computes, for every output element, the base offset into the data tensor
// implied by the nidx advanced-indexing index tensors.  One thread per
// output element: each indexed axis's index value is fetched, negative
// indices are wrapped Python-style, out-of-range indices record an async
// error and are clamped to 0, and the per-axis contributions are summed
// with the data strides.
template<int nidx>
__global__ void kgen_offset_base(GenOffsetBaseParam<nidx> param) {
    int oidx = threadIdx.x + blockDim.x * blockIdx.x;
    if (oidx < param.size) {
        int offset = 0;
#pragma unroll
        for (int i = 0; i < nidx; ++ i) {
            int data_idx = param.indexer[i].ptr[
                param.indexer[i].stride * oidx];
            // wrap negative indices (Python-style indexing)
            data_idx += (data_idx < 0 ? param.data_shape[i] : 0);
            if (static_cast<uint32_t>(data_idx) >= param.data_shape[i]) {
                // cast to uint32 to handle both negative and overflow
                set_async_error_info(param.error_info, param.error_tracker,
                        "invalid advanced indexing: "
                        "indexer=%d idx=%d shape=%d",
                        i, data_idx, param.data_shape[i]);
                data_idx = 0;
            }
            offset += data_idx * param.data_stride[i];
        }
        param.output[oidx] = offset;
    }
}
}
/*!
 * \brief host-side launcher for kgen_offset_base: fills param.output with
 *      the advanced-indexing base offset of each output element.
 * \param param kernel parameters (index tensors, data shape/strides, output)
 * \param stream HIP stream on which to launch
 */
template<int nidx>
void indexing_multi_axis_vec::gen_offset_base(
        const GenOffsetBaseParam<nidx> &param, hipStream_t stream) {
    void (*kptr)(GenOffsetBaseParam<nidx>) = kgen_offset_base<nidx>;
    // block size chosen for best occupancy of this particular kernel
    int bsize = query_blocksize_for_kernel(kptr);
    // Fix of a hipify artifact: the launch had been mangled into
    //   (hipLaunchKernelGGL((*kptr)) , dim3(...), dim3(bsize), 0, stream, param);
    // which hands only the kernel to the macro and discards the grid, block,
    // stream and argument list as comma-operator operands.  Use the macro
    // with its full argument list instead.  (Also restored `&param`, which
    // had been garbled to a pilcrow character.)
    hipLaunchKernelGGL((*kptr), dim3(DIVUP(param.size, bsize)), dim3(bsize),
                       0, stream, param);
}
namespace megdnn {
namespace cuda {
namespace indexing_multi_axis_vec {
#define INST(_n) \
template void gen_offset_base( \
const GenOffsetBaseParam<_n> &, hipStream_t);
MEGDNN_FOREACH_TENSOR_NDIM(INST)
#undef INST
} // namespace indexing_multi_axis_vec
} // namespace cuda
} // namespace megdnn
// vim: ft=cuda syntax=cpp.doxygen
| bc56826eca03b1488af0c84bca65a4d66466f9ef.cu | /**
* \file dnn/src/cuda/indexing_multi_axis_vec/kern_gen_offset_base.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./kern.cuh"
#include "megdnn/internal/defs.h"
#include "src/cuda/query_blocksize.cuh"
using namespace megdnn;
using namespace cuda;
using namespace indexing_multi_axis_vec;
namespace {
// Computes, for every output element, the base offset into the data tensor
// implied by the nidx advanced-indexing index tensors.  One thread per
// output element: each indexed axis's index value is fetched, negative
// indices are wrapped Python-style, out-of-range indices record an async
// error and are clamped to 0, and the per-axis contributions are summed
// with the data strides.
template<int nidx>
__global__ void kgen_offset_base(GenOffsetBaseParam<nidx> param) {
    int oidx = threadIdx.x + blockDim.x * blockIdx.x;
    if (oidx < param.size) {
        int offset = 0;
#pragma unroll
        for (int i = 0; i < nidx; ++ i) {
            int data_idx = param.indexer[i].ptr[
                param.indexer[i].stride * oidx];
            // wrap negative indices (Python-style indexing)
            data_idx += (data_idx < 0 ? param.data_shape[i] : 0);
            if (static_cast<uint32_t>(data_idx) >= param.data_shape[i]) {
                // cast to uint32 to handle both negative and overflow
                set_async_error_info(param.error_info, param.error_tracker,
                        "invalid advanced indexing: "
                        "indexer=%d idx=%d shape=%d",
                        i, data_idx, param.data_shape[i]);
                data_idx = 0;
            }
            offset += data_idx * param.data_stride[i];
        }
        param.output[oidx] = offset;
    }
}
}
/*!
 * \brief host-side launcher for kgen_offset_base: fills param.output with
 *      the advanced-indexing base offset of each output element.
 * \param param kernel parameters (index tensors, data shape/strides, output)
 * \param stream CUDA stream on which to launch
 */
template<int nidx>
void indexing_multi_axis_vec::gen_offset_base(
        const GenOffsetBaseParam<nidx> &param, cudaStream_t stream) {
    void (*kptr)(GenOffsetBaseParam<nidx>) = kgen_offset_base<nidx>;
    // block size chosen for best occupancy of this particular kernel
    int bsize = query_blocksize_for_kernel(kptr);
    // (restored `&param` in the signature above, which had been garbled to a
    // pilcrow character by a text-encoding mishap and would not compile)
    (*kptr) <<<DIVUP(param.size, bsize), bsize, 0, stream>>> (param);
}
namespace megdnn {
namespace cuda {
namespace indexing_multi_axis_vec {
#define INST(_n) \
template void gen_offset_base( \
const GenOffsetBaseParam<_n> &, cudaStream_t);
MEGDNN_FOREACH_TENSOR_NDIM(INST)
#undef INST
} // namespace indexing_multi_axis_vec
} // namespace cuda
} // namespace megdnn
// vim: ft=cuda syntax=cpp.doxygen
|
82687ffde5a515980efe926c575bbc01989d999e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/utility.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace gfft
{
texture<float, hipTextureType2D, hipReadModeElementType> eigTex(0, hipFilterModePoint, hipAddressModeClamp);
__device__ int g_counter = 0;
        // Marks pixels of the eigenvalue map (bound to eigTex) that exceed
        // `threshold`, pass the mask, and are the maximum of their 3x3
        // neighborhood.  Each winning thread appends its (x, y) position to
        // `corners` via an atomic increment of the device-global g_counter;
        // positions past `max_count` are dropped (the counter still grows).
        // Launch: one thread per pixel (2-dim. grid); border pixels skipped.
        template <class Mask> __global__ void findCorners(float threshold, const Mask mask, float2* corners, int max_count, int rows, int cols)
        {
            const int j = blockIdx.x * blockDim.x + threadIdx.x;
            const int i = blockIdx.y * blockDim.y + threadIdx.y;
            if (i > 0 && i < rows - 1 && j > 0 && j < cols - 1 && mask(i, j))
            {
                float val = tex2D(eigTex, j, i);
                if (val > threshold)
                {
                    // 3x3 non-maximum suppression around (j, i)
                    float maxVal = val;
                    maxVal = ::fmax(tex2D(eigTex, j - 1, i - 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j    , i - 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j + 1, i - 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j - 1, i), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j + 1, i), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j - 1, i + 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j    , i + 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j + 1, i + 1), maxVal);
                    if (val == maxVal)
                    {
                        const int ind = ::atomicAdd(&g_counter, 1);
                        if (ind < max_count)
                            corners[ind] = make_float2(j, i);
                    }
                }
            }
        }
        // Detects corners above `threshold` that are 3x3 local maxima of the
        // eigenvalue map `eig`, writing up to `max_count` (x, y) positions
        // into the device array `corners`.  `mask` may be empty (mask.data
        // null) to consider all pixels.  Returns the number of corners
        // actually stored (clamped to max_count).
        int findCorners_gpu(PtrStepSzf eig, float threshold, PtrStepSzb mask, float2* corners, int max_count)
        {
            void* counter_ptr;
            cudaSafeCall( hipGetSymbolAddress(&counter_ptr, g_counter) );
            // reset the device-global corner counter before the launch
            cudaSafeCall( hipMemset(counter_ptr, 0, sizeof(int)) );
            bindTexture(&eigTex, eig);
            dim3 block(16, 16);
            dim3 grid(divUp(eig.cols, block.x), divUp(eig.rows, block.y));
            if (mask.data)
                hipLaunchKernelGGL(( findCorners), dim3(grid), dim3(block), 0, 0, threshold, SingleMask(mask), corners, max_count, eig.rows, eig.cols);
            else
                hipLaunchKernelGGL(( findCorners), dim3(grid), dim3(block), 0, 0, threshold, WithOutMask(), corners, max_count, eig.rows, eig.cols);
            cudaSafeCall( hipGetLastError() );
            cudaSafeCall( hipDeviceSynchronize() );
            int count;
            cudaSafeCall( hipMemcpy(&count, counter_ptr, sizeof(int), hipMemcpyDeviceToHost) );
            // g_counter can exceed max_count when the buffer filled up; clamp
            return ::min(count, max_count);
        }
        // Comparator for thrust::sort: orders corner coordinates by the
        // eigenvalue at their position (looked up through the bound eigTex
        // texture), strongest first.
        class EigGreater
        {
        public:
            __device__ __forceinline__ bool operator()(float2 a, float2 b) const
            {
                return tex2D(eigTex, a.x, a.y) > tex2D(eigTex, b.x, b.y);
            }
        };
        // Sorts `corners` (device array of `count` positions) in place by
        // decreasing corner strength, reading the strengths from `eig`
        // through the eigTex texture.
        void sortCorners_gpu(PtrStepSzf eig, float2* corners, int count)
        {
            bindTexture(&eigTex, eig);
            thrust::device_ptr<float2> ptr(corners);
            thrust::sort(ptr, ptr + count, EigGreater());
        }
} // namespace optical_flow
}}}
#endif /* CUDA_DISABLER */
| 82687ffde5a515980efe926c575bbc01989d999e.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/utility.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace gfft
{
texture<float, cudaTextureType2D, cudaReadModeElementType> eigTex(0, cudaFilterModePoint, cudaAddressModeClamp);
__device__ int g_counter = 0;
        // Marks pixels of the eigenvalue map (bound to eigTex) that exceed
        // `threshold`, pass the mask, and are the maximum of their 3x3
        // neighborhood.  Each winning thread appends its (x, y) position to
        // `corners` via an atomic increment of the device-global g_counter;
        // positions past `max_count` are dropped (the counter still grows).
        // Launch: one thread per pixel (2-dim. grid); border pixels skipped.
        template <class Mask> __global__ void findCorners(float threshold, const Mask mask, float2* corners, int max_count, int rows, int cols)
        {
            const int j = blockIdx.x * blockDim.x + threadIdx.x;
            const int i = blockIdx.y * blockDim.y + threadIdx.y;
            if (i > 0 && i < rows - 1 && j > 0 && j < cols - 1 && mask(i, j))
            {
                float val = tex2D(eigTex, j, i);
                if (val > threshold)
                {
                    // 3x3 non-maximum suppression around (j, i)
                    float maxVal = val;
                    maxVal = ::fmax(tex2D(eigTex, j - 1, i - 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j    , i - 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j + 1, i - 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j - 1, i), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j + 1, i), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j - 1, i + 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j    , i + 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j + 1, i + 1), maxVal);
                    if (val == maxVal)
                    {
                        const int ind = ::atomicAdd(&g_counter, 1);
                        if (ind < max_count)
                            corners[ind] = make_float2(j, i);
                    }
                }
            }
        }
        // Detects corners above `threshold` that are 3x3 local maxima of the
        // eigenvalue map `eig`, writing up to `max_count` (x, y) positions
        // into the device array `corners`.  `mask` may be empty (mask.data
        // null) to consider all pixels.  Returns the number of corners
        // actually stored (clamped to max_count).
        int findCorners_gpu(PtrStepSzf eig, float threshold, PtrStepSzb mask, float2* corners, int max_count)
        {
            void* counter_ptr;
            cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, g_counter) );
            // reset the device-global corner counter before the launch
            cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(int)) );
            bindTexture(&eigTex, eig);
            dim3 block(16, 16);
            dim3 grid(divUp(eig.cols, block.x), divUp(eig.rows, block.y));
            if (mask.data)
                findCorners<<<grid, block>>>(threshold, SingleMask(mask), corners, max_count, eig.rows, eig.cols);
            else
                findCorners<<<grid, block>>>(threshold, WithOutMask(), corners, max_count, eig.rows, eig.cols);
            cudaSafeCall( cudaGetLastError() );
            cudaSafeCall( cudaDeviceSynchronize() );
            int count;
            cudaSafeCall( cudaMemcpy(&count, counter_ptr, sizeof(int), cudaMemcpyDeviceToHost) );
            // g_counter can exceed max_count when the buffer filled up; clamp
            return std::min(count, max_count);
        }
        // Comparator for thrust::sort: orders corner coordinates by the
        // eigenvalue at their position (looked up through the bound eigTex
        // texture), strongest first.
        class EigGreater
        {
        public:
            __device__ __forceinline__ bool operator()(float2 a, float2 b) const
            {
                return tex2D(eigTex, a.x, a.y) > tex2D(eigTex, b.x, b.y);
            }
        };
{
bindTexture(&eigTex, eig);
thrust::device_ptr<float2> ptr(corners);
thrust::sort(ptr, ptr + count, EigGreater());
}
} // namespace optical_flow
}}}
#endif /* CUDA_DISABLER */
|
fb6698d2c0d8e08c1c52afeb95c479dbc82a7eae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include <torch/extension.h>
#include <torch/serialize/tensor.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <vector>
#include "common.h"
#include "device_tensor.h"
namespace {
// Pairwise functor used with reduce(): for one element it yields
// (gradOutput, gradOutput * (output - beta)), so a single block reduction
// produces both the gradOutput sum (v1) and the dot product with the
// centered output tensor (v2).  `beta` is whatever center the caller passes
// (beta[c] or mean[c] in the kernels below).
template <typename DType, typename Acctype, typename DeviceTensor3>
struct GradOp {
  __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g)
    : beta(m), output(i), gradOutput(g) {}
  // (batch, plane, n) addresses one element of the 3-dim. tensors.
  __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
    DType g = gradOutput[batch][plane][n];
    DType c = ScalarConvert<Acctype, DType>::to(output[batch][plane][n] - beta);
    return Float2<DType, Acctype>(g, g * c);
  }
  const Acctype beta;          // centering value subtracted before the dot product
  const DeviceTensor3 output;
  const DeviceTensor3 gradOutput;
};
// Pairwise functor used with reduce(): for one element it yields
// (x, x * x), so a single block reduction produces both the sum and the
// sum of squares of the input along (batch, spatial).
template <typename DType, typename Acctype>
struct SumOp {
  __device__ SumOp(DeviceTensor<DType, 3> i) : input(i){}
  __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
    DType g = input[batch][plane][n];
    return Float2<DType, Acctype>(g, g * g);
  }
  // NOTE(review): `mean` is never read or written in this file -- appears
  // to be dead state; left in place to avoid changing the struct layout.
  DType mean;
  DeviceTensor<DType, 3> input;
};
// Sum across (batch, x/y/z) applying Op() pointwise
// Sum across (batch, x/y/z) applying Op() pointwise, for a single plane
// (channel).  Strategy: each thread accumulates a strided partial sum, the
// partials are reduced within each warp (warpSum), warp leaders stage their
// results into a 32-entry shared buffer, and warp 0 reduces the staged
// values; the final sum is broadcast to every thread via shared[0].
// NOTE(review): assumes blockDim.x <= WARP_SIZE * 32 so the 32-entry shared
// buffer suffices -- confirm against launch configurations.
template<typename T, typename Op, typename DeviceTensor3>
__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) {
  T sum = (T)0;
  for (int batch = 0; batch < tensor.getSize(0); ++batch) {
    for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) {
      sum += op(batch, plane, x);
    }
  }
  // sum over NumThreads within a warp
  sum = warpSum(sum);
  // 'transpose', and reduce within warp again
  __shared__ T shared[32];
  __syncthreads();
  if (threadIdx.x % WARP_SIZE == 0) {
    shared[threadIdx.x / WARP_SIZE] = sum;
  }
  if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
    // zero out the other entries in shared
    shared[threadIdx.x] = (T)0;
  }
  __syncthreads();
  if (threadIdx.x / WARP_SIZE == 0) {
    sum = warpSum(shared[threadIdx.x]);
    if (threadIdx.x == 0) {
      shared[0] = sum;
    }
  }
  __syncthreads();
  // Everyone picks it up, should be broadcast into the whole gradInput
  return shared[0];
}
// Batch-norm forward: output = gamma * (input - mean) / std + beta, applied
// per channel.  One block per channel (blockIdx.x == channel); threads
// stride the spatial dimension while looping over batches serially.
template <typename DType>
__global__ void BatchNorm_Forward_kernel (
    DeviceTensor<DType, 3> output,
    DeviceTensor<DType, 3> input,
    DeviceTensor<DType, 1> mean,
    DeviceTensor<DType, 1> std,
    DeviceTensor<DType, 1> gamma,
    DeviceTensor<DType, 1> beta) {
  const int c = blockIdx.x;
  // Per-channel parameters are loop-invariant; fetch them once.
  const DType mu = mean[c];
  const DType sigma = std[c];
  const DType scale = gamma[c];
  const DType shift = beta[c];
  const int nbatch = input.getSize(0);
  const int nspatial = input.getSize(2);
  for (int b = 0; b < nbatch; ++b) {
    for (int x = threadIdx.x; x < nspatial; x += blockDim.x) {
      output[b][c][x] = scale * (input[b][c][x] - mu) / sigma + shift;
    }
  }
}
// In-place variant of BatchNorm_Forward_kernel: overwrites `input` with
// gamma * (input - mean) / std + beta, per channel.  One block per channel;
// threads stride the spatial dimension, looping over batches serially.
template <typename DType>
__global__ void BatchNorm_Forward_Inp_kernel (
    DeviceTensor<DType, 3> input,
    DeviceTensor<DType, 1> mean,
    DeviceTensor<DType, 1> std,
    DeviceTensor<DType, 1> gamma,
    DeviceTensor<DType, 1> beta) {
  int c = blockIdx.x;
  /* main operation */
  for (int b = 0; b < input.getSize(0); ++b) {
    for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
      // read before overwrite: each thread touches only its own element
      DType inp = input[b][c][x];
      input[b][c][x] = gamma[c] * (inp - mean[c]) /
        std[c] + beta[c];
    }
  }
}
// Batch-norm backward for the in-place forward: the normalized `output` is
// available instead of the saved input.  Per channel (one block each) it
// reduces gradOutput to its sum and to dotP = sum(gradOutput*(output-beta)),
// then emits gradinput, the gradients w.r.t. gamma/beta, and the gradients
// w.r.t. the expectation statistics ex/exs (gradEx/gradExs).  Empty
// gradinput/gradgamma/gradbeta tensors are skipped.
template <typename DType>
__global__ void BatchNorm_Backward_Inp_kernel (
    DeviceTensor<DType, 3> gradoutput,
    DeviceTensor<DType, 3> output,
    DeviceTensor<DType, 3> gradinput,
    DeviceTensor<DType, 1> gradgamma,
    DeviceTensor<DType, 1> gradbeta,
    DeviceTensor<DType, 1> mean,
    DeviceTensor<DType, 1> std,
    DeviceTensor<DType, 1> gamma,
    DeviceTensor<DType, 1> beta,
    DeviceTensor<DType, 1> gradEx,
    DeviceTensor<DType, 1> gradExs) {
  /* declarations of the variables */
  /* Get the index and channels */
  int c = blockIdx.x;
  /* main operation */
  // block-wide reduction: v1 = sum(gradOutput), v2 = sum(gradOutput*(out-beta))
  GradOp<DType, DType, DeviceTensor<DType, 3>> g(beta[c], output, gradoutput);
  Float2<DType, DType> res = reduce<Float2<DType, DType>,
    GradOp<DType, DType, DeviceTensor<DType, 3>>,
    DeviceTensor<DType, 3>>(g, gradoutput, c);
  DType gradOutputSum = res.v1;
  DType dotP = res.v2;
  DType invstd = DType(1.0) / std[c];
  DType gradScale = invstd * gamma[c];
  if (threadIdx.x == 0) {
    // gradients w.r.t. the channel statistics ex and exs
    gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP;
    gradExs[c] = - 0.5 * invstd * invstd * dotP;
  }
  if (gradinput.numElements() > 0) {
    for (int batch = 0; batch < gradoutput.getSize(0); ++batch) {
      for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) {
        gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale;
      }
    }
  }
  if (gradgamma.numElements() > 0) {
    if (threadIdx.x == 0) {
      // dotP was computed from the normalized output, so it carries a factor
      // of gamma; divide it back out to obtain d(loss)/d(gamma)
      gradgamma[c] += dotP / gamma[c];
    }
  }
  if (gradbeta.numElements() > 0) {
    if (threadIdx.x == 0) {
      gradbeta[c] += gradOutputSum;
    }
  }
}
// Batch-norm backward using the saved `input`.  Per channel (one block
// each) it reduces gradOutput to its sum and to
// dotP = sum(gradOutput * (input - mean)), then emits gradinput, the
// gradients w.r.t. gamma/beta, and the gradients w.r.t. the expectation
// statistics ex/exs.  Empty gradinput/gradgamma/gradbeta are skipped.
// Note: unlike the Inp variant, dotP here is taken against the raw input,
// so the gradEx/gradExs terms carry an extra gradScale factor and
// gradgamma uses dotP * invstd.
template <typename DType>
__global__ void BatchNorm_Backward_kernel (
    DeviceTensor<DType, 3> gradoutput,
    DeviceTensor<DType, 3> input,
    DeviceTensor<DType, 3> gradinput,
    DeviceTensor<DType, 1> gradgamma,
    DeviceTensor<DType, 1> gradbeta,
    DeviceTensor<DType, 1> mean,
    DeviceTensor<DType, 1> std,
    DeviceTensor<DType, 1> gamma,
    DeviceTensor<DType, 1> beta,
    DeviceTensor<DType, 1> gradEx,
    DeviceTensor<DType, 1> gradExs) {
  /* declarations of the variables */
  /* Get the index and channels */
  int c = blockIdx.x;
  /* main operation */
  // block-wide reduction: v1 = sum(gradOutput), v2 = sum(gradOutput*(in-mean))
  GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput);
  Float2<DType, DType> res = reduce<Float2<DType, DType>,
    GradOp<DType, DType, DeviceTensor<DType, 3>>,
    DeviceTensor<DType, 3>>(g, gradoutput, c);
  DType gradOutputSum = res.v1;
  DType dotP = res.v2;
  DType invstd = DType(1.0) / std[c];
  DType gradScale = invstd * gamma[c];
  if (threadIdx.x == 0) {
    // gradients w.r.t. the channel statistics ex and exs
    gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP * gradScale;
    gradExs[c] = - 0.5 * invstd * invstd * dotP * gradScale;
  }
  if (gradinput.numElements() > 0) {
    for (int batch = 0; batch < gradoutput.getSize(0); ++batch) {
      for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) {
        gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale;
      }
    }
  }
  if (gradgamma.numElements() > 0) {
    if (threadIdx.x == 0) {
      gradgamma[c] += dotP * invstd;
    }
  }
  if (gradbeta.numElements() > 0) {
    if (threadIdx.x == 0) {
      gradbeta[c] += gradOutputSum;
    }
  }
}
// Computes per-channel first and second moments of `input`:
//   ex[c]  = norm * sum(x),   exs[c] = norm * sum(x^2)
// where `norm` is the caller-supplied normalization factor.  One block per
// channel; the sums come from a single block-wide reduce() and thread 0
// writes the results.
template <typename DType>
__global__ void Expectation_Forward_kernel (
    DeviceTensor<DType, 3> input,
    DeviceTensor<DType, 1> ex,
    DeviceTensor<DType, 1> exs,
    DType norm) {
  int c = blockIdx.x;
  /* main operation */
  SumOp<DType, DType> g(input);
  Float2<DType, DType> res = reduce<Float2<DType, DType>,
    SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c);
  DType xsum = res.v1;
  DType xsquare = res.v2;
  if (threadIdx.x == 0) {
    ex[c] = xsum * norm;
    exs[c] = xsquare * norm;
  }
}
// Backward of the expectation pass: d(E[x])/dx = norm, d(E[x^2])/dx = 2*x*norm.
// Overwrites gradInput (note '=', not '+='); one block per channel.
template <typename DType>
__global__ void Expectation_Backward_kernel (
DeviceTensor<DType, 3> gradInput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs,
DType norm) {
int c = blockIdx.x;
/* main operation */
for (int batch = 0; batch < gradInput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) {
gradInput[batch][c][x] = gradEx[c] * norm + 2 * gradExs[c] *
input[batch][c][x] * norm;
}
}
}
// In-place variant of the expectation backward: the original input is no
// longer available, so it is reconstructed from the normalized output as
// x = (output - beta)/gamma * std + mean. Accumulates into gradInput ('+=').
template <typename DType>
__global__ void Expectation_Backward_Inp_kernel (
DeviceTensor<DType, 3> gradInput,
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DType norm) {
int c = blockIdx.x;
/* main operation */
for (int batch = 0; batch < gradInput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) {
// NOTE(review): divides by gamma[c]; presumably gamma is never zero here
// — confirm against the callers.
gradInput[batch][c][x] += gradEx[c] * norm + 2 * gradExs[c] *
((output[batch][c][x] - beta[c]) / gamma[c] * std[c] + mean[c]) * norm;
}
}
}
} // namespace
// Out-of-place batch-norm forward (HIP build): returns a new tensor holding
// gamma * (input - E[x]) / sqrt(E[x^2] - E[x]^2 + eps) + beta.
// Launch layout: one block per channel (dim 1), threads over the spatial dim (dim 2).
at::Tensor BatchNorm_Forward_CUDA(
    const at::Tensor input_,
    const at::Tensor ex_,
    const at::Tensor exs_,
    const at::Tensor gamma_,
    const at::Tensor beta_,
    float eps) {
  // std = sqrt(Var[x] + eps), computed once on the host side with tensor ops.
  auto stddev_ = (exs_ - ex_ * ex_ + eps).sqrt();
  auto result_ = at::zeros_like(input_);
  hipStream_t hipStream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  dim3 gridSize(input_.size(1));
  dim3 blockSize(getNumThreads(input_.size(2)));
  AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] {
    // Wrap ATen tensors as raw device views for the kernel.
    DeviceTensor<scalar_t, 3> dOut = devicetensor<scalar_t, 3>(result_);
    DeviceTensor<scalar_t, 3> dIn = devicetensor<scalar_t, 3>(input_);
    DeviceTensor<scalar_t, 1> dEx = devicetensor<scalar_t, 1>(ex_);
    DeviceTensor<scalar_t, 1> dStd = devicetensor<scalar_t, 1>(stddev_);
    DeviceTensor<scalar_t, 1> dGamma = devicetensor<scalar_t, 1>(gamma_);
    DeviceTensor<scalar_t, 1> dBeta = devicetensor<scalar_t, 1>(beta_);
    hipLaunchKernelGGL(( BatchNorm_Forward_kernel<scalar_t>), dim3(gridSize), dim3(blockSize), 0, hipStream,
        dOut, dIn, dEx, dStd, dGamma, dBeta);
  }));
  AT_ASSERT(hipGetLastError() == hipSuccess);
  return result_;
}
// In-place batch-norm forward (HIP build): normalizes input_ in place,
// x <- gamma * (x - E[x]) / sqrt(E[x^2] - E[x]^2 + eps) + beta, and returns it.
// Launch layout: one block per channel (dim 1), threads over the spatial dim (dim 2).
at::Tensor BatchNorm_Forward_Inp_CUDA(
    const at::Tensor input_,
    const at::Tensor ex_,
    const at::Tensor exs_,
    const at::Tensor gamma_,
    const at::Tensor beta_,
    float eps) {
  // std = sqrt(Var[x] + eps) = sqrt(E[x^2] - E[x]^2 + eps)
  auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  dim3 blocks(input_.size(1));
  dim3 threads(getNumThreads(input_.size(2)));
  // Fix: the dispatch label previously said "BatchNorm_Forward_CUDA" (copied
  // from the out-of-place variant); use this function's actual name so
  // unsupported-dtype errors point at the right entry point.
  AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_Inp_CUDA", ([&] {
    /* Device tensors */
    DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
    DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
    DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
    DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
    DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
    /* kernel function */
    hipLaunchKernelGGL(( BatchNorm_Forward_Inp_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
        input, ex, std, gamma, beta);
  }));
  AT_ASSERT(hipGetLastError() == hipSuccess);
  return input_;
}
// Backward of the in-place batch-norm forward (HIP build). Only the normalized
// output is available, so the kernel reconstructs what it needs from output,
// gamma and beta. Returns {gradInput, gradEx, gradExs, gradGamma, gradBeta}.
std::vector<at::Tensor> BatchNorm_Inp_Backward_CUDA(
const at::Tensor gradoutput_,
const at::Tensor output_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
/* outputs*/
// std recomputed from the stored moments; gradients start at zero because the
// kernel accumulates into gradgamma/gradbeta with '+='.
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
auto gradinput_ = at::zeros_like(output_);
auto gradgamma_ = at::zeros_like(gamma_);
auto gradbeta_ = at::zeros_like(beta_);
auto gradEx_ = at::zeros_like(ex_);
auto gradExs_ = at::zeros_like(std_);
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// One block per channel; threads strided over the spatial dimension.
dim3 blocks(output_.size(1));
dim3 threads(getNumThreads(output_.size(2)));
AT_DISPATCH_FLOATING_TYPES(output_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_);
DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_);
DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_);
DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_);
/* kernel function */
hipLaunchKernelGGL(( BatchNorm_Backward_Inp_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream,
gradoutput, output, gradinput, gradgamma, gradbeta, ex, std,
gamma, beta, gradEx, gradExs);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_};
}
// Out-of-place batch-norm backward (HIP build): the original input is
// available. Returns {gradInput, gradEx, gradExs, gradGamma, gradBeta}.
// Gradient tensors are zero-initialized because the kernel accumulates with '+='.
std::vector<at::Tensor> BatchNorm_Backward_CUDA(
    const at::Tensor gradoutput_,
    const at::Tensor input_,
    const at::Tensor ex_,
    const at::Tensor exs_,
    const at::Tensor gamma_,
    const at::Tensor beta_,
    float eps) {
  /* outputs*/
  auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
  auto gradinput_ = at::zeros_like(input_);
  auto gradgamma_ = at::zeros_like(gamma_);
  auto gradbeta_ = at::zeros_like(beta_);
  auto gradEx_ = at::zeros_like(ex_);
  auto gradExs_ = at::zeros_like(std_);
  /* cuda utils*/
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  // One block per channel; threads strided over the spatial dimension.
  dim3 blocks(input_.size(1));
  dim3 threads(getNumThreads(input_.size(2)));
  // Fix: dispatch label previously read "BatchNorm_Inp_Backward_CUDA"
  // (copy-paste from the in-place variant); use this function's own name.
  AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] {
    /* Device tensors */
    DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_);
    DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
    DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_);
    DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_);
    DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_);
    DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
    DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
    DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
    DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
    DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
    DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_);
    /* kernel function */
    hipLaunchKernelGGL(( BatchNorm_Backward_kernel<scalar_t>)
        , dim3(blocks), dim3(threads), 0, stream,
        gradoutput, input, gradinput, gradgamma, gradbeta, ex, std,
        gamma, beta, gradEx, gradExs);
  }));
  AT_ASSERT(hipGetLastError() == hipSuccess);
  return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_};
}
// Computes per-channel moments {E[x], E[x^2]} of a (N, C, spatial) tensor
// (HIP build). One block per channel.
std::vector<at::Tensor> Expectation_Forward_CUDA(
const at::Tensor input_) {
/* outputs */
auto ex_ = torch::zeros({input_.size(1)}, input_.options());
auto exs_ = torch::zeros({input_.size(1)}, input_.options());
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] {
// 1 / (batch * spatial): turns the kernel's sums into means.
scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2));
/* Device tensors */
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> exs = devicetensor<scalar_t, 1>(exs_);
/* kernel function */
hipLaunchKernelGGL(( Expectation_Forward_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, input, ex, exs, norm);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return {ex_, exs_};
}
// Backward of Expectation_Forward_CUDA (HIP build): propagates gradEx/gradExs
// back to a freshly allocated gradInput tensor.
at::Tensor Expectation_Backward_CUDA(
const at::Tensor input_,
const at::Tensor gradEx_,
const at::Tensor gradExs_) {
/* outputs */
at::Tensor gradInput_ = at::zeros_like(input_);
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] {
// Same 1/(batch*spatial) factor used in the forward pass.
scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2));
/* Device tensors */
DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
DeviceTensor<scalar_t, 1> gradExs =devicetensor<scalar_t, 1>(gradExs_);
/* kernel function */
hipLaunchKernelGGL(( Expectation_Backward_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, gradInput, input, gradEx, gradExs, norm);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return gradInput_;
}
// In-place variant of the expectation backward (HIP build): accumulates the
// statistics gradients into the caller-provided gradInput_, reconstructing the
// original input from the normalized output inside the kernel.
at::Tensor Expectation_Inp_Backward_CUDA(
const at::Tensor gradInput_,
const at::Tensor output_,
const at::Tensor gradEx_,
const at::Tensor gradExs_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
/* outputs */
//auto gradInput_ = at::zeros_like(output_);
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(output_.size(1));
dim3 threads(getNumThreads(output_.size(2)));
AT_DISPATCH_FLOATING_TYPES(output_.type(), "SumSquare_Backward_CUDA", ([&] {
scalar_t norm = scalar_t(1) / (output_.size(0) * output_.size(2));
/* Device tensors */
DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_);
// 'input' here is actually the normalized output; the kernel inverts it.
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
DeviceTensor<scalar_t, 1> gradExs =devicetensor<scalar_t, 1>(gradExs_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
/* kernel function */
hipLaunchKernelGGL(( Expectation_Backward_Inp_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, gradInput, input, gradEx, gradExs,
ex, std, gamma, beta, norm);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return gradInput_;
}
| fb6698d2c0d8e08c1c52afeb95c479dbc82a7eae.cu | //#include <torch/extension.h>
#include <torch/serialize/tensor.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <vector>
#include "common.h"
#include "device_tensor.h"
namespace {
// Pointwise functor for reduce(): given a center value ('beta', which callers
// pass as either the channel mean or the shift beta), returns the pair
// (gradOutput, gradOutput * (output - center)) so one reduction produces both
// the gradient sum and the dot product.
template <typename DType, typename Acctype, typename DeviceTensor3>
struct GradOp {
__device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g)
: beta(m), output(i), gradOutput(g) {}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = gradOutput[batch][plane][n];
DType c = ScalarConvert<Acctype, DType>::to(output[batch][plane][n] - beta);
return Float2<DType, Acctype>(g, g * c);
}
const Acctype beta;  // subtracted center (mean or beta, depending on caller)
const DeviceTensor3 output;
const DeviceTensor3 gradOutput;
};
// Pointwise functor for reduce(): returns (x, x*x) per element so a single
// reduction yields (sum x, sum x^2) for a channel, i.e. both moments in one pass.
template <typename DType, typename Acctype>
struct SumOp {
__device__ SumOp(DeviceTensor<DType, 3> i) : input(i){}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = input[batch][plane][n];
return Float2<DType, Acctype>(g, g * g);
}
// Removed unused member 'DType mean;' — it was never initialized or read.
DeviceTensor<DType, 3> input;
};
// Sum across (batch, x/y/z) applying Op() pointwise
// Block-wide reduction: per-thread partial sums -> warp reduction (warpSum) ->
// cross-warp reduction through a 32-slot shared array. Requires
// blockDim.x <= WARP_SIZE * 32 so each warp maps to one shared slot.
// All threads of the block must call this (it contains __syncthreads()).
// Returns the same total to every thread via shared[0].
template<typename T, typename Op, typename DeviceTensor3>
__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) {
T sum = (T)0;
// Grid-of-threads accumulation: each thread strides over the spatial dim.
for (int batch = 0; batch < tensor.getSize(0); ++batch) {
for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// sum over NumThreads within a warp
sum = warpSum(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[32];
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
shared[threadIdx.x / WARP_SIZE] = sum;
}
if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
// zero out the other entries in shared
shared[threadIdx.x] = (T)0;
}
__syncthreads();
// First warp reduces the per-warp partials; lane 0 publishes the total.
if (threadIdx.x / WARP_SIZE == 0) {
sum = warpSum(shared[threadIdx.x]);
if (threadIdx.x == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
// Out-of-place batch-norm forward: output = gamma * (input - mean) / std + beta.
// One block per channel (c = blockIdx.x); threads stride over the spatial dim.
template <typename DType>
__global__ void BatchNorm_Forward_kernel (
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta) {
int c = blockIdx.x;
/* main operation */
for (int b = 0; b < input.getSize(0); ++b) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
DType inp = input[b][c][x];
output[b][c][x] = gamma[c] * (inp - mean[c]) /
std[c] + beta[c];
}
}
}
// In-place batch-norm forward: overwrites input with its normalized value.
// 'inp' is read into a register before the write, so the update is safe even
// though source and destination alias.
template <typename DType>
__global__ void BatchNorm_Forward_Inp_kernel (
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta) {
int c = blockIdx.x;
/* main operation */
for (int b = 0; b < input.getSize(0); ++b) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
DType inp = input[b][c][x];
input[b][c][x] = gamma[c] * (inp - mean[c]) /
std[c] + beta[c];
}
}
}
// Backward pass when only the normalized *output* is available (in-place
// forward). GradOp here is centered on beta[c] against the output, so
// dotP = sum gradOutput * (output - beta) = gamma/std-scaled version of the
// input-centered dot product — note the gradEx/gradExs formulas therefore omit
// the extra gradScale factor used by the input-based kernel below.
// One block per channel; gradgamma/gradbeta accumulated by thread 0 only,
// so those tensors must be zero-initialized by the caller.
template <typename DType>
__global__ void BatchNorm_Backward_Inp_kernel (
DeviceTensor<DType, 3> gradoutput,
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 3> gradinput,
DeviceTensor<DType, 1> gradgamma,
DeviceTensor<DType, 1> gradbeta,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs) {
/* declarations of the variables */
/* Get the index and channels */
int c = blockIdx.x;
/* main operation */
GradOp<DType, DType, DeviceTensor<DType, 3>> g(beta[c], output, gradoutput);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
GradOp<DType, DType, DeviceTensor<DType, 3>>,
DeviceTensor<DType, 3>>(g, gradoutput, c);
DType gradOutputSum = res.v1;  // sum of gradOutput over channel c
DType dotP = res.v2;           // sum of gradOutput * (output - beta)
DType invstd = DType(1.0) / std[c];
DType gradScale = invstd * gamma[c];
if (threadIdx.x == 0) {
gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP;
gradExs[c] = - 0.5 * invstd * invstd * dotP;
}
if (gradinput.numElements() > 0) {
for (int batch = 0; batch < gradoutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) {
gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale;
}
}
}
if (gradgamma.numElements() > 0) {
if (threadIdx.x == 0) {
gradgamma[c] += dotP / gamma[c];
}
}
if (gradbeta.numElements() > 0) {
if (threadIdx.x == 0) {
gradbeta[c] += gradOutputSum;
}
}
}
// Backward pass of batch norm when the original *input* is available. GradOp
// is centered on mean[c] against the input, so dotP = sum gradOutput*(input-mean).
// One block per channel; gradgamma/gradbeta accumulated with '+=' by thread 0,
// so the caller must zero-initialize them (host wrappers use at::zeros_like).
template <typename DType>
__global__ void BatchNorm_Backward_kernel (
DeviceTensor<DType, 3> gradoutput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 3> gradinput,
DeviceTensor<DType, 1> gradgamma,
DeviceTensor<DType, 1> gradbeta,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs) {
/* declarations of the variables */
/* Get the index and channels */
int c = blockIdx.x;
/* main operation */
GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
GradOp<DType, DType, DeviceTensor<DType, 3>>,
DeviceTensor<DType, 3>>(g, gradoutput, c);
DType gradOutputSum = res.v1;  // sum of gradOutput over channel c
DType dotP = res.v2;           // sum of gradOutput * (input - mean)
DType invstd = DType(1.0) / std[c];
DType gradScale = invstd * gamma[c];
// Gradients w.r.t. the running moments; reduce() broadcasts its result to
// every thread, so a single writer suffices.
if (threadIdx.x == 0) {
gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP * gradScale;
gradExs[c] = - 0.5 * invstd * invstd * dotP * gradScale;
}
if (gradinput.numElements() > 0) {
for (int batch = 0; batch < gradoutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) {
gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale;
}
}
}
if (gradgamma.numElements() > 0) {
if (threadIdx.x == 0) {
gradgamma[c] += dotP * invstd;
}
}
if (gradbeta.numElements() > 0) {
if (threadIdx.x == 0) {
gradbeta[c] += gradOutputSum;
}
}
}
// Computes per-channel first and second moments: ex[c] = E[x], exs[c] = E[x^2].
// One block per channel; 'norm' is the precomputed 1/(N*spatial) factor.
template <typename DType>
__global__ void Expectation_Forward_kernel (
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> ex,
DeviceTensor<DType, 1> exs,
DType norm) {
int c = blockIdx.x;
/* main operation */
// SumOp yields (x, x*x); reduce() sums both over batch and spatial dims.
SumOp<DType, DType> g(input);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c);
DType xsum = res.v1;
DType xsquare = res.v2;
if (threadIdx.x == 0) {
ex[c] = xsum * norm;
exs[c] = xsquare * norm;
}
}
// Backward of the expectation pass: d(E[x])/dx = norm, d(E[x^2])/dx = 2*x*norm.
// Overwrites gradInput (note '=', not '+='); one block per channel.
template <typename DType>
__global__ void Expectation_Backward_kernel (
DeviceTensor<DType, 3> gradInput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs,
DType norm) {
int c = blockIdx.x;
/* main operation */
for (int batch = 0; batch < gradInput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) {
gradInput[batch][c][x] = gradEx[c] * norm + 2 * gradExs[c] *
input[batch][c][x] * norm;
}
}
}
// In-place variant of the expectation backward: the original input is gone, so
// it is reconstructed from the normalized output as
// x = (output - beta)/gamma * std + mean. Accumulates into gradInput ('+=').
template <typename DType>
__global__ void Expectation_Backward_Inp_kernel (
DeviceTensor<DType, 3> gradInput,
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DType norm) {
int c = blockIdx.x;
/* main operation */
for (int batch = 0; batch < gradInput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) {
// NOTE(review): divides by gamma[c]; presumably gamma is never zero here
// — confirm against the callers.
gradInput[batch][c][x] += gradEx[c] * norm + 2 * gradExs[c] *
((output[batch][c][x] - beta[c]) / gamma[c] * std[c] + mean[c]) * norm;
}
}
}
} // namespace
// Out-of-place batch-norm forward: returns a new tensor holding
// gamma * (input - E[x]) / sqrt(E[x^2] - E[x]^2 + eps) + beta.
// Launch layout: one block per channel (dim 1), threads over the spatial dim (dim 2).
at::Tensor BatchNorm_Forward_CUDA(
    const at::Tensor input_,
    const at::Tensor ex_,
    const at::Tensor exs_,
    const at::Tensor gamma_,
    const at::Tensor beta_,
    float eps) {
  // std = sqrt(Var[x] + eps), computed once on the host side with tensor ops.
  auto stddev_ = (exs_ - ex_ * ex_ + eps).sqrt();
  auto result_ = at::zeros_like(input_);
  cudaStream_t cudaStream = at::cuda::getCurrentCUDAStream();
  dim3 gridSize(input_.size(1));
  dim3 blockSize(getNumThreads(input_.size(2)));
  AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] {
    // Wrap ATen tensors as raw device views for the kernel.
    DeviceTensor<scalar_t, 3> dOut = devicetensor<scalar_t, 3>(result_);
    DeviceTensor<scalar_t, 3> dIn = devicetensor<scalar_t, 3>(input_);
    DeviceTensor<scalar_t, 1> dEx = devicetensor<scalar_t, 1>(ex_);
    DeviceTensor<scalar_t, 1> dStd = devicetensor<scalar_t, 1>(stddev_);
    DeviceTensor<scalar_t, 1> dGamma = devicetensor<scalar_t, 1>(gamma_);
    DeviceTensor<scalar_t, 1> dBeta = devicetensor<scalar_t, 1>(beta_);
    BatchNorm_Forward_kernel<scalar_t><<<gridSize, blockSize, 0, cudaStream>>>(
        dOut, dIn, dEx, dStd, dGamma, dBeta);
  }));
  AT_ASSERT(cudaGetLastError() == cudaSuccess);
  return result_;
}
// In-place batch-norm forward: normalizes input_ in place,
// x <- gamma * (x - E[x]) / sqrt(E[x^2] - E[x]^2 + eps) + beta, and returns it.
// Launch layout: one block per channel (dim 1), threads over the spatial dim (dim 2).
at::Tensor BatchNorm_Forward_Inp_CUDA(
    const at::Tensor input_,
    const at::Tensor ex_,
    const at::Tensor exs_,
    const at::Tensor gamma_,
    const at::Tensor beta_,
    float eps) {
  // std = sqrt(Var[x] + eps) = sqrt(E[x^2] - E[x]^2 + eps)
  auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  dim3 blocks(input_.size(1));
  dim3 threads(getNumThreads(input_.size(2)));
  // Fix: the dispatch label previously said "BatchNorm_Forward_CUDA" (copied
  // from the out-of-place variant); use this function's actual name so
  // unsupported-dtype errors point at the right entry point.
  AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_Inp_CUDA", ([&] {
    /* Device tensors */
    DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
    DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
    DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
    DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
    DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
    /* kernel function */
    BatchNorm_Forward_Inp_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
        input, ex, std, gamma, beta);
  }));
  AT_ASSERT(cudaGetLastError() == cudaSuccess);
  return input_;
}
// Backward of the in-place batch-norm forward. Only the normalized output is
// available, so the kernel reconstructs what it needs from output, gamma and
// beta. Returns {gradInput, gradEx, gradExs, gradGamma, gradBeta}.
std::vector<at::Tensor> BatchNorm_Inp_Backward_CUDA(
const at::Tensor gradoutput_,
const at::Tensor output_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
/* outputs*/
// std recomputed from the stored moments; gradients start at zero because the
// kernel accumulates into gradgamma/gradbeta with '+='.
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
auto gradinput_ = at::zeros_like(output_);
auto gradgamma_ = at::zeros_like(gamma_);
auto gradbeta_ = at::zeros_like(beta_);
auto gradEx_ = at::zeros_like(ex_);
auto gradExs_ = at::zeros_like(std_);
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// One block per channel; threads strided over the spatial dimension.
dim3 blocks(output_.size(1));
dim3 threads(getNumThreads(output_.size(2)));
AT_DISPATCH_FLOATING_TYPES(output_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_);
DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_);
DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_);
DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_);
/* kernel function */
BatchNorm_Backward_Inp_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(
gradoutput, output, gradinput, gradgamma, gradbeta, ex, std,
gamma, beta, gradEx, gradExs);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_};
}
// Out-of-place batch-norm backward: the original input is available.
// Returns {gradInput, gradEx, gradExs, gradGamma, gradBeta}. Gradient tensors
// are zero-initialized because the kernel accumulates with '+='.
std::vector<at::Tensor> BatchNorm_Backward_CUDA(
    const at::Tensor gradoutput_,
    const at::Tensor input_,
    const at::Tensor ex_,
    const at::Tensor exs_,
    const at::Tensor gamma_,
    const at::Tensor beta_,
    float eps) {
  /* outputs*/
  auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
  auto gradinput_ = at::zeros_like(input_);
  auto gradgamma_ = at::zeros_like(gamma_);
  auto gradbeta_ = at::zeros_like(beta_);
  auto gradEx_ = at::zeros_like(ex_);
  auto gradExs_ = at::zeros_like(std_);
  /* cuda utils*/
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  // One block per channel; threads strided over the spatial dimension.
  dim3 blocks(input_.size(1));
  dim3 threads(getNumThreads(input_.size(2)));
  // Fix: dispatch label previously read "BatchNorm_Inp_Backward_CUDA"
  // (copy-paste from the in-place variant); use this function's own name.
  AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] {
    /* Device tensors */
    DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_);
    DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
    DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_);
    DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_);
    DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_);
    DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
    DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
    DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
    DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
    DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
    DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_);
    /* kernel function */
    BatchNorm_Backward_kernel<scalar_t>
        <<<blocks, threads, 0, stream>>>(
        gradoutput, input, gradinput, gradgamma, gradbeta, ex, std,
        gamma, beta, gradEx, gradExs);
  }));
  AT_ASSERT(cudaGetLastError() == cudaSuccess);
  return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_};
}
// Computes per-channel moments {E[x], E[x^2]} of a (N, C, spatial) tensor.
// One block per channel.
std::vector<at::Tensor> Expectation_Forward_CUDA(
const at::Tensor input_) {
/* outputs */
auto ex_ = torch::zeros({input_.size(1)}, input_.options());
auto exs_ = torch::zeros({input_.size(1)}, input_.options());
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] {
// 1 / (batch * spatial): turns the kernel's sums into means.
scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2));
/* Device tensors */
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> exs = devicetensor<scalar_t, 1>(exs_);
/* kernel function */
Expectation_Forward_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(input, ex, exs, norm);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return {ex_, exs_};
}
// Backward of Expectation_Forward_CUDA: propagates gradEx/gradExs back to a
// freshly allocated gradInput tensor.
at::Tensor Expectation_Backward_CUDA(
const at::Tensor input_,
const at::Tensor gradEx_,
const at::Tensor gradExs_) {
/* outputs */
at::Tensor gradInput_ = at::zeros_like(input_);
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] {
// Same 1/(batch*spatial) factor used in the forward pass.
scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2));
/* Device tensors */
DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
DeviceTensor<scalar_t, 1> gradExs =devicetensor<scalar_t, 1>(gradExs_);
/* kernel function */
Expectation_Backward_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(gradInput, input, gradEx, gradExs, norm);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return gradInput_;
}
// In-place variant of the expectation backward: accumulates the statistics
// gradients into the caller-provided gradInput_, reconstructing the original
// input from the normalized output inside the kernel.
at::Tensor Expectation_Inp_Backward_CUDA(
const at::Tensor gradInput_,
const at::Tensor output_,
const at::Tensor gradEx_,
const at::Tensor gradExs_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
/* outputs */
//auto gradInput_ = at::zeros_like(output_);
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(output_.size(1));
dim3 threads(getNumThreads(output_.size(2)));
AT_DISPATCH_FLOATING_TYPES(output_.type(), "SumSquare_Backward_CUDA", ([&] {
scalar_t norm = scalar_t(1) / (output_.size(0) * output_.size(2));
/* Device tensors */
DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_);
// 'input' here is actually the normalized output; the kernel inverts it.
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
DeviceTensor<scalar_t, 1> gradExs =devicetensor<scalar_t, 1>(gradExs_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
/* kernel function */
Expectation_Backward_Inp_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(gradInput, input, gradEx, gradExs,
ex, std, gamma, beta, norm);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return gradInput_;
}
|
1f99c8c2caf6f6c12350a69cdb43a0535d840308.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CUMAT_EIGEN_SUPPORT 1
#include "../SoftBodyGrid3D.h"
#include <cinder/app/AppBase.h>
#include "../Utils3D.h"
#include "../cuPrintf.cuh"
namespace ar3d
{
typedef WorldGridData<real>::DeviceArray_t Grid_t;
// Forward-scatter advection of a signed distance field: each source cell moves
// by step * displacement, then splats its SDF value into nearby output cells
// with a Gaussian kernel (full weight inside kernelRadiusIn, reduced weight out
// to kernelRadiusOut). Weighted values and weights are accumulated atomically
// into outputSdf / outputWeights; a separate normalization pass divides them.
// Distances ('d') are squared, so radii are compared via square().
__global__ void AdvectLevelsetBlendingKernel(dim3 inputSize,
const Grid_t inputSdf, const WorldGridData<real3>::DeviceArray_t displacements,
Grid_t outputSdf, Grid_t outputWeights,
int3 outputSize, int3 offsetDifference, real step, real kernelDenom,
SoftBodyGrid3D::AdvectionSettings settings)
{
CUMAT_KERNEL_3D_LOOP(x, y, z, inputSize)
real value = inputSdf.coeff(x, y, z, -1);
if (value > 1e10) continue; //way outside
real3 v = displacements.coeff(x, y, z, -1);
// Cells beyond outerSdfThreshold contribute with reduced weight.
real extraWeight = value <= -settings.outerSdfThreshold || value >= settings.outerSdfThreshold
? max(settings.outerSdfWeight, -abs(value) + settings.outerSdfThreshold)
: 1;
// Target position in output-grid coordinates (grids may have different offsets).
real3 p = make_real3(x, y, z) + step * v - make_real3(offsetDifference.x, offsetDifference.y, offsetDifference.z);
//cuPrintf("cell=(%d,%d,%d): sdf=%5.3f, v=(%5.3f, %5.3f, %5.3f) -> p=(%5.3f, %5.3f, %5.3f)\n", x, y, z, value, v.x, v.y, v.z, p.x, p.y, p.z);
// Splat into the clamped bounding box of the outer kernel radius around p.
for (int iz = max(0, (int)floor(p.z - settings.kernelRadiusOut)); iz <= min(outputSize.z - 1, (int)ceil(p.z + settings.kernelRadiusOut)); ++iz)
for (int iy = max(0, (int)floor(p.y - settings.kernelRadiusOut)); iy <= min(outputSize.y - 1, (int)ceil(p.y + settings.kernelRadiusOut)); ++iy)
for (int ix = max(0, (int)floor(p.x - settings.kernelRadiusOut)); ix <= min(outputSize.x - 1, (int)ceil(p.x + settings.kernelRadiusOut)); ++ix)
{
real d = lengthSquared3(make_real3(ix, iy, iz) - p);
cuMat::Index idx = outputSdf.index(ix, iy, iz);
if (d <= ar3d::utils::square(settings.kernelRadiusIn))
{
real w = exp(-d * kernelDenom) * extraWeight;
atomicAddReal(outputSdf.data() + idx, w * value);
atomicAddReal(outputWeights.data() + idx, w);
}
else if (d <= ar3d::utils::square(settings.kernelRadiusOut))
{
// Ring between inner and outer radius: further damped by outerKernelWeight.
real w = exp(-d * kernelDenom) * settings.outerKernelWeight * extraWeight;
atomicAddReal(outputSdf.data() + idx, w * value);
atomicAddReal(outputWeights.data() + idx, w);
}
}
CUMAT_KERNEL_3D_LOOP_END
}
// Elementwise finalization of the scatter blend: divides the accumulated
// weighted SDF by the accumulated weight. Cells that received no contribution
// (weight == 0) get a large "far outside" sentinel instead of 0/0.
struct AdvectionNormalizeFunctor
{
public:
    typedef real ReturnType;
    AdvectionNormalizeFunctor(real outsideValue) : outsideValue_(outsideValue) {}
    __device__ CUMAT_STRONG_INLINE real operator()(const real& output, const real& weight, cuMat::Index row, cuMat::Index col, cuMat::Index batch) const
    {
        if (weight == 0) return outsideValue_;
        return output / weight;
    }
private:
    real outsideValue_;  // sentinel for untouched cells
};
// Advects the reference SDF by the per-cell displacement field into advectSdf:
// (1) scatter-blend source values into the output grid with atomic adds,
// (2) normalize by accumulated weights (untouched cells become "far outside").
// Precondition (asserted): both grids share the same voxel resolution; their
// integer offsets may differ and are compensated inside the kernel.
void SoftBodyGrid3D::advectLevelset(const Input& input, const WorldGridData<real3>::DeviceArray_t& gridDisp,
WorldGridDataPtr<real> advectSdf, const AdvectionSettings& settings)
{
const Eigen::Vector3i& srcSize = input.grid_->getSize();
const Eigen::Vector3i& dstSize = advectSdf->getGrid()->getSize();
const Eigen::Vector3i& srcOffset = input.grid_->getOffset();
const Eigen::Vector3i& dstOffset = advectSdf->getGrid()->getOffset();
assert(input.grid_->getVoxelResolution() == advectSdf->getGrid()->getVoxelResolution());
// Accumulators must start at zero: the kernel only does atomic adds.
Grid_t weights(dstSize.x(), dstSize.y(), dstSize.z());
weights.setZero();
advectSdf->getDeviceMemory().setZero();
//settings
int3 outputSize = make_int3(dstSize.x(), dstSize.y(), dstSize.z());
int3 offsetDifference = make_int3(dstOffset.x() - srcOffset.x(), dstOffset.y() - srcOffset.y(), dstOffset.z() - srcOffset.z());
real step = input.grid_->getVoxelResolution();
real kernelDenom = 1.0 / (2 * ar3d::utils::square(settings.kernelRadiusIn / 3)); //98% of the Gaussian kernel is within kernelRadius
//cinder::app::console() << "Reference SDF:\n" << input.referenceSdf_->getDeviceMemory() << std::endl;
//blend into output grid
cuMat::Context& ctx = cuMat::Context::current();
cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig3D(srcSize.x(), srcSize.y(), srcSize.z(), AdvectLevelsetBlendingKernel);
hipLaunchKernelGGL(( AdvectLevelsetBlendingKernel) , dim3(cfg.block_count), dim3(cfg.thread_per_block), 0, ctx.stream() ,
cfg.virtual_size, input.referenceSdf_->getDeviceMemory(), gridDisp,
advectSdf->getDeviceMemory(), weights,
outputSize, offsetDifference, step, kernelDenom, settings);
CUMAT_CHECK_ERROR();
//cudaPrintfDisplay(cinder::app::console());
CI_LOG_D("Advection: Values blended into output");
//cinder::app::console() << "Advected SDF:\n" << advectSdf->getDeviceMemory() << std::endl;
//normalize
// Sentinel larger than any in-grid distance, assigned where weight == 0.
real outsideValue = dstSize.x() + dstSize.y() + dstSize.z();
advectSdf->getDeviceMemory().inplace() = cuMat::BinaryOp<Grid_t, Grid_t, AdvectionNormalizeFunctor>
(advectSdf->getDeviceMemory(), weights, AdvectionNormalizeFunctor(outsideValue));
CI_LOG_D("Advection: SDF normalized");
//cinder::app::console() << "Final SDF:\n" << advectSdf->getDeviceMemory() << std::endl;
}
}
| 1f99c8c2caf6f6c12350a69cdb43a0535d840308.cu | #define CUMAT_EIGEN_SUPPORT 1
#include "../SoftBodyGrid3D.h"
#include <cinder/app/AppBase.h>
#include "../Utils3D.h"
#include "../cuPrintf.cuh"
namespace ar3d
{
typedef WorldGridData<real>::DeviceArray_t Grid_t;
// Scatters ("splats") the reference SDF into the advected output grid.
//
// Each input cell (x, y, z) is displaced by its velocity/displacement vector and
// its SDF value is blended into all output cells within `settings.kernelRadiusOut`
// of the displaced position, using a Gaussian falloff exp(-d^2 * kernelDenom).
// Contributions are accumulated with atomics into `outputSdf` (weighted values)
// and `outputWeights` (weights); a separate normalization pass divides the two.
//
// Parameters:
//   inputSize        - extent of the input grid; iterated via CUMAT_KERNEL_3D_LOOP
//                      (grid-stride style 3D loop provided by cuMat).
//   inputSdf         - reference SDF values, one per input cell.
//   displacements    - per-cell displacement vectors.
//   outputSdf        - accumulator for weight * value (must be zeroed by caller).
//   outputWeights    - accumulator for weights (must be zeroed by caller).
//   outputSize       - extent of the output grid, used to clamp the splat window.
//   offsetDifference - dstOffset - srcOffset, translating input coords to output coords.
//   step             - voxel resolution; scales the displacement vector.
//   kernelDenom      - precomputed 1 / (2 * sigma^2) of the Gaussian kernel.
//   settings         - thresholds/weights controlling inner vs. outer kernel blending.
__global__ void AdvectLevelsetBlendingKernel(dim3 inputSize,
	const Grid_t inputSdf, const WorldGridData<real3>::DeviceArray_t displacements,
	Grid_t outputSdf, Grid_t outputWeights,
	int3 outputSize, int3 offsetDifference, real step, real kernelDenom,
	SoftBodyGrid3D::AdvectionSettings settings)
{
	CUMAT_KERNEL_3D_LOOP(x, y, z, inputSize)
		real value = inputSdf.coeff(x, y, z, -1);
		if (value > 1e10) continue; //way outside
		real3 v = displacements.coeff(x, y, z, -1);
		// Cells beyond the outer SDF threshold contribute with reduced weight,
		// fading out linearly with distance but never below outerSdfWeight.
		real extraWeight = value <= -settings.outerSdfThreshold || value >= settings.outerSdfThreshold
			? max(settings.outerSdfWeight, -abs(value) + settings.outerSdfThreshold)
			: 1;
		// Displaced position of this cell, expressed in output-grid coordinates.
		real3 p = make_real3(x, y, z) + step * v - make_real3(offsetDifference.x, offsetDifference.y, offsetDifference.z);
		//cuPrintf("cell=(%d,%d,%d): sdf=%5.3f, v=(%5.3f, %5.3f, %5.3f) -> p=(%5.3f, %5.3f, %5.3f)\n", x, y, z, value, v.x, v.y, v.z, p.x, p.y, p.z);
		// Visit every output cell inside the (clamped) outer-kernel bounding box.
		for (int iz = max(0, (int)floor(p.z - settings.kernelRadiusOut)); iz <= min(outputSize.z - 1, (int)ceil(p.z + settings.kernelRadiusOut)); ++iz)
		for (int iy = max(0, (int)floor(p.y - settings.kernelRadiusOut)); iy <= min(outputSize.y - 1, (int)ceil(p.y + settings.kernelRadiusOut)); ++iy)
		for (int ix = max(0, (int)floor(p.x - settings.kernelRadiusOut)); ix <= min(outputSize.x - 1, (int)ceil(p.x + settings.kernelRadiusOut)); ++ix)
		{
			// Squared distance from the splat center; compared against squared radii
			// so no sqrt is needed.
			real d = lengthSquared3(make_real3(ix, iy, iz) - p);
			cuMat::Index idx = outputSdf.index(ix, iy, iz);
			if (d <= ar3d::utils::square(settings.kernelRadiusIn))
			{
				// Inner kernel: full Gaussian weight.
				real w = exp(-d * kernelDenom) * extraWeight;
				atomicAddReal(outputSdf.data() + idx, w * value);
				atomicAddReal(outputWeights.data() + idx, w);
			}
			else if (d <= ar3d::utils::square(settings.kernelRadiusOut))
			{
				// Outer ring: additionally damped by outerKernelWeight.
				real w = exp(-d * kernelDenom) * settings.outerKernelWeight * extraWeight;
				atomicAddReal(outputSdf.data() + idx, w * value);
				atomicAddReal(outputWeights.data() + idx, w);
			}
		}
	CUMAT_KERNEL_3D_LOOP_END
}
// Binary cuMat functor used to finish the advection blending pass: it divides
// the accumulated weighted SDF values by the accumulated weights. Cells that
// received no contribution at all (weight == 0) are assigned a fixed fallback
// value supplied at construction time ("far outside" marker).
struct AdvectionNormalizeFunctor
{
private:
	// Value assigned to cells whose accumulated blending weight is exactly zero.
	real fallback_;
public:
	AdvectionNormalizeFunctor(real outsideValue) : fallback_(outsideValue) {}
	typedef real ReturnType;
	__device__ CUMAT_STRONG_INLINE real operator()(const real& output, const real& weight, cuMat::Index row, cuMat::Index col, cuMat::Index batch) const
	{
		if (weight == 0) {
			return fallback_;
		}
		return output / weight;
	}
};
// Advects the reference level set SDF by the given per-cell displacement field.
//
// Two passes:
//   1. AdvectLevelsetBlendingKernel scatters weighted SDF values and weights
//      from the (displaced) source grid into `advectSdf` and a temporary
//      weight grid, iterating over the *source* grid extent.
//   2. AdvectionNormalizeFunctor divides value by weight per cell; cells with
//      zero weight get a large "outside" value.
//
// Parameters:
//   input     - provides the source grid (size/offset/resolution) and the
//               reference SDF device memory.
//   gridDisp  - per-cell displacement vectors on the source grid.
//   advectSdf - destination grid; its device memory is overwritten.
//   settings  - kernel radii, thresholds and weights for the blending.
//
// Precondition: source and destination grids must share the same voxel
// resolution (checked by assert, i.e. debug builds only).
void SoftBodyGrid3D::advectLevelset(const Input& input, const WorldGridData<real3>::DeviceArray_t& gridDisp,
	WorldGridDataPtr<real> advectSdf, const AdvectionSettings& settings)
{
	const Eigen::Vector3i& srcSize = input.grid_->getSize();
	const Eigen::Vector3i& dstSize = advectSdf->getGrid()->getSize();
	const Eigen::Vector3i& srcOffset = input.grid_->getOffset();
	const Eigen::Vector3i& dstOffset = advectSdf->getGrid()->getOffset();
	assert(input.grid_->getVoxelResolution() == advectSdf->getGrid()->getVoxelResolution());
	// Temporary per-cell weight accumulator; both accumulators start at zero.
	Grid_t weights(dstSize.x(), dstSize.y(), dstSize.z());
	weights.setZero();
	advectSdf->getDeviceMemory().setZero();
	//settings
	int3 outputSize = make_int3(dstSize.x(), dstSize.y(), dstSize.z());
	// Translation between source and destination grid coordinate systems.
	int3 offsetDifference = make_int3(dstOffset.x() - srcOffset.x(), dstOffset.y() - srcOffset.y(), dstOffset.z() - srcOffset.z());
	real step = input.grid_->getVoxelResolution();
	real kernelDenom = 1.0 / (2 * ar3d::utils::square(settings.kernelRadiusIn / 3)); //98% of the Gaussian kernel is within kernelRadius
	//cinder::app::console() << "Reference SDF:\n" << input.referenceSdf_->getDeviceMemory() << std::endl;
	//blend into output grid
	// Launch config covers the *source* grid; the kernel scatters into the destination.
	cuMat::Context& ctx = cuMat::Context::current();
	cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig3D(srcSize.x(), srcSize.y(), srcSize.z(), AdvectLevelsetBlendingKernel);
	AdvectLevelsetBlendingKernel <<< cfg.block_count, cfg.thread_per_block, 0, ctx.stream() >>>(
		cfg.virtual_size, input.referenceSdf_->getDeviceMemory(), gridDisp,
		advectSdf->getDeviceMemory(), weights,
		outputSize, offsetDifference, step, kernelDenom, settings);
	CUMAT_CHECK_ERROR();
	//cudaPrintfDisplay(cinder::app::console());
	CI_LOG_D("Advection: Values blended into output");
	//cinder::app::console() << "Advected SDF:\n" << advectSdf->getDeviceMemory() << std::endl;
	//normalize
	// Cells that received no blending weight are set to this large "outside" value.
	real outsideValue = dstSize.x() + dstSize.y() + dstSize.z();
	advectSdf->getDeviceMemory().inplace() = cuMat::BinaryOp<Grid_t, Grid_t, AdvectionNormalizeFunctor>
		(advectSdf->getDeviceMemory(), weights, AdvectionNormalizeFunctor(outsideValue));
	CI_LOG_D("Advection: SDF normalized");
	//cinder::app::console() << "Final SDF:\n" << advectSdf->getDeviceMemory() << std::endl;
}
}
|
ec7795092d8c3389deaf1775ad4f089630db9a05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*//*
*/
/** @file bench-ours.cu
 *  @author Thomas Müller, NVIDIA
* @brief Generates performance data for comparison with TensorFlow.
*/
#include <tiny-cuda-nn/misc_kernels.h>
#include <tiny-cuda-nn/gpu_matrix.h>
#include <tiny-cuda-nn/encodings/oneblob.h>
#include <tiny-cuda-nn/optimizer.h>
#include <tiny-cuda-nn/loss.h>
#include <tiny-cuda-nn/network.h>
#include <tiny-cuda-nn/trainer.h>
#include <tinyexr/tinyexr.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <random>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>
#include <fstream>
using namespace tcnn;
using precision_t = network_precision_t;
// Writes an interleaved float image to an EXR file via tinyexr.
//
// Parameters:
//   data          - host pixel buffer, `channelStride` floats per pixel, row-major.
//   width, height - image dimensions in pixels.
//   nChannels     - number of channels to write (1..4 -> B, G, R, A planes).
//   channelStride - distance in floats between consecutive pixels in `data`
//                   (may exceed nChannels to skip unused components).
//   outfilename   - destination path.
// Returns true on success, false on failure (an error message is printed to stderr).
bool SaveEXR(const float* data, int width, int height, int nChannels, int channelStride, const char* outfilename) {
	EXRHeader header;
	InitEXRHeader(&header);
	EXRImage image;
	InitEXRImage(&image);
	image.num_channels = nChannels;
	// De-interleave the input into one plane per channel.
	std::vector<std::vector<float>> images(nChannels);
	std::vector<float*> image_ptr(nChannels);
	for (int i = 0; i < nChannels; ++i) {
		images[i].resize(width * height);
	}
	// Planes are handed to tinyexr in reverse order to match the (A)BGR
	// channel naming below.
	for (int i = 0; i < nChannels; ++i) {
		image_ptr[i] = images[nChannels - i - 1].data();
	}
	for (size_t i = 0; i < (size_t)width * height; i++) {
		for (int c = 0; c < nChannels; ++c) {
			images[c][i] = data[channelStride*i+c];
		}
	}
	image.images = (unsigned char**)image_ptr.data();
	image.width = width;
	image.height = height;
	header.num_channels = nChannels;
	header.channels = (EXRChannelInfo *)malloc(sizeof(EXRChannelInfo) * header.num_channels);
	// Must be (A)BGR order, since most of EXR viewers expect this channel order.
	strncpy(header.channels[0].name, "B", 255); header.channels[0].name[strlen("B")] = '\0';
	if (nChannels > 1) {
		strncpy(header.channels[1].name, "G", 255); header.channels[1].name[strlen("G")] = '\0';
	}
	if (nChannels > 2) {
		strncpy(header.channels[2].name, "R", 255); header.channels[2].name[strlen("R")] = '\0';
	}
	if (nChannels > 3) {
		strncpy(header.channels[3].name, "A", 255); header.channels[3].name[strlen("A")] = '\0';
	}
	header.pixel_types = (int *)malloc(sizeof(int) * header.num_channels);
	header.requested_pixel_types = (int *)malloc(sizeof(int) * header.num_channels);
	for (int i = 0; i < header.num_channels; i++) {
		header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
		header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // pixel type of output image to be stored in .EXR
	}
	const char* err = NULL; // or nullptr in C++11 or later.
	int ret = SaveEXRImageToFile(&image, &header, outfilename, &err);
	// Release header allocations on every path.
	// BUG FIX: these were previously leaked when SaveEXRImageToFile failed.
	free(header.channels);
	free(header.pixel_types);
	free(header.requested_pixel_types);
	if (ret != TINYEXR_SUCCESS) {
		fprintf(stderr, "Save EXR err: %s\n", err);
		FreeEXRErrorMessage(err); // free's buffer for an error message
		// BUG FIX: previously returned `ret` (a non-zero tinyexr error code),
		// which implicitly converted to `true` and made failures look like success.
		return false;
	}
	printf("Saved exr file. [ %s ] \n", outfilename);
	return true;
}
// Loads an EXR file into GPU memory as an RGBA float buffer.
//
// Parameters:
//   filename      - path to the EXR file.
//   width, height - output parameters, filled with the image dimensions.
// Returns a device buffer of width * height * 4 floats.
// Throws std::runtime_error if decoding fails.
GPUMemory<float> load_image(const std::string& filename, int& width, int& height) {
	// Decode into a host-side buffer (width * height * RGBA).
	float* pixels = nullptr;
	const char* error_msg = nullptr;
	const int status = LoadEXR(&pixels, &width, &height, filename.c_str(), &error_msg);
	if (status != TINYEXR_SUCCESS) {
		std::string message = "Failed to load EXR image";
		if (error_msg) {
			message = std::string("Failed to load EXR image: ") + error_msg;
			FreeEXRErrorMessage(error_msg);
		}
		throw std::runtime_error(message);
	}
	// Upload to the GPU, then release the host copy.
	GPUMemory<float> gpu_pixels(width * height * 4);
	gpu_pixels.copy_from_host(pixels);
	free(pixels);
	return gpu_pixels;
}
// Downloads a device buffer, converts every element to float, and writes it
// to an EXR file via SaveEXR.
//
// Parameters:
//   image          - device buffer holding the pixel data.
//   width, height  - image dimensions in pixels.
//   n_channels     - number of channels to write.
//   channel_stride - floats per pixel in the buffer (>= n_channels).
//   filename       - destination path.
template <typename T>
void save_image(const GPUMemory<T>& image, int width, int height, int n_channels, int channel_stride, const std::string& filename) {
	// Bring the data back to the host.
	std::vector<T> raw(image.get_num_elements());
	image.copy_to_host(raw.data());
	// Convert element-wise to float for the EXR writer.
	std::vector<float> converted;
	converted.reserve(raw.size());
	for (const T& value : raw) {
		converted.push_back((float)value);
	}
	SaveEXR(converted.data(), width, height, n_channels, channel_stride, filename.c_str());
}
// Samples `texture` at `n_elements` normalized 2D coordinates and writes RGB
// into `result` with `stride` floats per sample; channels beyond the third
// are padded with 1.
//
// Parameters:
//   n_elements - number of coordinate pairs to evaluate.
//   texture    - 2D float4 texture to sample.
//   filter     - if false, coordinates are snapped to the nearest texel center,
//                effectively disabling bilinear filtering.
//   width, height - texture dimensions, used for the texel-center snapping.
//   xs_and_ys  - device array of n_elements interleaved (x, y) pairs in [0, 1].
//   result     - device output array of n_elements * stride floats.
template <uint32_t stride>
__global__ void eval_image(uint32_t n_elements, hipTextureObject_t texture, bool filter, int width, int height, float* __restrict__ xs_and_ys, float* __restrict__ result) {
	uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n_elements) return; // guard against the grid tail
	uint32_t output_idx = i * stride;
	uint32_t input_idx = i * 2;
	float2 pos = {xs_and_ys[input_idx], xs_and_ys[input_idx+1]};
	if (!filter) {
		// Snap to the nearest texel center so the hardware's linear filter
		// returns the exact texel value.
		pos.x = (roundf(pos.x * width - 0.5f) + 0.5f) / width;
		pos.y = (roundf(pos.y * height - 0.5f) + 0.5f) / height;
	}
	float4 val = tex2D<float4>(texture, pos.x, pos.y);
	result[output_idx + 0] = val.x;
	result[output_idx + 1] = val.y;
	result[output_idx + 2] = val.z;
	// Pad any extra output channels with 1.
	// FIX (idiom): loop variable renamed from `i`, which shadowed the thread
	// index computed above; also use a float literal for the float store.
	for (uint32_t c = 3; c < stride; ++c) {
		result[output_idx + c] = 1.0f;
	}
}
// Benchmark driver: trains and evaluates a tiny-cuda-nn MLP (both the
// "cutlass" and "fully_fused" backends) to memorize an EXR image, sweeping a
// range of batch sizes, and dumps per-configuration training/inference
// throughput to bench_result_ours.json.
//
// Usage: <binary> path-to-image.exr path-to-config.json
// Returns EXIT_SUCCESS; configuration errors exit early with -1.
int main(int argc, char* argv[]) {
	// Tensor-core paths require the CUDA 10.2+ toolchain...
	if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
		std::cout << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
		return -1;
	}
	hipDeviceProp_t props;
	hipError_t error = hipGetDeviceProperties(&props, 0);
	if (error != hipSuccess) {
		std::cout << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
		return -1;
	}
	// ...and a device with compute capability >= 7.5 (Turing).
	if (!((props.major * 10 + props.minor) >= 75)) {
		std::cout << "Turing Tensor Core operations must be run on a machine with compute capability at least 75."
			<< std::endl;
		return -1;
	}
	if (argc < 3) {
		std::cout << "USAGE: " << argv[0] << " " << "path-to-image.exr path-to-config.json" << std::endl;
		std::cout << "Sample EXR files are provided in 'data/images'." << std::endl;
		return 0;
	}
	// First step: load an image that we'd like to learn
	int width, height;
	GPUMemory<float> image = load_image(argv[1], width, height);
	// Second step: create a cuda texture out of this image. It'll be used to generate training data efficiently on the fly
	hipResourceDesc resDesc;
	memset(&resDesc, 0, sizeof(resDesc));
	resDesc.resType = hipResourceTypePitch2D;
	resDesc.res.pitch2D.devPtr = image.data();
	resDesc.res.pitch2D.desc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat);
	resDesc.res.pitch2D.width = width;
	resDesc.res.pitch2D.height = height;
	// Tightly packed rows: 4 floats (RGBA) per pixel.
	resDesc.res.pitch2D.pitchInBytes = width * 4 * sizeof(float);
	hipTextureDesc texDesc;
	memset(&texDesc, 0, sizeof(texDesc));
	texDesc.filterMode = hipFilterModeLinear;
	texDesc.normalizedCoords = true;
	texDesc.addressMode[0] = hipAddressModeClamp;
	texDesc.addressMode[1] = hipAddressModeClamp;
	texDesc.addressMode[2] = hipAddressModeClamp;
	hipResourceViewDesc viewDesc;
	memset(&viewDesc, 0, sizeof(viewDesc));
	viewDesc.format = hipResViewFormatFloat4;
	viewDesc.width = width;
	viewDesc.height = height;
	hipTextureObject_t texture;
	CUDA_CHECK_THROW(hipCreateTextureObject(&texture, &resDesc, &texDesc, &viewDesc));
	// Third step: sample a reference image to dump to disk. Visual comparison of this reference image and the learned
	// function will be eventually possible.
	int sampling_width = 1024;
	int sampling_height = 1024;
	uint32_t n_coords = sampling_width * sampling_height;
	GPUMemory<float> sampled_image(n_coords * 3);
	GPUMemory<float> xs_and_ys(n_coords * 2);
	std::vector<float> host_xs_and_ys(n_coords * 2);
	// Build a regular grid of pixel-center coordinates, normalized to [0, 1].
	for (int y = 0; y < sampling_height; ++y) {
		for (int x = 0; x < sampling_width; ++x) {
			int idx = (y * sampling_width + x) * 2;
			host_xs_and_ys[idx+0] = (float)(x + 0.5) / (float)sampling_width;
			host_xs_and_ys[idx+1] = (float)(y + 0.5) / (float)sampling_height;
		}
	}
	xs_and_ys.copy_from_host(host_xs_and_ys.data());
	bool filter = false;
	hipLaunchKernelGGL(( eval_image<3>), dim3(n_blocks_linear(n_coords)), dim3(n_threads_linear), 0, 0, n_coords, texture, filter, width, height, xs_and_ys.data(), sampled_image.data());
	save_image(sampled_image, sampling_width, sampling_height, 3, 3, "reference.exr");
	// Fourth step: train the model by sampling the above image and optimizing relative squared error using Adam.
	try {
		std::vector<uint32_t> batch_sizes = {1 << 14, 1 << 15, 1 << 16, 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 21};
		std::vector<std::string> methods = {"cutlass", "fully_fused"};
		json bench_result;
		for (std::string method : methods) {
			bench_result[method] = json::array();
			for (uint32_t batch_size : batch_sizes) {
				// Various constants for the network and optimization
				// Iteration count scales inversely with batch size so each run
				// processes roughly the same number of samples (floor 250).
				uint32_t n_iterations = ::max(1000 * (1 << 18) / batch_size, 250u);
				uint32_t n_iterations_warmup = n_iterations / 2;
				const uint32_t num_dims_encoded = 2;
				const uint32_t num_dims_passthrough = 0;
				const uint32_t num_output_dims = 3;
				// Input & corresponding RNG
				// NOTE(review): the generator and stream created below are never
				// destroyed (no hiprandDestroyGenerator/hipStreamDestroy), so they
				// leak once per batch-size iteration.
				GPUMemory<float> batch(batch_size * num_dims_encoded);
				hiprandGenerator_t rng;
				CURAND_CHECK_THROW(hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_DEFAULT));
				CURAND_CHECK_THROW(hiprandSetPseudoRandomGeneratorSeed(rng, 1337ULL));
				hipStream_t inference_stream;
				CUDA_CHECK_THROW(hipStreamCreate(&inference_stream));
				// Training shares the inference stream; all work is serialized on it.
				hipStream_t training_stream = inference_stream;
				CURAND_CHECK_THROW(hiprandSetStream(rng, training_stream));
				std::ifstream f{argv[2]};
				json config = json::parse(f, nullptr, true, /*skip_comments=*/true);
				json encoding_opts = config.value("encoding", json::object());
				std::shared_ptr<Encoding<precision_t>> encoding{create_encoding<precision_t>(num_dims_encoded, num_dims_passthrough, encoding_opts, 16)};
				const uint32_t padded_num_input_dims = encoding->num_encoded_dims();
				// Auxiliary matrices for training
				GPUMatrix<precision_t, MatrixLayout::ColumnMajor> bench_obe_out(padded_num_input_dims, batch_size);
				GPUMatrix<float, MatrixLayout::ColumnMajor> bench_target(num_output_dims, batch_size);
				// Auxiliary matrices for evaluation
				GPUMatrix<precision_t, MatrixLayout::ColumnMajor> eval_obe_out(padded_num_input_dims, n_coords);
				GPUMemory<float> prediction_data(num_output_dims * n_coords);
				GPUMatrix<float, MatrixLayout::ColumnMajor> prediction(prediction_data.data(), num_output_dims, n_coords);
				json loss_opts = config.value("loss", json::object());
				json optimizer_opts = config.value("optimizer", json::object());
				json network_opts = config.value("network", json::object());
				// Select the MLP backend under test for this pass.
				network_opts["otype"] = method == "cutlass" ? "MLP" : "FullyFusedMLP";
				network_opts["n_output_dims"] = num_output_dims;
				network_opts["n_input_dims"] = padded_num_input_dims;
				std::shared_ptr<Loss<precision_t>> loss{create_loss<precision_t>(loss_opts)};
				std::shared_ptr<Optimizer<precision_t>> optimizer{create_optimizer<precision_t>(optimizer_opts)};
				std::shared_ptr<Network<precision_t>> network{create_network<precision_t>(network_opts)};
				auto trainer = std::make_shared<Trainer<precision_t, precision_t>>(network, optimizer, loss);
				std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
				float tmp_loss = 0;
				uint32_t tmp_loss_counter = 0;
				uint32_t print_interval = n_iterations / 10;
				const uint32_t STEPS_INCREMENT = 5;
				double mean_training_throughput = 0;
				size_t mean_counter = 0;
				// ---- Training benchmark: STEPS_INCREMENT steps per outer iteration ----
				for (uint32_t i = 0; i < n_iterations; i += STEPS_INCREMENT) {
					bool print_loss = i % print_interval == 0;
					float loss_value;
					for (uint32_t j = 0; j < STEPS_INCREMENT; ++j) {
						// Compute reference values at random coordinates
						CURAND_CHECK_THROW(hiprandGenerateUniform(rng, batch.data(), batch_size * num_dims_encoded));
						linear_kernel(eval_image<num_output_dims>, 0, training_stream, batch_size, texture, filter, width, height, batch.data(), bench_target.data());
						// Training step
						// Only the last step of the burst requests the loss value.
						float* p_loss = j == (STEPS_INCREMENT - 1) ? &loss_value : nullptr;
						encoding->encode(batch_size, batch.data(), bench_obe_out.data(), training_stream);
						trainer->training_step(training_stream, bench_obe_out, bench_target, p_loss);
					}
					tmp_loss += loss_value;
					++tmp_loss_counter;
					// Debug outputs
					if (print_loss) {
						hipDeviceSynchronize();
						std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
						auto microseconds = std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
						double throughput = print_interval * batch_size / (microseconds / 1000000.0);
						// NOTE(review): the "[s]" label is misleading — the printed value is in microseconds.
						std::cout << "Iteration#" << i << ": " << "loss=" << tmp_loss/tmp_loss_counter << " time=" << microseconds << "[s] thp=" << throughput << "/s" << std::endl;
						begin = end;
						tmp_loss = 0;
						tmp_loss_counter = 0;
						// Only measurements past the warmup phase count towards the mean.
						if (i >= n_iterations_warmup) {
							mean_training_throughput += throughput;
							++mean_counter;
						}
					}
				}
				mean_training_throughput /= mean_counter;
				// Dump learned image for sanity checking
				encoding->encode(n_coords, xs_and_ys.data(), eval_obe_out.data(), inference_stream);
				network->inference(inference_stream, eval_obe_out, prediction);
				save_image(prediction_data, sampling_width, sampling_height, 3, num_output_dims, std::to_string(batch_size) + "-after-" + std::to_string(n_iterations) + "-iters-" + method + ".exr");
				std::cout << "Finished training benchmark. Mean throughput is " << mean_training_throughput << "/s. Waiting 10 seconds for GPU to cool down." << std::endl;
				std::this_thread::sleep_for(std::chrono::seconds{10});
				// Inference benchmark
				CURAND_CHECK_THROW(hiprandSetStream(rng, inference_stream));
				double mean_inference_throughput = 0;
				mean_counter = 0;
				// Inference is much faster than training; run 5x the iterations.
				print_interval *= 5;
				n_iterations *= 5;
				n_iterations_warmup *= 5;
				// NOTE(review): `begin` is not re-initialized here, so the i==0
				// throughput sample includes the sleep above; it is discarded by
				// the warmup filter below.
				for (uint32_t i = 0; i < n_iterations; ++i) {
					bool print_loss = i % print_interval == 0;
					// Compute inference values at random coordinates
					CURAND_CHECK_THROW(hiprandGenerateUniform(rng, batch.data(), batch_size * num_dims_encoded));
					// Inference step
					encoding->encode(batch_size, batch.data(), bench_obe_out.data(), inference_stream);
					network->inference(inference_stream, bench_obe_out, bench_target);
					// Debug outputs
					if (print_loss) {
						hipDeviceSynchronize();
						std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
						auto microseconds = std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
						double throughput = print_interval * batch_size / (microseconds / 1000000.0);
						std::cout << "Iteration#" << i << ": " << "time=" << microseconds << "[s] thp=" << throughput << "/s" << std::endl;
						begin = end;
						if (i >= n_iterations_warmup) {
							mean_inference_throughput += throughput;
							++mean_counter;
						}
					}
				}
				mean_inference_throughput /= mean_counter;
				std::cout << "Finished inference benchmark. Mean throughput is " << mean_inference_throughput << "/s. Waiting 10 seconds for GPU to cool down." << std::endl;
				std::this_thread::sleep_for(std::chrono::seconds{10});
				// Record results for this (method, batch_size) configuration.
				bench_result[method].push_back({
					{"batch_size", batch_size},
					{"training_throughput", mean_training_throughput},
					{"inference_throughput", mean_inference_throughput},
				});
			}
		}
		// Persist all collected measurements as pretty-printed JSON.
		std::string json_string = bench_result.dump(4);
		std::ofstream out{"bench_result_ours.json"};
		out << json_string;
	} catch (std::exception& e) {
		std::cout << "Uncaught exception: " << e.what() << std::endl;
	}
	return EXIT_SUCCESS;
}
| ec7795092d8c3389deaf1775ad4f089630db9a05.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*//*
*/
/** @file bench-ours.cu
* @author Thomas Müller, NVIDIA
* @brief Generates performance data for comparison with TensorFlow.
*/
#include <tiny-cuda-nn/misc_kernels.h>
#include <tiny-cuda-nn/gpu_matrix.h>
#include <tiny-cuda-nn/encodings/oneblob.h>
#include <tiny-cuda-nn/optimizer.h>
#include <tiny-cuda-nn/loss.h>
#include <tiny-cuda-nn/network.h>
#include <tiny-cuda-nn/trainer.h>
#include <tinyexr/tinyexr.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <random>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>
#include <fstream>
using namespace tcnn;
using precision_t = network_precision_t;
// Writes an interleaved float image to an EXR file via tinyexr.
//
// Parameters:
//   data          - host pixel buffer, `channelStride` floats per pixel, row-major.
//   width, height - image dimensions in pixels.
//   nChannels     - number of channels to write (1..4 -> B, G, R, A planes).
//   channelStride - distance in floats between consecutive pixels in `data`
//                   (may exceed nChannels to skip unused components).
//   outfilename   - destination path.
// Returns true on success, false on failure (an error message is printed to stderr).
bool SaveEXR(const float* data, int width, int height, int nChannels, int channelStride, const char* outfilename) {
	EXRHeader header;
	InitEXRHeader(&header);
	EXRImage image;
	InitEXRImage(&image);
	image.num_channels = nChannels;
	// De-interleave the input into one plane per channel.
	std::vector<std::vector<float>> images(nChannels);
	std::vector<float*> image_ptr(nChannels);
	for (int i = 0; i < nChannels; ++i) {
		images[i].resize(width * height);
	}
	// Planes are handed to tinyexr in reverse order to match the (A)BGR
	// channel naming below.
	for (int i = 0; i < nChannels; ++i) {
		image_ptr[i] = images[nChannels - i - 1].data();
	}
	for (size_t i = 0; i < (size_t)width * height; i++) {
		for (int c = 0; c < nChannels; ++c) {
			images[c][i] = data[channelStride*i+c];
		}
	}
	image.images = (unsigned char**)image_ptr.data();
	image.width = width;
	image.height = height;
	header.num_channels = nChannels;
	header.channels = (EXRChannelInfo *)malloc(sizeof(EXRChannelInfo) * header.num_channels);
	// Must be (A)BGR order, since most of EXR viewers expect this channel order.
	strncpy(header.channels[0].name, "B", 255); header.channels[0].name[strlen("B")] = '\0';
	if (nChannels > 1) {
		strncpy(header.channels[1].name, "G", 255); header.channels[1].name[strlen("G")] = '\0';
	}
	if (nChannels > 2) {
		strncpy(header.channels[2].name, "R", 255); header.channels[2].name[strlen("R")] = '\0';
	}
	if (nChannels > 3) {
		strncpy(header.channels[3].name, "A", 255); header.channels[3].name[strlen("A")] = '\0';
	}
	header.pixel_types = (int *)malloc(sizeof(int) * header.num_channels);
	header.requested_pixel_types = (int *)malloc(sizeof(int) * header.num_channels);
	for (int i = 0; i < header.num_channels; i++) {
		header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
		header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // pixel type of output image to be stored in .EXR
	}
	const char* err = NULL; // or nullptr in C++11 or later.
	int ret = SaveEXRImageToFile(&image, &header, outfilename, &err);
	// Release header allocations on every path.
	// BUG FIX: these were previously leaked when SaveEXRImageToFile failed.
	free(header.channels);
	free(header.pixel_types);
	free(header.requested_pixel_types);
	if (ret != TINYEXR_SUCCESS) {
		fprintf(stderr, "Save EXR err: %s\n", err);
		FreeEXRErrorMessage(err); // free's buffer for an error message
		// BUG FIX: previously returned `ret` (a non-zero tinyexr error code),
		// which implicitly converted to `true` and made failures look like success.
		return false;
	}
	printf("Saved exr file. [ %s ] \n", outfilename);
	return true;
}
// Loads an EXR file into GPU memory as an RGBA float buffer.
//
// Parameters:
//   filename      - path to the EXR file.
//   width, height - output parameters, filled with the image dimensions.
// Returns a device buffer of width * height * 4 floats.
// Throws std::runtime_error if decoding fails.
GPUMemory<float> load_image(const std::string& filename, int& width, int& height) {
	// Decode into a host-side buffer (width * height * RGBA).
	float* pixels = nullptr;
	const char* error_msg = nullptr;
	const int status = LoadEXR(&pixels, &width, &height, filename.c_str(), &error_msg);
	if (status != TINYEXR_SUCCESS) {
		std::string message = "Failed to load EXR image";
		if (error_msg) {
			message = std::string("Failed to load EXR image: ") + error_msg;
			FreeEXRErrorMessage(error_msg);
		}
		throw std::runtime_error(message);
	}
	// Upload to the GPU, then release the host copy.
	GPUMemory<float> gpu_pixels(width * height * 4);
	gpu_pixels.copy_from_host(pixels);
	free(pixels);
	return gpu_pixels;
}
// Downloads a device buffer, converts every element to float, and writes it
// to an EXR file via SaveEXR.
//
// Parameters:
//   image          - device buffer holding the pixel data.
//   width, height  - image dimensions in pixels.
//   n_channels     - number of channels to write.
//   channel_stride - floats per pixel in the buffer (>= n_channels).
//   filename       - destination path.
template <typename T>
void save_image(const GPUMemory<T>& image, int width, int height, int n_channels, int channel_stride, const std::string& filename) {
	// Bring the data back to the host.
	std::vector<T> raw(image.get_num_elements());
	image.copy_to_host(raw.data());
	// Convert element-wise to float for the EXR writer.
	std::vector<float> converted;
	converted.reserve(raw.size());
	for (const T& value : raw) {
		converted.push_back((float)value);
	}
	SaveEXR(converted.data(), width, height, n_channels, channel_stride, filename.c_str());
}
// Samples `texture` at `n_elements` normalized 2D coordinates and writes RGB
// into `result` with `stride` floats per sample; channels beyond the third
// are padded with 1.
//
// Parameters:
//   n_elements - number of coordinate pairs to evaluate.
//   texture    - 2D float4 texture to sample.
//   filter     - if false, coordinates are snapped to the nearest texel center,
//                effectively disabling bilinear filtering.
//   width, height - texture dimensions, used for the texel-center snapping.
//   xs_and_ys  - device array of n_elements interleaved (x, y) pairs in [0, 1].
//   result     - device output array of n_elements * stride floats.
template <uint32_t stride>
__global__ void eval_image(uint32_t n_elements, cudaTextureObject_t texture, bool filter, int width, int height, float* __restrict__ xs_and_ys, float* __restrict__ result) {
	uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n_elements) return; // guard against the grid tail
	uint32_t output_idx = i * stride;
	uint32_t input_idx = i * 2;
	float2 pos = {xs_and_ys[input_idx], xs_and_ys[input_idx+1]};
	if (!filter) {
		// Snap to the nearest texel center so the hardware's linear filter
		// returns the exact texel value.
		pos.x = (roundf(pos.x * width - 0.5f) + 0.5f) / width;
		pos.y = (roundf(pos.y * height - 0.5f) + 0.5f) / height;
	}
	float4 val = tex2D<float4>(texture, pos.x, pos.y);
	result[output_idx + 0] = val.x;
	result[output_idx + 1] = val.y;
	result[output_idx + 2] = val.z;
	// Pad any extra output channels with 1.
	// FIX (idiom): loop variable renamed from `i`, which shadowed the thread
	// index computed above; also use a float literal for the float store.
	for (uint32_t c = 3; c < stride; ++c) {
		result[output_idx + c] = 1.0f;
	}
}
int main(int argc, char* argv[]) {
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cout << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
return -1;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cout << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 75)) {
std::cout << "Turing Tensor Core operations must be run on a machine with compute capability at least 75."
<< std::endl;
return -1;
}
if (argc < 3) {
std::cout << "USAGE: " << argv[0] << " " << "path-to-image.exr path-to-config.json" << std::endl;
std::cout << "Sample EXR files are provided in 'data/images'." << std::endl;
return 0;
}
// First step: load an image that we'd like to learn
int width, height;
GPUMemory<float> image = load_image(argv[1], width, height);
// Second step: create a cuda texture out of this image. It'll be used to generate training data efficiently on the fly
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = image.data();
resDesc.res.pitch2D.desc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);
resDesc.res.pitch2D.width = width;
resDesc.res.pitch2D.height = height;
resDesc.res.pitch2D.pitchInBytes = width * 4 * sizeof(float);
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.filterMode = cudaFilterModeLinear;
texDesc.normalizedCoords = true;
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.addressMode[2] = cudaAddressModeClamp;
cudaResourceViewDesc viewDesc;
memset(&viewDesc, 0, sizeof(viewDesc));
viewDesc.format = cudaResViewFormatFloat4;
viewDesc.width = width;
viewDesc.height = height;
cudaTextureObject_t texture;
CUDA_CHECK_THROW(cudaCreateTextureObject(&texture, &resDesc, &texDesc, &viewDesc));
// Third step: sample a reference image to dump to disk. Visual comparison of this reference image and the learned
// function will be eventually possible.
int sampling_width = 1024;
int sampling_height = 1024;
uint32_t n_coords = sampling_width * sampling_height;
GPUMemory<float> sampled_image(n_coords * 3);
GPUMemory<float> xs_and_ys(n_coords * 2);
std::vector<float> host_xs_and_ys(n_coords * 2);
for (int y = 0; y < sampling_height; ++y) {
for (int x = 0; x < sampling_width; ++x) {
int idx = (y * sampling_width + x) * 2;
host_xs_and_ys[idx+0] = (float)(x + 0.5) / (float)sampling_width;
host_xs_and_ys[idx+1] = (float)(y + 0.5) / (float)sampling_height;
}
}
xs_and_ys.copy_from_host(host_xs_and_ys.data());
bool filter = false;
eval_image<3><<<n_blocks_linear(n_coords), n_threads_linear>>>(n_coords, texture, filter, width, height, xs_and_ys.data(), sampled_image.data());
save_image(sampled_image, sampling_width, sampling_height, 3, 3, "reference.exr");
// Fourth step: train the model by sampling the above image and optimizing relative squared error using Adam.
try {
std::vector<uint32_t> batch_sizes = {1 << 14, 1 << 15, 1 << 16, 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 21};
std::vector<std::string> methods = {"cutlass", "fully_fused"};
json bench_result;
for (std::string method : methods) {
bench_result[method] = json::array();
for (uint32_t batch_size : batch_sizes) {
// Various constants for the network and optimization
uint32_t n_iterations = std::max(1000 * (1 << 18) / batch_size, 250u);
uint32_t n_iterations_warmup = n_iterations / 2;
const uint32_t num_dims_encoded = 2;
const uint32_t num_dims_passthrough = 0;
const uint32_t num_output_dims = 3;
// Input & corresponding RNG
GPUMemory<float> batch(batch_size * num_dims_encoded);
curandGenerator_t rng;
CURAND_CHECK_THROW(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT));
CURAND_CHECK_THROW(curandSetPseudoRandomGeneratorSeed(rng, 1337ULL));
cudaStream_t inference_stream;
CUDA_CHECK_THROW(cudaStreamCreate(&inference_stream));
cudaStream_t training_stream = inference_stream;
CURAND_CHECK_THROW(curandSetStream(rng, training_stream));
std::ifstream f{argv[2]};
json config = json::parse(f, nullptr, true, /*skip_comments=*/true);
json encoding_opts = config.value("encoding", json::object());
std::shared_ptr<Encoding<precision_t>> encoding{create_encoding<precision_t>(num_dims_encoded, num_dims_passthrough, encoding_opts, 16)};
const uint32_t padded_num_input_dims = encoding->num_encoded_dims();
// Auxiliary matrices for training
GPUMatrix<precision_t, MatrixLayout::ColumnMajor> bench_obe_out(padded_num_input_dims, batch_size);
GPUMatrix<float, MatrixLayout::ColumnMajor> bench_target(num_output_dims, batch_size);
// Auxiliary matrices for evaluation
GPUMatrix<precision_t, MatrixLayout::ColumnMajor> eval_obe_out(padded_num_input_dims, n_coords);
GPUMemory<float> prediction_data(num_output_dims * n_coords);
GPUMatrix<float, MatrixLayout::ColumnMajor> prediction(prediction_data.data(), num_output_dims, n_coords);
json loss_opts = config.value("loss", json::object());
json optimizer_opts = config.value("optimizer", json::object());
json network_opts = config.value("network", json::object());
network_opts["otype"] = method == "cutlass" ? "MLP" : "FullyFusedMLP";
network_opts["n_output_dims"] = num_output_dims;
network_opts["n_input_dims"] = padded_num_input_dims;
std::shared_ptr<Loss<precision_t>> loss{create_loss<precision_t>(loss_opts)};
std::shared_ptr<Optimizer<precision_t>> optimizer{create_optimizer<precision_t>(optimizer_opts)};
std::shared_ptr<Network<precision_t>> network{create_network<precision_t>(network_opts)};
auto trainer = std::make_shared<Trainer<precision_t, precision_t>>(network, optimizer, loss);
std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
float tmp_loss = 0;
uint32_t tmp_loss_counter = 0;
uint32_t print_interval = n_iterations / 10;
const uint32_t STEPS_INCREMENT = 5;
double mean_training_throughput = 0;
size_t mean_counter = 0;
for (uint32_t i = 0; i < n_iterations; i += STEPS_INCREMENT) {
bool print_loss = i % print_interval == 0;
float loss_value;
for (uint32_t j = 0; j < STEPS_INCREMENT; ++j) {
// Compute reference values at random coordinates
CURAND_CHECK_THROW(curandGenerateUniform(rng, batch.data(), batch_size * num_dims_encoded));
linear_kernel(eval_image<num_output_dims>, 0, training_stream, batch_size, texture, filter, width, height, batch.data(), bench_target.data());
// Training step
float* p_loss = j == (STEPS_INCREMENT - 1) ? &loss_value : nullptr;
encoding->encode(batch_size, batch.data(), bench_obe_out.data(), training_stream);
trainer->training_step(training_stream, bench_obe_out, bench_target, p_loss);
}
tmp_loss += loss_value;
++tmp_loss_counter;
// Debug outputs
if (print_loss) {
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
auto microseconds = std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
double throughput = print_interval * batch_size / (microseconds / 1000000.0);
std::cout << "Iteration#" << i << ": " << "loss=" << tmp_loss/tmp_loss_counter << " time=" << microseconds << "[µs] thp=" << throughput << "/s" << std::endl;
begin = end;
tmp_loss = 0;
tmp_loss_counter = 0;
if (i >= n_iterations_warmup) {
mean_training_throughput += throughput;
++mean_counter;
}
}
}
mean_training_throughput /= mean_counter;
// Dump learned image for sanity checking
encoding->encode(n_coords, xs_and_ys.data(), eval_obe_out.data(), inference_stream);
network->inference(inference_stream, eval_obe_out, prediction);
save_image(prediction_data, sampling_width, sampling_height, 3, num_output_dims, std::to_string(batch_size) + "-after-" + std::to_string(n_iterations) + "-iters-" + method + ".exr");
std::cout << "Finished training benchmark. Mean throughput is " << mean_training_throughput << "/s. Waiting 10 seconds for GPU to cool down." << std::endl;
std::this_thread::sleep_for(std::chrono::seconds{10});
// Inference benchmark
CURAND_CHECK_THROW(curandSetStream(rng, inference_stream));
double mean_inference_throughput = 0;
mean_counter = 0;
print_interval *= 5;
n_iterations *= 5;
n_iterations_warmup *= 5;
for (uint32_t i = 0; i < n_iterations; ++i) {
bool print_loss = i % print_interval == 0;
// Compute inference values at random coordinates
CURAND_CHECK_THROW(curandGenerateUniform(rng, batch.data(), batch_size * num_dims_encoded));
// Inference step
encoding->encode(batch_size, batch.data(), bench_obe_out.data(), inference_stream);
network->inference(inference_stream, bench_obe_out, bench_target);
// Debug outputs
if (print_loss) {
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
auto microseconds = std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
double throughput = print_interval * batch_size / (microseconds / 1000000.0);
std::cout << "Iteration#" << i << ": " << "time=" << microseconds << "[µs] thp=" << throughput << "/s" << std::endl;
begin = end;
if (i >= n_iterations_warmup) {
mean_inference_throughput += throughput;
++mean_counter;
}
}
}
mean_inference_throughput /= mean_counter;
std::cout << "Finished inference benchmark. Mean throughput is " << mean_inference_throughput << "/s. Waiting 10 seconds for GPU to cool down." << std::endl;
std::this_thread::sleep_for(std::chrono::seconds{10});
bench_result[method].push_back({
{"batch_size", batch_size},
{"training_throughput", mean_training_throughput},
{"inference_throughput", mean_inference_throughput},
});
}
}
std::string json_string = bench_result.dump(4);
std::ofstream out{"bench_result_ours.json"};
out << json_string;
} catch (std::exception& e) {
std::cout << "Uncaught exception: " << e.what() << std::endl;
}
return EXIT_SUCCESS;
}
|
da66c29fba9d09b7f5dfcb0f07ea214016602ff8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2022, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/stop/residual_norm_kernels.hpp"
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/stop/residual_norm.hpp>
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/thread_ids.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Residual norm stopping criterion namespace.
* @ref resnorm
* @ingroup resnorm
*/
namespace residual_norm {
constexpr int default_block_size = 512;
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void residual_norm_kernel(
    size_type num_cols, ValueType rel_residual_goal,
    const ValueType* __restrict__ tau, const ValueType* __restrict__ orig_tau,
    uint8 stoppingId, bool setFinalized,
    stopping_status* __restrict__ stop_status,
    bool* __restrict__ device_storage)
{
    // One thread per right-hand-side column: mark the column converged when
    // its residual norm tau has dropped below rel_residual_goal times the
    // initial norm orig_tau; otherwise record that at least one column is
    // still running.
    //
    // device_storage layout: [0] = all_converged, [1] = one_changed.
    const auto col = thread::get_thread_id_flat();
    if (col >= num_cols) {
        return;
    }
    if (tau[col] < rel_residual_goal * orig_tau[col]) {
        stop_status[col].converge(stoppingId, setFinalized);
        device_storage[1] = true;
    } else if (!stop_status[col].has_stopped()) {
        // Only the value `false` is ever written here, so concurrent writes
        // from several threads cannot produce an inconsistent result.
        device_storage[0] = false;
    }
}
// Resets the two-flag convergence scratchpad before each convergence check:
// slot 0 ("all_converged") starts optimistic, slot 1 ("one_changed") starts
// pessimistic; the check kernel only ever flips these values. The two stores
// are independent, single-thread writes.
__global__ __launch_bounds__(1) void init_kernel(
    bool* __restrict__ device_storage)
{
    device_storage[1] = false;
    device_storage[0] = true;
}
// Host driver for the (explicit) residual-norm stopping criterion: resets the
// two device-side flags, runs the per-column convergence check, then copies
// the reduced results back to the host.
//
// device_storage layout: [0] = all_converged, [1] = one_changed.
// NOTE(review): both launches use the default stream rather than a stream
// obtained from `exec` -- confirm this matches the executor's stream model.
template <typename ValueType>
void residual_norm(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType>* tau,
const matrix::Dense<ValueType>* orig_tau,
ValueType rel_residual_goal, uint8 stoppingId,
bool setFinalized, array<stopping_status>* stop_status,
array<bool>* device_storage, bool* all_converged,
bool* one_changed)
{
// Complex types must use the implicit variant; this one compares tau
// directly, which requires a real-valued ordering.
static_assert(is_complex_s<ValueType>::value == false,
"ValueType must not be complex in this function!");
hipLaunchKernelGGL(( init_kernel), dim3(1), dim3(1), 0, 0, as_cuda_type(device_storage->get_data()));
const auto block_size = default_block_size;
// One thread per RHS column (tau has one entry per column).
const auto grid_size = ceildiv(tau->get_size()[1], block_size);
if (grid_size > 0) {
hipLaunchKernelGGL(( residual_norm_kernel), dim3(grid_size), dim3(block_size), 0, 0,
tau->get_size()[1], rel_residual_goal,
as_cuda_type(tau->get_const_values()),
as_cuda_type(orig_tau->get_const_values()), stoppingId,
setFinalized, as_cuda_type(stop_status->get_data()),
as_cuda_type(device_storage->get_data()));
}
/* Represents all_converged, one_changed */
// copy_val_to_host synchronizes, so the kernel results are visible here.
*all_converged = exec->copy_val_to_host(device_storage->get_const_data());
*one_changed = exec->copy_val_to_host(device_storage->get_const_data() + 1);
}
GKO_INSTANTIATE_FOR_EACH_NON_COMPLEX_VALUE_TYPE(
GKO_DECLARE_RESIDUAL_NORM_KERNEL);
} // namespace residual_norm
/**
* @brief The Implicit Residual norm stopping criterion.
* @ref implicit_resnorm
* @ingroup resnorm
*/
namespace implicit_residual_norm {
constexpr int default_block_size = 512;
template <typename ValueType>
__global__
__launch_bounds__(default_block_size) void implicit_residual_norm_kernel(
    size_type num_cols, remove_complex<ValueType> rel_residual_goal,
    const ValueType* __restrict__ tau,
    const remove_complex<ValueType>* __restrict__ orig_tau,
    uint8 stoppingId, bool setFinalized,
    stopping_status* __restrict__ stop_status,
    bool* __restrict__ device_storage)
{
    // One thread per right-hand-side column. The implicit criterion stores a
    // squared residual quantity in tau, hence the sqrt(abs(.)) before the
    // comparison against the relative goal.
    //
    // device_storage layout: [0] = all_converged, [1] = one_changed.
    const auto col = thread::get_thread_id_flat();
    if (col >= num_cols) {
        return;
    }
    if (sqrt(abs(tau[col])) < rel_residual_goal * orig_tau[col]) {
        stop_status[col].converge(stoppingId, setFinalized);
        device_storage[1] = true;
    } else if (!stop_status[col].has_stopped()) {
        // Only the value `false` is ever written here, so concurrent writes
        // from several threads cannot produce an inconsistent result.
        device_storage[0] = false;
    }
}
// Resets the two-flag convergence scratchpad before each convergence check:
// slot 0 ("all_converged") starts optimistic, slot 1 ("one_changed") starts
// pessimistic; the check kernel only ever flips these values. The two stores
// are independent, single-thread writes.
__global__ __launch_bounds__(1) void init_kernel(
    bool* __restrict__ device_storage)
{
    device_storage[1] = false;
    device_storage[0] = true;
}
// Host driver for the implicit residual-norm stopping criterion (tau holds a
// squared residual quantity, possibly complex): resets the two device-side
// flags, runs the per-column convergence check, then copies the reduced
// results back to the host.
//
// device_storage layout: [0] = all_converged, [1] = one_changed.
// NOTE(review): both launches use the default stream rather than a stream
// obtained from `exec` -- confirm this matches the executor's stream model.
template <typename ValueType>
void implicit_residual_norm(
std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType>* tau,
const matrix::Dense<remove_complex<ValueType>>* orig_tau,
remove_complex<ValueType> rel_residual_goal, uint8 stoppingId,
bool setFinalized, array<stopping_status>* stop_status,
array<bool>* device_storage, bool* all_converged, bool* one_changed)
{
hipLaunchKernelGGL(( init_kernel), dim3(1), dim3(1), 0, 0, as_cuda_type(device_storage->get_data()));
const auto block_size = default_block_size;
// One thread per RHS column (tau has one entry per column).
const auto grid_size = ceildiv(tau->get_size()[1], block_size);
if (grid_size > 0) {
hipLaunchKernelGGL(( implicit_residual_norm_kernel), dim3(grid_size), dim3(block_size), 0, 0,
tau->get_size()[1], rel_residual_goal,
as_cuda_type(tau->get_const_values()),
as_cuda_type(orig_tau->get_const_values()), stoppingId,
setFinalized, as_cuda_type(stop_status->get_data()),
as_cuda_type(device_storage->get_data()));
}
/* Represents all_converged, one_changed */
// copy_val_to_host synchronizes, so the kernel results are visible here.
*all_converged = exec->copy_val_to_host(device_storage->get_const_data());
*one_changed = exec->copy_val_to_host(device_storage->get_const_data() + 1);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IMPLICIT_RESIDUAL_NORM_KERNEL);
} // namespace implicit_residual_norm
} // namespace cuda
} // namespace kernels
} // namespace gko
| da66c29fba9d09b7f5dfcb0f07ea214016602ff8.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2022, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/stop/residual_norm_kernels.hpp"
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/stop/residual_norm.hpp>
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/thread_ids.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Residual norm stopping criterion namespace.
* @ref resnorm
* @ingroup resnorm
*/
namespace residual_norm {
constexpr int default_block_size = 512;
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void residual_norm_kernel(
    size_type num_cols, ValueType rel_residual_goal,
    const ValueType* __restrict__ tau, const ValueType* __restrict__ orig_tau,
    uint8 stoppingId, bool setFinalized,
    stopping_status* __restrict__ stop_status,
    bool* __restrict__ device_storage)
{
    // One thread per right-hand-side column: mark the column converged when
    // its residual norm tau has dropped below rel_residual_goal times the
    // initial norm orig_tau; otherwise record that at least one column is
    // still running.
    //
    // device_storage layout: [0] = all_converged, [1] = one_changed.
    const auto col = thread::get_thread_id_flat();
    if (col >= num_cols) {
        return;
    }
    if (tau[col] < rel_residual_goal * orig_tau[col]) {
        stop_status[col].converge(stoppingId, setFinalized);
        device_storage[1] = true;
    } else if (!stop_status[col].has_stopped()) {
        // Only the value `false` is ever written here, so concurrent writes
        // from several threads cannot produce an inconsistent result.
        device_storage[0] = false;
    }
}
// Resets the two-flag convergence scratchpad before each convergence check:
// slot 0 ("all_converged") starts optimistic, slot 1 ("one_changed") starts
// pessimistic; the check kernel only ever flips these values. The two stores
// are independent, single-thread writes.
__global__ __launch_bounds__(1) void init_kernel(
    bool* __restrict__ device_storage)
{
    device_storage[1] = false;
    device_storage[0] = true;
}
// Host driver for the (explicit) residual-norm stopping criterion: resets the
// two device-side flags, runs the per-column convergence check, then copies
// the reduced results back to the host.
//
// device_storage layout: [0] = all_converged, [1] = one_changed.
// NOTE(review): both launches use the default stream rather than a stream
// obtained from `exec` -- confirm this matches the executor's stream model.
template <typename ValueType>
void residual_norm(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType>* tau,
const matrix::Dense<ValueType>* orig_tau,
ValueType rel_residual_goal, uint8 stoppingId,
bool setFinalized, array<stopping_status>* stop_status,
array<bool>* device_storage, bool* all_converged,
bool* one_changed)
{
// Complex types must use the implicit variant; this one compares tau
// directly, which requires a real-valued ordering.
static_assert(is_complex_s<ValueType>::value == false,
"ValueType must not be complex in this function!");
init_kernel<<<1, 1>>>(as_cuda_type(device_storage->get_data()));
const auto block_size = default_block_size;
// One thread per RHS column (tau has one entry per column).
const auto grid_size = ceildiv(tau->get_size()[1], block_size);
if (grid_size > 0) {
residual_norm_kernel<<<grid_size, block_size>>>(
tau->get_size()[1], rel_residual_goal,
as_cuda_type(tau->get_const_values()),
as_cuda_type(orig_tau->get_const_values()), stoppingId,
setFinalized, as_cuda_type(stop_status->get_data()),
as_cuda_type(device_storage->get_data()));
}
/* Represents all_converged, one_changed */
// copy_val_to_host synchronizes, so the kernel results are visible here.
*all_converged = exec->copy_val_to_host(device_storage->get_const_data());
*one_changed = exec->copy_val_to_host(device_storage->get_const_data() + 1);
}
GKO_INSTANTIATE_FOR_EACH_NON_COMPLEX_VALUE_TYPE(
GKO_DECLARE_RESIDUAL_NORM_KERNEL);
} // namespace residual_norm
/**
* @brief The Implicit Residual norm stopping criterion.
* @ref implicit_resnorm
* @ingroup resnorm
*/
namespace implicit_residual_norm {
constexpr int default_block_size = 512;
template <typename ValueType>
__global__
__launch_bounds__(default_block_size) void implicit_residual_norm_kernel(
    size_type num_cols, remove_complex<ValueType> rel_residual_goal,
    const ValueType* __restrict__ tau,
    const remove_complex<ValueType>* __restrict__ orig_tau,
    uint8 stoppingId, bool setFinalized,
    stopping_status* __restrict__ stop_status,
    bool* __restrict__ device_storage)
{
    // One thread per right-hand-side column. The implicit criterion stores a
    // squared residual quantity in tau, hence the sqrt(abs(.)) before the
    // comparison against the relative goal.
    //
    // device_storage layout: [0] = all_converged, [1] = one_changed.
    const auto col = thread::get_thread_id_flat();
    if (col >= num_cols) {
        return;
    }
    if (sqrt(abs(tau[col])) < rel_residual_goal * orig_tau[col]) {
        stop_status[col].converge(stoppingId, setFinalized);
        device_storage[1] = true;
    } else if (!stop_status[col].has_stopped()) {
        // Only the value `false` is ever written here, so concurrent writes
        // from several threads cannot produce an inconsistent result.
        device_storage[0] = false;
    }
}
// Resets the two-flag convergence scratchpad before each convergence check:
// slot 0 ("all_converged") starts optimistic, slot 1 ("one_changed") starts
// pessimistic; the check kernel only ever flips these values. The two stores
// are independent, single-thread writes.
__global__ __launch_bounds__(1) void init_kernel(
    bool* __restrict__ device_storage)
{
    device_storage[1] = false;
    device_storage[0] = true;
}
// Host driver for the implicit residual-norm stopping criterion (tau holds a
// squared residual quantity, possibly complex): resets the two device-side
// flags, runs the per-column convergence check, then copies the reduced
// results back to the host.
//
// device_storage layout: [0] = all_converged, [1] = one_changed.
// NOTE(review): both launches use the default stream rather than a stream
// obtained from `exec` -- confirm this matches the executor's stream model.
template <typename ValueType>
void implicit_residual_norm(
std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType>* tau,
const matrix::Dense<remove_complex<ValueType>>* orig_tau,
remove_complex<ValueType> rel_residual_goal, uint8 stoppingId,
bool setFinalized, array<stopping_status>* stop_status,
array<bool>* device_storage, bool* all_converged, bool* one_changed)
{
init_kernel<<<1, 1>>>(as_cuda_type(device_storage->get_data()));
const auto block_size = default_block_size;
// One thread per RHS column (tau has one entry per column).
const auto grid_size = ceildiv(tau->get_size()[1], block_size);
if (grid_size > 0) {
implicit_residual_norm_kernel<<<grid_size, block_size>>>(
tau->get_size()[1], rel_residual_goal,
as_cuda_type(tau->get_const_values()),
as_cuda_type(orig_tau->get_const_values()), stoppingId,
setFinalized, as_cuda_type(stop_status->get_data()),
as_cuda_type(device_storage->get_data()));
}
/* Represents all_converged, one_changed */
// copy_val_to_host synchronizes, so the kernel results are visible here.
*all_converged = exec->copy_val_to_host(device_storage->get_const_data());
*one_changed = exec->copy_val_to_host(device_storage->get_const_data() + 1);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IMPLICIT_RESIDUAL_NORM_KERNEL);
} // namespace implicit_residual_norm
} // namespace cuda
} // namespace kernels
} // namespace gko
|
1e31a81c48fcaf50567dac6a3626ffa5b2fd5d7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHAtomics.cuh"
#include "THHTensor.hpp"
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
#define START_IND(a,b,c) (int)floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
// 5d tensor B x D x T x H x W
/*
* Description:
* this function adaptively maxpools an input 4D tensor along dimensions 2 and 3
* 4D input, 4D output, 4D argmax x and y
*/
/*
 * Forward pass of volumetric adaptive max pooling.
 *
 * Launch layout (inferred from the index math below): blockIdx.x + offsetZ
 * selects one (slice, output-frame) plane; gridDim.y * blockDim.y threads
 * stride over output rows and blockDim.x threads stride over output columns.
 * Each thread scans its kT x kH x kW input window (bounds via START_IND /
 * END_IND) and writes the max value plus the flattened argmax index
 * (t*isizeH*isizeW + h*isizeW + w, shifted by TH_INDEX_BASE).
 */
template <typename T>
__global__ void cunn_VolumetricAdaptiveMaxPooling_updateOutput_kernel(
T *input, T *output, THCIndex_t *indices,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW,
int64_t istrideD,
int64_t istrideT, int64_t istrideH, int64_t istrideW,
int64_t offsetZ)
{
// iterators on output pixels
int ot, oh, ow;
// compute offsets based on thread/block ID
int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
int oendH = osizeH;
int ostepH = gridDim.y * blockDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
int ostepW = blockDim.x;
// select output plane
int64_t o_plane = blockIdx.x + offsetZ;
ot = o_plane % osizeT; // output frame/time
int d = o_plane / osizeT; // slice/feature
// input frame/time range is fixed for the whole plane.
int istartT = START_IND(ot, osizeT, isizeT);
int iendT = END_IND(ot, osizeT, isizeT);
int kT = iendT - istartT;
// input offset by slice/feature and earliest relevant frame/time
T *input_dt = input + d*istrideD + istartT*istrideT;
// output offset by slice/feature and frame/time
T *output_dt = output + o_plane*osizeH*osizeW;
// indices offset by slice/feature and frame/time
THCIndex_t *indices_dt = indices + o_plane*osizeH*osizeW;
// For all output pixels...
for(oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = START_IND(oh, osizeH, isizeH);
int iendH = END_IND(oh, osizeH, isizeH);
int kH = iendH - istartH;
for(ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = START_IND(ow, osizeW, isizeW);
int iendW = END_IND(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the max pooling over the corresponding input pixels.
T *ptr_input = input_dt + istartH*istrideH + istartW*istrideW;
T *ptr_output = output_dt + oh*osizeW + ow;
THCIndex_t *ptr_ind = indices_dt + oh*osizeW + ow;
// NOTE(review): if a window were empty (kT/kH/kW == 0) argmax would stay
// -1 and max would stay the sentinel minimum; presumably the host side
// guarantees non-empty windows -- confirm.
int64_t argmax = -1;
T max = THCNumerics<T>::min();
int it, ih, iw;
for(it = 0; it < kT; ++it) {
for(ih = 0; ih < kH; ++ih) {
for(iw = 0; iw < kW; ++iw) {
T val = ptr_input[ih*istrideH + iw*istrideW];
if (val > max) {
max = val;
argmax = (it+istartT)*isizeH*isizeW + (ih+istartH)*isizeW + iw+istartW;
}
}
}
// advance to the next input frame; ptr_input deliberately accumulates
// istrideT once per `it` iteration
ptr_input += istrideT; // next input frame
}
// Update output and argmax (TH_INDEX_BASE shifts to the Torch indexing base)
*ptr_output = max;
*ptr_ind = argmax + TH_INDEX_BASE;
}
}
}
/*
* Description:
* This function computes the gradInput from gradOutput.
*
* gridDim.y blocks work together on a single 2D output plane specified by
* (blockIdx.x + offsetZ).
*
* Assumes that input size can be perfectly divided by output size, i.e.
* each input pixel can only be argmax of one output pixel.
*/
/*
 * Backward pass of volumetric adaptive max pooling (non-atomic variant).
 *
 * gridDim.y blocks work together on a single 2D output plane specified by
 * (blockIdx.x + offsetZ); each output pixel routes its gradient to the input
 * pixel recorded as its argmax in the forward pass.
 *
 * Assumes that input size can be perfectly divided by output size, i.e.
 * each input pixel can only be argmax of one output pixel, so the plain
 * `+=` below is race-free (otherwise use the atomic variant).
 */
template <typename T>
__global__ void cunn_VolumetricAdaptiveMaxPooling_updateGradInput_kernel(
  T *gradInput, T *gradOutput, THCIndex_t *indices,
  int isizeT, int isizeH, int isizeW,
  int osizeT, int osizeH, int osizeW,
  int64_t offsetZ
)
{
  // iterators on output pixels
  int oh, ow;
  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;
  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  int d = o_plane / osizeT; // output slice/feature
  // gradInput offset by slice/feature
  T *gradInput_d = gradInput + d*isizeT*isizeH*isizeW;
  // gradOutput offset by slice/feature and frame/time
  T *gradOutput_dt = gradOutput + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/time
  THCIndex_t *indices_dt = indices + o_plane*osizeH*osizeW;
  // For all output pixels...
  for(oh = ostartH; oh < oendH; oh += ostepH) {
    for(ow = ostartW; ow < oendW; ow += ostepW) {
      // Compute the gradients for the argmax input pixel
      T *ptr_gradOutput = gradOutput_dt + oh*osizeW + ow;
      THCIndex_t *ptr_ind = indices_dt + oh*osizeW + ow;
      T grad_delta = *ptr_gradOutput;
      // Fix: the stored flat argmax is a 64-bit THCIndex_t and can exceed
      // INT_MAX for large inputs; use int64_t (as the atomic variant of this
      // kernel already does) to avoid truncation/overflow.
      int64_t argmax = (*ptr_ind) - TH_INDEX_BASE;
      gradInput_d[argmax] += grad_delta;
    }
  }
}
/*
* Description:
* This function computes the gradInput from gradOutput.
*
* gridDim.y blocks work together on a single 2D output plane specified by
* (blockIdx.x + offsetZ).
*
* Uses atomic add.
*/
/*
 * Backward pass of volumetric adaptive max pooling, atomic variant.
 *
 * gridDim.y blocks work together on a single 2D output plane specified by
 * (blockIdx.x + offsetZ). Several output pixels may share the same argmax
 * input pixel, so the accumulation into gradInput uses atomicAdd.
 */
template <typename T>
__global__ void cunn_atomic_VolumetricAdaptiveMaxPooling_updateGradInput_kernel(
  T *gradInput, T *gradOutput, THCIndex_t *indices,
  int isizeT, int isizeH, int isizeW,
  int osizeT, int osizeH, int osizeW,
  int64_t offsetZ
)
{
  // Thread layout: gridDim.y * blockDim.y threads stride over output rows,
  // blockDim.x threads stride over output columns.
  const int row_start = blockIdx.y * blockDim.y + threadIdx.y;
  const int row_step = gridDim.y * blockDim.y;
  const int col_start = threadIdx.x;
  const int col_step = blockDim.x;
  // Plane selection: one (slice, output-frame) plane per blockIdx.x.
  const int64_t plane = blockIdx.x + offsetZ;
  const int slice = plane / osizeT; // output slice/feature
  // Base pointers for this slice/plane.
  T *grad_in_slice = gradInput + slice * isizeT * isizeH * isizeW;
  T *grad_out_plane = gradOutput + plane * osizeH * osizeW;
  THCIndex_t *ind_plane = indices + plane * osizeH * osizeW;
  for (int row = row_start; row < osizeH; row += row_step) {
    for (int col = col_start; col < osizeW; col += col_step) {
      // Route this output pixel's gradient to its recorded argmax input
      // pixel (index shifted back from the Torch indexing base).
      const int off = row * osizeW + col;
      const T delta = grad_out_plane[off];
      const int64_t argmax = ind_plane[off] - TH_INDEX_BASE;
      atomicAdd(&(grad_in_slice[argmax]), delta);
    }
  }
}
#include "generic/VolumetricAdaptiveMaxPooling.cu"
#include "THHGenerateFloatTypes.h"
#undef CUDA_MAX_THREADS
| 1e31a81c48fcaf50567dac6a3626ffa5b2fd5d7b.cu | #include "THCUNN.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCAtomics.cuh"
#include "THCTensor.hpp"
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
#define START_IND(a,b,c) (int)floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
// 5d tensor B x D x T x H x W
/*
* Description:
* this function adaptively maxpools an input 4D tensor along dimensions 2 and 3
* 4D input, 4D output, 4D argmax x and y
*/
/*
 * Forward pass of volumetric adaptive max pooling.
 *
 * Launch layout (inferred from the index math below): blockIdx.x + offsetZ
 * selects one (slice, output-frame) plane; gridDim.y * blockDim.y threads
 * stride over output rows and blockDim.x threads stride over output columns.
 * Each thread scans its kT x kH x kW input window (bounds via START_IND /
 * END_IND) and writes the max value plus the flattened argmax index
 * (t*isizeH*isizeW + h*isizeW + w, shifted by TH_INDEX_BASE).
 */
template <typename T>
__global__ void cunn_VolumetricAdaptiveMaxPooling_updateOutput_kernel(
T *input, T *output, THCIndex_t *indices,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW,
int64_t istrideD,
int64_t istrideT, int64_t istrideH, int64_t istrideW,
int64_t offsetZ)
{
// iterators on output pixels
int ot, oh, ow;
// compute offsets based on thread/block ID
int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
int oendH = osizeH;
int ostepH = gridDim.y * blockDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
int ostepW = blockDim.x;
// select output plane
int64_t o_plane = blockIdx.x + offsetZ;
ot = o_plane % osizeT; // output frame/time
int d = o_plane / osizeT; // slice/feature
// input frame/time range is fixed for the whole plane.
int istartT = START_IND(ot, osizeT, isizeT);
int iendT = END_IND(ot, osizeT, isizeT);
int kT = iendT - istartT;
// input offset by slice/feature and earliest relevant frame/time
T *input_dt = input + d*istrideD + istartT*istrideT;
// output offset by slice/feature and frame/time
T *output_dt = output + o_plane*osizeH*osizeW;
// indices offset by slice/feature and frame/time
THCIndex_t *indices_dt = indices + o_plane*osizeH*osizeW;
// For all output pixels...
for(oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = START_IND(oh, osizeH, isizeH);
int iendH = END_IND(oh, osizeH, isizeH);
int kH = iendH - istartH;
for(ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = START_IND(ow, osizeW, isizeW);
int iendW = END_IND(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the max pooling over the corresponding input pixels.
T *ptr_input = input_dt + istartH*istrideH + istartW*istrideW;
T *ptr_output = output_dt + oh*osizeW + ow;
THCIndex_t *ptr_ind = indices_dt + oh*osizeW + ow;
// NOTE(review): if a window were empty (kT/kH/kW == 0) argmax would stay
// -1 and max would stay the sentinel minimum; presumably the host side
// guarantees non-empty windows -- confirm.
int64_t argmax = -1;
T max = THCNumerics<T>::min();
int it, ih, iw;
for(it = 0; it < kT; ++it) {
for(ih = 0; ih < kH; ++ih) {
for(iw = 0; iw < kW; ++iw) {
T val = ptr_input[ih*istrideH + iw*istrideW];
if (val > max) {
max = val;
argmax = (it+istartT)*isizeH*isizeW + (ih+istartH)*isizeW + iw+istartW;
}
}
}
// advance to the next input frame; ptr_input deliberately accumulates
// istrideT once per `it` iteration
ptr_input += istrideT; // next input frame
}
// Update output and argmax (TH_INDEX_BASE shifts to the Torch indexing base)
*ptr_output = max;
*ptr_ind = argmax + TH_INDEX_BASE;
}
}
}
/*
* Description:
* This function computes the gradInput from gradOutput.
*
* gridDim.y blocks work together on a single 2D output plane specified by
* (blockIdx.x + offsetZ).
*
* Assumes that input size can be perfectly divided by output size, i.e.
* each input pixel can only be argmax of one output pixel.
*/
/*
 * Backward pass of volumetric adaptive max pooling (non-atomic variant).
 *
 * gridDim.y blocks work together on a single 2D output plane specified by
 * (blockIdx.x + offsetZ); each output pixel routes its gradient to the input
 * pixel recorded as its argmax in the forward pass.
 *
 * Assumes that input size can be perfectly divided by output size, i.e.
 * each input pixel can only be argmax of one output pixel, so the plain
 * `+=` below is race-free (otherwise use the atomic variant).
 */
template <typename T>
__global__ void cunn_VolumetricAdaptiveMaxPooling_updateGradInput_kernel(
  T *gradInput, T *gradOutput, THCIndex_t *indices,
  int isizeT, int isizeH, int isizeW,
  int osizeT, int osizeH, int osizeW,
  int64_t offsetZ
)
{
  // iterators on output pixels
  int oh, ow;
  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;
  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  int d = o_plane / osizeT; // output slice/feature
  // gradInput offset by slice/feature
  T *gradInput_d = gradInput + d*isizeT*isizeH*isizeW;
  // gradOutput offset by slice/feature and frame/time
  T *gradOutput_dt = gradOutput + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/time
  THCIndex_t *indices_dt = indices + o_plane*osizeH*osizeW;
  // For all output pixels...
  for(oh = ostartH; oh < oendH; oh += ostepH) {
    for(ow = ostartW; ow < oendW; ow += ostepW) {
      // Compute the gradients for the argmax input pixel
      T *ptr_gradOutput = gradOutput_dt + oh*osizeW + ow;
      THCIndex_t *ptr_ind = indices_dt + oh*osizeW + ow;
      T grad_delta = *ptr_gradOutput;
      // Fix: the stored flat argmax is a 64-bit THCIndex_t and can exceed
      // INT_MAX for large inputs; use int64_t (as the atomic variant of this
      // kernel already does) to avoid truncation/overflow.
      int64_t argmax = (*ptr_ind) - TH_INDEX_BASE;
      gradInput_d[argmax] += grad_delta;
    }
  }
}
/*
* Description:
* This function computes the gradInput from gradOutput.
*
* gridDim.y blocks work together on a single 2D output plane specified by
* (blockIdx.x + offsetZ).
*
* Uses atomic add.
*/
/*
 * Backward pass of volumetric adaptive max pooling, atomic variant.
 *
 * gridDim.y blocks work together on a single 2D output plane specified by
 * (blockIdx.x + offsetZ). Several output pixels may share the same argmax
 * input pixel, so the accumulation into gradInput uses atomicAdd.
 */
template <typename T>
__global__ void cunn_atomic_VolumetricAdaptiveMaxPooling_updateGradInput_kernel(
  T *gradInput, T *gradOutput, THCIndex_t *indices,
  int isizeT, int isizeH, int isizeW,
  int osizeT, int osizeH, int osizeW,
  int64_t offsetZ
)
{
  // Thread layout: gridDim.y * blockDim.y threads stride over output rows,
  // blockDim.x threads stride over output columns.
  const int row_start = blockIdx.y * blockDim.y + threadIdx.y;
  const int row_step = gridDim.y * blockDim.y;
  const int col_start = threadIdx.x;
  const int col_step = blockDim.x;
  // Plane selection: one (slice, output-frame) plane per blockIdx.x.
  const int64_t plane = blockIdx.x + offsetZ;
  const int slice = plane / osizeT; // output slice/feature
  // Base pointers for this slice/plane.
  T *grad_in_slice = gradInput + slice * isizeT * isizeH * isizeW;
  T *grad_out_plane = gradOutput + plane * osizeH * osizeW;
  THCIndex_t *ind_plane = indices + plane * osizeH * osizeW;
  for (int row = row_start; row < osizeH; row += row_step) {
    for (int col = col_start; col < osizeW; col += col_step) {
      // Route this output pixel's gradient to its recorded argmax input
      // pixel (index shifted back from the Torch indexing base).
      const int off = row * osizeW + col;
      const T delta = grad_out_plane[off];
      const int64_t argmax = ind_plane[off] - TH_INDEX_BASE;
      atomicAdd(&(grad_in_slice[argmax]), delta);
    }
  }
}
#include "generic/VolumetricAdaptiveMaxPooling.cu"
#include "THCGenerateFloatTypes.h"
#undef CUDA_MAX_THREADS
|
20f32d115c3313fbe51a5fb71de9f5a49a085214.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/ConvUtils.h>
#include <ATen/native/hip/vol2col.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/slow_conv_transpose3d_native.h>
#endif
namespace at {
namespace native {
namespace {
// Shape/consistency validation shared by the forward and backward paths of
// the 3-D transposed convolution.
//  - input must be a non-empty 4D (C,D,H,W) or 5D (N,C,D,H,W) tensor
//  - stride and dilation must be strictly positive per dimension
//  - each output_padding component must be smaller than the matching stride
//    or dilation (otherwise the output size would be ambiguous)
//  - weight, when defined, must be a non-empty 5D tensor; the code below
//    treats weight.size(0) as n_input_plane (checked against input's channel
//    dim) and weight.size(1) as n_output_plane (checked against bias and
//    grad_output). NOTE(review): the error-message text below states the
//    opposite (n_output_plane x n_input_plane) — confirm against upstream.
//  - the deduced output extent must be >= 1 in depth/height/width
//  - grad_output, when defined, must match the deduced output shape
// weight_nullable == 0 makes an undefined weight a hard error.
static inline void slow_conv_transpose3d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
const Tensor& bias,
int kernel_depth,
int kernel_width,
int kernel_height,
int stride_depth,
int stride_width,
int stride_height,
int padding_depth,
int padding_width,
int padding_height,
int dilation_depth,
int dilation_width,
int dilation_height,
int output_padding_depth,
int output_padding_width,
int output_padding_height,
int weight_nullable) {
TORCH_CHECK(
input.numel() != 0 && (input.dim() == 4 || input.dim() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: ",
input.sizes());
TORCH_CHECK(
stride_depth > 0 && stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_depth: ",
stride_depth,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width);
TORCH_CHECK(
dilation_depth > 0 && dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_depth: ",
dilation_depth,
", dilation_height: ",
dilation_height,
", dilation_width: ",
dilation_width);
TORCH_CHECK(
(output_padding_depth < stride_depth ||
output_padding_depth < dilation_depth) &&
(output_padding_width < stride_width ||
output_padding_width < dilation_width) &&
(output_padding_height < stride_height ||
output_padding_height < dilation_height),
"output padding must be smaller than either stride or dilation,",
" but got output_padding_depth: ",
output_padding_depth,
" output_padding_height: ",
output_padding_height,
" output_padding_width: ",
output_padding_width,
" stride_depth: ",
stride_depth,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width,
" dilation_depth: ",
dilation_depth,
" dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
// number of input & output planes and kernel size is indirectly defined by
// the weight tensor
if (weight.defined()) {
TORCH_CHECK(
weight.numel() != 0 && weight.dim() == 5,
"non-empty 5D (n_output_plane x n_input_plane ",
"x kernel_depth x kernel_height x kernel_width) tensor ",
"expected for weight, but got: ",
weight.sizes());
if (bias.defined()) {
// bias is 1-D with one value per output plane (weight dim 1)
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
}
// Locate the feature/depth/height/width dims; they shift by one in
// batch (5D) mode.
int ndim = input.dim();
int dimf = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
if (ndim == 5) {
dimf++;
dimd++;
dimh++;
dimw++;
}
if (weight.defined()) {
const int64_t n_input_plane = weight.size(0);
check_dim_size(input, ndim, dimf, n_input_plane);
}
int64_t input_width = input.size(dimw);
int64_t input_height = input.size(dimh);
int64_t input_depth = input.size(dimd);
// Transposed-convolution output size:
// (in - 1)*stride - 2*padding + dilation*(kernel - 1) + 1 + output_padding
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
if (output_depth < 1 || output_width < 1 || output_height < 1) {
AT_ERROR(
"Given input size per channel: (",
input_depth,
" x ",
input_height,
" x ",
input_width,
"). Calculated output size per channel: (",
output_depth,
" x ",
output_height,
" x ",
output_width,
"). Output size is too small");
}
if (grad_output.defined()) {
if (weight.defined()) {
const int64_t n_output_plane = weight.size(1);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
} else if (bias.defined()) {
const int64_t n_output_plane = bias.size(0);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
}
check_dim_size(grad_output, ndim, dimd, output_depth);
check_dim_size(grad_output, ndim, dimh, output_height);
check_dim_size(grad_output, ndim, dimw, output_width);
}
}
// Forward pass of slow_conv_transpose3d (HIP).
// Resizes `output` to (N, n_output_plane, oD, oH, oW) where each extent is
// (i - 1)*stride - 2*padding + dilation*(kernel - 1) + 1 + output_padding.
// Per batch element: a gemm of weight^T with the input produces a column
// buffer, col2vol scatters the columns into the output volume, and an
// optional bias is added via a rank-1 gemm against a buffer of ones.
// Fixes vs. previous revision: the output_padding size-check message wrongly
// said "stride"; is_batch is now bool (matches the backward functions).
void slow_conv_transpose3d_out_cuda_template(
Tensor& output,
const Tensor& input_,
const Tensor& weight_,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected output_padding equals to 3, but got size ",
output_padding.size());
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
// For transposed conv the weight layout is (in_planes, out_planes, kD, kH, kW)
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
weight_arg{weight_, "weight", 3}, bias_arg{bias, "bias", 4};
checkAllSameGPU(
"slow_conv_transpose3d_out_cuda",
{input_arg, output_arg, weight_arg, bias_arg});
slow_conv_transpose3d_shape_check(
input_,
Tensor(),
weight_,
bias,
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
0);
TORCH_CHECK(
!bias.defined() || bias.is_contiguous(),
"bias tensor has to be contiguous");
Tensor input = input_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch: prepend a singleton batch dimension, undone at the end
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
output.resize_(
{batch_size, n_output_plane, output_depth, output_height, output_width});
// Create temporary columns
Tensor columns = at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width}, input.options());
// Define a buffer of ones, for bias accumulation
Tensor ones = bias.defined() ? at::ones({output_depth, output_height, output_width}, input_.options()) : Tensor();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
// Helpers
Tensor input_n;
Tensor output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix mulitply per output:
input_n = input.select(0, elt);
output_n = output.select(0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m =
weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
int64_t n = columns.size(1);
int64_t k = weight.size(0);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
at::cuda::blas::gemm<scalar_t>(
'n',
't',
n,
m,
k,
static_cast<scalar_t>(1),
input_n.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
m,
static_cast<scalar_t>(0),
columns.data_ptr<scalar_t>(),
n);
// Unpack columns back into input:
at::native::col2vol<scalar_t, accscalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
columns.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
output_n.data_ptr<scalar_t>());
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t n_ = output_depth * output_height * output_width;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
if (bias.defined()) {
at::cuda::blas::gemm<scalar_t>(
't',
'n',
n_,
m_,
k_,
static_cast<scalar_t>(1),
ones.data_ptr<scalar_t>(),
k_,
bias.data_ptr<scalar_t>(),
k_,
static_cast<scalar_t>(1),
output_n.data_ptr<scalar_t>(),
n_);
}
}
// Resize output back to unbatched shape if we forced a batch dim
if (is_batch) {
output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{n_input_plane, input_depth, input_height, input_width});
}
});
}
// Backward-to-input pass of slow_conv_transpose3d (HIP).
// grad_input = conv(grad_output, weight): per batch element, vol2col
// rearranges grad_output into a column buffer (skipped when the op is a
// pure 1x1x1 / stride-1 / no-pad case, where the column layout equals
// grad_output itself), then a gemm with weight produces grad_input.
// Fix vs. previous revision: the output_padding size-check message wrongly
// said "stride".
void slow_conv_transpose3d_backward_out_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_input,
const Tensor& weight_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected output_padding equals to 3, but got size ",
output_padding.size());
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
weight_arg{weight_, "weight", 3},
grad_input_arg{grad_input, "grad_input", 4};
checkAllSameGPU(
"slow_conv_transpose3d_backward_out_cuda",
{input_arg,
grad_output_arg,
weight_arg,
grad_input_arg});
slow_conv_transpose3d_shape_check(
input_,
grad_output_,
weight_,
Tensor(),
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
0);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch: prepend a singleton batch dimension, undone at the end
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
grad_output.resize_({1,
grad_output.size(0),
grad_output.size(1),
grad_output.size(2),
grad_output.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
grad_input.resize_(
{batch_size, n_input_plane, input_depth, input_height, input_width});
// Create temporary columns only when vol2col actually reshuffles data
bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 ||
stride_depth != 1 || stride_height != 1 || stride_width != 1 ||
dilation_depth != 1 || dilation_height != 1 ||
dilation_width != 1 || padding_depth != 0 ||
padding_height != 0 || padding_width != 0);
Tensor grad_columns = need_columns ? at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width}, input.options()) : Tensor();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] {
// Helpers
Tensor grad_input_n;
Tensor grad_output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix mulitply per sample:
grad_input_n = grad_input.select(0, elt);
grad_output_n = grad_output.select(0, elt);
if (need_columns) {
// Extract columns:
at::native::vol2col<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
grad_columns.data_ptr<scalar_t>());
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(0);
int64_t n = input_depth * input_height * input_width;
int64_t k =
weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
auto gemm_in_ptr = need_columns ? grad_columns.data_ptr<scalar_t>()
: grad_output_n.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
'n',
'n',
n,
m,
k,
static_cast<scalar_t>(1),
gemm_in_ptr,
n,
weight.data_ptr<scalar_t>(),
k,
static_cast<scalar_t>(0),
grad_input_n.data_ptr<scalar_t>(),
n);
}
// Resize back to unbatched shapes if we forced a batch dim
if (is_batch) {
grad_output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{n_input_plane, input_depth, input_height, input_width});
grad_input.resize_(
{n_input_plane, input_depth, input_height, input_width});
}
});
}
// Parameter-gradient accumulation for slow_conv_transpose3d (HIP).
// grad_weight += scale * gemm(vol2col(grad_output), input) per batch element
// (the vol2col is skipped in the pure 1x1x1 / stride-1 / no-pad case, where
// the column layout equals grad_output). grad_bias is the sum of grad_output
// over the batch and spatial dims. Callers must pass contiguous, pre-zeroed
// grad_weight/grad_bias; either may be undefined to skip it.
// Fix vs. previous revision: the output_padding size-check message wrongly
// said "stride".
void slow_conv_transpose3d_acc_grad_parameters_cuda(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_weight,
Tensor& grad_bias,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
int scale_) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected output_padding equals to 3, but got size ",
output_padding.size());
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
grad_weight_arg{grad_weight, "grad_weight", 3},
grad_bias_arg{grad_bias, "grad_bias", 4};
checkAllSameGPU(
"slow_conv_transpose3d_acc_grad_parameters_cuda",
{input_arg,
grad_output_arg,
grad_weight_arg,
grad_bias_arg});
slow_conv_transpose3d_shape_check(
input_,
grad_output_,
grad_weight,
grad_bias,
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
1);
// Determine the number of output planes from whichever gradient is defined;
// with neither defined there is nothing to accumulate.
int n_output_plane;
if (grad_weight.defined()) {
n_output_plane = grad_weight.size(1);
} else if (grad_bias.defined()) {
n_output_plane = grad_bias.size(0);
} else {
return;
}
if (grad_weight.defined()) {
TORCH_CHECK(
grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
}
if (grad_bias.defined()) {
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
}
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch: prepend a singleton batch dimension, undone at the end
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
grad_output.resize_({1,
grad_output.size(0),
grad_output.size(1),
grad_output.size(2),
grad_output.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Create temporary columns only when vol2col actually reshuffles data
bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 ||
stride_depth != 1 || stride_height != 1 || stride_width != 1 ||
dilation_depth != 1 || dilation_height != 1 ||
dilation_width != 1 || padding_depth != 0 ||
padding_height != 0 || padding_width != 0);
Tensor columns = need_columns ? at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width}, input.options()) : Tensor();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(),
"slow_conv_transpose3d_acc_grad_parameters_cuda",
[&] {
// Helpers
Tensor input_n;
Tensor grad_output_n;
scalar_t scale = static_cast<scalar_t>(scale_);
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix mulitply per output:
grad_output_n = grad_output.select(0, elt);
// Do Weight:
if (grad_weight.defined()) {
// Matrix mulitply per output:
input_n = input.select(0, elt);
if (need_columns) {
// Extract columns:
at::native::vol2col<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
columns.data_ptr<scalar_t>());
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t n = n_output_plane * kernel_width * kernel_height * kernel_depth;
int64_t m = input_n.size(0); // n_input_plane
int64_t k = input_depth * input_height * input_width;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices); beta == 1 accumulates into grad_weight
auto gemm_in_ptr = need_columns ? columns.data_ptr<scalar_t>() : grad_output_n.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
't',
'n',
n,
m,
k,
scale,
gemm_in_ptr,
k,
input_n.data_ptr<scalar_t>(),
k,
static_cast<scalar_t>(1),
grad_weight.data_ptr<scalar_t>(),
n);
}
}
// grad_bias: reduce grad_output over batch and spatial dims (N, D, H, W)
if (grad_bias.defined()) {
at::sum_out(grad_bias, grad_output, IntArrayRef{0, 2, 3, 4});
}
// Resize back to unbatched shapes if we forced a batch dim
if (is_batch) {
grad_output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{input.size(1), input_depth, input_height, input_width});
}
});
}
} // namespace
// Out-variant entry point for slow_conv_transpose3d on CUDA.
// Unwraps the optional bias and forwards everything to the template
// implementation, which resizes and fills `output` in place.
Tensor& slow_conv_transpose3d_out_cuda(const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    Tensor& output) {
  // See [Note: hacky wrapper removal for optional tensor]
  c10::MaybeOwned<Tensor> borrowed_bias =
      at::borrow_from_optional_tensor(bias_opt);
  slow_conv_transpose3d_out_cuda_template(
      output,
      input,
      weight,
      kernel_size,
      *borrowed_bias,
      stride,
      padding,
      output_padding,
      dilation);
  return output;
}
// Functional entry point for slow_conv_transpose3d on CUDA.
// Allocates a fresh result tensor (legacy-contiguous, same options as the
// input) and delegates to the template implementation, which resizes it to
// the proper output shape.
Tensor slow_conv_transpose3d_cuda(
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation) {
  // See [Note: hacky wrapper removal for optional tensor]
  c10::MaybeOwned<Tensor> borrowed_bias =
      at::borrow_from_optional_tensor(bias_opt);
  Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  slow_conv_transpose3d_out_cuda_template(
      result,
      input,
      weight,
      kernel_size,
      *borrowed_bias,
      stride,
      padding,
      output_padding,
      dilation);
  return result;
}
// Out-variant backward entry point. Each gradient output is computed only
// when its tensor is defined: grad_input via the input-backward template;
// grad_weight/grad_bias are resized, zeroed, then accumulated into by
// slow_conv_transpose3d_acc_grad_parameters_cuda (scale = 1).
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda(const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias) {
if (grad_input.defined()) {
slow_conv_transpose3d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
// The accumulation routine requires pre-sized, zeroed parameter gradients.
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
// bias has one entry per output plane (weight dim 1)
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose3d_acc_grad_parameters_cuda(
input,
grad_output,
grad_weight,
grad_bias,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor&, Tensor&, Tensor&>(
grad_input, grad_weight, grad_bias);
}
// Functional backward entry point. output_mask selects which of
// (grad_input, grad_weight, grad_bias) to compute; unselected gradients are
// returned as undefined tensors. Selected ones start as empty tensors on the
// grad_output device and are filled by the same helpers as the out-variant.
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda(
    const Tensor& grad_output,
    const Tensor& input,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef output_padding,
    IntArrayRef dilation,
    std::array<bool, 3> output_mask) {
  // Conditionally materialize each requested gradient.
  Tensor grad_input =
      output_mask[0] ? at::empty({0}, grad_output.options()) : Tensor();
  Tensor grad_weight =
      output_mask[1] ? at::empty({0}, grad_output.options()) : Tensor();
  Tensor grad_bias =
      output_mask[2] ? at::empty({0}, grad_output.options()) : Tensor();
  if (grad_input.defined()) {
    slow_conv_transpose3d_backward_out_cuda_template(
        input,
        grad_output,
        grad_input,
        weight,
        kernel_size,
        stride,
        padding,
        output_padding,
        dilation);
  }
  // The accumulation routine requires pre-sized, zeroed parameter gradients.
  if (grad_weight.defined()) {
    grad_weight.resize_(weight.sizes());
    grad_weight.zero_();
  }
  if (grad_bias.defined()) {
    grad_bias.resize_({weight.size(1)});
    grad_bias.zero_();
  }
  if (grad_weight.defined() || grad_bias.defined()) {
    slow_conv_transpose3d_acc_grad_parameters_cuda(
        input,
        grad_output,
        grad_weight,
        grad_bias,
        kernel_size,
        stride,
        padding,
        output_padding,
        dilation,
        1);
  }
  return std::make_tuple(grad_input, grad_weight, grad_bias);
}
// Register the CUDA backward implementation with ATen's dispatch stub.
REGISTER_CUDA_DISPATCH(slow_conv_transpose3d_backward_stub, &slow_conv_transpose3d_backward_cuda);
} // namespace native
} // namespace at
| 20f32d115c3313fbe51a5fb71de9f5a49a085214.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/ConvUtils.h>
#include <ATen/native/cuda/vol2col.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/slow_conv_transpose3d_native.h>
#endif
namespace at {
namespace native {
namespace {
// Shape/consistency validation shared by the forward and backward paths of
// the 3-D transposed convolution.
//  - input must be a non-empty 4D (C,D,H,W) or 5D (N,C,D,H,W) tensor
//  - stride and dilation must be strictly positive per dimension
//  - each output_padding component must be smaller than the matching stride
//    or dilation (otherwise the output size would be ambiguous)
//  - weight, when defined, must be a non-empty 5D tensor; the code below
//    treats weight.size(0) as n_input_plane (checked against input's channel
//    dim) and weight.size(1) as n_output_plane (checked against bias and
//    grad_output). NOTE(review): the error-message text below states the
//    opposite (n_output_plane x n_input_plane) — confirm against upstream.
//  - the deduced output extent must be >= 1 in depth/height/width
//  - grad_output, when defined, must match the deduced output shape
// weight_nullable == 0 makes an undefined weight a hard error.
static inline void slow_conv_transpose3d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
const Tensor& bias,
int kernel_depth,
int kernel_width,
int kernel_height,
int stride_depth,
int stride_width,
int stride_height,
int padding_depth,
int padding_width,
int padding_height,
int dilation_depth,
int dilation_width,
int dilation_height,
int output_padding_depth,
int output_padding_width,
int output_padding_height,
int weight_nullable) {
TORCH_CHECK(
input.numel() != 0 && (input.dim() == 4 || input.dim() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: ",
input.sizes());
TORCH_CHECK(
stride_depth > 0 && stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_depth: ",
stride_depth,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width);
TORCH_CHECK(
dilation_depth > 0 && dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_depth: ",
dilation_depth,
", dilation_height: ",
dilation_height,
", dilation_width: ",
dilation_width);
TORCH_CHECK(
(output_padding_depth < stride_depth ||
output_padding_depth < dilation_depth) &&
(output_padding_width < stride_width ||
output_padding_width < dilation_width) &&
(output_padding_height < stride_height ||
output_padding_height < dilation_height),
"output padding must be smaller than either stride or dilation,",
" but got output_padding_depth: ",
output_padding_depth,
" output_padding_height: ",
output_padding_height,
" output_padding_width: ",
output_padding_width,
" stride_depth: ",
stride_depth,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width,
" dilation_depth: ",
dilation_depth,
" dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
// number of input & output planes and kernel size is indirectly defined by
// the weight tensor
if (weight.defined()) {
TORCH_CHECK(
weight.numel() != 0 && weight.dim() == 5,
"non-empty 5D (n_output_plane x n_input_plane ",
"x kernel_depth x kernel_height x kernel_width) tensor ",
"expected for weight, but got: ",
weight.sizes());
if (bias.defined()) {
// bias is 1-D with one value per output plane (weight dim 1)
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
}
// Locate the feature/depth/height/width dims; they shift by one in
// batch (5D) mode.
int ndim = input.dim();
int dimf = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
if (ndim == 5) {
dimf++;
dimd++;
dimh++;
dimw++;
}
if (weight.defined()) {
const int64_t n_input_plane = weight.size(0);
check_dim_size(input, ndim, dimf, n_input_plane);
}
int64_t input_width = input.size(dimw);
int64_t input_height = input.size(dimh);
int64_t input_depth = input.size(dimd);
// Transposed-convolution output size:
// (in - 1)*stride - 2*padding + dilation*(kernel - 1) + 1 + output_padding
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
if (output_depth < 1 || output_width < 1 || output_height < 1) {
AT_ERROR(
"Given input size per channel: (",
input_depth,
" x ",
input_height,
" x ",
input_width,
"). Calculated output size per channel: (",
output_depth,
" x ",
output_height,
" x ",
output_width,
"). Output size is too small");
}
if (grad_output.defined()) {
if (weight.defined()) {
const int64_t n_output_plane = weight.size(1);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
} else if (bias.defined()) {
const int64_t n_output_plane = bias.size(0);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
}
check_dim_size(grad_output, ndim, dimd, output_depth);
check_dim_size(grad_output, ndim, dimh, output_height);
check_dim_size(grad_output, ndim, dimw, output_width);
}
}
// Forward pass of slow_conv_transpose3d (CUDA).
// Resizes `output` to (N, n_output_plane, oD, oH, oW) where each extent is
// (i - 1)*stride - 2*padding + dilation*(kernel - 1) + 1 + output_padding.
// Per batch element: a gemm of weight^T with the input produces a column
// buffer, col2vol scatters the columns into the output volume, and an
// optional bias is added via a rank-1 gemm against a buffer of ones.
// Fixes vs. previous revision: the output_padding size-check message wrongly
// said "stride"; is_batch is now bool (matches the backward functions).
void slow_conv_transpose3d_out_cuda_template(
Tensor& output,
const Tensor& input_,
const Tensor& weight_,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected output_padding equals to 3, but got size ",
output_padding.size());
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
// For transposed conv the weight layout is (in_planes, out_planes, kD, kH, kW)
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
weight_arg{weight_, "weight", 3}, bias_arg{bias, "bias", 4};
checkAllSameGPU(
"slow_conv_transpose3d_out_cuda",
{input_arg, output_arg, weight_arg, bias_arg});
slow_conv_transpose3d_shape_check(
input_,
Tensor(),
weight_,
bias,
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
0);
TORCH_CHECK(
!bias.defined() || bias.is_contiguous(),
"bias tensor has to be contiguous");
Tensor input = input_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch: prepend a singleton batch dimension, undone at the end
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
output.resize_(
{batch_size, n_output_plane, output_depth, output_height, output_width});
// Create temporary columns
Tensor columns = at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width}, input.options());
// Define a buffer of ones, for bias accumulation
Tensor ones = bias.defined() ? at::ones({output_depth, output_height, output_width}, input_.options()) : Tensor();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
// Helpers
Tensor input_n;
Tensor output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix mulitply per output:
input_n = input.select(0, elt);
output_n = output.select(0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m =
weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
int64_t n = columns.size(1);
int64_t k = weight.size(0);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
at::cuda::blas::gemm<scalar_t>(
'n',
't',
n,
m,
k,
static_cast<scalar_t>(1),
input_n.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
m,
static_cast<scalar_t>(0),
columns.data_ptr<scalar_t>(),
n);
// Unpack columns back into input:
at::native::col2vol<scalar_t, accscalar_t>(
at::cuda::getCurrentCUDAStream(),
columns.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
output_n.data_ptr<scalar_t>());
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t n_ = output_depth * output_height * output_width;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
if (bias.defined()) {
at::cuda::blas::gemm<scalar_t>(
't',
'n',
n_,
m_,
k_,
static_cast<scalar_t>(1),
ones.data_ptr<scalar_t>(),
k_,
bias.data_ptr<scalar_t>(),
k_,
static_cast<scalar_t>(1),
output_n.data_ptr<scalar_t>(),
n_);
}
}
// Resize output back to unbatched shape if we forced a batch dim
if (is_batch) {
output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{n_input_plane, input_depth, input_height, input_width});
}
});
}
void slow_conv_transpose3d_backward_out_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_input,
const Tensor& weight_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
weight_arg{weight_, "weight", 3},
grad_input_arg{grad_input, "grad_input", 4};
checkAllSameGPU(
"slow_conv_transpose3d_backward_out_cuda",
{input_arg,
grad_output_arg,
weight_arg,
grad_input_arg});
slow_conv_transpose3d_shape_check(
input_,
grad_output_,
weight_,
Tensor(),
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
0);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
grad_output.resize_({1,
grad_output.size(0),
grad_output.size(1),
grad_output.size(2),
grad_output.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
grad_input.resize_(
{batch_size, n_input_plane, input_depth, input_height, input_width});
// Create temporary columns
bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 ||
stride_depth != 1 || stride_height != 1 || stride_width != 1 ||
dilation_depth != 1 || dilation_height != 1 ||
dilation_width != 1 || padding_depth != 0 ||
padding_height != 0 || padding_width != 0);
Tensor grad_columns = need_columns ? at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width}, input.options()) : Tensor();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] {
// Helpers
Tensor grad_input_n;
Tensor grad_output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix mulitply per sample:
grad_input_n = grad_input.select(0, elt);
grad_output_n = grad_output.select(0, elt);
if (need_columns) {
// Extract columns:
at::native::vol2col<scalar_t>(
at::cuda::getCurrentCUDAStream(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
grad_columns.data_ptr<scalar_t>());
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(0);
int64_t n = input_depth * input_height * input_width;
int64_t k =
weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
auto gemm_in_ptr = need_columns ? grad_columns.data_ptr<scalar_t>()
: grad_output_n.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
'n',
'n',
n,
m,
k,
static_cast<scalar_t>(1),
gemm_in_ptr,
n,
weight.data_ptr<scalar_t>(),
k,
static_cast<scalar_t>(0),
grad_input_n.data_ptr<scalar_t>(),
n);
}
// Resize output
if (is_batch) {
grad_output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{n_input_plane, input_depth, input_height, input_width});
grad_input.resize_(
{n_input_plane, input_depth, input_height, input_width});
}
});
}
void slow_conv_transpose3d_acc_grad_parameters_cuda(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_weight,
Tensor& grad_bias,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
int scale_) {
TORCH_CHECK(
kernel_size.size() == 3,
"It is expected kernel_size equals to 3, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 3,
"It is expected dilation equals to 3, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 3,
"It is expected padding equals to 3, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 3,
"It is expected stride equals to 3, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 3,
"It is expected stride equals to 3, but got size ",
output_padding.size());
int64_t kernel_depth = kernel_size[0];
int64_t kernel_height = kernel_size[1];
int64_t kernel_width = kernel_size[2];
int64_t dilation_depth = dilation[0];
int64_t dilation_height = dilation[1];
int64_t dilation_width = dilation[2];
int64_t padding_depth = padding[0];
int64_t padding_height = padding[1];
int64_t padding_width = padding[2];
int64_t stride_depth = stride[0];
int64_t stride_height = stride[1];
int64_t stride_width = stride[2];
int64_t output_padding_depth = output_padding[0];
int64_t output_padding_height = output_padding[1];
int64_t output_padding_width = output_padding[2];
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
grad_weight_arg{grad_weight, "grad_weight", 3},
grad_bias_arg{grad_bias, "grad_bias", 4};
checkAllSameGPU(
"slow_conv_transpose3d_acc_grad_parameters_cuda",
{input_arg,
grad_output_arg,
grad_weight_arg,
grad_bias_arg});
slow_conv_transpose3d_shape_check(
input_,
grad_output_,
grad_weight,
grad_bias,
kernel_depth,
kernel_width,
kernel_height,
stride_depth,
stride_width,
stride_height,
padding_depth,
padding_width,
padding_height,
dilation_depth,
dilation_width,
dilation_height,
output_padding_depth,
output_padding_width,
output_padding_height,
1);
int n_output_plane;
if (grad_weight.defined()) {
n_output_plane = grad_weight.size(1);
} else if (grad_bias.defined()) {
n_output_plane = grad_bias.size(0);
} else {
return;
}
if (grad_weight.defined()) {
TORCH_CHECK(
grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
}
if (grad_bias.defined()) {
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
}
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
bool is_batch = false;
if (input.dim() == 4) {
// Force batch
is_batch = true;
input.resize_(
{1, input.size(0), input.size(1), input.size(2), input.size(3)});
grad_output.resize_({1,
grad_output.size(0),
grad_output.size(1),
grad_output.size(2),
grad_output.size(3)});
}
int64_t input_width = input.size(4);
int64_t input_height = input.size(3);
int64_t input_depth = input.size(2);
int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth +
(dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth;
int64_t output_height = (input_height - 1) * stride_height -
2 * padding_height + (dilation_height * (kernel_height - 1) + 1) +
output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Create temporary columns
bool need_columns = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 ||
stride_depth != 1 || stride_height != 1 || stride_width != 1 ||
dilation_depth != 1 || dilation_height != 1 ||
dilation_width != 1 || padding_depth != 0 ||
padding_height != 0 || padding_width != 0);
Tensor columns = need_columns ? at::empty({n_output_plane * kernel_width * kernel_height * kernel_depth,
input_depth * input_height * input_width}, input.options()) : Tensor();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(),
"slow_conv_transpose3d_acc_grad_parameters_cuda",
[&] {
// Helpers
Tensor input_n;
Tensor grad_output_n;
scalar_t scale = static_cast<scalar_t>(scale_);
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix mulitply per output:
grad_output_n = grad_output.select(0, elt);
// Do Weight:
if (grad_weight.defined()) {
// Matrix mulitply per output:
input_n = input.select(0, elt);
if (need_columns) {
// Extract columns:
at::native::vol2col<scalar_t>(
at::cuda::getCurrentCUDAStream(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
kernel_depth,
kernel_height,
kernel_width,
padding_depth,
padding_height,
padding_width,
stride_depth,
stride_height,
stride_width,
dilation_depth,
dilation_height,
dilation_width,
columns.data_ptr<scalar_t>());
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t n = n_output_plane * kernel_width * kernel_height * kernel_depth;
int64_t m = input_n.size(0); // n_input_plane
int64_t k = input_depth * input_height * input_width;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
auto gemm_in_ptr = need_columns ? columns.data_ptr<scalar_t>() : grad_output_n.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
't',
'n',
n,
m,
k,
scale,
gemm_in_ptr,
k,
input_n.data_ptr<scalar_t>(),
k,
static_cast<scalar_t>(1),
grad_weight.data_ptr<scalar_t>(),
n);
}
}
if (grad_bias.defined()) {
at::sum_out(grad_bias, grad_output, IntArrayRef{0, 2, 3, 4});
}
// Resize
if (is_batch) {
grad_output.resize_(
{n_output_plane, output_depth, output_height, output_width});
input.resize_(
{input.size(1), input_depth, input_height, input_width});
}
});
}
} // namespace
Tensor& slow_conv_transpose3d_out_cuda(const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& output) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
slow_conv_transpose3d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation);
return output;
}
Tensor slow_conv_transpose3d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
slow_conv_transpose3d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation);
return output;
}
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda(const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias) {
if (grad_input.defined()) {
slow_conv_transpose3d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose3d_acc_grad_parameters_cuda(
input,
grad_output,
grad_weight,
grad_bias,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor&, Tensor&, Tensor&>(
grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
std::array<bool, 3> output_mask) {
Tensor grad_input;
Tensor grad_weight;
Tensor grad_bias;
if (output_mask[0]) {
grad_input = at::empty({0}, grad_output.options());
} else {
grad_input = Tensor();
}
if (output_mask[1]) {
grad_weight = at::empty({0}, grad_output.options());
} else {
grad_weight = Tensor();
}
if (output_mask[2]) {
grad_bias = at::empty({0}, grad_output.options());
} else {
grad_bias = Tensor();
}
if (grad_input.defined()) {
slow_conv_transpose3d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose3d_acc_grad_parameters_cuda(
input,
grad_output,
grad_weight,
grad_bias,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
REGISTER_CUDA_DISPATCH(slow_conv_transpose3d_backward_stub, &slow_conv_transpose3d_backward_cuda);
} // namespace native
} // namespace at
|
1cb1b4d7fd197593a9b69d228bcf118b07326ea9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 25-Oct-2011 14:51:27
//
// user function
__device__
#include "bres_calc.h"
// CUDA kernel function
__global__ void op_cuda_bres_calc(
double *ind_arg0, int *ind_arg0_maps,
double *ind_arg1, int *ind_arg1_maps,
double *ind_arg2, int *ind_arg2_maps,
double *ind_arg3, int *ind_arg3_maps,
short *arg0_maps,
short *arg1_maps,
short *arg2_maps,
short *arg3_maps,
short *arg4_maps,
int *arg5,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors) {
double arg4_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ double *ind_arg0_s;
__shared__ double *ind_arg1_s;
__shared__ double *ind_arg2_s;
__shared__ double *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4];
ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4];
ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4];
ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2);
ind_arg1_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4);
ind_arg2_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1);
ind_arg3_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_double;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg4_l[d] = ZERO_double;
// user-supplied kernel call
bres_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
ind_arg0_s+arg1_maps[n+offset_b]*2,
ind_arg1_s+arg2_maps[n+offset_b]*4,
ind_arg2_s+arg3_maps[n+offset_b]*1,
arg4_l,
arg5+(n+offset_b)*1 );
col2 = colors[n+offset_b];
}
// store local variables
int arg4_map = arg4_maps[n+offset_b];
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg4_map*4] += arg4_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_bres_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5 ){
int nargs = 6;
op_arg args[6] = {arg0,arg1,arg2,arg3,arg4,arg5};
int ninds = 4;
int inds[6] = {0,0,1,2,3,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: bres_calc \n");
}
// get plan
#ifdef OP_PART_SIZE_3
int part_size = OP_PART_SIZE_3;
#else
int part_size = OP_part_size;
#endif
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
#ifdef OP_BLOCK_SIZE_3
int nthread = OP_BLOCK_SIZE_3;
#else
int nthread = OP_block_size;
#endif
int nblocks = Plan->ncolblk[col];
int nshared = Plan->nshared;
hipLaunchKernelGGL(( op_cuda_bres_calc), dim3(nblocks),dim3(nthread),nshared, 0,
(double *)arg0.data_d, Plan->ind_maps[0],
(double *)arg2.data_d, Plan->ind_maps[1],
(double *)arg3.data_d, Plan->ind_maps[2],
(double *)arg4.data_d, Plan->ind_maps[3],
Plan->loc_maps[0],
Plan->loc_maps[1],
Plan->loc_maps[2],
Plan->loc_maps[3],
Plan->loc_maps[4],
(int *)arg5.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_bres_calc execution failed\n");
block_offset += nblocks;
}
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(3);
OP_kernels[3].name = name;
OP_kernels[3].count += 1;
OP_kernels[3].time += wall_t2 - wall_t1;
OP_kernels[3].transfer += Plan->transfer;
OP_kernels[3].transfer2 += Plan->transfer2;
}
| 1cb1b4d7fd197593a9b69d228bcf118b07326ea9.cu | //
// auto-generated by op2.m on 25-Oct-2011 14:51:27
//
// user function
__device__
#include "bres_calc.h"
// CUDA kernel function
__global__ void op_cuda_bres_calc(
double *ind_arg0, int *ind_arg0_maps,
double *ind_arg1, int *ind_arg1_maps,
double *ind_arg2, int *ind_arg2_maps,
double *ind_arg3, int *ind_arg3_maps,
short *arg0_maps,
short *arg1_maps,
short *arg2_maps,
short *arg3_maps,
short *arg4_maps,
int *arg5,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors) {
double arg4_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ double *ind_arg0_s;
__shared__ double *ind_arg1_s;
__shared__ double *ind_arg2_s;
__shared__ double *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4];
ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4];
ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4];
ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2);
ind_arg1_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4);
ind_arg2_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1);
ind_arg3_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_double;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg4_l[d] = ZERO_double;
// user-supplied kernel call
bres_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
ind_arg0_s+arg1_maps[n+offset_b]*2,
ind_arg1_s+arg2_maps[n+offset_b]*4,
ind_arg2_s+arg3_maps[n+offset_b]*1,
arg4_l,
arg5+(n+offset_b)*1 );
col2 = colors[n+offset_b];
}
// store local variables
int arg4_map = arg4_maps[n+offset_b];
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg4_map*4] += arg4_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_bres_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5 ){
int nargs = 6;
op_arg args[6] = {arg0,arg1,arg2,arg3,arg4,arg5};
int ninds = 4;
int inds[6] = {0,0,1,2,3,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: bres_calc \n");
}
// get plan
#ifdef OP_PART_SIZE_3
int part_size = OP_PART_SIZE_3;
#else
int part_size = OP_part_size;
#endif
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
#ifdef OP_BLOCK_SIZE_3
int nthread = OP_BLOCK_SIZE_3;
#else
int nthread = OP_block_size;
#endif
int nblocks = Plan->ncolblk[col];
int nshared = Plan->nshared;
op_cuda_bres_calc<<<nblocks,nthread,nshared>>>(
(double *)arg0.data_d, Plan->ind_maps[0],
(double *)arg2.data_d, Plan->ind_maps[1],
(double *)arg3.data_d, Plan->ind_maps[2],
(double *)arg4.data_d, Plan->ind_maps[3],
Plan->loc_maps[0],
Plan->loc_maps[1],
Plan->loc_maps[2],
Plan->loc_maps[3],
Plan->loc_maps[4],
(int *)arg5.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("op_cuda_bres_calc execution failed\n");
block_offset += nblocks;
}
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(3);
OP_kernels[3].name = name;
OP_kernels[3].count += 1;
OP_kernels[3].time += wall_t2 - wall_t1;
OP_kernels[3].transfer += Plan->transfer;
OP_kernels[3].transfer2 += Plan->transfer2;
}
|
d12e8576f79e592e3d178ee67016261e99862052.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <regularizers/l2_regularizer.hpp>
#include <utility>
#include <utils.cuh>
namespace HugeCTR {
namespace {
template <typename T>
void launch_initialize_wgrad_kernel(const float* weight, T* wgrad, int num_elements, int batch_size,
float lambda, int n_sms, hipStream_t stream) {
auto op = [lambda, batch_size] __device__(const float in) { return (lambda / batch_size) * in; };
hipLaunchKernelGGL(( transform_array), dim3(n_sms * 4), dim3(512), 0, stream, weight, wgrad, num_elements, op);
}
} // namespace
template <typename T>
L2Regularizer<T>::L2Regularizer(const Tensor2<float>& weight_buff, const Tensor2<T>& wgrad_buff,
const int batch_size, const float lambda,
const std::shared_ptr<GPUResource>& gpu_resource)
: Regularizer<T>(weight_buff, wgrad_buff, batch_size, gpu_resource), lambda_(lambda) {}
template <typename T>
void L2Regularizer<T>::do_compute_rterm(const float* weight, float* h_rterm, int num_elements) {
CK_CUBLAS_THROW_(hipblasSdot(Regularizer<T>::get_gpu().get_cublas_handle(), num_elements, weight,
1, weight, 1, h_rterm));
const float alpha = lambda_ / (Regularizer<T>::get_batch_size() * 2);
*h_rterm *= alpha;
}
template <typename T>
void L2Regularizer<T>::do_initialize_wgrad(const float* weight, T* wgrad, int num_elements) {
launch_initialize_wgrad_kernel(weight, wgrad, num_elements, Regularizer<T>::get_batch_size(),
lambda_, Regularizer<T>::get_gpu().get_sm_count(),
Regularizer<T>::get_gpu().get_stream());
}
template class L2Regularizer<__half>;
template class L2Regularizer<float>;
} // namespace HugeCTR
| d12e8576f79e592e3d178ee67016261e99862052.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <regularizers/l2_regularizer.hpp>
#include <utility>
#include <utils.cuh>
namespace HugeCTR {
namespace {
template <typename T>
void launch_initialize_wgrad_kernel(const float* weight, T* wgrad, int num_elements, int batch_size,
float lambda, int n_sms, cudaStream_t stream) {
auto op = [lambda, batch_size] __device__(const float in) { return (lambda / batch_size) * in; };
transform_array<<<n_sms * 4, 512, 0, stream>>>(weight, wgrad, num_elements, op);
}
} // namespace
template <typename T>
L2Regularizer<T>::L2Regularizer(const Tensor2<float>& weight_buff, const Tensor2<T>& wgrad_buff,
const int batch_size, const float lambda,
const std::shared_ptr<GPUResource>& gpu_resource)
: Regularizer<T>(weight_buff, wgrad_buff, batch_size, gpu_resource), lambda_(lambda) {}
template <typename T>
void L2Regularizer<T>::do_compute_rterm(const float* weight, float* h_rterm, int num_elements) {
CK_CUBLAS_THROW_(cublasSdot(Regularizer<T>::get_gpu().get_cublas_handle(), num_elements, weight,
1, weight, 1, h_rterm));
const float alpha = lambda_ / (Regularizer<T>::get_batch_size() * 2);
*h_rterm *= alpha;
}
template <typename T>
void L2Regularizer<T>::do_initialize_wgrad(const float* weight, T* wgrad, int num_elements) {
launch_initialize_wgrad_kernel(weight, wgrad, num_elements, Regularizer<T>::get_batch_size(),
lambda_, Regularizer<T>::get_gpu().get_sm_count(),
Regularizer<T>::get_gpu().get_stream());
}
template class L2Regularizer<__half>;
template class L2Regularizer<float>;
} // namespace HugeCTR
|
6b22f97da428fee815132318603f3187b33f4f3e.hip | // !!! This is a file automatically generated by hipify!!!
#include <quda_internal.h>
#include <malloc_quda.h>
#include <reduce_helper.h>
#include <tunable_nd.h>
#include <kernels/reduce_init.cuh>
// These are used for reduction kernels
static device_reduce_t *d_reduce = nullptr;
static device_reduce_t *h_reduce = nullptr;
static device_reduce_t *hd_reduce = nullptr;
static count_t *reduce_count = nullptr;
static qudaEvent_t reduceEnd;
namespace quda
{
namespace reducer
{
// FIXME need to dynamically resize these
void *get_device_buffer() { return d_reduce; }  // device-resident reduction buffer
void *get_mapped_buffer() { return hd_reduce; } // device alias of the host-mapped buffer
void *get_host_buffer() { return h_reduce; }    // host-mapped reduction buffer (zeroed/constructed in init())
count_t *get_count() { return reduce_count; }   // per-reduction counters used by the reduction kernels (see reduce_helper.h)
qudaEvent_t &get_event() { return reduceEnd; }  // event associated with reduction completion — semantics defined by callers
// Returns the byte size required for the reduction buffers, sized for the
// worst case of the two reduction flavours described below.
size_t buffer_size()
{
  /* we have these different reductions to cater for:

     - regular reductions (reduce_quda.cu) where are reducing to a
       single vector type (max length 4 presently), and a
       grid-stride loop with max number of blocks = 2 x SM count

     - multi-reductions where we are reducing to a matrix of size
       of size QUDA_MAX_MULTI_REDUCE of vectors (max length 4),
       and a grid-stride loop with maximum number of blocks = 2 x
       SM count
  */
  int reduce_size = 4 * sizeof(device_reduce_t);        // bytes of one 4-component reduction value
  int max_reduce = reduce_size;                         // regular reduction: one value per block
  int max_multi_reduce = max_n_reduce() * reduce_size;  // multi-reduction: one value per row per block
  int max_reduce_blocks = 2 * device::processor_count();
  // reduction buffer size
  // NOTE(review): the product is formed in int before widening to size_t —
  // fine for realistic SM counts, but worth confirming if max_n_reduce() grows.
  size_t bytes = max_reduce_blocks * ::max(max_reduce, max_multi_reduce);
  return bytes;
}
// Tunable 1D kernel wrapper that initializes the array of per-reduction
// counters, one entry per potential simultaneous reduction (max_n_reduce()).
// The init_count kernel lives in kernels/reduce_init.cuh — presumably it
// zero-initializes each counter; confirm there.
template <typename T>
struct init_reduce : public TunableKernel1D {
  T *reduce_count;  // device buffer of counters to initialize

  long long bytes() const { return max_n_reduce() * sizeof(T); }  // bytes touched by the kernel
  unsigned int minThreads() const { return max_n_reduce(); }      // one thread per counter

  // Launches immediately on the default stream; autotuning happens in apply().
  init_reduce(T *reduce_count) :
    TunableKernel1D(max_n_reduce()),
    reduce_count(reduce_count)
  { apply(device::get_default_stream()); }

  void apply(const qudaStream_t &stream)
  {
    auto tp = tuneLaunch(*this, getTuning(), getVerbosity());
    launch_device<init_count>(tp, stream, init_arg<T>(reduce_count));
  }
};
// Allocates and initializes the global reduction state: the device buffer,
// the host-mapped buffer (with its matching device alias hd_reduce), the
// counter array, and the completion event. Buffer allocations are idempotent
// (guarded by null checks), so repeat calls only re-create the event.
// NOTE(review): reduceEnd is unconditionally re-created; calling init() twice
// without an intervening destroy() would leak the previous event — confirm
// the intended call discipline.
void init()
{
  auto bytes = buffer_size();
  if (!d_reduce) d_reduce = (device_reduce_t *)device_malloc(bytes);
  // these arrays are actually oversized currently (only needs to be device_reduce_t x 3)
  // if the device supports host-mapped memory then use a host-mapped array for the reduction
  if (!h_reduce) {
    h_reduce = (device_reduce_t *)mapped_malloc(bytes);
    hd_reduce = (device_reduce_t *)get_mapped_device_pointer(h_reduce); // set the matching device pointer
#ifdef HETEROGENEOUS_ATOMIC
    // Heterogeneous atomics require properly constructed atomic objects, not
    // just zeroed memory, so placement-new each element.
    using system_atomic_t = device_reduce_t;
    size_t n_reduce = bytes / sizeof(system_atomic_t);
    auto *atomic_buf = reinterpret_cast<system_atomic_t *>(h_reduce); // FIXME
    for (size_t i = 0; i < n_reduce; i++) new (atomic_buf + i) system_atomic_t {0}; // placement new constructor
#else
    memset(h_reduce, 0, bytes); // added to ensure that valgrind doesn't report h_reduce is unitialised
#endif
  }
  if (!reduce_count) {
    reduce_count = static_cast<count_t *>(device_malloc(max_n_reduce() * sizeof(decltype(*reduce_count))));
    // Constructor launches the counter-initialization kernel immediately.
    init_reduce<count_t> init(reduce_count);
  }
  reduceEnd = qudaEventCreate();
}
// Tears down the global reduction state created by init(): the event first,
// then the counter array and the reduction buffers. Pointers are reset so a
// subsequent init() re-allocates cleanly.
void destroy()
{
  qudaEventDestroy(reduceEnd);

  if (reduce_count != nullptr) {
    device_free(reduce_count);
    reduce_count = nullptr;
  }

  if (d_reduce != nullptr) {
    device_free(d_reduce);
    d_reduce = nullptr;
  }

  if (h_reduce != nullptr) {
    host_free(h_reduce);
    h_reduce = nullptr;
  }

  // hd_reduce is the device alias of h_reduce's mapping (see init());
  // it has no separate allocation to free.
  hd_reduce = nullptr;
}
} // namespace reducer
} // namespace quda
| 6b22f97da428fee815132318603f3187b33f4f3e.cu | #include <quda_internal.h>
#include <malloc_quda.h>
#include <reduce_helper.h>
#include <tunable_nd.h>
#include <kernels/reduce_init.cuh>
// These are used for reduction kernels
static device_reduce_t *d_reduce = nullptr;
static device_reduce_t *h_reduce = nullptr;
static device_reduce_t *hd_reduce = nullptr;
static count_t *reduce_count = nullptr;
static qudaEvent_t reduceEnd;
namespace quda
{
namespace reducer
{
// FIXME need to dynamically resize these
void *get_device_buffer() { return d_reduce; }
void *get_mapped_buffer() { return hd_reduce; }
void *get_host_buffer() { return h_reduce; }
count_t *get_count() { return reduce_count; }
qudaEvent_t &get_event() { return reduceEnd; }
// Returns the byte size required for the reduction buffers, sized for the
// worst case of the two reduction flavours described below.
size_t buffer_size()
{
  /* we have these different reductions to cater for:

     - regular reductions (reduce_quda.cu) where are reducing to a
       single vector type (max length 4 presently), and a
       grid-stride loop with max number of blocks = 2 x SM count

     - multi-reductions where we are reducing to a matrix of size
       of size QUDA_MAX_MULTI_REDUCE of vectors (max length 4),
       and a grid-stride loop with maximum number of blocks = 2 x
       SM count
  */
  int reduce_size = 4 * sizeof(device_reduce_t);        // bytes of one 4-component reduction value
  int max_reduce = reduce_size;                         // regular reduction: one value per block
  int max_multi_reduce = max_n_reduce() * reduce_size;  // multi-reduction: one value per row per block
  int max_reduce_blocks = 2 * device::processor_count();
  // reduction buffer size
  // NOTE(review): the product is formed in int before widening to size_t —
  // fine for realistic SM counts, but worth confirming if max_n_reduce() grows.
  size_t bytes = max_reduce_blocks * std::max(max_reduce, max_multi_reduce);
  return bytes;
}
template <typename T>
struct init_reduce : public TunableKernel1D {
T *reduce_count;
long long bytes() const { return max_n_reduce() * sizeof(T); }
unsigned int minThreads() const { return max_n_reduce(); }
init_reduce(T *reduce_count) :
TunableKernel1D(max_n_reduce()),
reduce_count(reduce_count)
{ apply(device::get_default_stream()); }
void apply(const qudaStream_t &stream)
{
auto tp = tuneLaunch(*this, getTuning(), getVerbosity());
launch_device<init_count>(tp, stream, init_arg<T>(reduce_count));
}
};
void init()
{
auto bytes = buffer_size();
if (!d_reduce) d_reduce = (device_reduce_t *)device_malloc(bytes);
// these arrays are actually oversized currently (only needs to be device_reduce_t x 3)
// if the device supports host-mapped memory then use a host-mapped array for the reduction
if (!h_reduce) {
h_reduce = (device_reduce_t *)mapped_malloc(bytes);
hd_reduce = (device_reduce_t *)get_mapped_device_pointer(h_reduce); // set the matching device pointer
#ifdef HETEROGENEOUS_ATOMIC
using system_atomic_t = device_reduce_t;
size_t n_reduce = bytes / sizeof(system_atomic_t);
auto *atomic_buf = reinterpret_cast<system_atomic_t *>(h_reduce); // FIXME
for (size_t i = 0; i < n_reduce; i++) new (atomic_buf + i) system_atomic_t {0}; // placement new constructor
#else
memset(h_reduce, 0, bytes); // added to ensure that valgrind doesn't report h_reduce is unitialised
#endif
}
if (!reduce_count) {
reduce_count = static_cast<count_t *>(device_malloc(max_n_reduce() * sizeof(decltype(*reduce_count))));
init_reduce<count_t> init(reduce_count);
}
reduceEnd = qudaEventCreate();
}
// Releases everything init() allocated. The event goes first; each buffer is
// freed and its pointer cleared so init() can be called again safely.
void destroy()
{
  qudaEventDestroy(reduceEnd);

  if (reduce_count != nullptr) {
    device_free(reduce_count);
    reduce_count = nullptr;
  }
  if (d_reduce != nullptr) {
    device_free(d_reduce);
    d_reduce = nullptr;
  }
  if (h_reduce != nullptr) {
    host_free(h_reduce);
    h_reduce = nullptr;
  }

  // hd_reduce merely aliases h_reduce's device mapping (set in init());
  // clearing the pointer is all that is required.
  hd_reduce = nullptr;
}
} // namespace reducer
} // namespace quda
|
7e6a659360fb064e4e543de3532ff7e47f726cd6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// User: g114@157.88.139.133
// ExecutionRequest[P:'final-secuencial-V3.cu',P:1,T:1,args:'',q:'cudalb']
// May 16 2019 17:09:25
#include "cputils.h" // Added by tablon
/*
* Simplified simulation of fire extinguishing
*
* Computacion Paralela, Grado en Informatica (Universidad de Valladolid)
* 2018/2019
*
* v1.4
*
* (c) 2019 Arturo Gonzalez Escribano
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <cputils.h>
#define RADIUS_TYPE_1 3
#define RADIUS_TYPE_2_3 9
#define THRESHOLD 0.1f
/* Structure to store data of an extinguishing team */
typedef struct {
int x,y;
int type;
int target;
} Team;
/* Structure to store data of a fire focal point */
typedef struct {
int x,y;
int start;
int heat;
int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team
} FocalPoint;
/* Macro function to simplify accessing with two coordinates to a flattened array */
#define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ]
/*
* Function: Print usage line in stderr
*/
/*
 * Prints the command-line usage summary on stderr.
 * program_name is substituted into the first line (typically argv[0]).
 */
void show_usage( char *program_name ) {
	FILE *out = stderr;
	fprintf(out, "Usage: %s <config_file> | <command_line_args>\n", program_name);
	fputs("\t<config_file> ::= -f <file_name>\n", out);
	fputs("\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numFocalPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... ]\n", out);
	fputs("\n", out);
}
#ifdef DEBUG
/*
* Function: Print the current state of the simulation
*/
void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, FocalPoint *focal, float global_residual ) {
/*
* You don't need to optimize this function, it is only for pretty printing and debugging purposes.
* It is not compiled in the production versions of the program.
* Thus, it is never used when measuring times in the leaderboard
*/
int i,j;
printf("Iteration: %d\n", iteration );
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
for( i=0; i<rows; i++ ) {
printf("|");
for( j=0; j<columns; j++ ) {
char symbol;
if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*';
else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100);
else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+';
else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.';
else symbol = '0';
int t;
int flag_team = 0;
for( t=0; t<num_teams; t++ )
if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; }
if ( flag_team ) printf("[%c]", symbol );
else {
int f;
int flag_focal = 0;
for( f=0; f<num_focal; f++ )
if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; }
if ( flag_focal ) printf("(%c)", symbol );
else printf(" %c ", symbol );
}
}
printf("|\n");
}
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
printf("Global residual: %f\n\n", global_residual);
}
#endif
/********************************************************
* Inicio De La Definicion De Kernels *
********************************************************/
// Fills one element per thread with 1.0f.
//
// NOTE(review): this kernel has no bounds guard, so the launch configuration
// must cover the allocation exactly — a ceil-div grid (as used elsewhere in
// this file) would make surplus threads write out of bounds. The signature
// cannot change without breaking callers, so prefer the kernelCopia-style
// guarded pattern if this kernel is ever reused.
__global__ void kernelInicializar( float *vector)
{
	int posicion = threadIdx.x+(blockDim.x*blockIdx.x);
	// 1.0f: float literal avoids materializing a double constant in a float kernel
	vector[posicion] = 1.0f;
}
// Element-wise copy of `vector` into `copia`. Safe to launch with a ceil-div
// grid: surplus threads in the last block are filtered by the guard.
__global__ void kernelCopia( float *vector, float *copia, int elementos)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= elementos) return;  // guard threads past the end of the data
	copia[idx] = vector[idx];
}
// Jacobi-style heat update: each interior cell of `vector` becomes the
// average of its four neighbours read from `copia`. Border cells (and any
// surplus threads beyond rows*columns) are left untouched by the guard.
__global__ void kernelupdate(float *vector, float *copia, int columns, int rows)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	int row = idx / columns;
	int col = idx % columns;

	// Skip the border and out-of-range threads.
	if (row < 1 || row >= rows - 1 || col < 1 || col >= columns - 1) return;

	float up    = copia[(row - 1) * columns + col];
	float down  = copia[(row + 1) * columns + col];
	float left  = copia[row * columns + (col - 1)];
	float right = copia[row * columns + (col + 1)];
	vector[row * columns + col] = (up + down + left + right) / 4;
}
/* *******************************************************
* Fin De La Definicion De Kernels **
**********************************************************/
/***********************************************************
* Funcion para comprobar errores *
************************************************************/
void CUDA_CHECK(int id) { \
hipError_t check = hipGetLastError(); \
if ( check != hipSuccess ) { \
printf("Error --: %d %s \n", id,hipGetErrorString( check ) ); \
exit( EXIT_FAILURE ); \
} }
/**********************************************************
***********************************************************/
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,t;
// Simulation data
int rows, columns, max_iter;
float *surface, *surfaceCopy;
int num_teams, num_focal;
Team *teams;
FocalPoint *focal;
/* 1. Read simulation arguments */
/* 1.1. Check minimum number of arguments */
if (argc<2) {
fprintf(stderr,"-- Error in arguments: No arguments\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
int read_from_file = ! strcmp( argv[1], "-f" );
/* 1.2. Read configuration from file */
if ( read_from_file ) {
/* 1.2.1. Open file */
if (argc<3) {
fprintf(stderr,"-- Error in arguments: file-name argument missing\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
FILE *args = cp_abrir_fichero( argv[2] );
if ( args == NULL ) {
fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
/* 1.2.2. Read surface and maximum number of iterations */
int ok;
ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
if ( surface == NULL || surfaceCopy == NULL ) {
fprintf(stderr,"-- Error allocating: surface structures\n");
exit( EXIT_FAILURE );
}
/* 1.2.3. Teams information */
ok = fscanf(args, "%d", &num_teams );
if ( ok != 1 ) {
fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
}
/* 1.2.4. Focal points information */
ok = fscanf(args, "%d", &num_focal );
if ( ok != 1 ) {
fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat);
if ( ok != 4 ) {
fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
focal[i].active = 0;
}
}
/* 1.3. Read configuration from arguments */
else {
/* 1.3.1. Check minimum number of arguments */
if (argc<6) {
fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
/* 1.3.2. Surface and maximum number of iterations */
rows = atoi( argv[1] );
columns = atoi( argv[2] );
max_iter = atoi( argv[3] );
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
/* 1.3.3. Teams information */
num_teams = atoi( argv[4] );
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
if ( argc < num_teams*3 + 5 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
teams[i].x = atoi( argv[5+i*3] );
teams[i].y = atoi( argv[6+i*3] );
teams[i].type = atoi( argv[7+i*3] );
}
/* 1.3.4. Focal points information */
int focal_args = 5 + i*3;
if ( argc < focal_args+1 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
num_focal = atoi( argv[focal_args] );
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
if ( argc < focal_args + 1 + num_focal*4 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
focal[i].x = atoi( argv[focal_args+i*4+1] );
focal[i].y = atoi( argv[focal_args+i*4+2] );
focal[i].start = atoi( argv[focal_args+i*4+3] );
focal[i].heat = atoi( argv[focal_args+i*4+4] );
focal[i].active = 0;
}
/* 1.3.5. Sanity check: No extra arguments at the end of line */
if ( argc > focal_args+i*4+1 ) {
fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
}
#ifdef DEBUG
/* 1.4. Print arguments */
printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter);
printf("Arguments, Teams: %d, Focal points: %d\n", num_teams, num_focal );
for( i=0; i<num_teams; i++ ) {
printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type );
}
for( i=0; i<num_focal; i++ ) {
printf("\tFocal_point %d, position (%d,%d), start time: %d, temperature: %d\n", i,
focal[i].x,
focal[i].y,
focal[i].start,
focal[i].heat );
}
#endif // DEBUG
/* 2. Select GPU and start global timer */
hipSetDevice(0);
hipDeviceSynchronize();
double ttotal = cp_Wtime();
/*
*
* START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT
*
*/
/********************************************
* Variables aadidas para la practica *
*******************************************/
int nelementos=rows*columns ;
int maxhilos=256;//512 mejor tiempo
int nblock=nelementos/maxhilos;
if((nelementos%maxhilos)!=0) nblock++;
int mhilos=maxhilos;
int sizeSurface = sizeof(float)*(rows *columns);
float *auxiliar=(float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );;
float * devauxiliar=(float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );;
float *devSurface;
float *devSurfaceCopy;
hipMalloc((void **)&devSurface, sizeSurface);
CUDA_CHECK(100);
hipMalloc((void **)&devSurfaceCopy, sizeSurface);
CUDA_CHECK(101);
/*******************************************
* Fin variables aadidas * ADD
******************************************/
/* 3. Initialize surface */
for( i=0; i<rows; i++ )
for( j=0; j<columns; j++ )
accessMat( surface, i, j ) = 0.0;
//Copiamos la inicializacion del vector al vector de la GPU
hipMemcpy(devSurface, surface, sizeSurface, hipMemcpyHostToDevice);
CUDA_CHECK(1);
/* 4. Simulation */
int iter;
int flag_stability = 0;
int first_activation = 0;
for( iter=0; iter<max_iter && ! flag_stability; iter++ ) {
/* 4.1. Activate focal points */
int num_deactivated = 0;
for( i=0; i<num_focal; i++ ) {
if ( focal[i].start == iter ) {
focal[i].active = 1;
if ( ! first_activation ) first_activation = 1;
}
// Count focal points already deactivated by a team
if ( focal[i].active == 2 ) num_deactivated++;
}
if (!first_activation) continue;// secuencial
/* 4.2. Propagate heat (10 steps per each team movement) */
float global_residual = 0.0f;
int step;
for( step=0; step<10; step++ ) {
/* 4.2.1. Update heat on active focal points */
for( i=0; i<num_focal; i++ ) {
if ( focal[i].active != 1 ) continue;
int x = focal[i].x;
int y = focal[i].y;
accessMat( surface, x, y ) = focal[i].heat;
}
hipMemcpy(devSurface, surface, sizeSurface, hipMemcpyHostToDevice);
CUDA_CHECK(2);
/***************************************************************************
4.2.2. Copy values of the surface in ancillary structure (Skip borders)
Realizamos la copia mediante intercambio de punteros
Tanto en la CPU como en la GPU
****************************************************************************/
auxiliar= surface;//declaradas al principio
surface=surfaceCopy;
surfaceCopy=auxiliar;
devauxiliar= devSurface;
devSurface=devSurfaceCopy;
devSurfaceCopy=devauxiliar;
/* 4.2.3. Update surface values (skip borders) */
hipLaunchKernelGGL(( kernelupdate), dim3(nblock),dim3(mhilos), 0, 0, devSurface,devSurfaceCopy,columns,rows);
CUDA_CHECK(3);
hipDeviceSynchronize();
hipMemcpy(surface,devSurface,sizeSurface,hipMemcpyDeviceToHost);
/* 4.2.4. Compute the maximum residual difference (absolute value) */
if(step==0){
for( i=1; i<rows-1; i++ )
for( j=1; j<columns-1; j++ )
if ( fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) ) > global_residual ) {
global_residual = fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) );
}
}
}
/* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */
if( num_deactivated == num_focal && global_residual < THRESHOLD ) flag_stability = 1;
/* 4.3. Move teams */
for( t=0; t<num_teams; t++ ) {
/* 4.3.1. Choose nearest focal point */
float distance = FLT_MAX;
int target = -1;
for( j=0; j<num_focal; j++ ) {
if ( focal[j].active != 1 ) continue; // Skip non-active focal points
float dx = focal[j].x - teams[t].x;
float dy = focal[j].y - teams[t].y;
float local_distance = sqrtf( dx*dx + dy*dy );
if ( local_distance < distance ) {
distance = local_distance;
target = j;
}
}
/* 4.3.2. Annotate target for the next stage */
teams[t].target = target;
/* 4.3.3. No active focal point to choose, no movement */
if ( target == -1 ) continue;
/* 4.3.4. Move in the focal point direction */
if ( teams[t].type == 1 ) {
// Type 1: Can move in diagonal
if ( focal[target].x < teams[t].x ) teams[t].x--;
if ( focal[target].x > teams[t].x ) teams[t].x++;
if ( focal[target].y < teams[t].y ) teams[t].y--;
if ( focal[target].y > teams[t].y ) teams[t].y++;
}
else if ( teams[t].type == 2 ) {
// Type 2: First in horizontal direction, then in vertical direction
if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
else if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
}
else {
// Type 3: First in vertical direction, then in horizontal direction
if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
else if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
}
}
/* 4.4. Team actions */
for( t=0; t<num_teams; t++ ) {
/* 4.4.1. Deactivate the target focal point when it is reached */
int target = teams[t].target;
if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y
&& focal[target].active == 1 )
focal[target].active = 2;
/* 4.4.2. Reduce heat in a circle around the team */
int radius;
// Influence area of fixed radius depending on type
if ( teams[t].type == 1 ) radius = RADIUS_TYPE_1;
else radius = RADIUS_TYPE_2_3;
for( i=teams[t].x-radius; i<=teams[t].x+radius; i++ ) {
for( j=teams[t].y-radius; j<=teams[t].y+radius; j++ ) {
if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) continue; // Out of the heated surface
float dx = teams[t].x - i;
float dy = teams[t].y - j;
float distance = sqrtf( dx*dx + dy*dy );
if ( distance <= radius ) {
accessMat( surface, i, j ) = accessMat( surface, i, j ) * ( 1 - 0.25 ); // Team efficiency factor
}
}
}
}
#ifdef DEBUG
/* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */
print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual );
#endif // DEBUG
}
/*
*
* STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT
*
*/
/* 5. Stop global time */
hipDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 6. Output for leaderboard */
printf("\n");
/* 6.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */
printf("Result: %d", iter);
/*
for (i=0; i<num_teams; i++)
printf(" %d %d", teams[i].x, teams[i].y );
*/
for (i=0; i<num_focal; i++)
printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) );
printf("\n");
/* 7. Free resources */
free( teams );
free( focal );
free( surface );
free( surfaceCopy );
/* 8. End */
return 0;
}
| 7e6a659360fb064e4e543de3532ff7e47f726cd6.cu | // User: g114@157.88.139.133
// ExecutionRequest[P:'final-secuencial-V3.cu',P:1,T:1,args:'',q:'cudalb']
// May 16 2019 17:09:25
#include "cputils.h" // Added by tablon
/*
* Simplified simulation of fire extinguishing
*
* Computacion Paralela, Grado en Informatica (Universidad de Valladolid)
* 2018/2019
*
* v1.4
*
* (c) 2019 Arturo Gonzalez Escribano
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <cputils.h>
#define RADIUS_TYPE_1 3
#define RADIUS_TYPE_2_3 9
#define THRESHOLD 0.1f
/* Structure to store data of an extinguishing team */
typedef struct {
int x,y;
int type;
int target;
} Team;
/* Structure to store data of a fire focal point */
typedef struct {
int x,y;
int start;
int heat;
int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team
} FocalPoint;
/* Macro function to simplify accessing with two coordinates to a flattened array */
#define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ]
/*
* Function: Print usage line in stderr
*/
/*
 * Writes the usage banner to stderr; program_name fills the %s in the
 * first line (normally argv[0]).
 */
void show_usage( char *program_name ) {
	fprintf(stderr, "Usage: %s <config_file> | <command_line_args>\n", program_name);
	fputs("\t<config_file> ::= -f <file_name>\n", stderr);
	fputs("\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numFocalPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... ]\n", stderr);
	fputs("\n", stderr);
}
#ifdef DEBUG
/*
* Function: Print the current state of the simulation
*/
void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, FocalPoint *focal, float global_residual ) {
/*
* You don't need to optimize this function, it is only for pretty printing and debugging purposes.
* It is not compiled in the production versions of the program.
* Thus, it is never used when measuring times in the leaderboard
*/
int i,j;
printf("Iteration: %d\n", iteration );
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
for( i=0; i<rows; i++ ) {
printf("|");
for( j=0; j<columns; j++ ) {
char symbol;
if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*';
else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100);
else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+';
else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.';
else symbol = '0';
int t;
int flag_team = 0;
for( t=0; t<num_teams; t++ )
if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; }
if ( flag_team ) printf("[%c]", symbol );
else {
int f;
int flag_focal = 0;
for( f=0; f<num_focal; f++ )
if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; }
if ( flag_focal ) printf("(%c)", symbol );
else printf(" %c ", symbol );
}
}
printf("|\n");
}
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
printf("Global residual: %f\n\n", global_residual);
}
#endif
/********************************************************
* Inicio De La Definicion De Kernels *
********************************************************/
// Fills one element per thread with 1.0f.
//
// NOTE(review): no bounds guard — the launch configuration must cover the
// allocation exactly; with the ceil-div grids used elsewhere in this file,
// surplus threads would write out of bounds. Signature cannot change without
// breaking callers; add a guarded variant if this kernel is reused.
__global__ void kernelInicializar( float *vector)
{
	int posicion = threadIdx.x+(blockDim.x*blockIdx.x);
	// 1.0f: float literal avoids a double-precision constant in a float kernel
	vector[posicion] = 1.0f;
}
// Copies `vector` into `copia` element-wise; the guard makes it safe for
// ceil-div grids whose last block overhangs the data.
__global__ void kernelCopia( float *vector, float *copia, int elementos)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= elementos) return;
	copia[idx] = vector[idx];
}
// Heat-propagation step: interior cells of `vector` take the mean of their
// four neighbours read from `copia`; borders and surplus threads are skipped.
__global__ void kernelupdate(float *vector, float *copia, int columns, int rows)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	int row = idx / columns;
	int col = idx % columns;

	if (row < 1 || row >= rows - 1 || col < 1 || col >= columns - 1) return;

	float north = copia[(row - 1) * columns + col];
	float south = copia[(row + 1) * columns + col];
	float west  = copia[row * columns + (col - 1)];
	float east  = copia[row * columns + (col + 1)];
	vector[row * columns + col] = (north + south + west + east) / 4;
}
/* *******************************************************
* Fin De La Definicion De Kernels **
**********************************************************/
/***********************************************************
* Funcion para comprobar errores *
************************************************************/
/*
 * Checks the CUDA runtime's sticky error; on failure reports the
 * caller-supplied id and the error string, then terminates.
 * cudaGetLastError() reads AND clears the error state.
 * (Leftover macro line-continuations from the original are removed —
 * this is an ordinary function.)
 */
void CUDA_CHECK(int id) {
	cudaError_t status = cudaGetLastError();
	if (status != cudaSuccess) {
		printf("Error --: %d %s \n", id, cudaGetErrorString(status));
		exit( EXIT_FAILURE );
	}
}
/**********************************************************
***********************************************************/
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,t;
// Simulation data
int rows, columns, max_iter;
float *surface, *surfaceCopy;
int num_teams, num_focal;
Team *teams;
FocalPoint *focal;
/* 1. Read simulation arguments */
/* 1.1. Check minimum number of arguments */
if (argc<2) {
fprintf(stderr,"-- Error in arguments: No arguments\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
int read_from_file = ! strcmp( argv[1], "-f" );
/* 1.2. Read configuration from file */
if ( read_from_file ) {
/* 1.2.1. Open file */
if (argc<3) {
fprintf(stderr,"-- Error in arguments: file-name argument missing\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
FILE *args = cp_abrir_fichero( argv[2] );
if ( args == NULL ) {
fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
/* 1.2.2. Read surface and maximum number of iterations */
int ok;
ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
if ( surface == NULL || surfaceCopy == NULL ) {
fprintf(stderr,"-- Error allocating: surface structures\n");
exit( EXIT_FAILURE );
}
/* 1.2.3. Teams information */
ok = fscanf(args, "%d", &num_teams );
if ( ok != 1 ) {
fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
}
/* 1.2.4. Focal points information */
ok = fscanf(args, "%d", &num_focal );
if ( ok != 1 ) {
fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat);
if ( ok != 4 ) {
fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
focal[i].active = 0;
}
}
/* 1.3. Read configuration from arguments */
else {
/* 1.3.1. Check minimum number of arguments */
if (argc<6) {
fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
/* 1.3.2. Surface and maximum number of iterations */
rows = atoi( argv[1] );
columns = atoi( argv[2] );
max_iter = atoi( argv[3] );
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
/* 1.3.3. Teams information */
num_teams = atoi( argv[4] );
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
if ( argc < num_teams*3 + 5 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
teams[i].x = atoi( argv[5+i*3] );
teams[i].y = atoi( argv[6+i*3] );
teams[i].type = atoi( argv[7+i*3] );
}
/* 1.3.4. Focal points information */
int focal_args = 5 + i*3;
if ( argc < focal_args+1 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
num_focal = atoi( argv[focal_args] );
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
if ( argc < focal_args + 1 + num_focal*4 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
focal[i].x = atoi( argv[focal_args+i*4+1] );
focal[i].y = atoi( argv[focal_args+i*4+2] );
focal[i].start = atoi( argv[focal_args+i*4+3] );
focal[i].heat = atoi( argv[focal_args+i*4+4] );
focal[i].active = 0;
}
/* 1.3.5. Sanity check: No extra arguments at the end of line */
if ( argc > focal_args+i*4+1 ) {
fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
}
#ifdef DEBUG
/* 1.4. Print arguments */
printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter);
printf("Arguments, Teams: %d, Focal points: %d\n", num_teams, num_focal );
for( i=0; i<num_teams; i++ ) {
printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type );
}
for( i=0; i<num_focal; i++ ) {
printf("\tFocal_point %d, position (%d,%d), start time: %d, temperature: %d\n", i,
focal[i].x,
focal[i].y,
focal[i].start,
focal[i].heat );
}
#endif // DEBUG
/* 2. Select GPU and start global timer */
cudaSetDevice(0);
cudaDeviceSynchronize();
double ttotal = cp_Wtime();
/*
*
* START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT
*
*/
/********************************************
* Variables añadidas para la practica *
*******************************************/
int nelementos=rows*columns ;
int maxhilos=256;//512 mejor tiempo
int nblock=nelementos/maxhilos;
if((nelementos%maxhilos)!=0) nblock++;
int mhilos=maxhilos;
int sizeSurface = sizeof(float)*(rows *columns);
float *auxiliar=(float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );;
float * devauxiliar=(float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );;
float *devSurface;
float *devSurfaceCopy;
cudaMalloc((void **)&devSurface, sizeSurface);
CUDA_CHECK(100);
cudaMalloc((void **)&devSurfaceCopy, sizeSurface);
CUDA_CHECK(101);
/*******************************************
* Fin variables añadidas * ADD
******************************************/
/* 3. Initialize surface */
for( i=0; i<rows; i++ )
for( j=0; j<columns; j++ )
accessMat( surface, i, j ) = 0.0;
//Copiamos la inicializacion del vector al vector de la GPU
cudaMemcpy(devSurface, surface, sizeSurface, cudaMemcpyHostToDevice);
CUDA_CHECK(1);
/* 4. Simulation */
int iter;
int flag_stability = 0;
int first_activation = 0;
for( iter=0; iter<max_iter && ! flag_stability; iter++ ) {
/* 4.1. Activate focal points */
int num_deactivated = 0;
for( i=0; i<num_focal; i++ ) {
if ( focal[i].start == iter ) {
focal[i].active = 1;
if ( ! first_activation ) first_activation = 1;
}
// Count focal points already deactivated by a team
if ( focal[i].active == 2 ) num_deactivated++;
}
if (!first_activation) continue;// secuencial
/* 4.2. Propagate heat (10 steps per each team movement) */
float global_residual = 0.0f;
int step;
for( step=0; step<10; step++ ) {
/* 4.2.1. Update heat on active focal points */
for( i=0; i<num_focal; i++ ) {
if ( focal[i].active != 1 ) continue;
int x = focal[i].x;
int y = focal[i].y;
accessMat( surface, x, y ) = focal[i].heat;
}
cudaMemcpy(devSurface, surface, sizeSurface, cudaMemcpyHostToDevice);
CUDA_CHECK(2);
/***************************************************************************
4.2.2. Copy values of the surface in ancillary structure (Skip borders)
Realizamos la copia mediante intercambio de punteros
Tanto en la CPU como en la GPU
****************************************************************************/
auxiliar= surface;//declaradas al principio
surface=surfaceCopy;
surfaceCopy=auxiliar;
devauxiliar= devSurface;
devSurface=devSurfaceCopy;
devSurfaceCopy=devauxiliar;
/* 4.2.3. Update surface values (skip borders) */
kernelupdate<<<nblock,mhilos>>>(devSurface,devSurfaceCopy,columns,rows);
CUDA_CHECK(3);
cudaDeviceSynchronize();
cudaMemcpy(surface,devSurface,sizeSurface,cudaMemcpyDeviceToHost);
/* 4.2.4. Compute the maximum residual difference (absolute value) */
if(step==0){
for( i=1; i<rows-1; i++ )
for( j=1; j<columns-1; j++ )
if ( fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) ) > global_residual ) {
global_residual = fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) );
}
}
}
/* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */
if( num_deactivated == num_focal && global_residual < THRESHOLD ) flag_stability = 1;
/* 4.3. Move teams */
for( t=0; t<num_teams; t++ ) {
/* 4.3.1. Choose nearest focal point */
float distance = FLT_MAX;
int target = -1;
for( j=0; j<num_focal; j++ ) {
if ( focal[j].active != 1 ) continue; // Skip non-active focal points
float dx = focal[j].x - teams[t].x;
float dy = focal[j].y - teams[t].y;
float local_distance = sqrtf( dx*dx + dy*dy );
if ( local_distance < distance ) {
distance = local_distance;
target = j;
}
}
/* 4.3.2. Annotate target for the next stage */
teams[t].target = target;
/* 4.3.3. No active focal point to choose, no movement */
if ( target == -1 ) continue;
/* 4.3.4. Move in the focal point direction */
if ( teams[t].type == 1 ) {
// Type 1: Can move in diagonal
if ( focal[target].x < teams[t].x ) teams[t].x--;
if ( focal[target].x > teams[t].x ) teams[t].x++;
if ( focal[target].y < teams[t].y ) teams[t].y--;
if ( focal[target].y > teams[t].y ) teams[t].y++;
}
else if ( teams[t].type == 2 ) {
// Type 2: First in horizontal direction, then in vertical direction
if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
else if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
}
else {
// Type 3: First in vertical direction, then in horizontal direction
if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
else if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
}
}
/* 4.4. Team actions */
for( t=0; t<num_teams; t++ ) {
/* 4.4.1. Deactivate the target focal point when it is reached */
int target = teams[t].target;
if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y
&& focal[target].active == 1 )
focal[target].active = 2;
/* 4.4.2. Reduce heat in a circle around the team */
int radius;
// Influence area of fixed radius depending on type
if ( teams[t].type == 1 ) radius = RADIUS_TYPE_1;
else radius = RADIUS_TYPE_2_3;
for( i=teams[t].x-radius; i<=teams[t].x+radius; i++ ) {
for( j=teams[t].y-radius; j<=teams[t].y+radius; j++ ) {
if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) continue; // Out of the heated surface
float dx = teams[t].x - i;
float dy = teams[t].y - j;
float distance = sqrtf( dx*dx + dy*dy );
if ( distance <= radius ) {
accessMat( surface, i, j ) = accessMat( surface, i, j ) * ( 1 - 0.25 ); // Team efficiency factor
}
}
}
}
#ifdef DEBUG
/* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */
print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual );
#endif // DEBUG
}
/*
*
* STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT
*
*/
/* 5. Stop global time */
cudaDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 6. Output for leaderboard */
printf("\n");
/* 6.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */
printf("Result: %d", iter);
/*
for (i=0; i<num_teams; i++)
printf(" %d %d", teams[i].x, teams[i].y );
*/
for (i=0; i<num_focal; i++)
printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) );
printf("\n");
/* 7. Free resources */
free( teams );
free( focal );
free( surface );
free( surfaceCopy );
/* 8. End */
return 0;
}
|
c4308b99b3e2cc10611b0fe8f022fec2a5bb3dd2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zmgeelltmv.cu, normal z -> c, Tue Aug 30 09:38:45 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
/* ELL SpMV kernel for a set of num_vecs right-hand-side vectors.
 * One thread processes one matrix row.  Per-vector partial sums live in
 * dynamically sized shared memory (num_vecs * blockDim.x complex values,
 * supplied as the third launch parameter).  When the template flag
 * `betazero` is set, the beta * y term is skipped entirely so that
 * uninitialized y values are never read. */
template<bool betazero>
__global__ void
cmgeelltmv_kernel(
    int num_rows,
    int num_cols,
    int num_vecs,
    int num_cols_per_row,
    magmaFloatComplex alpha,
    magmaFloatComplex * dval,
    magma_index_t * dcolind,
    magmaFloatComplex * dx,
    magmaFloatComplex beta,
    magmaFloatComplex * dy)
{
    extern __shared__ magmaFloatComplex dot[];
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if(row < num_rows ) {
        /* Zero this thread's accumulator for each of the num_vecs vectors. */
        for( int i=0; i<num_vecs; i++ )
            dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0);
        /* ELL entry n of row `row` is stored at num_rows * n + row, so
         * adjacent threads read adjacent addresses (coalesced access). */
        for ( int n = 0; n < num_cols_per_row; n++ ) {
            int col = dcolind [ num_rows * n + row ];
            magmaFloatComplex val = dval [ num_rows * n + row ];
            for( int i=0; i<num_vecs; i++ )
                dot[ threadIdx.x + i*blockDim.x ] +=
                        val * dx[col + i * num_cols ];
        }
        /* Write back: vector i of x/y is offset by i * num_cols. */
        for( int i=0; i<num_vecs; i++ ) {
            if (betazero) {
                dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] *alpha;
            } else {
                dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
                                         * alpha + beta * dy [ row + i*num_cols ];
            }
        }
    }
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
    num_vecs    magma_int_t
number of vectors
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELL
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
/* Host wrapper: launches cmgeelltmv_kernel on the queue's stream.
 * Dispatches on beta == 0 at compile time (template flag) so the
 * beta-zero path never reads uninitialized dy memory.
 * NOTE: transA is accepted for interface symmetry but not used here. */
extern "C" magma_int_t
magma_cmgeelltmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t num_vecs,
    magma_int_t nnz_per_row,
    magmaFloatComplex alpha,
    magmaFloatComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaFloatComplex_ptr dx,
    magmaFloatComplex beta,
    magmaFloatComplex_ptr dy,
    magma_queue_t queue )
{
    /* One thread per row, BLOCK_SIZE threads per block. */
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
    magma_int_t threads = BLOCK_SIZE;
    /* Dynamic shared memory: one accumulator per thread per vector. */
    unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
                * sizeof( magmaFloatComplex ); // num_vecs vectors
    if (beta == MAGMA_C_ZERO) {
        hipLaunchKernelGGL(( cmgeelltmv_kernel<true>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() ,
            m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
    } else {
        hipLaunchKernelGGL(( cmgeelltmv_kernel<false>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() ,
            m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
    }
    return MAGMA_SUCCESS;
}
| c4308b99b3e2cc10611b0fe8f022fec2a5bb3dd2.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zmgeelltmv.cu, normal z -> c, Tue Aug 30 09:38:45 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
/* ELL SpMV kernel for a set of num_vecs right-hand-side vectors.
 * One thread processes one matrix row.  Per-vector partial sums live in
 * dynamically sized shared memory (num_vecs * blockDim.x complex values,
 * supplied as the third launch parameter).  When the template flag
 * `betazero` is set, the beta * y term is skipped entirely so that
 * uninitialized y values are never read. */
template<bool betazero>
__global__ void
cmgeelltmv_kernel(
    int num_rows,
    int num_cols,
    int num_vecs,
    int num_cols_per_row,
    magmaFloatComplex alpha,
    magmaFloatComplex * dval,
    magma_index_t * dcolind,
    magmaFloatComplex * dx,
    magmaFloatComplex beta,
    magmaFloatComplex * dy)
{
    extern __shared__ magmaFloatComplex dot[];
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if(row < num_rows ) {
        /* Zero this thread's accumulator for each of the num_vecs vectors. */
        for( int i=0; i<num_vecs; i++ )
            dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0);
        /* ELL entry n of row `row` is stored at num_rows * n + row, so
         * adjacent threads read adjacent addresses (coalesced access). */
        for ( int n = 0; n < num_cols_per_row; n++ ) {
            int col = dcolind [ num_rows * n + row ];
            magmaFloatComplex val = dval [ num_rows * n + row ];
            for( int i=0; i<num_vecs; i++ )
                dot[ threadIdx.x + i*blockDim.x ] +=
                        val * dx[col + i * num_cols ];
        }
        /* Write back: vector i of x/y is offset by i * num_cols. */
        for( int i=0; i<num_vecs; i++ ) {
            if (betazero) {
                dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] *alpha;
            } else {
                dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
                                         * alpha + beta * dy [ row + i*num_cols ];
            }
        }
    }
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
    num_vecs    magma_int_t
number of vectors
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELL
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
/* Host wrapper: launches cmgeelltmv_kernel on the queue's stream.
 * Dispatches on beta == 0 at compile time (template flag) so the
 * beta-zero path never reads uninitialized dy memory.
 * NOTE: transA is accepted for interface symmetry but not used here. */
extern "C" magma_int_t
magma_cmgeelltmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t num_vecs,
    magma_int_t nnz_per_row,
    magmaFloatComplex alpha,
    magmaFloatComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaFloatComplex_ptr dx,
    magmaFloatComplex beta,
    magmaFloatComplex_ptr dy,
    magma_queue_t queue )
{
    /* One thread per row, BLOCK_SIZE threads per block. */
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
    magma_int_t threads = BLOCK_SIZE;
    /* Dynamic shared memory: one accumulator per thread per vector. */
    unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
                * sizeof( magmaFloatComplex ); // num_vecs vectors
    if (beta == MAGMA_C_ZERO) {
        cmgeelltmv_kernel<true><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>>
                 ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
    } else {
        cmgeelltmv_kernel<false><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>>
                 ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
    }
    return MAGMA_SUCCESS;
}
|
2dcf1d1f65d27b7150d8e9188075f01492e34ccf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdbool.h>
// #undef SEARCH_ALL_THE_BEST
#define SEARCH_ALL_THE_BEST
#undef PACKED
#undef COLLECT_LOG
#define MAX_BLOCK_SIZE 64535
#define BLOCK_DIM (32) /* NOTE: broken when more than 32 */
#define N_INIT_DISTRIBUTION (BLOCK_DIM * 64)
#define STACK_BUF_LEN (48 * (BLOCK_DIM/DIR_N))
/* XXX: should be defined dynamically, but hipMalloc after hipFree fails */
#define MAX_BUF_RATIO (256)
#define STATE_WIDTH 5
#define STATE_N (STATE_WIDTH * STATE_WIDTH)
typedef unsigned char uchar;
typedef signed char Direction;
#define dir_reverse(dir) ((Direction)(3 - (dir)))
#define DIR_N 4
#define DIR_FIRST 0
#define DIR_UP 0
#define DIR_RIGHT 1
#define DIR_LEFT 2
#define DIR_DOWN 3
#define POS_X(pos) ((pos) % STATE_WIDTH)
#define POS_Y(pos) ((pos) / STATE_WIDTH)
/* Per-root-block search statistics written back from the GPU. */
typedef struct search_stat_tag
{
    bool solved;                       /* true once a goal state was reached */
    int len;                           /* solution depth when solved */
    unsigned long long int loads;      /* main-loop iterations executed */
#ifdef COLLECT_LOG
    unsigned long long int nodes_expanded;
#endif
    //bool assert_failed;
} search_stat;
/* One search root transferred host -> device. */
typedef struct input_tag
{
    uchar tiles[STATE_N];              /* board layout, 0 = empty cell */
    int init_depth;                    /* g-value of this root */
    Direction parent_dir;              /* move that produced this root */
} Input;
/* state implementation */
/*
 * goal: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
 */
//__device__ __shared__ static signed char h_diff_table_shared[STATE_N][STATE_N] [DIR_N];
/* Device-side puzzle state; kept small so it fits the shared-memory stack. */
typedef struct state_tag
{
#ifndef PACKED
    uchar tile[STATE_N];
#else
    unsigned long long tile;           /* 4 bits per tile when PACKED */
#endif
    uchar empty;                       /* linear index of the empty cell */
    uchar depth;                       /* g-value */
    Direction parent_dir;              /* move that produced this state */
    uchar h_value; /* ub of h_value is STATE_WIDTH*2*(STATE_N-1), e.g. 90 */
} d_State;
#ifndef PACKED
#define state_tile_get(i) (state->tile[i])
#define state_tile_set(i, v) (state->tile[i] = (v))
#else
#define STATE_TILE_BITS 4
#define STATE_TILE_MASK ((1ull << STATE_TILE_BITS) - 1)
#define state_tile_ofs(i) (i << 2)
#define state_tile_get(i) \
((state->tile & (STATE_TILE_MASK << state_tile_ofs(i))) >> \
state_tile_ofs(i))
#define state_tile_set(i, val) \
do \
{ \
state->tile &= ~((STATE_TILE_MASK) << state_tile_ofs(i)); \
state->tile |= ((unsigned long long) val) << state_tile_ofs(i); \
} while (0)
#endif
#define distance(i, j) ((i) > (j) ? (i) - (j) : (j) - (i))
/* Build a device state from a host-prepared Input: copy tiles, locate the
 * empty cell, and compute the Manhattan-distance heuristic from scratch.
 * NOTE: the state_tile_set/get macros expand using the identifier `state`,
 * so the parameter must keep that name. */
__device__ static void
state_init(d_State *state, Input *input)
{
    state->depth = input->init_depth;
    state->parent_dir = input->parent_dir;
    for (int i = 0; i < STATE_N; ++i)
    {
        if (input->tiles[i] == 0)
            state->empty = i;
        state_tile_set(i, input->tiles[i]);
    }
    /* Sum of per-tile Manhattan distances to the goal position
     * (tile value == goal index); the empty cell is excluded. */
    state->h_value = 0;
    for (int i = 0; i < STATE_N; ++i)
    {
        uchar tile = state_tile_get(i);
        if (tile == 0)
            continue;
        state->h_value += distance(POS_X(i), POS_X(tile));
        state->h_value += distance(POS_Y(i), POS_Y(tile));
    }
}
/* Goal test: the Manhattan heuristic is 0 exactly at the goal layout. */
__device__ static inline bool
state_is_goal(d_State state)
{
    return state.h_value == 0;
}
/* f = g + h, the IDA* cost bound value of a state. */
__device__ static inline int
state_get_f(d_State state)
{
    return state.depth + state.h_value;
}
/* Per-block copy of the movability lookup table; filled by idas_kernel. */
__device__ __shared__ static bool movable_table_shared[STATE_N][DIR_N];
__device__ static inline bool
state_movable(d_State state, Direction dir)
{
    return movable_table_shared[state.empty][dir];
}
/* Linear-index offset of the empty cell for each direction
 * (order: UP, RIGHT, LEFT, DOWN — matches the DIR_* constants). */
__device__ __constant__ const static int pos_diff_table[DIR_N] = {
    -STATE_WIDTH, 1, -1, +STATE_WIDTH};
/* Incremental Manhattan-distance change (+1 or -1) when tile `opponent`
 * moves into cell `from`; rev_dir is the direction the tile moved
 * (the reverse of the empty cell's move). */
__device__ static inline int
calc_h_diff(int opponent, int from, int rev_dir)
{
    int goal_x = POS_X(opponent), goal_y = POS_Y(opponent);
    int from_x = POS_X(from), from_y = POS_Y(from);
    if (rev_dir == DIR_LEFT)
        return goal_x > from_x ? -1 : 1;
    else if (rev_dir == DIR_RIGHT)
        return goal_x < from_x ? -1 : 1;
    else if (rev_dir == DIR_UP)
        return goal_y > from_y ? -1 : 1;
    else
        return goal_y < from_y ? -1 : 1;
}
/* Slide the empty cell one step in `dir`, updating h incrementally.
 * Caller must ensure the move is legal (state_movable). */
__device__ static inline void
state_move(d_State *state, Direction dir)
{
    int new_empty = state->empty + pos_diff_table[dir];
    int opponent = state_tile_get(new_empty);
    //state->h_value += h_diff_table_shared[opponent][new_empty][dir];
    state->h_value += calc_h_diff(opponent, new_empty, dir);
    state_tile_set(state->empty, opponent);
    state->empty = new_empty;
    state->parent_dir = dir;
    ++state->depth;
}
/* stack implementation */
/* Block-shared work stack of states.  All three operations below contain
 * a __syncthreads() barrier, so EVERY thread of the block must call them
 * under uniform control flow. */
typedef struct div_stack_tag
{
    unsigned int n;                 /* number of valid entries in buf */
    d_State buf[STACK_BUF_LEN];
} d_Stack;
/* True when the stack is empty; barrier ensures the read happens before
 * any subsequent push/pop by other threads. */
__device__ static inline bool
stack_is_empty(d_Stack *stack)
{
    bool ret = (stack->n == 0);
    __syncthreads();
    return ret;
}
/* Conditionally push: threads with put==true claim a slot atomically;
 * all threads synchronize afterwards. */
__device__ static inline void
stack_put(d_Stack *stack, d_State *state, bool put)
{
    if (put)
    {
        unsigned int i = atomicInc( &stack->n, UINT_MAX); /* slow? especially in old CC environment */
        stack->buf[i] = *state;
    }
    __syncthreads();
}
/* Pop up to BLOCK_DIM/DIR_N states; each group of DIR_N consecutive
 * threads (tid >> 2) shares one popped state so the four directions of a
 * state are expanded by four threads.  Returns whether this thread got
 * a state.  Thread 0 shrinks the counter after all reads completed. */
__device__ static inline bool
stack_pop(d_Stack *stack, d_State *state)
{
    int tid = threadIdx.x;
    int i = (int) stack->n - 1 - (int) (tid >> 2);
    if (i >= 0)
        *state = stack->buf[i];
    __syncthreads();
    if (tid == 0)
        stack->n = stack->n >= BLOCK_DIM / DIR_N ?
                   stack->n - BLOCK_DIM / DIR_N : 0;
    __syncthreads();
    return i >= 0;
}
//__device__ __shared__ Direction candidate_dir_table[4][3] = {}
/*
* solver implementation
*/
/* Cost-bounded DFS core of IDA*.  The whole block cooperates on one shared
 * stack: each pop hands a state to a group of DIR_N threads, and thread
 * lane (tid & 3) tries its own direction.  States whose f-value stays
 * within f_limit are pushed back; goals are recorded in *stat.
 *
 * BUG FIX: the original code executed `continue` when `dir` was the
 * reverse of parent_dir.  That skipped the block-wide __syncthreads()
 * inside stack_put() for some threads only — a divergent barrier, which
 * is undefined behavior (typically a hang).  The check is now folded into
 * the expansion condition so every thread reaches stack_put(). */
__device__ static void
idas_internal(d_Stack *stack, int f_limit, search_stat *stat)
{
    d_State state;
    unsigned long long int loop_cnt = 0;
#ifdef COLLECT_LOG
    unsigned long long int nodes_expanded = 0;
#endif

    if (threadIdx.x == 0)
        stat->solved = false;

    for (;;)
    {
        /* Uniform exit: stack->n is identical for all threads here. */
        if (stack_is_empty(stack))
        {
            stat->loads = loop_cnt;
#ifdef COLLECT_LOG
            atomicAdd(&stat->nodes_expanded, nodes_expanded);
#endif
            break;
        }

        ++loop_cnt;
        bool found = stack_pop(stack, &state),
             put = false;

        if (found)
        {
            Direction dir = threadIdx.x & 3;
#ifdef COLLECT_LOG
            nodes_expanded++;
#endif

            /* Never undo the parent move; combined with the movability
             * test so control flow stays uniform up to stack_put(). */
            if (state.parent_dir != dir_reverse(dir) &&
                state_movable(state, dir))
            {
                state_move(&state, dir);

                if (state_get_f(state) <= f_limit)
                {
                    if (state_is_goal(state))
                    {
#ifndef SEARCH_ALL_THE_BEST
                        asm("trap;");
#else
                        stat->loads = loop_cnt;
                        stat->len = state.depth;
                        stat->solved = true;
#endif
#ifdef COLLECT_LOG
                        atomicAdd(&stat->nodes_expanded, nodes_expanded);
#endif
                    }
                    else
                        put = true;
                }
            }
        }

        /* Contains __syncthreads(); reached by all threads every round. */
        stack_put(stack, &state, put);
    }
}
/* XXX: movable table is effective in this case? */
/* Entry kernel: block bid runs one IDA* search rooted at input[bid].
 * Every thread of a block builds the same root state, so the early
 * return below is taken (or not) uniformly by the whole block and the
 * later __syncthreads() is safe.
 * NOTE: h_diff_table is accepted but not used here (h is recomputed by
 * calc_h_diff on the device). */
__global__ void
idas_kernel(Input *input, search_stat *stat, int f_limit,
            signed char *h_diff_table, bool *movable_table)
{
    __shared__ d_Stack stack;
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    if (tid == 0)
        stat[bid].loads = 0;
    d_State state;
    state_init(&state, &input[bid]);
    /* Root already exceeds the bound: whole block bails out together. */
    if (state_get_f(state) > f_limit)
        return;
    if (tid == 0)
    {
        stack.buf[0] = state;
        stack.n = 1;
    }
    /* Stage the global movability table into shared memory. */
    for (int i = tid; i < STATE_N * DIR_N; i += blockDim.x)
        if (i < STATE_N * DIR_N)
            movable_table_shared[i / DIR_N][i % DIR_N] = movable_table[i];
    __syncthreads();
    idas_internal(&stack, f_limit, &stat[bid]);
}
/* host library implementation */
#include <errno.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef UNABLE_LOG
#define elog(...) fprintf(stderr, __VA_ARGS__)
#else
#define elog(...) ;
#endif
/* malloc wrapper: logs on failure and returns the (possibly NULL)
 * pointer unchanged — callers are expected to cope or crash loudly. */
void *
palloc(size_t size)
{
    void *p = malloc(size);
    if (p == NULL)
        elog("malloc failed\n");
    return p;
}
/* realloc wrapper with the same log-and-return-NULL failure policy. */
void *
repalloc(void *old_ptr, size_t new_size)
{
    void *p = realloc(old_ptr, new_size);
    if (p == NULL)
        elog("realloc failed\n");
    return p;
}
/* free wrapper: warns about a NULL argument, then frees it anyway
 * (free(NULL) is a no-op per the C standard). */
void
pfree(void *ptr)
{
    if (ptr == NULL)
        elog("empty ptr\n");
    free(ptr);
}
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
typedef unsigned char idx_t;
/*
* [0,0] [1,0] [2,0] [3,0]
* [0,1] [1,1] [2,1] [3,1]
* [0,2] [1,2] [2,2] [3,2]
* [0,3] [1,3] [2,3] [3,3]
*/
/*
* goal state is
* [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
*/
/* Host-side puzzle state; pos is indexed [column][row] via the v() macro. */
typedef struct state_tag_cpu
{
    int depth; /* XXX: needed? */
    uchar pos[STATE_WIDTH][STATE_WIDTH];
    idx_t i, j; /* pos of empty */
    Direction parent_dir;
    int h_value;
} * State;
/* Cell accessors: v() is general, ev() is the empty cell, and
 * l/d/r/u-v() are its left/down/right/up neighbors (no bounds checks). */
#define v(state, i, j) ((state)->pos[i][j])
#define ev(state) (v(state, state->i, state->j))
#define lv(state) (v(state, state->i - 1, state->j))
#define dv(state) (v(state, state->i, state->j + 1))
#define rv(state) (v(state, state->i + 1, state->j))
#define uv(state) (v(state, state->i, state->j - 1))
/* Scratch tables: current (x, y) coordinate of each tile value,
 * rebuilt by fill_from_xy() before each heuristic evaluation. */
static uchar from_x[STATE_WIDTH * STATE_WIDTH],
    from_y[STATE_WIDTH * STATE_WIDTH];
/* Record, for every tile value in `from`, the (x, y) cell it currently
 * occupies, into the module-level from_x/from_y tables. */
static inline void
fill_from_xy(State from)
{
    for (idx_t x = 0; x < STATE_WIDTH; ++x)
        for (idx_t y = 0; y < STATE_WIDTH; ++y)
        {
            from_x[v(from, x, y)] = x;
            from_y[v(from, x, y)] = y;
        }
}
/* Full Manhattan-distance heuristic: sum over tiles 1..STATE_N-1 of the
 * distance from each tile's current cell to its goal cell (goal index ==
 * tile value).  Tile 0 (the empty cell) is skipped. */
static inline int
heuristic_manhattan_distance(State from)
{
    int h_value = 0;
    fill_from_xy(from);
    for (idx_t i = 1; i < STATE_N; ++i)
    {
        h_value += distance(from_x[i], POS_X(i));
        h_value += distance(from_y[i], POS_Y(i));
    }
    return h_value;
}
/* Goal test for the host state: h == 0 iff every tile is in place. */
bool
state_is_goal(State state)
{
    return state->h_value == 0;
}
/* Allocate an uninitialized host state. */
static inline State
state_alloc(void)
{
    return (State) palloc(sizeof(struct state_tag_cpu));
}
/* Release a host state allocated by state_alloc(). */
static inline void
state_free(State state)
{
    pfree(state);
}
/* Build a host state from a flat tile list (row-major, 0 = empty),
 * recording the empty cell's coordinates and the initial heuristic.
 * parent_dir is set to -1 (no parent move). */
State
state_init(uchar v_list[STATE_WIDTH * STATE_WIDTH], int init_depth)
{
    State state = state_alloc();
    int cnt = 0;
    state->depth = init_depth;
    state->parent_dir = (Direction) -1;
    for (idx_t j = 0; j < STATE_WIDTH; ++j)
        for (idx_t i = 0; i < STATE_WIDTH; ++i)
        {
            if (v_list[cnt] == 0)
            {
                state->i = i;
                state->j = j;
            }
            v(state, i, j) = v_list[cnt++];
        }
    state->h_value = heuristic_manhattan_distance(state);
    return state;
}
/* Destroy a state created by state_init()/state_copy(). */
void
state_fini(State state)
{
    state_free(state);
}
/* Deep copy (the struct is self-contained, so memcpy suffices). */
State
state_copy(State src)
{
    State dst = state_alloc();
    memcpy(dst, src, sizeof(*src));
    return dst;
}
/* Per-direction bounds checks for the empty cell of a host State.
 * Coordinates are (i, j) = (column, row); see struct state_tag_cpu. */
static inline bool
state_left_movable(State s)
{
    return s->i != 0;
}
static inline bool
state_down_movable(State s)
{
    return s->j != STATE_WIDTH - 1;
}
static inline bool
state_right_movable(State s)
{
    return s->i != STATE_WIDTH - 1;
}
static inline bool
state_up_movable(State s)
{
    return s->j != 0;
}
/* Whether the empty cell may take one step in `dir`. */
bool
state_movable(State state, Direction dir)
{
    switch (dir)
    {
        case DIR_LEFT:
            return state_left_movable(state);
        case DIR_DOWN:
            return state_down_movable(state);
        case DIR_RIGHT:
            return state_right_movable(state);
        case DIR_UP:
            return state_up_movable(state);
        default:
            /* Unknown direction: the original conjunction evaluated to
             * true in this case, so preserve that. */
            return true;
    }
}
/* Lookup of the incremental heuristic change when tile `who` arrives at
 * cell `opponent` via move `dir`.
 * NOTE(review): h_diff_table is never filled within this file — confirm
 * it is initialized elsewhere before state_move() is used. */
#define h_diff(who, opponent, dir) \
    (h_diff_table[((who) * STATE_N * DIR_N) + ((opponent) << 2) + (dir)])
static int h_diff_table[STATE_N * STATE_N * DIR_N];
/* Slide the empty cell one step in `dir` on the host state, updating the
 * empty coordinates, the moved tile, h_value and parent_dir.
 * Precondition: state_movable(state, dir).
 * NOTE(review): on the default (invalid-direction) path `who` stays
 * uninitialized; with NDEBUG the assert disappears and the h_diff read
 * below is undefined — callers must pass a valid direction. */
void
state_move(State state, Direction dir)
{
    idx_t who;
    assert(state_movable(state, dir));
    switch (dir)
    {
        case DIR_LEFT:
            who = ev(state) = lv(state);
            state->i--;
            break;
        case DIR_DOWN:
            who = ev(state) = dv(state);
            state->j++;
            break;
        case DIR_RIGHT:
            who = ev(state) = rv(state);
            state->i++;
            break;
        case DIR_UP:
            who = ev(state) = uv(state);
            state->j--;
            break;
        default:
            elog("unexpected direction");
            assert(false);
    }
    state->h_value =
        state->h_value + h_diff(who, state->i + state->j * STATE_WIDTH, dir_reverse(dir));
    state->parent_dir = dir;
}
/* Board-layout equality; depth/h/parent_dir are deliberately ignored. */
bool
state_pos_equal(State s1, State s2)
{
    for (idx_t i = 0; i < STATE_WIDTH; ++i)
        for (idx_t j = 0; j < STATE_WIDTH; ++j)
            if (v(s1, i, j) != v(s2, i, j))
                return false;
    return true;
}
/* Hash a board layout by XOR-ing shifted tile values (used by the
 * closed-list hash table; only determinism and spread matter).
 * BUG FIX: with STATE_WIDTH == 5 the original shift count
 * (i*3 + j) << 2 reaches 64 (i = j = 4), which is undefined behavior
 * for a shift on an int-promoted (or 64-bit) operand.  The tile value is
 * now widened to size_t and the shift count reduced modulo 64 so every
 * shift is well-defined.
 * (FIXME: still a weak hash, kept for the A* closed list.) */
size_t
state_hash(State state)
{
    size_t hash_value = 0;
    for (idx_t i = 0; i < STATE_WIDTH; ++i)
        for (idx_t j = 0; j < STATE_WIDTH; ++j)
            hash_value ^= (size_t) v(state, i, j)
                          << (((i * 3 + j) << 2) & 63);
    return hash_value;
}
/* Accessor: current heuristic value. */
int
state_get_hvalue(State state)
{
    return state->h_value;
}
/* Accessor: current depth (g-value). */
int
state_get_depth(State state)
{
    return state->depth;
}
/* Debug dump of a state (depth, h, f and the flat board, 0 at the empty
 * cell) to stderr via elog(). */
static void
state_dump(State state)
{
    elog("LOG(state): depth=%d, h=%d, f=%d, ", state->depth, state->h_value,
         state->depth + state->h_value);
    for (int i = 0; i < STATE_N; ++i)
        elog("%d%c", i == state->i + STATE_WIDTH * state->j
                         ? 0
                         : state->pos[i % STATE_WIDTH][i / STATE_WIDTH],
             i == STATE_N - 1 ? '\n' : ',');
}
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/* Hash-table operation results. */
typedef enum {
    HT_SUCCESS = 0,
    HT_FAILED_FOUND,
    HT_FAILED_NOT_FOUND,
} HTStatus;
/* XXX: hash function for State should be surveyed */
inline static size_t
hashfunc(State key)
{
    return state_hash(key);
}
/* Chained hash-table entry: key is an owned deep copy of the State. */
typedef struct ht_entry_tag *HTEntry;
struct ht_entry_tag
{
    HTEntry next;
    State key;
    int value;
};
/* Allocate an entry holding a private copy of `key`.
 * NOTE: `value` is left uninitialized; ht_insert hands the caller a
 * pointer to it for initialization. */
static HTEntry
ht_entry_init(State key)
{
    HTEntry entry = (HTEntry) palloc(sizeof(*entry));
    entry->key = state_copy(key);
    entry->next = NULL;
    return entry;
}
/* Free the entry shell only; the key is freed separately (see ht_fini). */
static void
ht_entry_fini(HTEntry entry)
{
    pfree(entry);
}
/* Separate-chaining hash table; n_bins is always a power of two so the
 * bin index can be computed with a mask. */
typedef struct ht_tag
{
    size_t n_bins;
    size_t n_elems;
    HTEntry *bin;
} * HT;
/* Grow once the load factor reaches 1. */
static bool
ht_rehash_required(HT ht)
{
    return ht->n_bins <= ht->n_elems; /* TODO: local policy is also needed */
}
/* Round `required` up to the next power of two.
 * NOTE: n_bins is used as a mask, hence must be a power of 2, for now. */
static size_t
calc_n_bins(size_t required)
{
    size_t n = 1;
    assert(required > 0);
    for (; n < required; n <<= 1)
        ;
    return n;
}
/* Create a table with at least init_size_hint bins (rounded up to a
 * power of two); all bins start empty. */
HT
ht_init(size_t init_size_hint)
{
    size_t n_bins = calc_n_bins(init_size_hint);
    HT ht = (HT) palloc(sizeof(*ht));
    ht->n_bins = n_bins;
    ht->n_elems = 0;
    /* Guard against size_t overflow of the bin-array allocation. */
    assert(sizeof(*ht->bin) <= SIZE_MAX / n_bins);
    ht->bin = (HTEntry *) palloc(sizeof(*ht->bin) * n_bins);
    memset(ht->bin, 0, sizeof(*ht->bin) * n_bins);
    return ht;
}
/* Double the bin count and redistribute every entry into the new bins.
 * Entries are relinked in place; no keys are copied or freed. */
static void
ht_rehash(HT ht)
{
    HTEntry *new_bin;
    size_t new_size = ht->n_bins << 1;
    assert(ht->n_bins<SIZE_MAX>> 1);
    new_bin = (HTEntry *) palloc(sizeof(*new_bin) * new_size);
    memset(new_bin, 0, sizeof(*new_bin) * new_size);
    for (size_t i = 0; i < ht->n_bins; ++i)
    {
        HTEntry entry = ht->bin[i];
        while (entry)
        {
            HTEntry next = entry->next;
            /* new_size is a power of two, so `& (new_size - 1)` == mod. */
            size_t idx = hashfunc(entry->key) & (new_size - 1);
            entry->next = new_bin[idx];
            new_bin[idx] = entry;
            entry = next;
        }
    }
    pfree(ht->bin);
    ht->n_bins = new_size;
    ht->bin = new_bin;
}
/* Destroy the table: free every entry's owned key copy, the entry
 * shells, the bin array and the table itself. */
void
ht_fini(HT ht)
{
    for (size_t i = 0; i < ht->n_bins; ++i)
    {
        HTEntry entry = ht->bin[i];
        while (entry)
        {
            HTEntry next = entry->next;
            state_fini(entry->key);
            ht_entry_fini(entry);
            entry = next;
        }
    }
    pfree(ht->bin);
    pfree(ht);
}
/* Find-or-insert: if a layout-equal key already exists, point *value at
 * the stored value and return HT_FAILED_FOUND; otherwise insert a new
 * entry (value left uninitialized — caller must set it through *value)
 * and return HT_SUCCESS.  May rehash before the lookup. */
HTStatus
ht_insert(HT ht, State key, int **value)
{
    size_t i;
    HTEntry entry, new_entry;
    if (ht_rehash_required(ht))
        ht_rehash(ht);
    i = hashfunc(key) & (ht->n_bins - 1);
    entry = ht->bin[i];
    while (entry)
    {
        if (state_pos_equal(key, entry->key))
        {
            *value = &entry->value;
            return HT_FAILED_FOUND;
        }
        entry = entry->next;
    }
    /* Not found: push a new entry onto the bin's chain head. */
    new_entry = ht_entry_init(key);
    new_entry->next = ht->bin[i];
    ht->bin[i] = new_entry;
    *value = &new_entry->value;
    assert(ht->n_elems < SIZE_MAX);
    ht->n_elems++;
    return HT_SUCCESS;
}
/*
* Priority Queue implementation
*/
#include <assert.h>
#include <stdint.h>
/* Priority-queue element: owned state plus its f (priority) and g
 * (tiebreaker) values. */
typedef struct pq_entry_tag
{
    State state;
    int f, g;
} PQEntryData;
typedef PQEntryData *PQEntry;
/* tiebreaking is done comparing g value */
/* Min-heap order on f; among equal f, the larger (deeper) g wins. */
static inline bool
pq_entry_higher_priority(PQEntry e1, PQEntry e2)
{
    return e1->f < e2->f || (e1->f == e2->f && e1->g >= e2->g);
}
/*
* NOTE:
* This priority queue is implemented doubly reallocated array.
* It will only extend and will not shrink, for now.
* It may be improved by using array of layers of iteratively widened array
*/
/* Binary min-heap in a doubling array (grows only, never shrinks). */
typedef struct pq_tag
{
    size_t n_elems;
    size_t capa;
    PQEntryData *array;
} * PQ;
/* Initial heap capacity: smallest power of two >= capa_hint, minus one
 * (a full-binary-tree-friendly size). */
static inline size_t
calc_init_capa(size_t capa_hint)
{
    size_t pow2 = 1;
    assert(capa_hint > 0);
    for (; pow2 < capa_hint; pow2 <<= 1)
        ;
    return pow2 - 1;
}
/* Create an empty priority queue sized from the hint. */
PQ
pq_init(size_t init_capa_hint)
{
    PQ pq = (PQ) palloc(sizeof(*pq));
    pq->n_elems = 0;
    pq->capa = calc_init_capa(init_capa_hint);
    assert(pq->capa <= SIZE_MAX / sizeof(PQEntryData));
    pq->array = (PQEntryData *) palloc(sizeof(PQEntryData) * pq->capa);
    return pq;
}
/* Destroy the queue, freeing the owned state of every remaining entry. */
void
pq_fini(PQ pq)
{
    for (size_t i = 0; i < pq->n_elems; ++i)
        state_fini(pq->array[i].state);
    pfree(pq->array);
    pfree(pq);
}
/* True when every slot of the backing array is occupied. */
static inline bool
pq_is_full(PQ pq)
{
    assert(pq->n_elems <= pq->capa);
    return pq->n_elems == pq->capa;
}
/* Double the capacity (2*capa + 1 keeps the 2^k - 1 shape). */
static inline void
pq_extend(PQ pq)
{
    pq->capa = (pq->capa << 1) + 1;
    assert(pq->capa <= SIZE_MAX / sizeof(PQEntryData));
    pq->array =
        (PQEntryData *) repalloc(pq->array, sizeof(PQEntryData) * pq->capa);
}
/* Swap two heap slots by value. */
static inline void
pq_swap_entry(PQ pq, size_t i, size_t j)
{
    PQEntryData tmp = pq->array[i];
    pq->array[i] = pq->array[j];
    pq->array[j] = tmp;
}
/* Parent index of 0-origin heap slot i (valid only for i > 0). */
static inline size_t
pq_up(size_t i)
{
    /* NOTE: By using 1-origin, it may be written more simply, i >> 1 */
    return (i - 1) >> 1;
}
/* Left-child index of 0-origin heap slot i. */
static inline size_t
pq_left(size_t i)
{
    return (i << 1) + 1;
}
/* Sift the freshly written element (at index n_elems — pq_put calls this
 * BEFORE incrementing the count) up to its heap position. */
static void
heapify_up(PQ pq)
{
    for (size_t i = pq->n_elems; i > 0;)
    {
        size_t ui = pq_up(i);
        assert(i > 0);
        if (!pq_entry_higher_priority(&pq->array[i], &pq->array[ui]))
            break;
        pq_swap_entry(pq, i, ui);
        i = ui;
    }
}
/* Insert a COPY of `state` with priority f and tiebreaker g; the queue
 * owns the copy (freed by pq_pop's caller or pq_fini). */
void
pq_put(PQ pq, State state, int f, int g)
{
    if (pq_is_full(pq))
        pq_extend(pq);
    pq->array[pq->n_elems].state = state_copy(state);
    pq->array[pq->n_elems].f = f; /* this may be abundant */
    pq->array[pq->n_elems].g = g;
    heapify_up(pq);
    ++pq->n_elems;
}
/* Sift the root down to restore the heap property after a pop. */
static void
heapify_down(PQ pq)
{
    size_t sentinel = pq->n_elems;
    for (size_t i = 0;;)
    {
        size_t ri, li = pq_left(i);
        if (li >= sentinel)
            break;
        ri = li + 1;
        if (ri >= sentinel)
        {
            /* Only a left child exists: one final comparison. */
            if (pq_entry_higher_priority(&pq->array[li], &pq->array[i]))
                pq_swap_entry(pq, i, li);
            /* Reached the bottom */
            break;
        }
        /* NOTE: If p(ri) == p(li), it may be good to go right
         * since the filling order is left-first */
        if (pq_entry_higher_priority(&pq->array[li], &pq->array[ri]))
        {
            if (!pq_entry_higher_priority(&pq->array[li], &pq->array[i]))
                break;
            pq_swap_entry(pq, i, li);
            i = li;
        }
        else
        {
            if (!pq_entry_higher_priority(&pq->array[ri], &pq->array[i]))
                break;
            pq_swap_entry(pq, i, ri);
            i = ri;
        }
    }
}
/* Remove and return the highest-priority state (NULL when empty).
 * Ownership of the returned State transfers to the caller. */
State
pq_pop(PQ pq)
{
    State ret_state;
    if (pq->n_elems == 0)
        return NULL;
    ret_state = pq->array[0].state;
    --pq->n_elems;
    /* Move the last element to the root, then sift it down. */
    pq->array[0] = pq->array[pq->n_elems];
    heapify_down(pq);
    return ret_state;
}
/* Debug dump of the heap (one line per tree level) via elog(). */
void
pq_dump(PQ pq)
{
    elog("%s: n_elems=%zu, capa=%zu\n", __func__, pq->n_elems, pq->capa);
    for (size_t i = 0, cr_required = 1; i < pq->n_elems; i++)
    {
        /* Break the line at each power-of-two boundary (new tree level). */
        if (i == cr_required)
        {
            elog("\n");
            cr_required = (cr_required << 1) + 1;
        }
        elog("%d,", pq->array[i].f);
        elog("%d ", pq->array[i].g);
    }
    elog("\n");
}
#include <stdlib.h>
#include <string.h>
/* Uniform pseudo-random integer in [0, m), driven by rand(). */
int
rrand(int m)
{
    double u = rand() / (RAND_MAX + 1.0); /* uniform in [0, 1) */
    return (int) (u * (double) m);
}
/* Fisher-Yates shuffle of the first n_inputs entries, using rrand() as
 * the random source.
 * NOTE(review): rrand takes an int while n is size_t — fine for the
 * sizes used here, but would truncate for very large n. */
void
shuffle_input(Input input[], int n_inputs)
{
    Input tmp;
    size_t n = n_inputs;
    while (n > 1)
    {
        size_t k = rrand(n--);
        memcpy(&tmp, &input[n], sizeof(Input));
        memcpy(&input[n], &input[k], sizeof(Input));
        memcpy(&input[k], &tmp, sizeof(Input));
    }
}
static HT closed;
/* Seed the GPU search: run host A* from init_state until either the goal
 * is found (returns true) or about distr_n frontier states have been
 * collected.  On the non-solved path the frontier is written into
 * input[0..cnt-1] and *min_fvalue receives the smallest frontier f.
 * *cnt_inputs always receives the frontier size.
 * The module-level `closed` table keeps the best known depth per layout
 * and is left alive for later duplicate detection.
 * NOTE(review): several states are intentionally never freed on the
 * solved path, and pq_put() copies its argument so next_state leaks when
 * enqueued — acceptable for this one-shot setup phase, but worth
 * confirming. */
bool
distribute_astar(State init_state, Input input[], int distr_n, int *cnt_inputs,
                 int *min_fvalue)
{
    int cnt = 0;
    State state;
    PQ q = pq_init(distr_n + 10);
    HTStatus ht_status;
    int * ht_value;
    bool solved = false;
    closed = ht_init(10000);
    ht_status = ht_insert(closed, init_state, &ht_value);
    *ht_value = 0;
    pq_put(q, state_copy(init_state), state_get_hvalue(init_state), 0);
    ++cnt;
    while ((state = pq_pop(q)))
    {
        --cnt;
        if (state_is_goal(state))
        {
            solved = true;
            break;
        }
        /* Skip states already reached at a shallower depth. */
        ht_status = ht_insert(closed, state, &ht_value);
        if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state))
        {
            state_fini(state);
            continue;
        }
        else
            *ht_value = state_get_depth(state);
        /* Expand all legal non-backtracking successors. */
        for (int dir = 0; dir < DIR_N; ++dir)
        {
            if (state->parent_dir != dir_reverse(dir) &&
                state_movable(state, (Direction) dir))
            {
                State next_state = state_copy(state);
                state_move(next_state, (Direction) dir);
                next_state->depth++;
                ht_status = ht_insert(closed, next_state, &ht_value);
                if (ht_status == HT_FAILED_FOUND &&
                    *ht_value <= state_get_depth(next_state))
                    state_fini(next_state);
                else
                {
                    ++cnt;
                    *ht_value = state_get_depth(next_state);
                    pq_put(q, next_state,
                           *ht_value + state_get_hvalue(next_state), *ht_value);
                }
            }
        }
        state_fini(state);
        /* Enough roots collected for the GPU distribution. */
        if (cnt >= distr_n)
            break;
    }
    *cnt_inputs = cnt;
    elog("LOG: init_distr, cnt=%d\n", cnt);
    if (!solved)
    {
        /* Drain the queue into the Input array and track the minimum f. */
        int minf = INT_MAX;
        for (int id = 0; id < cnt; ++id)
        {
            State state = pq_pop(q);
            assert(state);
            for (int i = 0; i < STATE_N; ++i)
                input[id].tiles[i] =
                    state->pos[i % STATE_WIDTH][i / STATE_WIDTH];
            input[id].tiles[state->i + (state->j * STATE_WIDTH)] = 0;
            input[id].init_depth = state_get_depth(state);
            input[id].parent_dir = state->parent_dir;
            if (minf > state_get_depth(state) + state_get_hvalue(state))
                minf = state_get_depth(state) + state_get_hvalue(state);
        }
        /* NOTE(review): side effect inside assert — gone under NDEBUG. */
        assert(pq_pop(q) == NULL);
        // shuffle_input(input, cnt);
        *min_fvalue = minf;
    }
    pq_fini(q);
    return solved;
}
/* Split the overloaded root input[i] into finer-grained roots: re-run a
 * bounded best-first expansion from input[i] until up to devide_n open
 * nodes exist, overwrite slot i with input[tail-1], and append the
 * expanded nodes at input[tail-1 .. tail-1+cnt-1].
 * Returns cnt - 1, the net number of roots added. */
static int
input_devide(Input input[], search_stat stat[], int i, int devide_n, int tail,
             int *buf_len)
{
    int cnt = 0;
    int *ht_value;
    State state = state_init(input[i].tiles, input[i].init_depth);
    state->parent_dir = input[i].parent_dir;
    PQ pq = pq_init(devide_n);
    HTStatus ht_status;
    pq_put(pq, state, state_get_hvalue(state), 0);
    ++cnt;
    assert(devide_n > 0);
    while ((state = pq_pop(pq)))
    {
        --cnt; /* cnt tracks the number of states in the queue */
        if (state_is_goal(state))
        {
            /* It may not be optimal goal */
            pq_put(pq, state, state_get_depth(state) + state_get_hvalue(state),
                   state_get_depth(state));
            ++cnt;
            break;
        }
        /* skip states already reached at a shallower depth */
        ht_status = ht_insert(closed, state, &ht_value);
        if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state))
        {
            state_fini(state);
            continue;
        }
        else
            *ht_value = state_get_depth(state);
        for (int dir = 0; dir < DIR_N; ++dir)
        {
            if (state->parent_dir != dir_reverse(dir) &&
                state_movable(state, (Direction) dir))
            {
                State next_state = state_copy(state);
                state_move(next_state, (Direction) dir);
                next_state->depth++;
                ht_status = ht_insert(closed, next_state, &ht_value);
                if (ht_status == HT_FAILED_FOUND &&
                    *ht_value < state_get_depth(next_state))
                    state_fini(next_state);
                else
                {
                    ++cnt;
                    *ht_value = state_get_depth(next_state);
                    pq_put(pq, next_state,
                           *ht_value + state_get_hvalue(next_state), *ht_value);
                }
            }
        }
        state_fini(state);
        if (cnt >= devide_n)
            break;
    }
    /* grow the host buffer length bookkeeping (see note below) */
    int new_buf_len = *buf_len;
    while (tail + cnt >= new_buf_len)
        new_buf_len <<= 1;
    if (new_buf_len != *buf_len)
    {
        *buf_len = new_buf_len;
        /* XXX(review): the pointer returned by repalloc is discarded and
         * both this function and the caller keep using the old `input`
         * pointer — if realloc moves the block this is a use-after-free.
         * Fixing it requires an Input** (or returning the new pointer),
         * i.e. an interface change; flagged here, not changed. */
        repalloc(input, sizeof(*input) * new_buf_len);
        elog("LOG: host buf resize\n");
    }
    input[i] = input[tail - 1]; /* recycle slot i; slot tail-1 is reused below */
    for (int id = 0; id < cnt; ++id)
    {
        int ofs = tail - 1 + id;
        State state = pq_pop(pq);
        assert(state);
        /* flatten pos[x][y] into the row-major tiles[] layout */
        for (int j = 0; j < STATE_N; ++j)
            input[ofs].tiles[j] = state->pos[j % STATE_WIDTH][j / STATE_WIDTH];
        input[ofs].tiles[state->i + (state->j * STATE_WIDTH)] = 0;
        input[ofs].init_depth = state_get_depth(state);
        input[ofs].parent_dir = state->parent_dir;
    }
    pq_fini(pq);
    return cnt - 1;
}
/* main */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#define exit_failure(...) \
do \
{ \
printf(__VA_ARGS__); \
exit(EXIT_FAILURE); \
} while (0)
/* Parse the next integer token from str, advancing *end_ptr past it.
 * Exits the process on conversion failure, an empty token, or a value
 * outside the int range.
 * Fix: errno must be cleared BEFORE strtol — the original cleared it
 * after the call, which wiped ERANGE and made the overflow check dead. */
static int
pop_int_from_str(const char *str, char **end_ptr)
{
    long int rv;
    errno = 0; /* cleared first so strtol's ERANGE is observable below */
    rv = strtol(str, end_ptr, 0);
    if (errno != 0)
        exit_failure("%s: %s cannot be converted into long\n", __func__, str);
    else if (end_ptr && str == *end_ptr)
        exit_failure("%s: reach end of string", __func__);
    if (rv > INT_MAX || rv < INT_MIN)
        exit_failure("%s: too big number, %ld\n", __func__, rv);
    return (int) rv;
}
#define MAX_LINE_LEN 100
/* Read STATE_N whitespace-separated tile values from the first line of
 * fname into s[].  Exits the process on any I/O or parse error.
 * NOTE(review): assumes the whole board fits on one MAX_LINE_LEN-byte
 * line — verify this holds for larger STATE_N. */
static void
load_state_from_file(const char *fname, uchar *s)
{
    FILE *fp;
    char  str[MAX_LINE_LEN];
    char *str_ptr = str, *end_ptr;
    fp = fopen(fname, "r");
    if (!fp)
        exit_failure("%s: %s cannot be opened\n", __func__, fname);
    if (!fgets(str, MAX_LINE_LEN, fp))
        exit_failure("%s: fgets failed\n", __func__);
    for (int i = 0; i < STATE_N; ++i)
    {
        s[i] = pop_int_from_str(str_ptr, &end_ptr);
        str_ptr = end_ptr; /* continue parsing after the consumed token */
    }
    fclose(fp);
}
#undef MAX_LINE_LEN
#define CUDA_CHECK(call) \
do \
{ \
const hipError_t e = call; \
if (e != hipSuccess) \
exit_failure("Error: %s:%d code:%d, reason: %s\n", __FILE__, \
__LINE__, e, hipGetErrorString(e)); \
} while (0)
__host__ static void *
cudaPalloc(size_t size)
{
void *ptr;
CUDA_CHECK(hipMalloc(&ptr, size));
return ptr;
}
__host__ static void
cudaPfree(void *ptr)
{
CUDA_CHECK(hipFree(ptr));
}
#define h_d_t(op, i, dir) \
(h_diff_table[(op) *STATE_N * DIR_N + (i) *DIR_N + (dir)])
/* Precompute, for every (moved tile `opponent`, destination cell i,
 * move direction), the Manhattan-distance delta (-1 or +1) that the
 * move causes for that tile. */
__host__ static void
init_mdist(signed char h_diff_table[])
{
    for (int opponent = 0; opponent < STATE_N; ++opponent)
    {
        int goal_x = POS_X(opponent), goal_y = POS_Y(opponent);
        for (int i = 0; i < STATE_N; ++i)
        {
            int x = POS_X(i), y = POS_Y(i);
            for (uchar dir = 0; dir < DIR_N; ++dir)
            {
                signed char delta;
                if (dir == DIR_LEFT)
                    delta = (goal_x > x) ? -1 : 1;
                else if (dir == DIR_RIGHT)
                    delta = (goal_x < x) ? -1 : 1;
                else if (dir == DIR_UP)
                    delta = (goal_y > y) ? -1 : 1;
                else /* DIR_DOWN */
                    delta = (goal_y < y) ? -1 : 1;
                h_d_t(opponent, i, dir) = delta;
            }
        }
    }
}
#undef h_d_t
#define m_t(i, d) (movable_table[(i) *DIR_N + (d)])
/* Precompute whether the empty cell at index `cell` may move toward d
 * without leaving the board. */
__host__ static void
init_movable_table(bool movable_table[])
{
    for (int cell = 0; cell < STATE_N; ++cell)
        for (unsigned int d = 0; d < DIR_N; ++d)
        {
            bool ok = false;
            switch (d)
            {
            case DIR_RIGHT:
                ok = (POS_X(cell) < STATE_WIDTH - 1);
                break;
            case DIR_LEFT:
                ok = (POS_X(cell) > 0);
                break;
            case DIR_DOWN:
                ok = (POS_Y(cell) < STATE_WIDTH - 1);
                break;
            case DIR_UP:
                ok = (POS_Y(cell) > 0);
                break;
            }
            m_t(cell, d) = ok;
        }
}
#undef m_t
// static char dir_char[] = {'U', 'R', 'L', 'D'};
#define INPUT_SIZE (sizeof(Input) * buf_len)
#define STAT_SIZE (sizeof(search_stat) * buf_len)
#define MOVABLE_TABLE_SIZE (sizeof(bool) * STATE_N * DIR_N)
#define H_DIFF_TABLE_SIZE (STATE_N * STATE_N * DIR_N)
/* Entry point: distribute roots on the host with A*, then repeatedly
 * run f-limit-bounded IDA* on the GPU, raising f by 2 per round and
 * re-splitting overloaded roots between rounds. */
int
main(int argc, char *argv[])
{
    int n_roots;
    int buf_len = N_INIT_DISTRIBUTION * MAX_BUF_RATIO;
    /* host buffers and their device mirrors */
    Input *input = (Input *) palloc(INPUT_SIZE),
        *d_input = (Input *) cudaPalloc(INPUT_SIZE);
    search_stat *stat = (search_stat *) palloc(STAT_SIZE),
        *d_stat = (search_stat *) cudaPalloc(STAT_SIZE);
    bool *movable_table = (bool *) palloc(MOVABLE_TABLE_SIZE),
        *d_movable_table = (bool *) cudaPalloc(MOVABLE_TABLE_SIZE);
    signed char *h_diff_table = (signed char *) palloc(H_DIFF_TABLE_SIZE),
        *d_h_diff_table = (signed char *) cudaPalloc(H_DIFF_TABLE_SIZE);
    int min_fvalue = 0;
    if (argc != 2)
        exit_failure("usage: bin/cumain <ifname>\n");
    load_state_from_file(argv[1], input[0].tiles);
    {
        State init_state = state_init(input[0].tiles, 0);
        state_dump(init_state);
        if (distribute_astar(init_state, input, N_INIT_DISTRIBUTION, &n_roots,
                             &min_fvalue))
        {
            elog("solution is found by distributor\n");
            goto solution_found;
        }
        state_fini(init_state);
    }
    /* lookup tables are computed once and uploaded once */
    init_mdist(h_diff_table);
    init_movable_table(movable_table);
    CUDA_CHECK(hipMemcpy(d_movable_table, movable_table, MOVABLE_TABLE_SIZE,
                         hipMemcpyHostToDevice));
    CUDA_CHECK(hipMemcpy(d_h_diff_table, h_diff_table, H_DIFF_TABLE_SIZE,
                         hipMemcpyHostToDevice));
    CUDA_CHECK(hipMemset(d_input, 0, INPUT_SIZE));
    /* NOTE(review): f_limit is uchar and grows by 2 per round — it wraps
     * past 255; confirm the search depth bound before relying on this. */
    for (uchar f_limit = min_fvalue;; f_limit += 2)
    {
        CUDA_CHECK(hipMemset(d_stat, 0, STAT_SIZE));
        CUDA_CHECK(
            hipMemcpy(d_input, input, INPUT_SIZE, hipMemcpyHostToDevice));
        elog("f_limit=%d\n", (int) f_limit);
        /* one block per root */
        hipLaunchKernelGGL(( idas_kernel), dim3(n_roots), dim3(BLOCK_DIM), 0, 0, d_input, d_stat, f_limit,
                                               d_h_diff_table, d_movable_table);
        CUDA_CHECK(
            hipGetLastError()); /* asm trap is called when find solution */
        CUDA_CHECK(hipMemcpy(stat, d_stat, STAT_SIZE, hipMemcpyDeviceToHost));
        unsigned long long int loads_sum = 0;
        for (int i = 0; i < n_roots; ++i)
            loads_sum += stat[i].loads;
#ifdef COLLECT_LOG
        elog("STAT: loop\n");
        for (int i = 0; i < n_roots; ++i)
            elog("%lld, ", stat[i].loads);
        putchar('\n');
        elog("STAT: nodes_expanded\n");
        for (int i = 0; i < n_roots; ++i)
            elog("%lld, ", stat[i].nodes_expanded);
        putchar('\n');
        elog("STAT: efficiency\n");
        for (int i = 0; i < n_roots; ++i)
            if (stat[i].loads != 0)
                elog("%lld, ", stat[i].nodes_expanded / stat[i].loads);
        putchar('\n');
#endif
        int increased = 0;
        unsigned long long int loads_av = loads_sum / n_roots;
        /* histogram of per-root load relative to the average */
        int stat_cnt[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
        for (int i = 0; i < n_roots; ++i)
        {
            if (stat[i].loads < loads_av)
                stat_cnt[0]++;
            else if (stat[i].loads < 2 * loads_av)
                stat_cnt[1]++;
            else if (stat[i].loads < 4 * loads_av)
                stat_cnt[2]++;
            else if (stat[i].loads < 8 * loads_av)
                stat_cnt[3]++;
            else if (stat[i].loads < 16 * loads_av)
                stat_cnt[4]++;
            else if (stat[i].loads < 32 * loads_av)
                stat_cnt[5]++;
            else if (stat[i].loads < 64 * loads_av)
                stat_cnt[6]++;
            else if (stat[i].loads < 128 * loads_av)
                stat_cnt[7]++;
            else
                stat_cnt[8]++;
            /* split a root into `policy` pieces when it was overloaded */
            int policy = loads_av == 0 ? stat[i].loads
                                       : (stat[i].loads - 1) / loads_av + 1;
            int buf_len_old = buf_len;
            if (policy > 1 && stat[i].loads > 10 && MAX_BLOCK_SIZE > increased + n_roots)
                increased += input_devide(input, stat, i, policy,
                                          n_roots + increased, &buf_len);
            if (buf_len != buf_len_old)
            {
                /* NOTE(review): input itself was repalloc'd inside
                 * input_devide but the new pointer was discarded there;
                 * this resize path only refreshes stat and the device
                 * buffers — confirm intent. */
                elog("XXX: fix MAX_BUF_RATIO\n");
                stat = (search_stat *) repalloc(stat, STAT_SIZE);
                cudaPfree(d_input);
                cudaPfree(d_stat);
                d_input = (Input *) cudaPalloc(INPUT_SIZE);
                d_stat = (search_stat *) cudaPalloc(STAT_SIZE);
            }
        }
        elog("STAT: loads: sum=%lld, av=%lld\n", loads_sum, loads_av);
        elog("STAT: distr: av=%d, 2av=%d, 4av=%d, 8av=%d, 16av=%d, 32av=%d, "
             "64av=%d, 128av=%d, more=%d\n",
             stat_cnt[0], stat_cnt[1], stat_cnt[2], stat_cnt[3], stat_cnt[4],
             stat_cnt[5], stat_cnt[6], stat_cnt[7], stat_cnt[8]);
        n_roots += increased;
        elog("STAT: n_roots=%d(+%d)\n", n_roots, increased);
#ifdef SEARCH_ALL_THE_BEST
        for (int i = 0; i < n_roots; ++i)
            if (stat[i].solved)
            {
                elog("find all the optimal solution(s), at depth=%d\n", stat[i].len);
                goto solution_found;
            }
#endif
    }
solution_found:
    cudaPfree(d_input);
    cudaPfree(d_stat);
    cudaPfree(d_movable_table);
    cudaPfree(d_h_diff_table);
    CUDA_CHECK(hipDeviceReset());
    pfree(input);
    pfree(stat);
    pfree(movable_table);
    pfree(h_diff_table);
    return 0;
}
| 2dcf1d1f65d27b7150d8e9188075f01492e34ccf.cu | #include <stdbool.h>
// #undef SEARCH_ALL_THE_BEST
#define SEARCH_ALL_THE_BEST
#undef PACKED
#undef COLLECT_LOG
#define MAX_BLOCK_SIZE 64535
#define BLOCK_DIM (32) /* NOTE: broken when more than 32 */
#define N_INIT_DISTRIBUTION (BLOCK_DIM * 64)
#define STACK_BUF_LEN (48 * (BLOCK_DIM/DIR_N))
/* XXX: should be defined dynamically, but cudaMalloc after cudaFree fails */
#define MAX_BUF_RATIO (256)
#define STATE_WIDTH 5
#define STATE_N (STATE_WIDTH * STATE_WIDTH)
typedef unsigned char uchar;
typedef signed char Direction;
#define dir_reverse(dir) ((Direction)(3 - (dir)))
#define DIR_N 4
#define DIR_FIRST 0
#define DIR_UP 0
#define DIR_RIGHT 1
#define DIR_LEFT 2
#define DIR_DOWN 3
#define POS_X(pos) ((pos) % STATE_WIDTH)
#define POS_Y(pos) ((pos) / STATE_WIDTH)
typedef struct search_stat_tag
{
bool solved;
int len;
unsigned long long int loads;
#ifdef COLLECT_LOG
unsigned long long int nodes_expanded;
#endif
//bool assert_failed;
} search_stat;
typedef struct input_tag
{
uchar tiles[STATE_N];
int init_depth;
Direction parent_dir;
} Input;
/* state implementation */
/*
* goal: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
*/
//__device__ __shared__ static signed char h_diff_table_shared[STATE_N][STATE_N] [DIR_N];
typedef struct state_tag
{
#ifndef PACKED
uchar tile[STATE_N];
#else
unsigned long long tile;
#endif
uchar empty;
uchar depth;
Direction parent_dir;
uchar h_value; /* ub of h_value is STATE_WIDTH*2*(STATE_N-1), e.g. 90 */
} d_State;
#ifndef PACKED
#define state_tile_get(i) (state->tile[i])
#define state_tile_set(i, v) (state->tile[i] = (v))
#else
#define STATE_TILE_BITS 4
#define STATE_TILE_MASK ((1ull << STATE_TILE_BITS) - 1)
#define state_tile_ofs(i) (i << 2)
#define state_tile_get(i) \
((state->tile & (STATE_TILE_MASK << state_tile_ofs(i))) >> \
state_tile_ofs(i))
#define state_tile_set(i, val) \
do \
{ \
state->tile &= ~((STATE_TILE_MASK) << state_tile_ofs(i)); \
state->tile |= ((unsigned long long) val) << state_tile_ofs(i); \
} while (0)
#endif
#define distance(i, j) ((i) > (j) ? (i) - (j) : (j) - (i))
/* Build a device-side state from a host Input: copy the tiles, locate
 * the blank cell, and compute the Manhattan-distance heuristic from
 * scratch (later moves update it incrementally via calc_h_diff). */
__device__ static void
state_init(d_State *state, Input *input)
{
    state->depth = input->init_depth;
    state->parent_dir = input->parent_dir;
    for (int i = 0; i < STATE_N; ++i)
    {
        if (input->tiles[i] == 0)
            state->empty = i; /* tile value 0 marks the blank */
        state_tile_set(i, input->tiles[i]);
    }
    /* sum Manhattan distances of every non-blank tile to its goal cell */
    state->h_value = 0;
    for (int i = 0; i < STATE_N; ++i)
    {
        uchar tile = state_tile_get(i);
        if (tile == 0)
            continue;
        state->h_value += distance(POS_X(i), POS_X(tile));
        state->h_value += distance(POS_Y(i), POS_Y(tile));
    }
}
__device__ static inline bool
state_is_goal(d_State state)
{
return state.h_value == 0;
}
__device__ static inline int
state_get_f(d_State state)
{
return state.depth + state.h_value;
}
__device__ __shared__ static bool movable_table_shared[STATE_N][DIR_N];
__device__ static inline bool
state_movable(d_State state, Direction dir)
{
return movable_table_shared[state.empty][dir];
}
__device__ __constant__ const static int pos_diff_table[DIR_N] = {
-STATE_WIDTH, 1, -1, +STATE_WIDTH};
/* Manhattan-distance delta (-1: closer, +1: farther) for the tile
 * `opponent` landing in cell `from` after a move in direction rev_dir. */
__device__ static inline int
calc_h_diff(int opponent, int from, int rev_dir)
{
    int gx = POS_X(opponent), gy = POS_Y(opponent);
    int fx = POS_X(from), fy = POS_Y(from);
    switch (rev_dir)
    {
    case DIR_LEFT:
        return (gx > fx) ? -1 : 1;
    case DIR_RIGHT:
        return (gx < fx) ? -1 : 1;
    case DIR_UP:
        return (gy > fy) ? -1 : 1;
    default: /* DIR_DOWN */
        return (gy < fy) ? -1 : 1;
    }
}
/* Slide the tile adjacent to the blank (in direction dir) into the
 * blank cell, updating the heuristic incrementally, the blank position,
 * the parent direction and the depth. */
__device__ static inline void
state_move(d_State *state, Direction dir)
{
    int new_empty = state->empty + pos_diff_table[dir]; /* cell the blank moves to */
    int opponent = state_tile_get(new_empty);           /* tile that slides */
    //state->h_value += h_diff_table_shared[opponent][new_empty][dir];
    state->h_value += calc_h_diff(opponent, new_empty, dir);
    state_tile_set(state->empty, opponent);
    state->empty = new_empty;
    state->parent_dir = dir;
    ++state->depth;
}
/* stack implementation */
typedef struct div_stack_tag
{
unsigned int n;
d_State buf[STACK_BUF_LEN];
} d_Stack;
__device__ static inline bool
stack_is_empty(d_Stack *stack)
{
bool ret = (stack->n == 0);
__syncthreads();
return ret;
}
/* Block-wide push: each thread with put==true reserves a unique slot
 * via atomicInc (which returns the previous counter value) and stores
 * its state there.  Ends with a block barrier, so EVERY thread of the
 * block must reach this call.
 * NOTE(review): no bound check against STACK_BUF_LEN — overflow is
 * silently out of bounds; confirm capacity is sufficient. */
__device__ static inline void
stack_put(d_Stack *stack, d_State *state, bool put)
{
    if (put)
    {
        unsigned int i = atomicInc( &stack->n, UINT_MAX); /* slow? especially in old CC environment */
        stack->buf[i] = *state;
    }
    __syncthreads();
}
/* Block-wide pop of up to BLOCK_DIM/DIR_N states: groups of 4 threads
 * (tid>>2) read the same stack entry, so one node is expanded by 4
 * threads (one per direction, see idas_internal).  Contains two block
 * barriers — every thread must reach this call.
 * Returns whether this thread's group received a state. */
__device__ static inline bool
stack_pop(d_Stack *stack, d_State *state)
{
    int tid = threadIdx.x;
    int i = (int) stack->n - 1 - (int) (tid >> 2);
    if (i >= 0)
        *state = stack->buf[i];
    __syncthreads();
    if (tid == 0) /* a single thread shrinks the count after all copies */
        stack->n = stack->n >= BLOCK_DIM / DIR_N ?
            stack->n - BLOCK_DIM / DIR_N : 0;
    __syncthreads();
    return i >= 0;
}
//__device__ __shared__ Direction candidate_dir_table[4][3] = {}
/*
* solver implementation
*/
/* Block-cooperative bounded DFS: one IDA* round at the given f_limit.
 *
 * All threads iterate the loop together: each round pops up to
 * blockDim.x/DIR_N nodes (4 threads share one node, one direction
 * each), expands them, and pushes surviving children back.
 *
 * stack_is_empty/stack_pop/stack_put each contain __syncthreads(), so
 * every thread of the block must reach them on every iteration.
 * Fix: the original used `continue` to skip the reverse-of-parent
 * direction, which skipped the barrier inside stack_put for a subset
 * of threads — a divergent __syncthreads(), i.e. undefined behavior.
 * The check is now folded into the expansion condition so all threads
 * always reach stack_put. */
__device__ static void
idas_internal(d_Stack *stack, int f_limit, search_stat *stat)
{
    d_State state;
    unsigned long long int loop_cnt = 0;
#ifdef COLLECT_LOG
    unsigned long long int nodes_expanded = 0;
#endif
    if (threadIdx.x == 0)
        stat->solved = false;
    for (;;)
    {
        if (stack_is_empty(stack))
        {
            stat->loads = loop_cnt;
#ifdef COLLECT_LOG
            atomicAdd(&stat->nodes_expanded, nodes_expanded);
#endif
            break;
        }
        ++loop_cnt;
        bool found = stack_pop(stack, &state),
             put = false;
        if (found)
        {
            Direction dir = threadIdx.x & 3; /* 4 threads cover DIR_N dirs */
#ifdef COLLECT_LOG
            nodes_expanded++;
#endif
            /* NOTE: candidate_dir_table may be effective to avoid divergence */
            /* never undo the parent move; checked inline (not `continue`)
             * so that every thread still reaches stack_put's barrier */
            if (state.parent_dir != dir_reverse(dir) &&
                state_movable(state, dir))
            {
                state_move(&state, dir);
                if (state_get_f(state) <= f_limit)
                {
                    if (state_is_goal(state))
                    {
#ifndef SEARCH_ALL_THE_BEST
                        asm("trap;");
#else
                        stat->loads = loop_cnt;
                        stat->len = state.depth;
                        stat->solved = true;
#endif
#ifdef COLLECT_LOG
                        atomicAdd(&stat->nodes_expanded, nodes_expanded);
#endif
                    }
                    else
                        put = true;
                }
            }
        }
        //__syncthreads(); // maybe useless
        stack_put(stack, &state, put);
    }
}
/* XXX: movable table is effective in this case? */
/* One block per root: stage the movable table into shared memory, seed
 * the shared stack with this block's root state, and run the bounded
 * DFS for this f_limit round.  (h_diff_table is accepted for interface
 * compatibility; the heuristic delta is computed by calc_h_diff.) */
__global__ void
idas_kernel(Input *input, search_stat *stat, int f_limit,
            signed char *h_diff_table, bool *movable_table)
{
    __shared__ d_Stack stack;
    const int tid = threadIdx.x;
    const int root = blockIdx.x;
    if (tid == 0)
        stat[root].loads = 0;
    /* every thread builds the root state redundantly in registers */
    d_State init;
    state_init(&init, &input[root]);
    if (state_get_f(init) > f_limit)
        return; /* root already above the bound: the whole block exits */
    if (tid == 0)
    {
        stack.buf[0] = init;
        stack.n = 1;
    }
    /* cooperative copy of the movable table into shared memory */
    for (int idx = tid; idx < STATE_N * DIR_N; idx += blockDim.x)
        movable_table_shared[idx / DIR_N][idx % DIR_N] = movable_table[idx];
    __syncthreads();
    idas_internal(&stack, f_limit, &stat[root]);
}
/* host library implementation */
#include <errno.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef UNABLE_LOG
#define elog(...) fprintf(stderr, __VA_ARGS__)
#else
#define elog(...) ;
#endif
void *
palloc(size_t size)
{
void *ptr = malloc(size);
if (!ptr)
elog("malloc failed\n");
return ptr;
}
void *
repalloc(void *old_ptr, size_t new_size)
{
void *ptr = realloc(old_ptr, new_size);
if (!ptr)
elog("realloc failed\n");
return ptr;
}
void
pfree(void *ptr)
{
if (!ptr)
elog("empty ptr\n");
free(ptr);
}
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
typedef unsigned char idx_t;
/*
* [0,0] [1,0] [2,0] [3,0]
* [0,1] [1,1] [2,1] [3,1]
* [0,2] [1,2] [2,2] [3,2]
* [0,3] [1,3] [2,3] [3,3]
*/
/*
* goal state is
* [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
*/
typedef struct state_tag_cpu
{
int depth; /* XXX: needed? */
uchar pos[STATE_WIDTH][STATE_WIDTH];
idx_t i, j; /* pos of empty */
Direction parent_dir;
int h_value;
} * State;
#define v(state, i, j) ((state)->pos[i][j])
#define ev(state) (v(state, state->i, state->j))
#define lv(state) (v(state, state->i - 1, state->j))
#define dv(state) (v(state, state->i, state->j + 1))
#define rv(state) (v(state, state->i + 1, state->j))
#define uv(state) (v(state, state->i, state->j - 1))
static uchar from_x[STATE_WIDTH * STATE_WIDTH],
from_y[STATE_WIDTH * STATE_WIDTH];
static inline void
fill_from_xy(State from)
{
for (idx_t x = 0; x < STATE_WIDTH; ++x)
for (idx_t y = 0; y < STATE_WIDTH; ++y)
{
from_x[v(from, x, y)] = x;
from_y[v(from, x, y)] = y;
}
}
static inline int
heuristic_manhattan_distance(State from)
{
int h_value = 0;
fill_from_xy(from);
for (idx_t i = 1; i < STATE_N; ++i)
{
h_value += distance(from_x[i], POS_X(i));
h_value += distance(from_y[i], POS_Y(i));
}
return h_value;
}
bool
state_is_goal(State state)
{
return state->h_value == 0;
}
static inline State
state_alloc(void)
{
return (State) palloc(sizeof(struct state_tag_cpu));
}
static inline void
state_free(State state)
{
pfree(state);
}
State
state_init(uchar v_list[STATE_WIDTH * STATE_WIDTH], int init_depth)
{
State state = state_alloc();
int cnt = 0;
state->depth = init_depth;
state->parent_dir = (Direction) -1;
for (idx_t j = 0; j < STATE_WIDTH; ++j)
for (idx_t i = 0; i < STATE_WIDTH; ++i)
{
if (v_list[cnt] == 0)
{
state->i = i;
state->j = j;
}
v(state, i, j) = v_list[cnt++];
}
state->h_value = heuristic_manhattan_distance(state);
return state;
}
void
state_fini(State state)
{
state_free(state);
}
State
state_copy(State src)
{
State dst = state_alloc();
memcpy(dst, src, sizeof(*src));
return dst;
}
static inline bool
state_left_movable(State state)
{
return state->i != 0;
}
static inline bool
state_down_movable(State state)
{
return state->j != STATE_WIDTH - 1;
}
static inline bool
state_right_movable(State state)
{
return state->i != STATE_WIDTH - 1;
}
static inline bool
state_up_movable(State state)
{
return state->j != 0;
}
bool
state_movable(State state, Direction dir)
{
return (dir != DIR_LEFT || state_left_movable(state)) &&
(dir != DIR_DOWN || state_down_movable(state)) &&
(dir != DIR_RIGHT || state_right_movable(state)) &&
(dir != DIR_UP || state_up_movable(state));
}
#define h_diff(who, opponent, dir) \
(h_diff_table[((who) * STATE_N * DIR_N) + ((opponent) << 2) + (dir)])
static int h_diff_table[STATE_N * STATE_N * DIR_N];
void
state_move(State state, Direction dir)
{
idx_t who;
assert(state_movable(state, dir));
switch (dir)
{
case DIR_LEFT:
who = ev(state) = lv(state);
state->i--;
break;
case DIR_DOWN:
who = ev(state) = dv(state);
state->j++;
break;
case DIR_RIGHT:
who = ev(state) = rv(state);
state->i++;
break;
case DIR_UP:
who = ev(state) = uv(state);
state->j--;
break;
default:
elog("unexpected direction");
assert(false);
}
state->h_value =
state->h_value + h_diff(who, state->i + state->j * STATE_WIDTH, dir_reverse(dir));
state->parent_dir = dir;
}
bool
state_pos_equal(State s1, State s2)
{
for (idx_t i = 0; i < STATE_WIDTH; ++i)
for (idx_t j = 0; j < STATE_WIDTH; ++j)
if (v(s1, i, j) != v(s2, i, j))
return false;
return true;
}
size_t
state_hash(State state)
{
/* FIXME: for A* */
size_t hash_value = 0;
for (idx_t i = 0; i < STATE_WIDTH; ++i)
for (idx_t j = 0; j < STATE_WIDTH; ++j)
hash_value ^= (v(state, i, j) << ((i * 3 + j) << 2));
return hash_value;
}
int
state_get_hvalue(State state)
{
return state->h_value;
}
int
state_get_depth(State state)
{
return state->depth;
}
static void
state_dump(State state)
{
elog("LOG(state): depth=%d, h=%d, f=%d, ", state->depth, state->h_value,
state->depth + state->h_value);
for (int i = 0; i < STATE_N; ++i)
elog("%d%c", i == state->i + STATE_WIDTH * state->j
? 0
: state->pos[i % STATE_WIDTH][i / STATE_WIDTH],
i == STATE_N - 1 ? '\n' : ',');
}
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
typedef enum {
HT_SUCCESS = 0,
HT_FAILED_FOUND,
HT_FAILED_NOT_FOUND,
} HTStatus;
/* XXX: hash function for State should be surveyed */
inline static size_t
hashfunc(State key)
{
return state_hash(key);
}
typedef struct ht_entry_tag *HTEntry;
struct ht_entry_tag
{
HTEntry next;
State key;
int value;
};
static HTEntry
ht_entry_init(State key)
{
HTEntry entry = (HTEntry) palloc(sizeof(*entry));
entry->key = state_copy(key);
entry->next = NULL;
return entry;
}
static void
ht_entry_fini(HTEntry entry)
{
pfree(entry);
}
typedef struct ht_tag
{
size_t n_bins;
size_t n_elems;
HTEntry *bin;
} * HT;
static bool
ht_rehash_required(HT ht)
{
return ht->n_bins <= ht->n_elems; /* TODO: local policy is also needed */
}
/* Smallest power of two >= required.  Bin counts must be powers of two
 * because the bin index is computed with a mask (hash & (n_bins - 1)). */
static size_t
calc_n_bins(size_t required)
{
    assert(required > 0);
    size_t n = 1;
    for (; n < required; n <<= 1)
        ;
    return n;
}
HT
ht_init(size_t init_size_hint)
{
size_t n_bins = calc_n_bins(init_size_hint);
HT ht = (HT) palloc(sizeof(*ht));
ht->n_bins = n_bins;
ht->n_elems = 0;
assert(sizeof(*ht->bin) <= SIZE_MAX / n_bins);
ht->bin = (HTEntry *) palloc(sizeof(*ht->bin) * n_bins);
memset(ht->bin, 0, sizeof(*ht->bin) * n_bins);
return ht;
}
/* Double the bin count and redistribute every entry.  Entries are
 * relinked in place; only the bin array itself is reallocated. */
static void
ht_rehash(HT ht)
{
    HTEntry *new_bin;
    size_t new_size = ht->n_bins << 1;
    /* guard against size_t overflow of the doubled bin count
     * (parses as: n_bins < (SIZE_MAX >> 1)) */
    assert(ht->n_bins<SIZE_MAX>> 1);
    new_bin = (HTEntry *) palloc(sizeof(*new_bin) * new_size);
    memset(new_bin, 0, sizeof(*new_bin) * new_size);
    for (size_t i = 0; i < ht->n_bins; ++i)
    {
        HTEntry entry = ht->bin[i];
        while (entry)
        {
            HTEntry next = entry->next;
            /* new_size is a power of two, so masking == modulo */
            size_t idx = hashfunc(entry->key) & (new_size - 1);
            entry->next = new_bin[idx];
            new_bin[idx] = entry;
            entry = next;
        }
    }
    pfree(ht->bin);
    ht->n_bins = new_size;
    ht->bin = new_bin;
}
void
ht_fini(HT ht)
{
for (size_t i = 0; i < ht->n_bins; ++i)
{
HTEntry entry = ht->bin[i];
while (entry)
{
HTEntry next = entry->next;
state_fini(entry->key);
ht_entry_fini(entry);
entry = next;
}
}
pfree(ht->bin);
pfree(ht);
}
/* Look up key; if present, point *value at the stored int slot and
 * return HT_FAILED_FOUND.  Otherwise insert a COPY of key (via
 * ht_entry_init), point *value at the new — uninitialized — slot, and
 * return HT_SUCCESS.  Callers are expected to write through *value. */
HTStatus
ht_insert(HT ht, State key, int **value)
{
    size_t i;
    HTEntry entry, new_entry;
    if (ht_rehash_required(ht))
        ht_rehash(ht);
    i = hashfunc(key) & (ht->n_bins - 1); /* n_bins is a power of two */
    entry = ht->bin[i];
    while (entry)
    {
        if (state_pos_equal(key, entry->key))
        {
            *value = &entry->value;
            return HT_FAILED_FOUND;
        }
        entry = entry->next;
    }
    /* not found: push a new entry on the front of the chain */
    new_entry = ht_entry_init(key);
    new_entry->next = ht->bin[i];
    ht->bin[i] = new_entry;
    *value = &new_entry->value;
    assert(ht->n_elems < SIZE_MAX);
    ht->n_elems++;
    return HT_SUCCESS;
}
/*
* Priority Queue implementation
*/
#include <assert.h>
#include <stdint.h>
typedef struct pq_entry_tag
{
State state;
int f, g;
} PQEntryData;
typedef PQEntryData *PQEntry;
/* tiebreaking is done comparing g value */
static inline bool
pq_entry_higher_priority(PQEntry e1, PQEntry e2)
{
return e1->f < e2->f || (e1->f == e2->f && e1->g >= e2->g);
}
/*
* NOTE:
* This priority queue is implemented doubly reallocated array.
* It will only extend and will not shrink, for now.
* It may be improved by using array of layers of iteratively widened array
*/
typedef struct pq_tag
{
size_t n_elems;
size_t capa;
PQEntryData *array;
} * PQ;
/* Initial capacity: one less than the smallest power of two >= hint,
 * so capacity stays of the form 2^k - 1 across pq_extend calls. */
static inline size_t
calc_init_capa(size_t capa_hint)
{
    assert(capa_hint > 0);
    size_t pow2 = 1;
    for (; pow2 < capa_hint; pow2 <<= 1)
        ;
    return pow2 - 1;
}
PQ
pq_init(size_t init_capa_hint)
{
PQ pq = (PQ) palloc(sizeof(*pq));
pq->n_elems = 0;
pq->capa = calc_init_capa(init_capa_hint);
assert(pq->capa <= SIZE_MAX / sizeof(PQEntryData));
pq->array = (PQEntryData *) palloc(sizeof(PQEntryData) * pq->capa);
return pq;
}
void
pq_fini(PQ pq)
{
for (size_t i = 0; i < pq->n_elems; ++i)
state_fini(pq->array[i].state);
pfree(pq->array);
pfree(pq);
}
static inline bool
pq_is_full(PQ pq)
{
assert(pq->n_elems <= pq->capa);
return pq->n_elems == pq->capa;
}
static inline void
pq_extend(PQ pq)
{
pq->capa = (pq->capa << 1) + 1;
assert(pq->capa <= SIZE_MAX / sizeof(PQEntryData));
pq->array =
(PQEntryData *) repalloc(pq->array, sizeof(PQEntryData) * pq->capa);
}
static inline void
pq_swap_entry(PQ pq, size_t i, size_t j)
{
PQEntryData tmp = pq->array[i];
pq->array[i] = pq->array[j];
pq->array[j] = tmp;
}
static inline size_t
pq_up(size_t i)
{
/* NOTE: By using 1-origin, it may be written more simply, i >> 1 */
return (i - 1) >> 1;
}
static inline size_t
pq_left(size_t i)
{
return (i << 1) + 1;
}
static void
heapify_up(PQ pq)
{
for (size_t i = pq->n_elems; i > 0;)
{
size_t ui = pq_up(i);
assert(i > 0);
if (!pq_entry_higher_priority(&pq->array[i], &pq->array[ui]))
break;
pq_swap_entry(pq, i, ui);
i = ui;
}
}
/* Insert a COPY of state with priority f (ties broken by g).
 * The element is appended at index n_elems and sifted up BEFORE the
 * count is bumped — heapify_up reads n_elems as the new slot's index. */
void
pq_put(PQ pq, State state, int f, int g)
{
    if (pq_is_full(pq))
        pq_extend(pq);
    pq->array[pq->n_elems].state = state_copy(state);
    pq->array[pq->n_elems].f = f; /* this may be abundant */
    pq->array[pq->n_elems].g = g;
    heapify_up(pq);
    ++pq->n_elems;
}
/* Sift the root entry down until the heap property holds again.
 * At each step the higher-priority child is the swap candidate; when
 * the parent already beats that candidate, the heap is valid. */
static void
heapify_down(PQ pq)
{
    size_t end = pq->n_elems;
    size_t i = 0;
    for (;;)
    {
        size_t l = pq_left(i);
        if (l >= end)
            break; /* no children at all */
        size_t r = l + 1;
        if (r >= end)
        {
            /* only a left child remains: one final comparison */
            if (pq_entry_higher_priority(&pq->array[l], &pq->array[i]))
                pq_swap_entry(pq, i, l);
            break;
        }
        /* NOTE: on a priority tie the left child wins (>= in the
         * comparator), matching the left-first filling order */
        size_t child =
            pq_entry_higher_priority(&pq->array[l], &pq->array[r]) ? l : r;
        if (!pq_entry_higher_priority(&pq->array[child], &pq->array[i]))
            break;
        pq_swap_entry(pq, i, child);
        i = child;
    }
}
State
pq_pop(PQ pq)
{
State ret_state;
if (pq->n_elems == 0)
return NULL;
ret_state = pq->array[0].state;
--pq->n_elems;
pq->array[0] = pq->array[pq->n_elems];
heapify_down(pq);
return ret_state;
}
void
pq_dump(PQ pq)
{
elog("%s: n_elems=%zu, capa=%zu\n", __func__, pq->n_elems, pq->capa);
for (size_t i = 0, cr_required = 1; i < pq->n_elems; i++)
{
if (i == cr_required)
{
elog("\n");
cr_required = (cr_required << 1) + 1;
}
elog("%d,", pq->array[i].f);
elog("%d ", pq->array[i].g);
}
elog("\n");
}
#include <stdlib.h>
#include <string.h>
int
rrand(int m)
{
return (int) ((double) m * (rand() / (RAND_MAX + 1.0)));
}
void
shuffle_input(Input input[], int n_inputs)
{
Input tmp;
size_t n = n_inputs;
while (n > 1)
{
size_t k = rrand(n--);
memcpy(&tmp, &input[n], sizeof(Input));
memcpy(&input[n], &input[k], sizeof(Input));
memcpy(&input[k], &tmp, sizeof(Input));
}
}
static HT closed;
bool
distribute_astar(State init_state, Input input[], int distr_n, int *cnt_inputs,
int *min_fvalue)
{
int cnt = 0;
State state;
PQ q = pq_init(distr_n + 10);
HTStatus ht_status;
int * ht_value;
bool solved = false;
closed = ht_init(10000);
ht_status = ht_insert(closed, init_state, &ht_value);
*ht_value = 0;
pq_put(q, state_copy(init_state), state_get_hvalue(init_state), 0);
++cnt;
while ((state = pq_pop(q)))
{
--cnt;
if (state_is_goal(state))
{
solved = true;
break;
}
ht_status = ht_insert(closed, state, &ht_value);
if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state))
{
state_fini(state);
continue;
}
else
*ht_value = state_get_depth(state);
for (int dir = 0; dir < DIR_N; ++dir)
{
if (state->parent_dir != dir_reverse(dir) &&
state_movable(state, (Direction) dir))
{
State next_state = state_copy(state);
state_move(next_state, (Direction) dir);
next_state->depth++;
ht_status = ht_insert(closed, next_state, &ht_value);
if (ht_status == HT_FAILED_FOUND &&
*ht_value <= state_get_depth(next_state))
state_fini(next_state);
else
{
++cnt;
*ht_value = state_get_depth(next_state);
pq_put(q, next_state,
*ht_value + state_get_hvalue(next_state), *ht_value);
}
}
}
state_fini(state);
if (cnt >= distr_n)
break;
}
*cnt_inputs = cnt;
elog("LOG: init_distr, cnt=%d\n", cnt);
if (!solved)
{
int minf = INT_MAX;
for (int id = 0; id < cnt; ++id)
{
State state = pq_pop(q);
assert(state);
for (int i = 0; i < STATE_N; ++i)
input[id].tiles[i] =
state->pos[i % STATE_WIDTH][i / STATE_WIDTH];
input[id].tiles[state->i + (state->j * STATE_WIDTH)] = 0;
input[id].init_depth = state_get_depth(state);
input[id].parent_dir = state->parent_dir;
if (minf > state_get_depth(state) + state_get_hvalue(state))
minf = state_get_depth(state) + state_get_hvalue(state);
}
assert(pq_pop(q) == NULL);
// shuffle_input(input, cnt);
*min_fvalue = minf;
}
pq_fini(q);
return solved;
}
static int
input_devide(Input input[], search_stat stat[], int i, int devide_n, int tail,
int *buf_len)
{
int cnt = 0;
int * ht_value;
State state = state_init(input[i].tiles, input[i].init_depth);
state->parent_dir = input[i].parent_dir;
PQ pq = pq_init(devide_n);
HTStatus ht_status;
pq_put(pq, state, state_get_hvalue(state), 0);
++cnt;
assert(devide_n > 0);
while ((state = pq_pop(pq)))
{
--cnt;
if (state_is_goal(state))
{
/* It may not be optimal goal */
pq_put(pq, state, state_get_depth(state) + state_get_hvalue(state),
state_get_depth(state));
++cnt;
break;
}
ht_status = ht_insert(closed, state, &ht_value);
if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state))
{
state_fini(state);
continue;
}
else
*ht_value = state_get_depth(state);
for (int dir = 0; dir < DIR_N; ++dir)
{
if (state->parent_dir != dir_reverse(dir) &&
state_movable(state, (Direction) dir))
{
State next_state = state_copy(state);
state_move(next_state, (Direction) dir);
next_state->depth++;
ht_status = ht_insert(closed, next_state, &ht_value);
if (ht_status == HT_FAILED_FOUND &&
*ht_value < state_get_depth(next_state))
state_fini(next_state);
else
{
++cnt;
*ht_value = state_get_depth(next_state);
pq_put(pq, next_state,
*ht_value + state_get_hvalue(next_state), *ht_value);
}
}
}
state_fini(state);
if (cnt >= devide_n)
break;
}
int new_buf_len = *buf_len;
while (tail + cnt >= new_buf_len)
new_buf_len <<= 1;
if (new_buf_len != *buf_len)
{
*buf_len = new_buf_len;
repalloc(input, sizeof(*input) * new_buf_len);
elog("LOG: host buf resize\n");
}
input[i] = input[tail - 1];
for (int id = 0; id < cnt; ++id)
{
int ofs = tail - 1 + id;
State state = pq_pop(pq);
assert(state);
for (int j = 0; j < STATE_N; ++j)
input[ofs].tiles[j] = state->pos[j % STATE_WIDTH][j / STATE_WIDTH];
input[ofs].tiles[state->i + (state->j * STATE_WIDTH)] = 0;
input[ofs].init_depth = state_get_depth(state);
input[ofs].parent_dir = state->parent_dir;
}
pq_fini(pq);
return cnt - 1;
}
/* main */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
/* Print a printf-style message and terminate the process with EXIT_FAILURE. */
#define exit_failure(...)                                                      \
    do                                                                         \
    {                                                                          \
        printf(__VA_ARGS__);                                                   \
        exit(EXIT_FAILURE);                                                    \
    } while (0)
/*
 * Parse the next integer token from `str` with strtol (base auto-detected).
 * On success, *end_ptr points just past the consumed token so the caller can
 * continue scanning.  Aborts the process (via exit_failure) on conversion
 * error, on an empty token, or if the value does not fit in an int.
 */
static int
pop_int_from_str(const char *str, char **end_ptr)
{
    long int rv;

    /* errno must be cleared BEFORE strtol, otherwise a stale value is tested
     * and an ERANGE overflow from this very call is never detected. */
    errno = 0;
    rv = strtol(str, end_ptr, 0);
    if (errno != 0)
        exit_failure("%s: %s cannot be converted into long\n", __func__, str);
    else if (end_ptr && str == *end_ptr)
        exit_failure("%s: reach end of string", __func__);
    if (rv > INT_MAX || rv < INT_MIN)
        exit_failure("%s: too big number, %ld\n", __func__, rv);
    return (int) rv;
}
#define MAX_LINE_LEN 100
/*
 * Read the initial puzzle configuration from the first line of <fname>
 * into the tile array <s>: STATE_N whitespace-separated integers.
 * Aborts the process on any open/read/parse failure.
 */
static void
load_state_from_file(const char *fname, uchar *s)
{
    char line[MAX_LINE_LEN];
    char *cursor, *next;
    FILE *fp = fopen(fname, "r");

    if (!fp)
        exit_failure("%s: %s cannot be opened\n", __func__, fname);
    if (!fgets(line, MAX_LINE_LEN, fp))
        exit_failure("%s: fgets failed\n", __func__);

    cursor = line;
    for (int i = 0; i < STATE_N; ++i)
    {
        /* pop_int_from_str advances `next` past the consumed token. */
        s[i] = pop_int_from_str(cursor, &next);
        cursor = next;
    }

    fclose(fp);
}
#undef MAX_LINE_LEN
/* Evaluate a CUDA runtime call and abort with file/line context on failure. */
#define CUDA_CHECK(call)                                                       \
    do                                                                         \
    {                                                                          \
        const cudaError_t e = call;                                            \
        if (e != cudaSuccess)                                                  \
            exit_failure("Error: %s:%d code:%d, reason: %s\n", __FILE__,       \
                         __LINE__, e, cudaGetErrorString(e));                  \
    } while (0)
/* cudaMalloc wrapper: returns the device allocation or aborts via CUDA_CHECK. */
__host__ static void *
cudaPalloc(size_t size)
{
    void *dev_mem;
    CUDA_CHECK(cudaMalloc(&dev_mem, size));
    return dev_mem;
}
/* cudaFree wrapper: releases a device allocation or aborts via CUDA_CHECK. */
__host__ static void
cudaPfree(void *ptr)
{
    CUDA_CHECK(cudaFree(ptr));
}
#define h_d_t(op, i, dir) \
    (h_diff_table[(op) *STATE_N * DIR_N + (i) *DIR_N + (dir)])
/*
 * Precompute the Manhattan-distance delta table: for a tile whose goal cell
 * is `opponent`, currently sitting at cell `i`, moving the blank in `dir`
 * changes the heuristic by -1 (tile moves toward its goal) or +1 (away).
 */
__host__ static void
init_mdist(signed char h_diff_table[])
{
    for (int opponent = 0; opponent < STATE_N; ++opponent)
    {
        const int goal_x = POS_X(opponent), goal_y = POS_Y(opponent);

        for (int i = 0; i < STATE_N; ++i)
        {
            const int from_x = POS_X(i), from_y = POS_Y(i);

            for (uchar dir = 0; dir < DIR_N; ++dir)
            {
                switch (dir)
                {
                case DIR_LEFT:
                    h_d_t(opponent, i, dir) = goal_x > from_x ? -1 : 1;
                    break;
                case DIR_RIGHT:
                    h_d_t(opponent, i, dir) = goal_x < from_x ? -1 : 1;
                    break;
                case DIR_UP:
                    h_d_t(opponent, i, dir) = goal_y > from_y ? -1 : 1;
                    break;
                case DIR_DOWN:
                    h_d_t(opponent, i, dir) = goal_y < from_y ? -1 : 1;
                    break;
                }
            }
        }
    }
}
#undef h_d_t
#define m_t(i, d) (movable_table[(i) *DIR_N + (d)])
/*
 * Precompute, for every blank position i and direction d, whether the blank
 * may move in that direction without leaving the STATE_WIDTH x STATE_WIDTH
 * board.
 */
__host__ static void
init_movable_table(bool movable_table[])
{
    for (int i = 0; i < STATE_N; ++i)
    {
        const int x = POS_X(i), y = POS_Y(i);

        for (unsigned int d = 0; d < DIR_N; ++d)
        {
            if (d == DIR_RIGHT)
                m_t(i, d) = (x < STATE_WIDTH - 1);
            else if (d == DIR_LEFT)
                m_t(i, d) = (x > 0);
            else if (d == DIR_DOWN)
                m_t(i, d) = (y < STATE_WIDTH - 1);
            else if (d == DIR_UP)
                m_t(i, d) = (y > 0);
        }
    }
}
#undef m_t
// static char dir_char[] = {'U', 'R', 'L', 'D'};
/* Sizes below depend on the *current* value of buf_len, so they track the
 * host buffer growth performed inside the search loop. */
#define INPUT_SIZE (sizeof(Input) * buf_len)
#define STAT_SIZE (sizeof(search_stat) * buf_len)
#define MOVABLE_TABLE_SIZE (sizeof(bool) * STATE_N * DIR_N)
#define H_DIFF_TABLE_SIZE (STATE_N * STATE_N * DIR_N)
/*
 * Driver: load a puzzle, distribute initial roots with a host-side A*,
 * then run IDA* (iterative deepening with f_limit += 2 per round) on the
 * GPU, rebalancing overloaded roots between rounds via input_devide().
 */
int
main(int argc, char *argv[])
{
    int n_roots;
    int buf_len = N_INIT_DISTRIBUTION * MAX_BUF_RATIO;
    Input *input = (Input *) palloc(INPUT_SIZE),
          *d_input = (Input *) cudaPalloc(INPUT_SIZE);
    search_stat *stat = (search_stat *) palloc(STAT_SIZE),
                *d_stat = (search_stat *) cudaPalloc(STAT_SIZE);
    bool *movable_table = (bool *) palloc(MOVABLE_TABLE_SIZE),
         *d_movable_table = (bool *) cudaPalloc(MOVABLE_TABLE_SIZE);
    signed char *h_diff_table = (signed char *) palloc(H_DIFF_TABLE_SIZE),
                *d_h_diff_table = (signed char *) cudaPalloc(H_DIFF_TABLE_SIZE);
    int min_fvalue = 0;
    if (argc != 2)
        exit_failure("usage: bin/cumain <ifname>\n");
    load_state_from_file(argv[1], input[0].tiles);
    {
        /* Host-side distribution: may already solve easy instances.
         * NOTE(review): on the goto path init_state is not state_fini()'d —
         * harmless here since the process exits, but worth confirming. */
        State init_state = state_init(input[0].tiles, 0);
        state_dump(init_state);
        if (distribute_astar(init_state, input, N_INIT_DISTRIBUTION, &n_roots,
                             &min_fvalue))
        {
            elog("solution is found by distributor\n");
            goto solution_found;
        }
        state_fini(init_state);
    }
    /* Upload the constant lookup tables once. */
    init_mdist(h_diff_table);
    init_movable_table(movable_table);
    CUDA_CHECK(cudaMemcpy(d_movable_table, movable_table, MOVABLE_TABLE_SIZE,
                          cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_h_diff_table, h_diff_table, H_DIFF_TABLE_SIZE,
                          cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemset(d_input, 0, INPUT_SIZE));
    /* IDA* deepening loop; f_limit += 2 preserves solution-depth parity.
     * NOTE(review): f_limit is uchar, so it silently wraps past 255. */
    for (uchar f_limit = min_fvalue;; f_limit += 2)
    {
        CUDA_CHECK(cudaMemset(d_stat, 0, STAT_SIZE));
        CUDA_CHECK(
            cudaMemcpy(d_input, input, INPUT_SIZE, cudaMemcpyHostToDevice));
        elog("f_limit=%d\n", (int) f_limit);
        idas_kernel<<<n_roots, BLOCK_DIM>>>(d_input, d_stat, f_limit,
                                            d_h_diff_table, d_movable_table);
        CUDA_CHECK(
            cudaGetLastError()); /* asm trap is called when find solution */
        CUDA_CHECK(cudaMemcpy(stat, d_stat, STAT_SIZE, cudaMemcpyDeviceToHost));
        unsigned long long int loads_sum = 0;
        for (int i = 0; i < n_roots; ++i)
            loads_sum += stat[i].loads;
#ifdef COLLECT_LOG
        elog("STAT: loop\n");
        for (int i = 0; i < n_roots; ++i)
            elog("%lld, ", stat[i].loads);
        putchar('\n');
        elog("STAT: nodes_expanded\n");
        for (int i = 0; i < n_roots; ++i)
            elog("%lld, ", stat[i].nodes_expanded);
        putchar('\n');
        elog("STAT: efficiency\n");
        for (int i = 0; i < n_roots; ++i)
            if (stat[i].loads != 0)
                elog("%lld, ", stat[i].nodes_expanded / stat[i].loads);
        putchar('\n');
#endif
        /* Load-balancing: split roots whose load exceeds the average,
         * bucketing the load distribution for logging. */
        int increased = 0;
        unsigned long long int loads_av = loads_sum / n_roots;
        int stat_cnt[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
        for (int i = 0; i < n_roots; ++i)
        {
            if (stat[i].loads < loads_av)
                stat_cnt[0]++;
            else if (stat[i].loads < 2 * loads_av)
                stat_cnt[1]++;
            else if (stat[i].loads < 4 * loads_av)
                stat_cnt[2]++;
            else if (stat[i].loads < 8 * loads_av)
                stat_cnt[3]++;
            else if (stat[i].loads < 16 * loads_av)
                stat_cnt[4]++;
            else if (stat[i].loads < 32 * loads_av)
                stat_cnt[5]++;
            else if (stat[i].loads < 64 * loads_av)
                stat_cnt[6]++;
            else if (stat[i].loads < 128 * loads_av)
                stat_cnt[7]++;
            else
                stat_cnt[8]++;
            /* policy = how many pieces this root should be split into. */
            int policy = loads_av == 0 ? stat[i].loads
                                       : (stat[i].loads - 1) / loads_av + 1;
            int buf_len_old = buf_len;
            if (policy > 1 && stat[i].loads > 10 && MAX_BLOCK_SIZE > increased + n_roots)
                increased += input_devide(input, stat, i, policy,
                                          n_roots + increased, &buf_len);
            /* NOTE(review): when input_devide grew the buffer, `input` is NOT
             * reassigned here (the repalloc return inside input_devide is
             * discarded) — valid only if repalloc grows in place. */
            if (buf_len != buf_len_old)
            {
                elog("XXX: fix MAX_BUF_RATIO\n");
                stat = (search_stat *) repalloc(stat, STAT_SIZE);
                cudaPfree(d_input);
                cudaPfree(d_stat);
                d_input = (Input *) cudaPalloc(INPUT_SIZE);
                d_stat = (search_stat *) cudaPalloc(STAT_SIZE);
            }
        }
        elog("STAT: loads: sum=%lld, av=%lld\n", loads_sum, loads_av);
        elog("STAT: distr: av=%d, 2av=%d, 4av=%d, 8av=%d, 16av=%d, 32av=%d, "
             "64av=%d, 128av=%d, more=%d\n",
             stat_cnt[0], stat_cnt[1], stat_cnt[2], stat_cnt[3], stat_cnt[4],
             stat_cnt[5], stat_cnt[6], stat_cnt[7], stat_cnt[8]);
        n_roots += increased;
        elog("STAT: n_roots=%d(+%d)\n", n_roots, increased);
#ifdef SEARCH_ALL_THE_BEST
        for (int i = 0; i < n_roots; ++i)
            if (stat[i].solved)
            {
                elog("find all the optimal solution(s), at depth=%d\n", stat[i].len);
                goto solution_found;
            }
#endif
    }
solution_found:
    /* Release device then host resources. */
    cudaPfree(d_input);
    cudaPfree(d_stat);
    cudaPfree(d_movable_table);
    cudaPfree(d_h_diff_table);
    CUDA_CHECK(cudaDeviceReset());
    pfree(input);
    pfree(stat);
    pfree(movable_table);
    pfree(h_diff_table);
    return 0;
}
|
02dc6f2c888dcbd8cdc880eee41b203e0d35a1fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#include <hip/hip_complex.h>
// Strided elementwise kernel over n entries using a grid-stride loop.
// NOTE(review): the function is NAMED rsub_* (reverse subtract) but the body
// computes result[i] = dx[i] / dy[i] — a division, likely copy-pasted from a
// div kernel. Confirm the intended operation before relying on this.
// NOTE(review): the guard applies both offsets and both strides to the same
// flat index i; verify this matches the caller's strided-layout convention.
__global__ void rsub_strided_double(int n,int xOffset,int yOffset, double *dx, double *dy,int incx,int incy,double *result) {
       for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
             if(i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0)
                  result[i] = dx[i] / dy[i];
   }
}
| 02dc6f2c888dcbd8cdc880eee41b203e0d35a1fe.cu | extern "C"
#include <cuComplex.h>
__global__ void rsub_strided_double(int n,int xOffset,int yOffset, double *dx, double *dy,int incx,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0)
result[i] = dx[i] / dy[i];
}
}
|
e6a5f3eafa8a05ab8449dead0518d68d3e270fc4.hip | // !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Rasterizer: A simple rasterization pipeline for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/random.h>
#include "rasterizeKernels.h"
#include "rasterizeTools.h"
#include "glm/gtc/matrix_transform.hpp"
#if TORCH_HIP_VERSION >= 5000
#include <helper_math.h>
#else
#include <cutil_math.h>
#endif
#define DEG2RAD 180/PI
/* Per-pixel device buffers (allocated in initBuffers). */
glm::vec3* framebuffer;
fragment* depthbuffer;
/* Per-frame geometry buffers on the device. */
float* device_vbo;
float* device_cbo;
float* device_nbo;
int* device_ibo;
triangle* primitives;
// NEW
/* Per-pixel spinlock words (0 = unlocked) guarding depth-buffer updates. */
int* lockbuffer;
/* Camera parameters shared by the host-side pipeline entry points. */
glm::vec3 up(0, 1, 0);
float fovy = 60;
float zNear = 0.01;
float zFar = 1000;
/* Device-side light array and its element count (see initLights). */
light* lights;
int lightsize = 4;
// Abort the process with a message if any prior CUDA call left an error pending.
void checkCUDAError(const char *msg) {
  hipError_t status = hipGetLastError();
  if (status != hipSuccess) {
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(status));
    exit(EXIT_FAILURE);
  }
}
// Integer bit-mixing hash used to derive per-thread seeds for random number
// generation; usable from both host and device code.
__host__ __device__ unsigned int hash(unsigned int a){
  a = (a+0x7ed55d16) + (a<<12);
  a = (a^0xc761c23c) ^ (a>>19);
  a = (a+0x165667b1) + (a<<5);
  a = (a+0xd3a2646c) ^ (a<<9);
  a = (a+0xfd7046c5) + (a<<3);
  a = (a^0xb55a4f09) ^ (a>>16);
  return a;
}
// Store `frag` at pixel (x, y) of the row-major fragment buffer.
// Coordinates at or beyond the resolution are silently ignored.
__host__ __device__ void writeToDepthbuffer(int x, int y, fragment frag, fragment* depthbuffer, glm::vec2 resolution){
  if (x >= resolution.x || y >= resolution.y)
    return;
  int index = (y*resolution.x) + x;
  depthbuffer[index] = frag;
}
// Fetch the fragment at pixel (x, y) of the row-major fragment buffer.
// Out-of-range coordinates return a default-constructed fragment
// (its fields are NOT initialized — callers must not rely on them).
__host__ __device__ fragment getFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){
  if (x < resolution.x && y < resolution.y) {
    int index = (y*resolution.x) + x;
    return depthbuffer[index];
  }
  fragment empty;
  return empty;
}
// Store color `value` at pixel (x, y) of the row-major pixel buffer.
// Coordinates at or beyond the resolution are silently ignored.
__host__ __device__ void writeToFramebuffer(int x, int y, glm::vec3 value, glm::vec3* framebuffer, glm::vec2 resolution){
  if (x >= resolution.x || y >= resolution.y)
    return;
  int index = (y*resolution.x) + x;
  framebuffer[index] = value;
}
// Fetch the color at pixel (x, y) of the row-major pixel buffer;
// out-of-range coordinates yield black.
__host__ __device__ glm::vec3 getFromFramebuffer(int x, int y, glm::vec3* framebuffer, glm::vec2 resolution){
  if (x < resolution.x && y < resolution.y) {
    int index = (y*resolution.x) + x;
    return framebuffer[index];
  }
  return glm::vec3(0,0,0);
}
// Kernel: fill every pixel of the row-major image (resolution.x * resolution.y
// elements) with `color`. Expects a 2D launch whose grid covers the resolution.
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image, glm::vec3 color){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict '<' bounds: with the previous '<=', a thread at x == resolution.x
  // or y == resolution.y computed an index past the end of the buffer and
  // wrote out of bounds (the grid is ceil-divided, so such threads exist).
  if(x<resolution.x && y<resolution.y){
    image[index] = color;
  }
}
// Kernel: reset every fragment of the depth buffer to `frag`, stamping each
// fragment with its own pixel coordinates.
__global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer, fragment frag){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict '<' bounds: '<=' allowed edge threads to write one element past
  // the end of the buffer.
  if(x<resolution.x && y<resolution.y){
    fragment f = frag;
    f.position.x = x;
    f.position.y = y;
    buffer[index] = f;
  }
}
// Kernel: set every per-pixel spinlock word to 0 (unlocked).
__global__ void clearLockBuffer(glm::vec2 resolution, int* lockbuffer) {
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict '<' bounds: '<=' allowed edge threads to write one element past
  // the end of the buffer.
  if(x<resolution.x && y<resolution.y){
    lockbuffer[index] = 0;
  }
}
// Kernel: reset a fragment to `frag` only where the stored stencil value
// matches frag.s (stencil test), stamping pixel coordinates as in
// clearDepthBuffer.
__global__ void clearDepthBufferOnStencil(glm::vec2 resolution, fragment* buffer, fragment frag){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict '<' bounds: '<=' allowed edge threads to read/write one element
  // past the end of the buffer.
  if(x<resolution.x && y<resolution.y){
    if (buffer[index].s == frag.s) {
      fragment f = frag;
      f.position.x = x;
      f.position.y = y;
      buffer[index] = f;
    }
  }
}
// Kernel: convert the float [0,1] framebuffer to 8-bit RGBA and write it
// directly into the OpenGL pixel buffer object.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict '<' bounds: '<=' allowed edge threads to access one element past
  // the end of both buffers.
  if(x<resolution.x && y<resolution.y){
    glm::vec3 color;
    color.x = image[index].x*255.0;
    color.y = image[index].y*255.0;
    color.z = image[index].z*255.0;
    // Clamp to the displayable range.
    if(color.x>255){
      color.x = 255;
    }
    if(color.y>255){
      color.y = 255;
    }
    if(color.z>255){
      color.z = 255;
    }
    // Each thread writes one pixel location in the texture (textel)
    PBOpos[index].w = 0;
    PBOpos[index].x = color.x;
    PBOpos[index].y = color.y;
    PBOpos[index].z = color.z;
  }
}
// Transform a world-space point by the combined projection*view matrix,
// perform the perspective divide, and map the result to window coordinates:
// x,y in pixels, z remapped from NDC [-1,1] to [1,0] (larger == closer).
__host__ __device__ glm::vec3 transformPos(glm::vec3 v, glm::mat4 matrix, glm::vec2 resolution) {
  glm::vec4 clip = matrix * glm::vec4(v, 1);
  // perspective division: clip space -> normalized device coordinates
  clip.x = clip.x/clip.w;
  clip.y = clip.y/clip.w;
  clip.z = clip.z/clip.w;
  // viewport transform: NDC -> pixel coordinates
  clip.x = resolution.x/2 * (clip.x+1);
  clip.y = resolution.y/2 * (clip.y+1);
  clip.z = -0.5 * clip.z + 0.5;
  return glm::vec3(clip);
}
// Kernel: multiply every vertex (xyz triple) in the VBO by the model matrix,
// in place. One thread per vertex.
__global__ void transformVertices(float* vbo, int vbosize, glm::mat4 modelMatrix) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= vbosize/3)
    return;
  glm::vec4 p = modelMatrix * glm::vec4(vbo[index*3], vbo[index*3+1], vbo[index*3+2], 1);
  vbo[index*3]   = p.x;
  vbo[index*3+1] = p.y;
  vbo[index*3+2] = p.z;
}
// Kernel: transform every normal (xyz triple) in the NBO by the model matrix
// (w = 0 so translation is ignored), in place. One thread per normal.
// NOTE(review): normals are multiplied by the model matrix itself, not its
// inverse transpose — correct only for rotations/uniform scale. The result
// is also not renormalized here. Verify against the matrices actually used.
__global__ void transformNormals(float* nbo, int nbosize, glm::mat4 modelMatrix) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if(index<nbosize/3){
    glm::vec4 n(nbo[index*3], nbo[index*3+1], nbo[index*3+2], 0);
    n = modelMatrix * n;
    nbo[index*3] = n.x;
    nbo[index*3+1] = n.y;
    nbo[index*3+2] = n.z;
  }
}
// Primitive assembly: gather each triangle's three vertex positions, colors
// and normals from the flat buffers via the index buffer. One thread per
// triangle.
// Note: all three vertices receive the SAME color cbo[0..2]; the per-vertex
// color lookups are deliberately commented out below.
__global__ void primitiveAssemblyKernel(float* vbo, int vbosize, float* cbo, int cbosize, float* nbo, int nbosize, int* ibo, int ibosize, triangle* primitives){
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  int primitivesCount = ibosize/3;
  if(index<primitivesCount){
    int v0 = ibo[index*3];
    int v1 = ibo[index*3+1];
    int v2 = ibo[index*3+2];
    glm::vec3 p0(vbo[v0*3], vbo[v0*3+1], vbo[v0*3+2]);
    glm::vec3 p1(vbo[v1*3], vbo[v1*3+1], vbo[v1*3+2]);
    glm::vec3 p2(vbo[v2*3], vbo[v2*3+1], vbo[v2*3+2]);
    glm::vec3 c0(cbo[0], cbo[1], cbo[2]);
    glm::vec3 c1(cbo[0], cbo[1], cbo[2]);
    glm::vec3 c2(cbo[0], cbo[1], cbo[2]);
    //glm::vec3 c0(cbo[v0*3], cbo[v0*3+1], cbo[v0*3+2]);
    //glm::vec3 c1(cbo[v1*3], cbo[v1*3+1], cbo[v1*3+2]);
    //glm::vec3 c2(cbo[v2*3], cbo[v2*3+1], cbo[v2*3+2]);
    glm::vec3 n0(nbo[v0*3], nbo[v0*3+1], nbo[v0*3+2]);
    glm::vec3 n1(nbo[v1*3], nbo[v1*3+1], nbo[v1*3+2]);
    glm::vec3 n2(nbo[v2*3], nbo[v2*3+1], nbo[v2*3+2]);
    primitives[index] = triangle(p0, p1, p2, c0, c1, c2, n0, n1, n2);
  }
}
// Vertex shader: bring each vertex from world space into window space using
// the combined camera matrix. One thread per vertex, in place.
__global__ void vertexShadeKernel(float* vbo, int vbosize, glm::mat4 cameraMatrix, glm::vec2 resolution){
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= vbosize/3)
    return;
  glm::vec3 pos(vbo[index*3], vbo[index*3+1], vbo[index*3+2]);
  pos = transformPos(pos, cameraMatrix, resolution);
  vbo[index*3]   = pos.x;
  vbo[index*3+1] = pos.y;
  vbo[index*3+2] = pos.z;
}
// Refresh each triangle's screen-space positions (pt0..pt2) from the
// transformed VBO, looking vertices up through the IBO. One thread per
// triangle; colors/normals are left untouched.
__global__ void updatePrimitiveKernel(float* vbo, int vbosize, int* ibo, int ibosize, triangle* primitives){
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  int primitivesCount = ibosize/3;
  if (index >= primitivesCount)
    return;
  int i0 = ibo[index*3];
  int i1 = ibo[index*3+1];
  int i2 = ibo[index*3+2];
  primitives[index].pt0 = glm::vec3(vbo[i0*3], vbo[i0*3+1], vbo[i0*3+2]);
  primitives[index].pt1 = glm::vec3(vbo[i1*3], vbo[i1*3+1], vbo[i1*3+2]);
  primitives[index].pt2 = glm::vec3(vbo[i2*3], vbo[i2*3+1], vbo[i2*3+2]);
}
// Assemble position-only triangles (for the portal/stencil pass) from the
// transformed VBO and the IBO. One thread per triangle.
__global__ void stencilPrimitiveKernel(float* vbo, int vbosize, int* ibo, int ibosize, vertTriangle* primitives){
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  int primitivesCount = ibosize/3;
  if (index >= primitivesCount)
    return;
  int i0 = ibo[index*3];
  int i1 = ibo[index*3+1];
  int i2 = ibo[index*3+2];
  glm::vec3 a(vbo[i0*3], vbo[i0*3+1], vbo[i0*3+2]);
  glm::vec3 b(vbo[i1*3], vbo[i1*3+1], vbo[i1*3+2]);
  glm::vec3 c(vbo[i2*3], vbo[i2*3+1], vbo[i2*3+2]);
  primitives[index] = vertTriangle(a, b, c);
}
// Intersect the segment v1->v2 with the horizontal line Y = y; x and z are
// linearly interpolated. Degenerate when v1.y == v2.y (division by zero).
__device__ glm::vec3 getScanlineIntersection(glm::vec3 v1, glm::vec3 v2, float y) {
  float t = (y-v1.y)/(v2.y-v1.y);
  float ix = t*v2.x + (1-t)*v1.x;
  float iz = t*v2.z + (1-t)*v1.z;
  return glm::vec3(ix, y, iz);
}
// True when p lies strictly inside the screen rectangle (borders excluded).
__device__ bool isInScreen(glm::vec3 p, glm::vec2 resolution) {
  bool insideX = p.x > 0 && p.x < resolution.x;
  bool insideY = p.y > 0 && p.y < resolution.y;
  return insideX && insideY;
}
// Primitive-parallel rasterizer: one thread per triangle. Each thread walks
// the scanlines of its triangle's vertical bounding box, finds the two edge
// intersections per scanline, and writes the spanned pixels with depth test
// (and optional stencil test) under a per-pixel spinlock.
// NOTE(review): the atomicExch spinlock below can livelock/deadlock when
// threads of the same warp contend for one pixel on pre-Volta architectures
// (no independent thread scheduling) — confirm the target architecture.
__global__ void rasterizationPerPrimKernel(triangle* primitives, int primitiveCount, fragment* depthbuffer, glm::vec2 resolution, bool stencilTest, int stencil, int* lockbuffer) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index < primitiveCount) {
    triangle prim = primitives[index];
    // Vertical extent of the triangle, clamped to the screen.
    float topy = min(min(prim.pt0.y, prim.pt1.y), prim.pt2.y);
    float boty = max(max(prim.pt0.y, prim.pt1.y), prim.pt2.y);
    int top = max((int)floor(topy), 0);
    int bot = min((int)ceil(boty), (int)resolution.y);
    for (int y=top; y<bot; ++y) {
      // Classify each vertex against the scanline to pick the two edges
      // that cross it.
      float dy0 = prim.pt0.y - y;
      float dy1 = prim.pt1.y - y;
      float dy2 = prim.pt2.y - y;
      int onPositiveSide = (int)(dy0>=0) + (int)(dy1>=0) + (int)(dy2>=0);
      int onNegativeSide = (int)(dy0<=0) + (int)(dy1<=0) + (int)(dy2<=0);
      glm::vec3 intersection1, intersection2;
      if (onPositiveSide == 3 || onNegativeSide == 3) {
        if (dy0 == 0) {
          intersection1 = prim.pt0;
          intersection2 = prim.pt0;
        }
        else if (dy1 == 0) {
          intersection1 = prim.pt1;
          intersection2 = prim.pt1;
        }
        else if (dy2 == 0) {
          intersection1 = prim.pt2;
          intersection2 = prim.pt2;
        }
      }
      else if (onPositiveSide == 2 && onNegativeSide == 2) { // one vertex is on the scanline
        // doesn't really happen due to the floating point error
        if (dy0 == 0) {
          intersection1 = prim.pt0;
          intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, y);
        }
        else if (dy1 == 0) {
          intersection1 = prim.pt1;
          intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, y);
        }
        else { // dy2 == 0
          intersection1 = prim.pt2;
          intersection2 = getScanlineIntersection(prim.pt1, prim.pt0, y);
        }
      }
      else if (onPositiveSide == 2) {
        if (dy0 < 0) {
          intersection1 = getScanlineIntersection(prim.pt0, prim.pt1, y);
          intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, y);
        }
        else if (dy1 < 0) {
          intersection1 = getScanlineIntersection(prim.pt1, prim.pt0, y);
          intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, y);
        }
        else { // dy2 < 0
          intersection1 = getScanlineIntersection(prim.pt2, prim.pt0, y);
          intersection2 = getScanlineIntersection(prim.pt2, prim.pt1, y);
        }
      }
      else { // onNegativeSide == 2
        if (dy0 > 0) {
          intersection1 = getScanlineIntersection(prim.pt0, prim.pt1, y);
          intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, y);
        }
        else if (dy1 > 0) {
          intersection1 = getScanlineIntersection(prim.pt1, prim.pt0, y);
          intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, y);
        }
        else { // dy2 > 0
          intersection1 = getScanlineIntersection(prim.pt2, prim.pt0, y);
          intersection2 = getScanlineIntersection(prim.pt2, prim.pt1, y);
        }
      }
      // make sure intersection1's x value is less than intersection2's
      if (intersection2.x < intersection1.x) {
        glm::vec3 temp = intersection1;
        intersection1 = intersection2;
        intersection2 = temp;
      }
      // Horizontal span, clamped to the screen.
      int left = min((int)(resolution.x)-1,max(0, (int)floor(intersection1.x)));
      int right = min((int)(resolution.x-1),max(0, (int)floor(intersection2.x)));
      for (int x=left; x<=right; ++x) {
        // Framebuffer is addressed flipped in both axes here.
        int pixelIndex = (resolution.x-1-x) + (resolution.y-1-y) * resolution.x;
        float t = (x-intersection1.x)/(intersection2.x-intersection1.x);
        glm::vec3 point = t*intersection2 + (1-t)*intersection1;
        // lock stuff
        bool wait = true;
        while (wait) {
          if (0 == atomicExch(&lockbuffer[pixelIndex], 1)) {
            // Depth test (larger z == closer, see transformPos) plus
            // optional stencil equality test.
            bool draw = true;
            if (stencilTest) {
              draw = (depthbuffer[pixelIndex].s == stencil) && (point.z > depthbuffer[pixelIndex].z);
            }
            else {
              draw = (point.z > depthbuffer[pixelIndex].z);
            }
            if (draw) {
              // Interpolate color/normal/position with barycentric weights.
              glm::vec3 bc = calculateBarycentricCoordinate(prim, glm::vec2(point.x, point.y));
              depthbuffer[pixelIndex].color = prim.c0 * bc.x + prim.c1 * bc.y + prim.c2 * bc.z;
              depthbuffer[pixelIndex].normal = glm::normalize(prim.n0 * bc.x + prim.n1 * bc.y + prim.n2 * bc.z);
              depthbuffer[pixelIndex].position = prim.p0 * bc.x + prim.p1 * bc.y + prim.p2 * bc.z;
              depthbuffer[pixelIndex].z = point.z;
            }
            atomicExch(&lockbuffer[pixelIndex], 0);
            wait = false;
          }
        }
      }
    }
  }
}
// Scanline-parallel rasterizer: one thread per screen row. Each thread tests
// every triangle against its scanline, finds the two edge intersections, and
// writes the spanned pixels with depth test (and optional stencil test)
// under a per-pixel spinlock.
// NOTE(review): same atomicExch spinlock caveat as rasterizationPerPrimKernel
// — intra-warp contention can hang on pre-Volta architectures.
__global__ void rasterizationKernel(triangle* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, bool stencilTest, int stencil, int* lockbuffer){
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if(index < resolution.y){
    for (int i=0; i<primitivesCount; ++i) {
      triangle prim = primitives[i];
      // Classify each vertex against this thread's scanline.
      float dy0 = prim.pt0.y - index;
      float dy1 = prim.pt1.y - index;
      float dy2 = prim.pt2.y - index;
      int onPositiveSide = (int)(dy0>=-FLT_EPSILON) + (int)(dy1>=-FLT_EPSILON) + (int)(dy2>=-FLT_EPSILON);
      int onNegativeSide = (int)(dy0<=FLT_EPSILON) + (int)(dy1<=FLT_EPSILON) + (int)(dy2<=FLT_EPSILON);
      if (onPositiveSide != 3 && onNegativeSide != 3) { // the primitive intersects the scanline
        glm::vec3 intersection1, intersection2;
        if (onPositiveSide == 2 && onNegativeSide == 2) { // one vertex is on the scanline, doesn't really happen due to the floating point error
          if (dy0 == 0) {
            intersection1 = prim.pt0;
            intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, index);
          }
          else if (dy1 == 0) {
            intersection1 = prim.pt1;
            intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, index);
          }
          else { // dy2 == 0
            intersection1 = prim.pt2;
            intersection2 = getScanlineIntersection(prim.pt1, prim.pt0, index);
          }
        }
        else if (onPositiveSide == 2) {
          if (dy0 < 0) {
            intersection1 = getScanlineIntersection(prim.pt0, prim.pt1, index);
            intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, index);
          }
          else if (dy1 < 0) {
            intersection1 = getScanlineIntersection(prim.pt1, prim.pt0, index);
            intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, index);
          }
          else { // dy2 < 0
            intersection1 = getScanlineIntersection(prim.pt2, prim.pt0, index);
            intersection2 = getScanlineIntersection(prim.pt2, prim.pt1, index);
          }
        }
        else { // onNegativeSide == 2
          if (dy0 > 0) {
            intersection1 = getScanlineIntersection(prim.pt0, prim.pt1, index);
            intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, index);
          }
          else if (dy1 > 0) {
            intersection1 = getScanlineIntersection(prim.pt1, prim.pt0, index);
            intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, index);
          }
          else { // dy2 > 0
            intersection1 = getScanlineIntersection(prim.pt2, prim.pt0, index);
            intersection2 = getScanlineIntersection(prim.pt2, prim.pt1, index);
          }
        }
        // make sure intersection1's x value is less than intersection2's
        if (intersection2.x < intersection1.x) {
          glm::vec3 temp = intersection1;
          intersection1 = intersection2;
          intersection2 = temp;
        }
        // Horizontal span, clamped to the screen.
        int start = min((int)(resolution.x)-1,max(0, (int)floor(intersection1.x)));
        int end = min((int)(resolution.x-1),max(0, (int)floor(intersection2.x)));
        for (int j=start; j<=end; ++j) {
          // Framebuffer is addressed flipped in both axes here.
          int pixelIndex = (resolution.x-1-j) + (resolution.y-1-index) * resolution.x;
          float t = (j-intersection1.x)/(intersection2.x-intersection1.x);
          glm::vec3 point = t*intersection2 + (1-t)*intersection1;
          // lock stuff
          bool wait = true;
          while (wait) {
            if (0 == atomicExch(&lockbuffer[pixelIndex], 1)) {
              // Depth test (larger z == closer) plus optional stencil test.
              bool draw = true;
              if (stencilTest) {
                draw = (depthbuffer[pixelIndex].s == stencil) && (point.z > depthbuffer[pixelIndex].z);
              }
              else {
                draw = (point.z > depthbuffer[pixelIndex].z);
              }
              if (draw) {
                glm::vec3 bc = calculateBarycentricCoordinate(prim, glm::vec2(point.x, point.y));
                depthbuffer[pixelIndex].color = prim.c0 * bc.x + prim.c1 * bc.y + prim.c2 * bc.z;
                depthbuffer[pixelIndex].normal = glm::normalize(prim.n0 * bc.x + prim.n1 * bc.y + prim.n2 * bc.z);
                depthbuffer[pixelIndex].position = prim.p0 * bc.x + prim.p1 * bc.y + prim.p2 * bc.z;
                depthbuffer[pixelIndex].z = point.z;
              }
              atomicExch(&lockbuffer[pixelIndex], 0);
              wait = false;
            }
          }
        }
      }
    }
  }
}
// Scanline-parallel stencil pass: like rasterizationKernel, but rasterizes
// position-only triangles and, where the depth test passes, writes only the
// stencil value and depth — no shading attributes.
// NOTE(review): same atomicExch spinlock caveat as the other rasterizers.
__global__ void rasterizationStencilKernel(vertTriangle* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, int stencil, int* lockbuffer){
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if(index < resolution.y){
    for (int i=0; i<primitivesCount; ++i) {
      vertTriangle prim = primitives[i];
      // Classify each vertex against this thread's scanline.
      float dy0 = prim.pt0.y - index;
      float dy1 = prim.pt1.y - index;
      float dy2 = prim.pt2.y - index;
      int onPositiveSide = (int)(dy0>=-FLT_EPSILON) + (int)(dy1>=-FLT_EPSILON) + (int)(dy2>=-FLT_EPSILON);
      int onNegativeSide = (int)(dy0<=FLT_EPSILON) + (int)(dy1<=FLT_EPSILON) + (int)(dy2<=FLT_EPSILON);
      if (onPositiveSide != 3 && onNegativeSide != 3) { // the primitive intersects the scanline
        glm::vec3 intersection1, intersection2;
        if (onPositiveSide == 2 && onNegativeSide == 2) { // one vertex is on the scanline // doesn't really happen due to the floating point error
          if (dy0 == 0) {
            intersection1 = prim.pt0;
            intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, index);
          }
          else if (dy1 == 0) {
            intersection1 = prim.pt1;
            intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, index);
          }
          else { // dy2 == 0
            intersection1 = prim.pt2;
            intersection2 = getScanlineIntersection(prim.pt1, prim.pt0, index);
          }
        }
        else if (onPositiveSide == 2) {
          if (dy0 < 0) {
            intersection1 = getScanlineIntersection(prim.pt0, prim.pt1, index);
            intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, index);
          }
          else if (dy1 < 0) {
            intersection1 = getScanlineIntersection(prim.pt1, prim.pt0, index);
            intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, index);
          }
          else { // dy2 < 0
            intersection1 = getScanlineIntersection(prim.pt2, prim.pt0, index);
            intersection2 = getScanlineIntersection(prim.pt2, prim.pt1, index);
          }
        }
        else { // onNegativeSide == 2
          if (dy0 > 0) {
            intersection1 = getScanlineIntersection(prim.pt0, prim.pt1, index);
            intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, index);
          }
          else if (dy1 > 0) {
            intersection1 = getScanlineIntersection(prim.pt1, prim.pt0, index);
            intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, index);
          }
          else { // dy2 > 0
            intersection1 = getScanlineIntersection(prim.pt2, prim.pt0, index);
            intersection2 = getScanlineIntersection(prim.pt2, prim.pt1, index);
          }
        }
        // make sure intersection1's x value is less than intersection2's
        if (intersection2.x < intersection1.x) {
          glm::vec3 temp = intersection1;
          intersection1 = intersection2;
          intersection2 = temp;
        }
        // Horizontal span, clamped to the screen.
        int start = min((int)(resolution.x)-1,max(0, (int)floor(intersection1.x)));
        int end = min((int)(resolution.x-1),max(0, (int)floor(intersection2.x)));
        for (int j=start; j<=end; ++j) {
          int pixelIndex = (resolution.x-1-j) + (resolution.y-1-index) * resolution.x;
          float t = (j-intersection1.x)/(intersection2.x-intersection1.x);
          glm::vec3 point = t*intersection2 + (1-t)*intersection1;
          // lock stuff
          bool wait = true;
          while (wait) {
            if (0 == atomicExch(&lockbuffer[pixelIndex], 1)) {
              // Depth test only; on pass, record the stencil id and depth.
              if (point.z > depthbuffer[pixelIndex].z) {
                depthbuffer[pixelIndex].s = stencil;
                depthbuffer[pixelIndex].z = point.z;
              }
              atomicExch(&lockbuffer[pixelIndex], 0);
              wait = false;
            }
          }
        }
      }
    }
  }
}
// Fragment shader: per-pixel Lambert diffuse plus Phong specular over all
// lights. The specular coefficient ks is enabled only for fragments whose
// color differs from the hard-coded "wheat" tone. Background pixels
// (z == -FLT_MAX, i.e. never rasterized) are overwritten with grey.
__global__ void fragmentShadeKernel(fragment* depthbuffer, glm::vec2 resolution, glm::vec3 eye, light* lights, int lightsize){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict '<' bounds: '<=' allowed edge threads to read/write one element
  // past the end of the depth buffer.
  if(x<resolution.x && y<resolution.y){
    glm::vec3 diffuseColor(0);
    glm::vec3 specularColor(0);
    float ks = 0;
    if (glm::distance(depthbuffer[index].color, glm::vec3(245.0/255.0, 222.0/255.0, 179.0/255.0)) > 0.1) {
      ks = 0.3;
    }
    glm::vec3 norm = depthbuffer[index].normal;
    glm::vec3 pos = depthbuffer[index].position;
    for (int i=0; i<lightsize; ++i) {
      //diffuse component
      glm::vec3 lightDir = glm::normalize(glm::vec3(lights[i].pos - pos));
      float diffuseTerm = glm::clamp(glm::dot(lightDir, norm), 0.0f, 1.0f);
      diffuseColor += diffuseTerm * lights[i].color;
      //specular component
      if (ks > 0.0001) {
        glm::vec3 LR; // reflected light direction
        if (glm::length(lightDir - norm) < 0.0001) {
          LR = norm;
        }
        else if (abs(glm::dot(lightDir, norm)) < 0.0001) {
          LR = -lightDir;
        }
        else {
          LR = glm::normalize(-lightDir - 2.0f * glm::dot(-lightDir, norm) * norm);
        }
        float specularTerm = min(1.0f, pow(max(0.0f, glm::dot(LR, glm::normalize(eye - pos))), 20.0f));
        specularColor += specularTerm * glm::vec3(1.0f);
      }
    }
    depthbuffer[index].color = diffuseColor * depthbuffer[index].color + ks * specularColor;
    //set background color
    if (depthbuffer[index].z == -FLT_MAX) {
      depthbuffer[index].color = glm::vec3(0.6, 0.6, 0.6);
    }
  }
}
// Kernel: copy each shaded fragment's color into the framebuffer.
__global__ void render(glm::vec2 resolution, fragment* depthbuffer, glm::vec3* framebuffer){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Strict '<' bounds: '<=' allowed edge threads to read/write one element
  // past the end of both buffers.
  if(x<resolution.x && y<resolution.y){
    framebuffer[index] = depthbuffer[index].color;
  }
}
// Build the four hard-coded scene lights on the host and upload them into
// the global device array `lights` (lightsize elements).
void initLights() {
  light* cpulights = new light[lightsize];
  cpulights[0] = light(glm::vec3(0.8, 0.8, 0.8), glm::vec3(4, 4, 4));
  cpulights[1] = light(glm::vec3(0.4, 0.4, 0.4), glm::vec3(-4, 4, 4));
  cpulights[2] = light(glm::vec3(0.3, 0.3, 0.3), glm::vec3(0, 0, -5));
  cpulights[3] = light(glm::vec3(0.3, 0.3, 0.3), glm::vec3(0, -5, 0));
  checkCUDAError("Kernel failed!");
  hipMalloc((void**)&lights, lightsize*sizeof(light));
  checkCUDAError("Kernel failed!");
  hipMemcpy(lights, cpulights, lightsize*sizeof(light), hipMemcpyHostToDevice);
  checkCUDAError("Kernel failed!");
  delete [] cpulights;
}
// Allocate the per-pixel frame, depth and lock buffers for `resolution`
// and reset them to their cleared state.
void initBuffers(glm::vec2 resolution) {
  int pixelCount = (int)resolution.x * (int)resolution.y;
  framebuffer = NULL;
  hipMalloc((void**)&framebuffer, pixelCount * sizeof(glm::vec3));
  depthbuffer = NULL;
  hipMalloc((void**)&depthbuffer, pixelCount * sizeof(fragment));
  lockbuffer = NULL;
  hipMalloc((void**)&lockbuffer, pixelCount * sizeof(int));
  clearBuffers(resolution);
}
// Reset the frame buffer to black, the depth buffer to the background
// fragment (z = -FLT_MAX so any rasterized depth wins, stencil 0), and all
// per-pixel spinlocks to unlocked. Launched as 8x8 tiles covering the screen.
void clearBuffers(glm::vec2 resolution) {
  int tileSize = 8;
  dim3 threadsPerBlock(tileSize, tileSize);
  dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize)));
  //kernel launches to black out accumulated/unaccumlated pixel buffers and clear our scattering states
  hipLaunchKernelGGL(( clearImage), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, framebuffer, glm::vec3(0,0,0));
  hipDeviceSynchronize();
  fragment frag;
  frag.color = glm::vec3(0,0,0);
  frag.normal = glm::vec3(0,0,0);
  frag.position = glm::vec3(0,0,0);
  frag.z = -FLT_MAX;
  frag.s = 0;
  hipLaunchKernelGGL(( clearDepthBuffer), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, depthbuffer,frag);
  hipDeviceSynchronize();
  hipLaunchKernelGGL(( clearLockBuffer), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, lockbuffer);
  hipDeviceSynchronize();
}
// Rasterize the given geometry into the stencil channel of the depth buffer:
// transform vertices by `rotation` and the camera, assemble position-only
// triangles, and mark covered pixels with `stencil` where the depth test
// passes (used for portal rendering).
// NOTE(review): device_ibo/device_vbo (globals) are allocated here on every
// call and not freed in this function — confirm they are released elsewhere,
// otherwise this leaks device memory per frame.
void drawToStencilBuffer(glm::vec2 resolution, glm::mat4 rotation, glm::vec3 eye, glm::vec3 center, float* vbo, int vbosize, int* ibo, int ibosize, int stencil) {
  // set up crucial magic
  int tileSize = 8;
  dim3 threadsPerBlock(tileSize, tileSize);
  dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize)));
  vertTriangle* stencilPrimitives = NULL;
  hipMalloc((void**)&stencilPrimitives, (ibosize/3)*sizeof(vertTriangle));
  device_ibo = NULL;
  hipMalloc((void**)&device_ibo, ibosize*sizeof(int));
  hipMemcpy( device_ibo, ibo, ibosize*sizeof(int), hipMemcpyHostToDevice);
  device_vbo = NULL;
  hipMalloc((void**)&device_vbo, vbosize*sizeof(float));
  hipMemcpy( device_vbo, vbo, vbosize*sizeof(float), hipMemcpyHostToDevice);
  //------------------------------
  //compute the camera matrix
  //------------------------------
  // (removed an unused local `aspect`; glm::perspective is given the
  // aspect ratio expression directly)
  glm::mat4 perspMatrix = glm::perspective(fovy, resolution.x/resolution.y, zNear, zFar);
  glm::mat4 lookatMatrix = glm::lookAt(eye, center, up);
  glm::mat4 cameraMatrix = perspMatrix * lookatMatrix;
  //------------------------------
  //vertex shader
  //------------------------------
  tileSize = 64;
  int primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize));
  hipLaunchKernelGGL(( transformVertices), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, rotation);
  hipLaunchKernelGGL(( vertexShadeKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, cameraMatrix, resolution);
  hipDeviceSynchronize();
  //------------------------------
  //update stencil primitives
  //------------------------------
  primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize));
  hipLaunchKernelGGL(( stencilPrimitiveKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, device_ibo, ibosize, stencilPrimitives);
  hipDeviceSynchronize();
  //------------------------------
  //rasterization
  //------------------------------
  int scanlineBlocks = ceil(((float)resolution.y)/((float)tileSize));
  hipLaunchKernelGGL(( rasterizationStencilKernel), dim3(scanlineBlocks), dim3(tileSize), 0, 0, stencilPrimitives, ibosize/3, depthbuffer, resolution, stencil, lockbuffer);
  hipDeviceSynchronize();
  hipFree(stencilPrimitives);
}
// Reset every pixel whose stencil value matches 'stencil' back to the empty
// background fragment (keeping that stencil value), then unlock all pixels.
void clearOnStencil(glm::vec2 resolution, int stencil) {
  // Launch configuration: 8x8 pixel tiles covering the full screen.
  const int tile = 8;
  dim3 blockDims(tile, tile);
  dim3 gridDims((int)ceil(float(resolution.x)/float(tile)), (int)ceil(float(resolution.y)/float(tile)));
  // Fragment used to reset matching pixels: zeroed attributes, background
  // depth, and the requested stencil value preserved.
  fragment reset;
  reset.color = glm::vec3(0,0,0);
  reset.normal = glm::vec3(0,0,0);
  reset.position = glm::vec3(0,0,0);
  reset.z = -FLT_MAX;
  reset.s = stencil;
  hipLaunchKernelGGL(( clearDepthBufferOnStencil), dim3(gridDims), dim3(blockDims), 0, 0, resolution, depthbuffer, reset);
  hipDeviceSynchronize();
  hipLaunchKernelGGL(( clearLockBuffer), dim3(gridDims), dim3(blockDims), 0, 0, resolution, lockbuffer);
  hipDeviceSynchronize();
}
// Sets result[0] to true if any depth-buffer pixel carries the given stencil
// value. One thread per pixel; the concurrent write is benign because every
// writer stores the same value (true).
__global__ void stencilTestKernel(bool* result, glm::vec2 resolution, fragment* depthbuffer, int stencil) {
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Fix: strict '<' bounds check -- the old '<=' admitted x == resolution.x
  // and y == resolution.y, indexing one element past the buffer.
  if(x<resolution.x && y<resolution.y){
    if (depthbuffer[index].s == stencil) {
      result[0] = true;
    }
  }
}
// Wrapper for the __global__ calls: uploads geometry, runs the full pipeline
// (model transform, vertex shade, primitive assembly, rasterization) and
// frees the per-call device buffers. Interface unchanged.
void cudaRasterizeCore(glm::vec2 resolution, glm::mat4 rotation, glm::vec3 eye, glm::vec3 center,
float* vbo, int vbosize, float* cbo, int cbosize, float* nbo,
int nbosize, int* ibo, int ibosize, bool stencilTest, bool perPrimitive, int stencil){
  //------------------------------
  // Stencil pre-pass: if no pixel carries the desired stencil value, nothing
  // drawn here could pass the stencil test, so bail out early.
  //------------------------------
  if (stencilTest) {
    int tileSize = 8;
    dim3 threadsPerBlock(tileSize, tileSize);
    dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize)));
    bool anyStencilHit = false;
    bool* result = NULL;
    hipMalloc((void**)&result, sizeof(bool));
    hipMemcpy(result, &anyStencilHit, sizeof(bool), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( stencilTestKernel), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, result, resolution, depthbuffer, stencil);
    hipMemcpy(&anyStencilHit, result, sizeof(bool), hipMemcpyDeviceToHost);
    // Fix: the device flag (and a heap-allocated host copy) used to leak on
    // every call, including on the early return below.
    hipFree(result);
    if (!anyStencilHit) {
      return;
    }
  }
  //------------------------------
  // Upload geometry; the device_* globals are freed by kernelCleanup() below.
  //------------------------------
  primitives = NULL;
  hipMalloc((void**)&primitives, (ibosize/3)*sizeof(triangle));
  device_ibo = NULL;
  hipMalloc((void**)&device_ibo, ibosize*sizeof(int));
  hipMemcpy( device_ibo, ibo, ibosize*sizeof(int), hipMemcpyHostToDevice);
  device_vbo = NULL;
  hipMalloc((void**)&device_vbo, vbosize*sizeof(float));
  hipMemcpy( device_vbo, vbo, vbosize*sizeof(float), hipMemcpyHostToDevice);
  device_cbo = NULL;
  hipMalloc((void**)&device_cbo, cbosize*sizeof(float));
  hipMemcpy( device_cbo, cbo, cbosize*sizeof(float), hipMemcpyHostToDevice);
  device_nbo = NULL;
  hipMalloc((void**)&device_nbo, nbosize*sizeof(float));
  hipMemcpy( device_nbo, nbo, nbosize*sizeof(float), hipMemcpyHostToDevice);
  int tileSize = 64;
  //------------------------------
  //compute the camera matrix
  //------------------------------
  glm::mat4 perspMatrix = glm::perspective(fovy, resolution.x/resolution.y, zNear, zFar);
  glm::mat4 lookatMatrix = glm::lookAt(eye, center, up);
  glm::mat4 cameraMatrix = perspMatrix * lookatMatrix;
  //------------------------------
  //transform vertices and normals by the model matrix
  //------------------------------
  int primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize));
  hipLaunchKernelGGL(( transformVertices), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, rotation);
  hipLaunchKernelGGL(( transformNormals), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_nbo, nbosize, rotation);
  //------------------------------
  // Primitive assembly (captures world-space positions before projection).
  //------------------------------
  primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize));
  hipLaunchKernelGGL(( primitiveAssemblyKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, device_cbo, cbosize, device_nbo, nbosize, device_ibo, ibosize, primitives);
  hipDeviceSynchronize();
  //------------------------------
  //vertex shader (projects positions to window coordinates)
  //------------------------------
  primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize));
  hipLaunchKernelGGL(( vertexShadeKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, cameraMatrix, resolution);
  hipDeviceSynchronize();
  //------------------------------
  // Refresh the primitives' screen-space corner positions.
  //------------------------------
  primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize));
  hipLaunchKernelGGL(( updatePrimitiveKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, device_ibo, ibosize, primitives);
  hipDeviceSynchronize();
  //------------------------------
  //rasterization: one thread per primitive, or one thread per scanline
  //------------------------------
  if (perPrimitive) {
    hipLaunchKernelGGL(( rasterizationPerPrimKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, primitives, ibosize/3, depthbuffer, resolution, stencilTest, stencil, lockbuffer);
  }
  else {
    int scanlineBlocks = ceil(((float)resolution.y)/((float)tileSize));
    hipLaunchKernelGGL(( rasterizationKernel), dim3(scanlineBlocks), dim3(tileSize), 0, 0, primitives, ibosize/3, depthbuffer, resolution, stencilTest, stencil, lockbuffer);
  }
  hipDeviceSynchronize();
  kernelCleanup();
  checkCUDAError("Kernel failed!");
}
// Shade every fragment in the depth buffer, then copy the shaded colors into
// the framebuffer and the OpenGL pixel buffer object.
void renderToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3 eye) {
  // 8x8 pixel tiles over the whole screen.
  const int tile = 8;
  dim3 blockDims(tile, tile);
  dim3 gridDims((int)ceil(float(resolution.x)/float(tile)), (int)ceil(float(resolution.y)/float(tile)));
  // Per-pixel lighting pass.
  hipLaunchKernelGGL(( fragmentShadeKernel), dim3(gridDims), dim3(blockDims), 0, 0, depthbuffer, resolution, eye, lights, lightsize);
  hipDeviceSynchronize();
  // Copy fragment colors to the framebuffer, then pack into the PBO.
  hipLaunchKernelGGL(( render), dim3(gridDims), dim3(blockDims), 0, 0, resolution, depthbuffer, framebuffer);
  hipLaunchKernelGGL(( sendImageToPBO), dim3(gridDims), dim3(blockDims), 0, 0, PBOpos, resolution, framebuffer);
  hipDeviceSynchronize();
}
// Release the per-draw device buffers allocated by cudaRasterizeCore.
void kernelCleanup(){
  hipFree( device_vbo );
  hipFree( device_cbo );
  hipFree( device_nbo );
  hipFree( device_ibo );
  hipFree( primitives );
}
// Release the persistent screen-sized buffers created by initBuffers.
void freeBuffers() {
  hipFree( lockbuffer );
  hipFree( depthbuffer );
  hipFree( framebuffer );
}
| e6a5f3eafa8a05ab8449dead0518d68d3e270fc4.cu | // CIS565 CUDA Rasterizer: A simple rasterization pipeline for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <thrust/random.h>
#include "rasterizeKernels.h"
#include "rasterizeTools.h"
#include "glm/gtc/matrix_transform.hpp"
#if CUDA_VERSION >= 5000
#include <helper_math.h>
#else
#include <cutil_math.h>
#endif
#define DEG2RAD 180/PI
// ----- Device-side buffers (allocated in initBuffers / cudaRasterizeCore) --
glm::vec3* framebuffer;   // per-pixel output color
fragment* depthbuffer;    // per-pixel fragment (color, normal, position, z, stencil)
float* device_vbo;        // packed (x,y,z) vertex positions
float* device_cbo;        // packed (r,g,b) colors
float* device_nbo;        // packed (x,y,z) normals
int* device_ibo;          // triangle index buffer
triangle* primitives;     // assembled triangles, one per ibo triple
// NEW
int* lockbuffer;          // per-pixel spin locks used by the rasterizers
// ----- Camera / scene parameters -------------------------------------------
glm::vec3 up(0, 1, 0);    // world up vector for glm::lookAt
float fovy = 60;          // vertical field of view passed to glm::perspective
float zNear = 0.01;       // near clip plane
float zFar = 1000;        // far clip plane
light* lights;            // device array of scene lights (see initLights)
int lightsize = 4;        // number of entries in 'lights'
// Abort the process with a descriptive message if any prior CUDA call left a
// pending error. 'msg' identifies the call site in the printed message.
void checkCUDAError(const char *msg) {
  cudaError_t err = cudaGetLastError();
  if (err == cudaSuccess) {
    return;
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
  exit(EXIT_FAILURE);
}
// Handy dandy little hashing function that provides seeds for random number
// generation. Integer bit-mixer: alternating add/xor rounds with shifts.
__host__ __device__ unsigned int hash(unsigned int a){
  unsigned int k = a;
  k = (k+0x7ed55d16) + (k<<12);
  k = (k^0xc761c23c) ^ (k>>19);
  k = (k+0x165667b1) + (k<<5);
  k = (k+0xd3a2646c) ^ (k<<9);
  k = (k+0xfd7046c5) + (k<<3);
  k = (k^0xb55a4f09) ^ (k>>16);
  return k;
}
// Writes 'frag' at pixel (x, y) of the fragment buffer.
// Coordinates outside the resolution are silently ignored.
__host__ __device__ void writeToDepthbuffer(int x, int y, fragment frag, fragment* depthbuffer, glm::vec2 resolution){
  if(x<resolution.x && y<resolution.y){
    depthbuffer[(int)((y*resolution.x) + x)] = frag;
  }
}
// Reads the fragment at pixel (x, y) of the fragment buffer.
// For out-of-range coordinates a default-constructed fragment is returned
// (uninitialized POD, matching the original behavior).
__host__ __device__ fragment getFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){
  if(x<resolution.x && y<resolution.y){
    return depthbuffer[(int)((y*resolution.x) + x)];
  }
  fragment f;
  return f;
}
// Writes 'value' at pixel (x, y) of the color framebuffer.
// Coordinates outside the resolution are silently ignored.
__host__ __device__ void writeToFramebuffer(int x, int y, glm::vec3 value, glm::vec3* framebuffer, glm::vec2 resolution){
  if(x<resolution.x && y<resolution.y){
    framebuffer[(int)((y*resolution.x) + x)] = value;
  }
}
// Reads the color at pixel (x, y) of the framebuffer; black for
// out-of-range coordinates.
__host__ __device__ glm::vec3 getFromFramebuffer(int x, int y, glm::vec3* framebuffer, glm::vec2 resolution){
  if(x<resolution.x && y<resolution.y){
    return framebuffer[(int)((y*resolution.x) + x)];
  }
  return glm::vec3(0,0,0);
}
//Kernel that clears a given pixel buffer with a given color
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image, glm::vec3 color){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Fix: strict '<' bounds check -- the old '<=' admitted x == resolution.x
  // and y == resolution.y, writing one element past the end of the buffer.
  if(x<resolution.x && y<resolution.y){
    image[index] = color;
  }
}
//Kernel that clears a given fragment buffer with a given fragment,
//stamping each copy with its own pixel coordinates.
__global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer, fragment frag){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Fix: strict '<' bounds check -- the old '<=' wrote one element past the
  // end of the buffer at the right/bottom edges.
  if(x<resolution.x && y<resolution.y){
    fragment f = frag;
    f.position.x = x;
    f.position.y = y;
    buffer[index] = f;
  }
}
//Set the locks of all pixels to be 0 (unlocked)
__global__ void clearLockBuffer(glm::vec2 resolution, int* lockbuffer) {
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Fix: strict '<' bounds check -- the old '<=' wrote one element past the
  // end of the lock buffer at the right/bottom edges.
  if(x<resolution.x && y<resolution.y){
    lockbuffer[index] = 0;
  }
}
//Clears a fragment back to 'frag' only when its stored stencil value matches
//frag.s (stencil test); each cleared copy keeps its own pixel coordinates.
__global__ void clearDepthBufferOnStencil(glm::vec2 resolution, fragment* buffer, fragment frag){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Fix: strict '<' bounds check -- the old '<=' read/wrote one element past
  // the end of the buffer at the right/bottom edges.
  if(x<resolution.x && y<resolution.y){
    if (buffer[index].s == frag.s) {
      fragment f = frag;
      f.position.x = x;
      f.position.y = y;
      buffer[index] = f;
    }
  }
}
//Kernel that writes the image to the OpenGL PBO directly.
//Scales [0,1] colors to [0,255], clamping each channel at 255.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Fix: strict '<' bounds check -- the old '<=' read/wrote one element past
  // the end of the buffers at the right/bottom edges.
  if(x<resolution.x && y<resolution.y){
    glm::vec3 color;
    color.x = image[index].x*255.0;
    color.y = image[index].y*255.0;
    color.z = image[index].z*255.0;
    if(color.x>255){
      color.x = 255;
    }
    if(color.y>255){
      color.y = 255;
    }
    if(color.z>255){
      color.z = 255;
    }
    // Each thread writes one pixel location in the texture (textel)
    PBOpos[index].w = 0;
    PBOpos[index].x = color.x;
    PBOpos[index].y = color.y;
    PBOpos[index].z = color.z;
  }
}
// Project a point through 'matrix' (to clip space), apply the perspective
// divide, and map the result to window coordinates for 'resolution'.
// Depth is remapped from NDC [-1,1] to [1,0], so larger z means closer.
__host__ __device__ glm::vec3 transformPos(glm::vec3 v, glm::mat4 matrix, glm::vec2 resolution) {
  glm::vec4 clip = matrix * glm::vec4(v, 1);
  // perspective division
  glm::vec3 ndc(clip.x/clip.w, clip.y/clip.w, clip.z/clip.w);
  // viewport transform
  glm::vec3 window;
  window.x = resolution.x/2 * (ndc.x+1);
  window.y = resolution.y/2 * (ndc.y+1);
  window.z = -0.5 * ndc.z + 0.5;
  return window;
}
// Apply the model matrix to every vertex position in the (x,y,z)-packed vbo,
// one thread per vertex.
__global__ void transformVertices(float* vbo, int vbosize, glm::mat4 modelMatrix) {
  int vert = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (vert >= vbosize/3) {
    return;
  }
  float* p = vbo + vert*3;
  glm::vec4 world = modelMatrix * glm::vec4(p[0], p[1], p[2], 1);
  p[0] = world.x;
  p[1] = world.y;
  p[2] = world.z;
}
// Apply the model matrix to every normal in the (x,y,z)-packed nbo, one
// thread per normal. Normals are transformed with w = 0 so translation is
// discarded.
// NOTE(review): this uses the model matrix directly rather than its
// inverse-transpose, which is only correct for rigid transforms (no
// non-uniform scale). The callers in this file pass a rotation matrix, so
// the assumption appears to hold -- confirm if other model matrices are used.
__global__ void transformNormals(float* nbo, int nbosize, glm::mat4 modelMatrix) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index<nbosize/3){
glm::vec4 n(nbo[index*3], nbo[index*3+1], nbo[index*3+2], 0);
n = modelMatrix * n;
nbo[index*3] = n.x;
nbo[index*3+1] = n.y;
nbo[index*3+2] = n.z;
}
}
// Primitive assembly: one thread per triangle. Gathers the three vertex
// positions and normals named by the ibo and builds a 'triangle'.
// All three vertex colors are taken from the first cbo entry (flat color);
// the commented-out lines below are the per-vertex-color alternative.
__global__ void primitiveAssemblyKernel(float* vbo, int vbosize, float* cbo, int cbosize, float* nbo, int nbosize, int* ibo, int ibosize, triangle* primitives){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int primitivesCount = ibosize/3;
if(index<primitivesCount){
// Vertex indices of this triangle.
int v0 = ibo[index*3];
int v1 = ibo[index*3+1];
int v2 = ibo[index*3+2];
glm::vec3 p0(vbo[v0*3], vbo[v0*3+1], vbo[v0*3+2]);
glm::vec3 p1(vbo[v1*3], vbo[v1*3+1], vbo[v1*3+2]);
glm::vec3 p2(vbo[v2*3], vbo[v2*3+1], vbo[v2*3+2]);
glm::vec3 c0(cbo[0], cbo[1], cbo[2]);
glm::vec3 c1(cbo[0], cbo[1], cbo[2]);
glm::vec3 c2(cbo[0], cbo[1], cbo[2]);
//glm::vec3 c0(cbo[v0*3], cbo[v0*3+1], cbo[v0*3+2]);
//glm::vec3 c1(cbo[v1*3], cbo[v1*3+1], cbo[v1*3+2]);
//glm::vec3 c2(cbo[v2*3], cbo[v2*3+1], cbo[v2*3+2]);
glm::vec3 n0(nbo[v0*3], nbo[v0*3+1], nbo[v0*3+2]);
glm::vec3 n1(nbo[v1*3], nbo[v1*3+1], nbo[v1*3+2]);
glm::vec3 n2(nbo[v2*3], nbo[v2*3+1], nbo[v2*3+2]);
primitives[index] = triangle(p0, p1, p2, c0, c1, c2, n0, n1, n2);
}
}
// Vertex shader: project every vbo position into window coordinates through
// the combined camera (projection * view) matrix, one thread per vertex.
__global__ void vertexShadeKernel(float* vbo, int vbosize, glm::mat4 cameraMatrix, glm::vec2 resolution){
  int vert = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (vert >= vbosize/3) {
    return;
  }
  float* p = vbo + vert*3;
  glm::vec3 screen = transformPos(glm::vec3(p[0], p[1], p[2]), cameraMatrix, resolution);
  p[0] = screen.x;
  p[1] = screen.y;
  p[2] = screen.z;
}
// Refresh each triangle's screen-space corner positions (pt0/pt1/pt2) from
// the post-vertex-shader vbo; one thread per triangle.
__global__ void updatePrimitiveKernel(float* vbo, int vbosize, int* ibo, int ibosize, triangle* primitives){
  int tri = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (tri >= ibosize/3) {
    return;
  }
  int i0 = ibo[tri*3];
  int i1 = ibo[tri*3+1];
  int i2 = ibo[tri*3+2];
  primitives[tri].pt0 = glm::vec3(vbo[i0*3], vbo[i0*3+1], vbo[i0*3+2]);
  primitives[tri].pt1 = glm::vec3(vbo[i1*3], vbo[i1*3+1], vbo[i1*3+2]);
  primitives[tri].pt2 = glm::vec3(vbo[i2*3], vbo[i2*3+1], vbo[i2*3+2]);
}
// Populate the position-only primitive list used by the portal/stencil pass;
// one thread per triangle.
__global__ void stencilPrimitiveKernel(float* vbo, int vbosize, int* ibo, int ibosize, vertTriangle* primitives){
  int tri = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (tri >= ibosize/3) {
    return;
  }
  int i0 = ibo[tri*3];
  int i1 = ibo[tri*3+1];
  int i2 = ibo[tri*3+2];
  glm::vec3 a(vbo[i0*3], vbo[i0*3+1], vbo[i0*3+2]);
  glm::vec3 b(vbo[i1*3], vbo[i1*3+1], vbo[i1*3+2]);
  glm::vec3 c(vbo[i2*3], vbo[i2*3+1], vbo[i2*3+2]);
  primitives[tri] = vertTriangle(a, b, c);
}
// Linearly interpolate along segment v1->v2 to the point with the given y.
// Assumes v1.y != v2.y (callers only pass edges that straddle the scanline).
__device__ glm::vec3 getScanlineIntersection(glm::vec3 v1, glm::vec3 v2, float y) {
  float t = (y-v1.y)/(v2.y-v1.y);
  float u = 1-t;
  return glm::vec3(t*v2.x + u*v1.x, y, t*v2.z + u*v1.z);
}
// True when p lies strictly inside the screen rectangle (exclusive bounds).
__device__ bool isInScreen(glm::vec3 p, glm::vec2 resolution) {
  bool insideX = p.x > 0 && p.x < resolution.x;
  bool insideY = p.y > 0 && p.y < resolution.y;
  return insideX && insideY;
}
// Primitive-parallel scanline rasterizer: one thread per triangle.
// For each scanline inside the triangle's vertical bounds, the two
// edge/scanline intersections are found, then every covered pixel is
// depth/stencil-tested and shaded under a per-pixel spin lock (lockbuffer)
// so concurrent triangles cannot interleave their read-modify-write of the
// depth buffer. Depth convention: larger z is closer (see transformPos).
// NOTE(review): when a scanline leaves all three vertices strictly on one
// side (onPositiveSide/onNegativeSide == 3 with no dy exactly 0),
// intersection1/2 stay default-constructed -- presumably harmless in
// practice given the floor/ceil bounds, but worth confirming.
// NOTE(review): the atomicExch busy-wait can live-lock when lanes of one
// warp contend for the same pixel on pre-Volta hardware -- verify on target.
__global__ void rasterizationPerPrimKernel(triangle* primitives, int primitiveCount, fragment* depthbuffer, glm::vec2 resolution, bool stencilTest, int stencil, int* lockbuffer) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < primitiveCount) {
triangle prim = primitives[index];
// Vertical pixel range spanned by the triangle, clamped to the screen.
float topy = min(min(prim.pt0.y, prim.pt1.y), prim.pt2.y);
float boty = max(max(prim.pt0.y, prim.pt1.y), prim.pt2.y);
int top = max((int)floor(topy), 0);
int bot = min((int)ceil(boty), (int)resolution.y);
for (int y=top; y<bot; ++y) {
// Signed vertical distance of each vertex from the current scanline.
float dy0 = prim.pt0.y - y;
float dy1 = prim.pt1.y - y;
float dy2 = prim.pt2.y - y;
int onPositiveSide = (int)(dy0>=0) + (int)(dy1>=0) + (int)(dy2>=0);
int onNegativeSide = (int)(dy0<=0) + (int)(dy1<=0) + (int)(dy2<=0);
glm::vec3 intersection1, intersection2;
// Case 1: all vertices on one side -- only touches the scanline if a
// vertex lies exactly on it.
if (onPositiveSide == 3 || onNegativeSide == 3) {
if (dy0 == 0) {
intersection1 = prim.pt0;
intersection2 = prim.pt0;
}
else if (dy1 == 0) {
intersection1 = prim.pt1;
intersection2 = prim.pt1;
}
else if (dy2 == 0) {
intersection1 = prim.pt2;
intersection2 = prim.pt2;
}
}
else if (onPositiveSide == 2 && onNegativeSide == 2) { // one vertex is on the scanline
// doesn't really happen due to the floating point error
if (dy0 == 0) {
intersection1 = prim.pt0;
intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, y);
}
else if (dy1 == 0) {
intersection1 = prim.pt1;
intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, y);
}
else { // dy2 == 0
intersection1 = prim.pt2;
intersection2 = getScanlineIntersection(prim.pt1, prim.pt0, y);
}
}
// Case 2: one vertex below the line, two above -- intersect its edges.
else if (onPositiveSide == 2) {
if (dy0 < 0) {
intersection1 = getScanlineIntersection(prim.pt0, prim.pt1, y);
intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, y);
}
else if (dy1 < 0) {
intersection1 = getScanlineIntersection(prim.pt1, prim.pt0, y);
intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, y);
}
else { // dy2 < 0
intersection1 = getScanlineIntersection(prim.pt2, prim.pt0, y);
intersection2 = getScanlineIntersection(prim.pt2, prim.pt1, y);
}
}
// Case 3: one vertex above the line, two below.
else { // onNegativeSide == 2
if (dy0 > 0) {
intersection1 = getScanlineIntersection(prim.pt0, prim.pt1, y);
intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, y);
}
else if (dy1 > 0) {
intersection1 = getScanlineIntersection(prim.pt1, prim.pt0, y);
intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, y);
}
else { // dy2 > 0
intersection1 = getScanlineIntersection(prim.pt2, prim.pt0, y);
intersection2 = getScanlineIntersection(prim.pt2, prim.pt1, y);
}
}
// make sure intersection1's x value is less than intersection2's
if (intersection2.x < intersection1.x) {
glm::vec3 temp = intersection1;
intersection1 = intersection2;
intersection2 = temp;
}
// Horizontal pixel span, clamped to the screen.
int left = min((int)(resolution.x)-1,max(0, (int)floor(intersection1.x)));
int right = min((int)(resolution.x-1),max(0, (int)floor(intersection2.x)));
for (int x=left; x<=right; ++x) {
// The framebuffer is flipped in both axes relative to window coords.
int pixelIndex = (resolution.x-1-x) + (resolution.y-1-y) * resolution.x;
// Interpolate the screen-space point along the span.
float t = (x-intersection1.x)/(intersection2.x-intersection1.x);
glm::vec3 point = t*intersection2 + (1-t)*intersection1;
// Spin on the per-pixel lock, then depth-test, shade, and unlock.
bool wait = true;
while (wait) {
if (0 == atomicExch(&lockbuffer[pixelIndex], 1)) {
bool draw = true;
if (stencilTest) {
draw = (depthbuffer[pixelIndex].s == stencil) && (point.z > depthbuffer[pixelIndex].z);
}
else {
draw = (point.z > depthbuffer[pixelIndex].z);
}
if (draw) {
// Barycentric interpolation of color, normal, and world position.
glm::vec3 bc = calculateBarycentricCoordinate(prim, glm::vec2(point.x, point.y));
depthbuffer[pixelIndex].color = prim.c0 * bc.x + prim.c1 * bc.y + prim.c2 * bc.z;
depthbuffer[pixelIndex].normal = glm::normalize(prim.n0 * bc.x + prim.n1 * bc.y + prim.n2 * bc.z);
depthbuffer[pixelIndex].position = prim.p0 * bc.x + prim.p1 * bc.y + prim.p2 * bc.z;
depthbuffer[pixelIndex].z = point.z;
}
atomicExch(&lockbuffer[pixelIndex], 0);
wait = false;
}
}
}
}
}
}
// Scanline-parallel rasterizer: one thread per screen row. Each thread walks
// every primitive, finds the two edge intersections with its scanline (with
// an FLT_EPSILON tolerance on the side tests), and depth/stencil-tests and
// shades each covered pixel under a per-pixel spin lock (lockbuffer).
// Depth convention: larger z is closer (see transformPos).
__global__ void rasterizationKernel(triangle* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, bool stencilTest, int stencil, int* lockbuffer){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < resolution.y){
for (int i=0; i<primitivesCount; ++i) {
triangle prim = primitives[i];
// Signed vertical distance of each vertex from this scanline.
float dy0 = prim.pt0.y - index;
float dy1 = prim.pt1.y - index;
float dy2 = prim.pt2.y - index;
int onPositiveSide = (int)(dy0>=-FLT_EPSILON) + (int)(dy1>=-FLT_EPSILON) + (int)(dy2>=-FLT_EPSILON);
int onNegativeSide = (int)(dy0<=FLT_EPSILON) + (int)(dy1<=FLT_EPSILON) + (int)(dy2<=FLT_EPSILON);
if (onPositiveSide != 3 && onNegativeSide != 3) { // the primitive intersects the scanline
glm::vec3 intersection1, intersection2;
if (onPositiveSide == 2 && onNegativeSide == 2) { // one vertex is on the scanline, doesn't really happen due to the floating point error
if (dy0 == 0) {
intersection1 = prim.pt0;
intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, index);
}
else if (dy1 == 0) {
intersection1 = prim.pt1;
intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, index);
}
else { // dy2 == 0
intersection1 = prim.pt2;
intersection2 = getScanlineIntersection(prim.pt1, prim.pt0, index);
}
}
// One vertex below, two above: intersect the two edges from that vertex.
else if (onPositiveSide == 2) {
if (dy0 < 0) {
intersection1 = getScanlineIntersection(prim.pt0, prim.pt1, index);
intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, index);
}
else if (dy1 < 0) {
intersection1 = getScanlineIntersection(prim.pt1, prim.pt0, index);
intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, index);
}
else { // dy2 < 0
intersection1 = getScanlineIntersection(prim.pt2, prim.pt0, index);
intersection2 = getScanlineIntersection(prim.pt2, prim.pt1, index);
}
}
// One vertex above, two below.
else { // onNegativeSide == 2
if (dy0 > 0) {
intersection1 = getScanlineIntersection(prim.pt0, prim.pt1, index);
intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, index);
}
else if (dy1 > 0) {
intersection1 = getScanlineIntersection(prim.pt1, prim.pt0, index);
intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, index);
}
else { // dy2 > 0
intersection1 = getScanlineIntersection(prim.pt2, prim.pt0, index);
intersection2 = getScanlineIntersection(prim.pt2, prim.pt1, index);
}
}
// make sure intersection1's x value is less than intersection2's
if (intersection2.x < intersection1.x) {
glm::vec3 temp = intersection1;
intersection1 = intersection2;
intersection2 = temp;
}
// Horizontal pixel span, clamped to the screen.
int start = min((int)(resolution.x)-1,max(0, (int)floor(intersection1.x)));
int end = min((int)(resolution.x-1),max(0, (int)floor(intersection2.x)));
for (int j=start; j<=end; ++j) {
// The framebuffer is flipped in both axes relative to window coords.
int pixelIndex = (resolution.x-1-j) + (resolution.y-1-index) * resolution.x;
float t = (j-intersection1.x)/(intersection2.x-intersection1.x);
glm::vec3 point = t*intersection2 + (1-t)*intersection1;
// Spin on the per-pixel lock, then depth-test, shade, and unlock.
bool wait = true;
while (wait) {
if (0 == atomicExch(&lockbuffer[pixelIndex], 1)) {
bool draw = true;
if (stencilTest) {
draw = (depthbuffer[pixelIndex].s == stencil) && (point.z > depthbuffer[pixelIndex].z);
}
else {
draw = (point.z > depthbuffer[pixelIndex].z);
}
if (draw) {
// Barycentric interpolation of color, normal, and world position.
glm::vec3 bc = calculateBarycentricCoordinate(prim, glm::vec2(point.x, point.y));
depthbuffer[pixelIndex].color = prim.c0 * bc.x + prim.c1 * bc.y + prim.c2 * bc.z;
depthbuffer[pixelIndex].normal = glm::normalize(prim.n0 * bc.x + prim.n1 * bc.y + prim.n2 * bc.z);
depthbuffer[pixelIndex].position = prim.p0 * bc.x + prim.p1 * bc.y + prim.p2 * bc.z;
depthbuffer[pixelIndex].z = point.z;
}
atomicExch(&lockbuffer[pixelIndex], 0);
wait = false;
}
}
}
}
}
}
}
// Scanline-parallel rasterizer for the stencil pass: identical scan
// structure to rasterizationKernel, but for each depth-passing pixel it only
// writes the stencil value and depth -- no shading attributes. One thread
// per screen row; per-pixel spin lock guards the depth-test/write pair.
__global__ void rasterizationStencilKernel(vertTriangle* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, int stencil, int* lockbuffer){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < resolution.y){
for (int i=0; i<primitivesCount; ++i) {
vertTriangle prim = primitives[i];
// Signed vertical distance of each vertex from this scanline.
float dy0 = prim.pt0.y - index;
float dy1 = prim.pt1.y - index;
float dy2 = prim.pt2.y - index;
int onPositiveSide = (int)(dy0>=-FLT_EPSILON) + (int)(dy1>=-FLT_EPSILON) + (int)(dy2>=-FLT_EPSILON);
int onNegativeSide = (int)(dy0<=FLT_EPSILON) + (int)(dy1<=FLT_EPSILON) + (int)(dy2<=FLT_EPSILON);
if (onPositiveSide != 3 && onNegativeSide != 3) { // the primitive intersects the scanline
glm::vec3 intersection1, intersection2;
if (onPositiveSide == 2 && onNegativeSide == 2) { // one vertex is on the scanline // doesn't really happen due to the floating point error
if (dy0 == 0) {
intersection1 = prim.pt0;
intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, index);
}
else if (dy1 == 0) {
intersection1 = prim.pt1;
intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, index);
}
else { // dy2 == 0
intersection1 = prim.pt2;
intersection2 = getScanlineIntersection(prim.pt1, prim.pt0, index);
}
}
// One vertex below the scanline, two above.
else if (onPositiveSide == 2) {
if (dy0 < 0) {
intersection1 = getScanlineIntersection(prim.pt0, prim.pt1, index);
intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, index);
}
else if (dy1 < 0) {
intersection1 = getScanlineIntersection(prim.pt1, prim.pt0, index);
intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, index);
}
else { // dy2 < 0
intersection1 = getScanlineIntersection(prim.pt2, prim.pt0, index);
intersection2 = getScanlineIntersection(prim.pt2, prim.pt1, index);
}
}
// One vertex above the scanline, two below.
else { // onNegativeSide == 2
if (dy0 > 0) {
intersection1 = getScanlineIntersection(prim.pt0, prim.pt1, index);
intersection2 = getScanlineIntersection(prim.pt0, prim.pt2, index);
}
else if (dy1 > 0) {
intersection1 = getScanlineIntersection(prim.pt1, prim.pt0, index);
intersection2 = getScanlineIntersection(prim.pt1, prim.pt2, index);
}
else { // dy2 > 0
intersection1 = getScanlineIntersection(prim.pt2, prim.pt0, index);
intersection2 = getScanlineIntersection(prim.pt2, prim.pt1, index);
}
}
// make sure intersection1's x value is less than intersection2's
if (intersection2.x < intersection1.x) {
glm::vec3 temp = intersection1;
intersection1 = intersection2;
intersection2 = temp;
}
// Horizontal pixel span, clamped to the screen.
int start = min((int)(resolution.x)-1,max(0, (int)floor(intersection1.x)));
int end = min((int)(resolution.x-1),max(0, (int)floor(intersection2.x)));
for (int j=start; j<=end; ++j) {
// The framebuffer is flipped in both axes relative to window coords.
int pixelIndex = (resolution.x-1-j) + (resolution.y-1-index) * resolution.x;
float t = (j-intersection1.x)/(intersection2.x-intersection1.x);
glm::vec3 point = t*intersection2 + (1-t)*intersection1;
// Spin on the per-pixel lock, then depth-test and tag with the stencil.
bool wait = true;
while (wait) {
if (0 == atomicExch(&lockbuffer[pixelIndex], 1)) {
if (point.z > depthbuffer[pixelIndex].z) {
depthbuffer[pixelIndex].s = stencil;
depthbuffer[pixelIndex].z = point.z;
}
atomicExch(&lockbuffer[pixelIndex], 0);
wait = false;
}
}
}
}
}
}
}
// Fragment shader: per-pixel diffuse + specular lighting over all lights.
// The specular coefficient ks is 0.3 for every material except the one whose
// color is within 0.1 of (245,222,179)/255, which stays matte. Pixels never
// touched by rasterization (z == -FLT_MAX) are painted the background color.
__global__ void fragmentShadeKernel(fragment* depthbuffer, glm::vec2 resolution, glm::vec3 eye, light* lights, int lightsize){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Fix: strict '<' bounds check -- the old '<=' read/wrote one element past
  // the end of the depth buffer at the right/bottom edges.
  if(x<resolution.x && y<resolution.y){
    glm::vec3 diffuseColor(0);
    glm::vec3 specularColor(0);
    float ks = 0;
    if (glm::distance(depthbuffer[index].color, glm::vec3(245.0/255.0, 222.0/255.0, 179.0/255.0)) > 0.1) {
      ks = 0.3;
    }
    glm::vec3 norm = depthbuffer[index].normal;
    glm::vec3 pos = depthbuffer[index].position;
    for (int i=0; i<lightsize; ++i) {
      //diffuse component
      glm::vec3 lightDir = glm::normalize(glm::vec3(lights[i].pos - pos));
      float diffuseTerm = glm::clamp(glm::dot(lightDir, norm), 0.0f, 1.0f);
      diffuseColor += diffuseTerm * lights[i].color;
      //specular component
      if (ks > 0.0001) {
        glm::vec3 LR; // reflected light direction
        if (glm::length(lightDir - norm) < 0.0001) {
          // Light direction coincides with the normal: reflection is itself.
          LR = norm;
        }
        else if (abs(glm::dot(lightDir, norm)) < 0.0001) {
          // Grazing incidence: reflection points back along the light.
          LR = -lightDir;
        }
        else {
          LR = glm::normalize(-lightDir - 2.0f * glm::dot(-lightDir, norm) * norm);
        }
        float specularTerm = min(1.0f, pow(max(0.0f, glm::dot(LR, glm::normalize(eye - pos))), 20.0f));
        specularColor += specularTerm * glm::vec3(1.0f);
      }
    }
    depthbuffer[index].color = diffuseColor * depthbuffer[index].color + ks * specularColor;
    //set background color for pixels no triangle ever covered
    if (depthbuffer[index].z == -FLT_MAX) {
      depthbuffer[index].color = glm::vec3(0.6, 0.6, 0.6);
    }
  }
}
//Writes fragment colors to the framebuffer, one thread per pixel.
__global__ void render(glm::vec2 resolution, fragment* depthbuffer, glm::vec3* framebuffer){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Fix: strict '<' bounds check -- the old '<=' read/wrote one element past
  // the end of the buffers at the right/bottom edges.
  if(x<resolution.x && y<resolution.y){
    framebuffer[index] = depthbuffer[index].color;
  }
}
// Build the hard-coded scene light list on the host and upload it to the
// device-global 'lights' array ('lightsize' entries).
void initLights() {
  // Host-side staging copy of the light list.
  light* hostLights = new light[lightsize];
  hostLights[0] = light(glm::vec3(0.8, 0.8, 0.8), glm::vec3(4, 4, 4));
  hostLights[1] = light(glm::vec3(0.4, 0.4, 0.4), glm::vec3(-4, 4, 4));
  hostLights[2] = light(glm::vec3(0.3, 0.3, 0.3), glm::vec3(0, 0, -5));
  hostLights[3] = light(glm::vec3(0.3, 0.3, 0.3), glm::vec3(0, -5, 0));
  checkCUDAError("Kernel failed!");
  cudaMalloc((void**)&lights, lightsize*sizeof(light));
  checkCUDAError("Kernel failed!");
  cudaMemcpy(lights, hostLights, lightsize*sizeof(light), cudaMemcpyHostToDevice);
  checkCUDAError("Kernel failed!");
  delete [] hostLights;
}
// Allocate the screen-sized device buffers (color framebuffer, fragment
// depth buffer, per-pixel lock buffer) and initialize them via clearBuffers.
void initBuffers(glm::vec2 resolution) {
  int pixelCount = (int)resolution.x*(int)resolution.y;
  framebuffer = NULL;
  cudaMalloc((void**)&framebuffer, pixelCount*sizeof(glm::vec3));
  depthbuffer = NULL;
  cudaMalloc((void**)&depthbuffer, pixelCount*sizeof(fragment));
  lockbuffer = NULL;
  cudaMalloc((void**)&lockbuffer, pixelCount*sizeof(int));
  clearBuffers(resolution);
}
// Reset all screen-sized buffers: black framebuffer, "empty" fragments
// (background depth, stencil 0), and unlocked per-pixel locks.
void clearBuffers(glm::vec2 resolution) {
  // 8x8 pixel tiles covering the full screen.
  const int tile = 8;
  dim3 blockDims(tile, tile);
  dim3 gridDims((int)ceil(float(resolution.x)/float(tile)), (int)ceil(float(resolution.y)/float(tile)));
  // Black out the framebuffer.
  clearImage<<<gridDims, blockDims>>>(resolution, framebuffer, glm::vec3(0,0,0));
  cudaDeviceSynchronize();
  // Reset every fragment to the empty background state.
  fragment empty;
  empty.color = glm::vec3(0,0,0);
  empty.normal = glm::vec3(0,0,0);
  empty.position = glm::vec3(0,0,0);
  empty.z = -FLT_MAX;
  empty.s = 0;
  clearDepthBuffer<<<gridDims, blockDims>>>(resolution, depthbuffer, empty);
  cudaDeviceSynchronize();
  // Unlock every pixel.
  clearLockBuffer<<<gridDims, blockDims>>>(resolution, lockbuffer);
  cudaDeviceSynchronize();
}
// Rasterize portal geometry into the stencil channel of the depth buffer.
// Transforms 'vbo' by 'rotation' and the camera derived from eye/center,
// assembles position-only triangles, and scanline-rasterizes them, tagging
// every covered, depth-passing pixel with 'stencil'.
void drawToStencilBuffer(glm::vec2 resolution, glm::mat4 rotation, glm::vec3 eye, glm::vec3 center, float* vbo, int vbosize, int* ibo, int ibosize, int stencil) {
  // set up crucial magic
  int tileSize = 8;
  dim3 threadsPerBlock(tileSize, tileSize);
  dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize)));
  // Per-call scratch: one vertTriangle per ibo triple, plus the uploaded
  // index and vertex buffers (the device_* globals are re-pointed here).
  vertTriangle* stencilPrimitives = NULL;
  cudaMalloc((void**)&stencilPrimitives, (ibosize/3)*sizeof(vertTriangle));
  device_ibo = NULL;
  cudaMalloc((void**)&device_ibo, ibosize*sizeof(int));
  cudaMemcpy( device_ibo, ibo, ibosize*sizeof(int), cudaMemcpyHostToDevice);
  device_vbo = NULL;
  cudaMalloc((void**)&device_vbo, vbosize*sizeof(float));
  cudaMemcpy( device_vbo, vbo, vbosize*sizeof(float), cudaMemcpyHostToDevice);
  //------------------------------
  //compute the camera matrix
  //------------------------------
  glm::mat4 perspMatrix = glm::perspective(fovy, resolution.x/resolution.y, zNear, zFar);
  glm::mat4 lookatMatrix = glm::lookAt(eye, center, up);
  glm::mat4 cameraMatrix = perspMatrix * lookatMatrix;
  //------------------------------
  //vertex shader
  //------------------------------
  tileSize = 64;
  int primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize));
  transformVertices<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, rotation);
  vertexShadeKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, cameraMatrix, resolution);
  cudaDeviceSynchronize();
  //------------------------------
  //update stencil primitives
  //------------------------------
  primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize));
  stencilPrimitiveKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, device_ibo, ibosize, stencilPrimitives);
  cudaDeviceSynchronize();
  //------------------------------
  //rasterization (one thread per scanline)
  //------------------------------
  int scanlineBlocks = ceil(((float)resolution.y)/((float)tileSize));
  rasterizationStencilKernel<<<scanlineBlocks, tileSize>>>(stencilPrimitives, ibosize/3, depthbuffer, resolution, stencil, lockbuffer);
  cudaDeviceSynchronize();
  cudaFree(stencilPrimitives);
  // Fix: release the per-call vertex/index uploads. They previously stayed
  // allocated here and leaked once the next caller re-pointed the globals.
  // NULL them out so a later kernelCleanup() performs a harmless no-op free.
  cudaFree(device_ibo);
  device_ibo = NULL;
  cudaFree(device_vbo);
  device_vbo = NULL;
}
// Clear the depth buffer based on stencil test
// Clear the depth buffer based on stencil test
// Resets every depthbuffer pixel whose stencil matches `stencil` to a blank
// fragment (black color, zero normal/position, z = -FLT_MAX as the "farthest"
// depth sentinel) while keeping the stencil value itself, then re-opens the
// per-pixel locks so the next rasterization pass can write those pixels.
void clearOnStencil(glm::vec2 resolution, int stencil) {
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize)));
// Template fragment copied into each matching pixel.
fragment frag;
frag.color = glm::vec3(0,0,0);
frag.normal = glm::vec3(0,0,0);
frag.position = glm::vec3(0,0,0);
frag.z = -FLT_MAX;
frag.s = stencil;
clearDepthBufferOnStencil<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer, frag);
cudaDeviceSynchronize();
clearLockBuffer<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, lockbuffer);
cudaDeviceSynchronize();
}
// Sets result[0] = true if any depthbuffer pixel carries the given stencil
// value. Launched with a 2D grid covering the full resolution; result[0] must
// be initialized to false by the caller before launch. All matching threads
// write the same value, so the unsynchronized store is benign.
__global__ void stencilTestKernel(bool* result, glm::vec2 resolution, fragment* depthbuffer, int stencil) {
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    int index = x + (y * resolution.x);
    // Fixed: the guard used <=, which let x == resolution.x (and
    // y == resolution.y) through and read one element past each row /
    // past the end of the buffer.
    if (x < resolution.x && y < resolution.y) {
        if (depthbuffer[index].s == stencil) {
            result[0] = true;
        }
    }
}
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
// Wrapper for the __global__ calls that sets up the kernel launches and does a
// ton of memory management. Runs the full rasterization pipeline for one draw
// call: optional stencil pre-test, buffer upload, vertex/normal transform,
// primitive assembly, vertex shading, and rasterization into the module-level
// depthbuffer. Frees its per-call device buffers via kernelCleanup().
// stencilTest  : when true, skip the draw entirely unless some depthbuffer
//                pixel already carries `stencil`
// perPrimitive : choose primitive-parallel vs. scanline-parallel rasterization
void cudaRasterizeCore(glm::vec2 resolution, glm::mat4 rotation, glm::vec3 eye, glm::vec3 center,
float* vbo, int vbosize, float* cbo, int cbosize, float* nbo,
int nbosize, int* ibo, int ibosize, bool stencilTest, bool perPrimitive, int stencil){
//------------------------------
//test stencil values, if there's no buffer with desired stencil value, return
//------------------------------
if (stencilTest) {
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize)));
// Fixed: the device flag was never cudaFree'd and the result was read into a
// leaked new[]'d array; use a stack bool and release the device buffer on
// both the early-return and fall-through paths.
bool cpuresult = false;
bool* result;
cudaMalloc((void**)&result, sizeof(bool));
cudaMemcpy(result, &cpuresult, sizeof(bool), cudaMemcpyHostToDevice);
stencilTestKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(result, resolution, depthbuffer, stencil);
// This blocking copy also synchronizes with the kernel launch above.
cudaMemcpy(&cpuresult, result, sizeof(bool), cudaMemcpyDeviceToHost);
cudaFree(result);
if (!cpuresult) {
return;
}
}
//------------------------------
//memory stuff
//------------------------------
primitives = NULL;
cudaMalloc((void**)&primitives, (ibosize/3)*sizeof(triangle));
device_ibo = NULL;
cudaMalloc((void**)&device_ibo, ibosize*sizeof(int));
cudaMemcpy( device_ibo, ibo, ibosize*sizeof(int), cudaMemcpyHostToDevice);
device_vbo = NULL;
cudaMalloc((void**)&device_vbo, vbosize*sizeof(float));
cudaMemcpy( device_vbo, vbo, vbosize*sizeof(float), cudaMemcpyHostToDevice);
device_cbo = NULL;
cudaMalloc((void**)&device_cbo, cbosize*sizeof(float));
cudaMemcpy( device_cbo, cbo, cbosize*sizeof(float), cudaMemcpyHostToDevice);
device_nbo = NULL;
cudaMalloc((void**)&device_nbo, nbosize*sizeof(float));
cudaMemcpy( device_nbo, nbo, nbosize*sizeof(float), cudaMemcpyHostToDevice);
int tileSize = 64;
//------------------------------
//compute the camera matrix
//------------------------------
glm::mat4 perspMatrix = glm::perspective(fovy, resolution.x/resolution.y, zNear, zFar);
glm::mat4 lookatMatrix = glm::lookAt(eye, center, up);
glm::mat4 cameraMatrix = perspMatrix * lookatMatrix;
//------------------------------
//transform vertices and normals
//------------------------------
int primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize));
transformVertices<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, rotation);
transformNormals<<<primitiveBlocks, tileSize>>>(device_nbo, nbosize, rotation);
//------------------------------
//primitive assembly
//------------------------------
primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize));
primitiveAssemblyKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, device_cbo, cbosize, device_nbo, nbosize, device_ibo, ibosize, primitives);
cudaDeviceSynchronize();
//------------------------------
//vertex shader
//------------------------------
primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize));
vertexShadeKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, cameraMatrix, resolution);
cudaDeviceSynchronize();
//------------------------------
//update primitives with the post-vertex-shader positions
//------------------------------
primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize));
updatePrimitiveKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, device_ibo, ibosize, primitives);
cudaDeviceSynchronize();
//------------------------------
//rasterization
//------------------------------
if (perPrimitive) {
//parallel by primitive
rasterizationPerPrimKernel<<<primitiveBlocks, tileSize>>>(primitives, ibosize/3, depthbuffer, resolution, stencilTest, stencil, lockbuffer);
}
else {
//parallel by scanline: one thread per image row
int scanlineBlocks = ceil(((float)resolution.y)/((float)tileSize));
rasterizationKernel<<<scanlineBlocks, tileSize>>>(primitives, ibosize/3, depthbuffer, resolution, stencilTest, stencil, lockbuffer);
}
cudaDeviceSynchronize();
kernelCleanup();
checkCUDAError("Kernel failed!");
}
//fragment shader and render
//fragment shader and render
// Shades the fragments currently in the module-level depthbuffer (Phong-style
// inputs: eye position plus the global lights array) and copies the result
// through the framebuffer into the OpenGL pixel buffer object.
// PBOpos : mapped device pointer into the GL PBO that receives the final image
void renderToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3 eye) {
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize)));
//------------------------------
//fragment shader
//------------------------------
fragmentShadeKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(depthbuffer, resolution, eye, lights, lightsize);
cudaDeviceSynchronize();
//------------------------------
//write fragments to framebuffer
//------------------------------
// render and sendImageToPBO run back-to-back on the default stream, so they
// execute in order without an explicit sync between them.
render<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer, framebuffer);
sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, resolution, framebuffer);
cudaDeviceSynchronize();
}
// Release the per-draw device allocations made by cudaRasterizeCore /
// drawToStencilBuffer. The frees are independent, so order is irrelevant.
void kernelCleanup(){
	cudaFree( device_ibo );
	cudaFree( device_nbo );
	cudaFree( device_cbo );
	cudaFree( device_vbo );
	cudaFree( primitives );
}
// Release the long-lived per-pixel buffers (frame, depth, and lock) that
// persist across draw calls.
void freeBuffers() {
	cudaFree( lockbuffer );
	cudaFree( depthbuffer );
	cudaFree( framebuffer );
}
|
26b63463e5e799216ce8ab2917777771ec7bc238.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include "device_launch_parameters.h"
#include <cmath>
#include <vector>
#include <random>
// Abort the process with a diagnostic (error string, file, line) whenever a
// HIP API call reports failure; no-op on hipSuccess. Used via HANDLE_ERROR.
static void HandleError(hipError_t err, const char* file, int line) {
    if (err == hipSuccess) {
        return;
    }
    std::cerr << hipGetErrorString(err)
              << " in " << std::string(file)
              << " at line " << line << "\n";
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
std::random_device rd; //
std::mt19937 mersenne(rd());//
int a = 15; //
int n = 150; //
float eps = 1e-3; //
float wc = 0.99; //
float k = 50.0; //
float dt = 0.01; //
float g = 20; //
float qi = 1; //
float qc = 30; //
float qb = 50; //
float vb = 30; //
float shift_z = 0.75; //
float radius = 1;
std::uniform_int_distribution<> dist_x(-a + radius, a - radius); // x
std::uniform_int_distribution<> dist_y(-a + radius, a - radius); // y
std::uniform_int_distribution<> dist_z(0 + radius, 2 * a - radius); // z
std::vector<float> pq;
std::vector<float> px; //x coords
std::vector<float> pxx; //temp buffers
std::vector<float> py; //y coords
std::vector<float> pyy; //temp buffers
std::vector<float> pz; //z coords
std::vector<float> pzz; //temp buffers
float* cpx = nullptr; //cuda buffers
float* cpy = nullptr; //cuda buffers
float* cpz = nullptr; //cuda buffers
float* cpq = nullptr; //cuda buffers
std::vector<float> pvx; //vx speed
std::vector<float> pvy; //vy speed
std::vector<float> pvz; //vz speed
int w = 1024; //width resolution
int h = 648; //height resolution
float xc = -1.5; // x
float yc = -1.5; // y
float zc = 1.0; // z
float dx = 0.0;
float dy = 0.0;
float dz = 0.0;
float xb = -1000000; // x
float yb = -1000000; // y
float zb = 1000000; // z
float vxb = -1.5; // vx
float vyb = -1.5; // vy
float vzb = 1.0; // vz
float yaw = 0.0; //
float pitch = 0.0; //
float dyaw = 0.0;
float dpitch = 0.0;
#define M_PI 3.14f
typedef unsigned char uchar;
float speed = 0.2;
const int np = 100; //
GLUquadric* quadric; // quadric - 2- , .. , , , .
cudaGraphicsResource* res;
GLuint textures[2]; //
GLuint vbo; //
// Generates the floor texture on the GPU: for each texel, sums the
// 1/(eps + r^2)-weighted charges of all particles plus the bullet, scales by
// k, and writes the clamped value to the R, G and B channels (grayscale
// intensity map sampled on the plane z = sz).
// data        : n*n RGBA output texels
// n           : texture side length in texels; texels map to [-a, a]^2
// pn          : particle count in px/py/pz/pq
// xb/yb/zb/qb : bullet position and charge
// Uses a grid-stride loop (i += blockDim*gridDim), so any launch
// configuration covers all n*n texels.
__global__ void kernel(uchar4* data, int n,
float a, float k, float eps, float sz, int pn,
float* px, float* py, float *pz, float* pq,
float xb, float yb, float zb, float qb ) { // texture generation on the GPU
int offset = blockDim.x * gridDim.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int size = n * n;
float x;
float y;
float e = 0.0;
int p;
while (i < size) {
// Map the flat texel index to world coordinates in [-a, a].
x = a * (2.0 * static_cast<float>(i % n)
/ static_cast<float>(n) - 1);
y = a * (2.0 * static_cast<float>(i / n)
/ static_cast<float>(n) - 1);
// Bullet contribution; eps softens the singularity at zero distance.
e = qb / ( eps +
powf(xb - x, 2) +
powf(yb - y, 2) +
powf(zb - sz, 2)
);
// Accumulate every particle's contribution.
for (p = 0; p < pn; ++p) {
e += pq[p] / (eps +
powf(px[p] - x, 2) +
powf(py[p] - y, 2) +
powf(pz[p] - sz, 2)
);
}
e *= k;
// Clamp to the displayable byte range; alpha is written as 1 (of 255).
data[i].x = min(e, 255.0);
data[i].y = min(e, 255.0);
data[i].z = min(e, 255.0);
data[i].w = 1;
i += offset;
}
}
void display() {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// " "
gluPerspective(90.0f, (GLfloat)w / (GLfloat)h, 0.1f, 100.0f);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
//
gluLookAt(xc, yc, zc,
xc + cos(yaw) * cos(pitch),
yc + sin(yaw) * cos(pitch),
zc + sin(pitch),
0.0f, 0.0f, 1.0f);
glBindTexture(GL_TEXTURE_2D, textures[0]); //
static float angle = 0.0;
for (size_t i = 0; i < n; ++i) {
glPushMatrix();
glTranslatef(px[i], py[i], pz[i]); //
glRotatef(angle, 0.0, 0.0, 1.0); //
gluSphere(quadric, 1.0f, 32, 32);
glPopMatrix();
}
angle += 0.15;
glPushMatrix();
glTranslatef(xb, yb, zb);
gluSphere(quadric, 1.0f, 32, 32);
glPopMatrix();
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, vbo); // vbo
glBindTexture(GL_TEXTURE_2D, textures[1]); //
glTexImage2D(GL_TEXTURE_2D, 0, 3, (GLsizei)np, (GLsizei)np, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); //
// NULL glTexImage2D
glBegin(GL_QUADS); //
glTexCoord2f(0.0, 0.0);
glVertex3f(-a, -a, 0.0);
glTexCoord2f(1.0, 0.0);
glVertex3f(a, -a, 0.0);
glTexCoord2f(1.0, 1.0);
glVertex3f(a, a, 0.0);
glTexCoord2f(0.0, 1.0);
glVertex3f(-a, a, 0.0);
glEnd();
glBindTexture(GL_TEXTURE_2D, 0); //
//
glLineWidth(2); //
glColor3f(0.5f, 0.5f, 0.5f); //
glBegin(GL_LINES); //
glVertex3f(-a, -a, 0.0);
glVertex3f(-a, -a, 2.0 * a);
glVertex3f(a, -a, 0.0);
glVertex3f(a, -a, 2.0 * a);
glVertex3f(a, a, 0.0);
glVertex3f(a, a, 2.0 * a);
glVertex3f(-a, a, 0.0);
glVertex3f(-a, a, 2.0 * a);
glEnd();
glBegin(GL_LINE_LOOP); //
glVertex3f(-a, -a, 0.0);
glVertex3f(a, -a, 0.0);
glVertex3f(a, a, 0.0);
glVertex3f(-a, a, 0.0);
glEnd();
glBegin(GL_LINE_LOOP);
glVertex3f(-a, -a, 2.0 * a);
glVertex3f(a, -a, 2.0 * a);
glVertex3f(a, a, 2.0 * a);
glVertex3f(-a, a, 2.0 * a);
glEnd();
glColor3f(1.0f, 1.0f, 1.0f);
glutSwapBuffers();
}
void update() {
for (size_t i = 0; i < n; ++i) {
float dvx = 0.0;
float dvy = 0.0;
float dvz = 0.0;
float l = 0.0;
//
for (size_t j = 0; j < n; ++j) {
l = ::pow(
::pow(px[i] - px[j], 2) +
::pow(py[i] - py[j], 2) +
::pow(pz[i] - pz[j], 2),
1.5) + eps;
dvx += pq[j] * (px[i] - px[j]) / l;
dvy += pq[j] * (py[i] - py[j]) / l;
dvz += pq[j] * (pz[i] - pz[j]) / l;
}
//
//
l = eps + ::pow(std::abs(px[i] - a), 3);
dvx += pq[i] * (px[i] - a) / l;
//
l = eps + ::pow(std::abs(py[i] - a), 3);
dvy += pq[i] * (py[i] - a) / l;
//
l = eps + ::pow(std::abs(pz[i] - 2 * a), 3);
dvz += pq[i] * (pz[i] - 2 * a) / l;
//
l = eps + ::pow(std::abs(px[i] + a), 3);
dvx += pq[i] * (px[i] + a) / l;
//
l = eps + ::pow(std::abs(py[i] + a), 3);
dvy += pq[i] * (py[i] + a) / l;
//
l = eps + ::pow(std::abs(pz[i]), 3);
dvz += pq[i] * (pz[i]) / l;
//
l = ::pow( ::pow(px[i] - xc, 2) +
::pow(py[i] - yc, 2) +
::pow(pz[i] - zc, 2),
1.5) + eps;
dvx += qc * (px[i] - xc) / l;
dvy += qc * (py[i] - yc) / l;
dvz += qc * (pz[i] - zc) / l;
//
l = ::pow( ::pow(px[i] - xb, 2) +
::pow(py[i] - yb, 2) +
::pow(pz[i] - zb, 2),
1.5) + eps;
dvx += qb * (px[i] - xb) / l;
dvy += qb * (py[i] - yb) / l;
dvz += qb * (pz[i] - zb) / l;
//
dvx = k * pq[i] * dvx * dt;
dvy = k * pq[i] * dvy * dt;
dvz = k * pq[i] * dvz * dt -g * dt;
pvx[i] = wc * pvx[i] + dvx;
pvy[i] = wc * pvy[i] + dvy;
pvz[i] = wc * pvz[i] + dvz;
//
if ((px[i] + pvx[i] * dt) >= a
|| (px[i] + pvx[i] * dt) <= -a) {
pvx[i] -= pvx[i];
}
if ((py[i] + pvy[i] * dt) >= a
|| (py[i] + pvy[i] * dt) <= -a) {
pvy[i] -= pvy[i];
}
if ((pz[i] + pvz[i] * dt) >= 2 * a
|| (pz[i] + pvz[i] * dt) <= 0) {
pvz[i] -= pvz[i];
}
//
pxx[i] = px[i] + pvx[i] * dt;
pyy[i] = py[i] + pvy[i] * dt;
pzz[i] = pz[i] + pvz[i] * dt;
}
std::swap(px, pxx);
std::swap(py, pyy);
std::swap(pz, pzz);
//
xb += vxb * dt;
yb += vyb * dt;
zb += vzb * dt;
float v = std::sqrt(dx * dx + dy * dy + dz * dz);
if (v > speed) { //
dx *= speed / v;
dy *= speed / v;
dz *= speed / v;
}
xc += dx; dx *= 0.95;
yc += dy; dy *= 0.95;
zc += dz; dz *= 0.95;
if (std::abs(dpitch) + fabs(dyaw) > 0.0001) { //
yaw += dyaw;
pitch += dpitch;
pitch = ::min(M_PI / 2.0f - 0.0001f, ::max(-M_PI / 2.0f + 0.0001f, pitch));
dyaw = dpitch = 0.0;
}
uchar4* dev_data;
size_t size;
hipGraphicsMapResources(1, &res, 0); // CUDA
hipGraphicsResourceGetMappedPointer((void**)&dev_data, &size, res); //
hipMemcpy(cpx, px.data(), n * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(cpy, py.data(), n * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(cpz, pz.data(), n * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(cpq, pq.data(), n * sizeof(float), hipMemcpyHostToDevice);
kernel <<<256, 128>> > (dev_data, np, a, k, eps, shift_z, n, cpx, cpy, cpz, cpq, xb, yb, zb, qb);
hipGraphicsUnmapResources(1, &res, 0); // OpenGL'
glutPostRedisplay(); //
}
// GLUT keyboard handler: WASD sets the camera motion deltas dx/dy/dz (consumed
// and damped each frame in update()); ESC releases all GL/HIP resources and
// exits the program.
void keys(unsigned char key, int x, int y) { // keyboard handling
switch (key) {
case 'w': // "W" — move forward along the view direction
dx = cos(yaw) * cos(pitch) * speed;
dy = sin(yaw) * cos(pitch) * speed;
dz = sin(pitch) * speed;
break;
case 's': // "S" — move backward
dx = -cos(yaw) * cos(pitch) * speed;
dy = -sin(yaw) * cos(pitch) * speed;
dz = -sin(pitch) * speed;
break;
case 'a': // "A" — strafe left (perpendicular to yaw in the xy plane)
dx = -sin(yaw) * speed;
dy = cos(yaw) * speed;
break;
case 'd': // "D" — strafe right
dx = sin(yaw) * speed;
dy = -cos(yaw) * speed;
break;
case 27: // "ESC" — clean up and exit
hipGraphicsUnregisterResource(res);
glDeleteTextures(2, textures);
glDeleteBuffers(1, &vbo);
gluDeleteQuadric(quadric);
HANDLE_ERROR(hipFree(cpx));
HANDLE_ERROR(hipFree(cpy));
HANDLE_ERROR(hipFree(cpz));
HANDLE_ERROR(hipFree(cpq));
exit(0);
break;
}
}
void mouse(int x, int y) {
static int x_prev = w / 2;
static int y_prev = h / 2;
float dx = 0.005 * (x - x_prev);
float dy = 0.005 * (y - y_prev);
dyaw -= dx;
dpitch -= dy;
x_prev = x;
y_prev = y;
// ,
if ((x < 20) || (y < 20) || (x > w - 20) || (y > h - 20)) {
glutWarpPointer(w / 2, h / 2);
x_prev = w / 2;
y_prev = h / 2;
}
}
// GLUT mouse-button handler: on left click, fire the bullet from just in
// front of the camera along the current view direction with speed vb.
void mouseClicks(int button, int state, int x, int y) {
if (button == GLUT_LEFT_BUTTON && GLUT_DOWN == state) {
// Velocity along the view direction derived from yaw/pitch.
vxb = vb * cos(yaw) * cos(pitch);
vyb = vb * sin(yaw) * cos(pitch);
vzb = vb * sin(pitch);
// Spawn the bullet 3 time steps ahead of the camera so it doesn't
// immediately interact with the camera's own charge.
xb = xc + vxb * 3 * dt;
yb = yc + vyb * 3 * dt;
zb = zc + vzb * 3 * dt;
}
}
// GLUT reshape handler: record the new window size and reset the viewport.
void reshape(int w_new, int h_new) {
w = w_new;
h = h_new;
glViewport(0, 0, w, h); // reset the output area to the full window
glMatrixMode(GL_PROJECTION); // select the projection matrix
glLoadIdentity(); // reset it; display() rebuilds the perspective every frame
}
int main(int argc, char** argv) {
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); //
glutInitWindowSize(w, h);
glutCreateWindow("OpenGL");
//
glutIdleFunc(update);
glutDisplayFunc(display);
glutKeyboardFunc(keys);
glutMouseFunc(mouseClicks);
glutPassiveMotionFunc(mouse);
glutReshapeFunc(reshape);
glutSetCursor(GLUT_CURSOR_NONE); //
px.resize(n);
py.resize(n);
pz.resize(n);
pvx.resize(n);
pvy.resize(n);
pvz.resize(n);
pxx.resize(n);
pyy.resize(n);
pzz.resize(n);
pq.resize(n);
for (size_t i = 0; i < n; ++i) {
pq[i] = qi;
pvx[i] = 0;
pvy[i] = 0;
pvz[i] = 0;
px[i] = dist_x(mersenne);
py[i] = dist_y(mersenne);
pz[i] = dist_z(mersenne);
}
HANDLE_ERROR(hipMalloc(reinterpret_cast<void**>(&cpx), n * sizeof(float)));
HANDLE_ERROR(hipMalloc(reinterpret_cast<void**>(&cpy), n * sizeof(float)));
HANDLE_ERROR(hipMalloc(reinterpret_cast<void**>(&cpz), n * sizeof(float)));
HANDLE_ERROR(hipMalloc(reinterpret_cast<void**>(&cpq), n * sizeof(float)));
int wt , ht;
std::fstream in("in.data", std::ios::binary | std::ios::in);
if (!in.is_open()) {
std::cerr << "Could not open file\n";
std::cerr << "Make sure in.data exists\n";
}
in.read(reinterpret_cast<char*>(&wt), sizeof(int));
in.read(reinterpret_cast<char*>(&ht), sizeof(int));
std::cout << wt << " " << ht << std::endl;
uchar* data = reinterpret_cast<uchar*>(operator new(wt * ht * sizeof(int)));
in.read(reinterpret_cast<char*>(data), wt * ht * sizeof(int));
in.close();
glGenTextures(2, textures);
glBindTexture(GL_TEXTURE_2D, textures[0]);
glTexImage2D(GL_TEXTURE_2D, 0, 3, (GLsizei)wt, (GLsizei)ht, 0, GL_RGBA, GL_UNSIGNED_BYTE, (void*)data);
// , ,
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); //GL_LINEAR); //
//
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); //GL_LINEAR);
//operator delete(data);
quadric = gluNewQuadric();
gluQuadricTexture(quadric, GL_TRUE);
glBindTexture(GL_TEXTURE_2D, textures[1]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); //
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); //
glEnable(GL_TEXTURE_2D); //
glShadeModel(GL_SMOOTH); //
glClearColor(0.0f, 0.0f, 0.0f, 1.0f); //
glClearDepth(1.0f); //
glDepthFunc(GL_LEQUAL); // .
glEnable(GL_DEPTH_TEST); //
glEnable(GL_CULL_FACE); // ,
glewInit();
glGenBuffers(1, &vbo); //
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, vbo); //
glBufferData(GL_PIXEL_UNPACK_BUFFER, np * np * sizeof(uchar4), NULL, GL_DYNAMIC_DRAW); //
hipGraphicsGLRegisterBuffer(&res, vbo, hipGraphicsMapFlagsWriteDiscard); // CUDA
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); //
glutMainLoop();
}
| 26b63463e5e799216ce8ab2917777771ec7bc238.cu |
#include <iostream>
#include <fstream>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include "device_launch_parameters.h"
#include <cmath>
#include <vector>
#include <random>
// Abort the process with a diagnostic (error string, file, line) whenever a
// CUDA API call reports failure; no-op on cudaSuccess. Used via HANDLE_ERROR.
static void HandleError(cudaError_t err, const char* file, int line) {
    if (err == cudaSuccess) {
        return;
    }
    std::cerr << cudaGetErrorString(err)
              << " in " << std::string(file)
              << " at line " << line << "\n";
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
std::random_device rd; // генератор случайных чисел
std::mt19937 mersenne(rd());// алгоритм мерсенна
int a = 15; // половина стороны куба
int n = 150; // количество частиц
float eps = 1e-3; // избежать деление на ноль
float wc = 0.99; // коэффициент замедления
float k = 50.0; // коэффициент пропорциональности
float dt = 0.01; // шаг интегрирования
float g = 20; // ускорение свободного падения
float qi = 1; // заряды частиц
float qc = 30; // заряд камеры
float qb = 50; // заряд пули
float vb = 30; // скорость пули
float shift_z = 0.75; // сдвиг карты напряженности
float radius = 1;
std::uniform_int_distribution<> dist_x(-a + radius, a - radius); // распределение частиц по x
std::uniform_int_distribution<> dist_y(-a + radius, a - radius); // распределение частиц по y
std::uniform_int_distribution<> dist_z(0 + radius, 2 * a - radius); // распределение частиц по z
std::vector<float> pq;
std::vector<float> px; //x coords
std::vector<float> pxx; //temp buffers
std::vector<float> py; //y coords
std::vector<float> pyy; //temp buffers
std::vector<float> pz; //z coords
std::vector<float> pzz; //temp buffers
float* cpx = nullptr; //cuda buffers
float* cpy = nullptr; //cuda buffers
float* cpz = nullptr; //cuda buffers
float* cpq = nullptr; //cuda buffers
std::vector<float> pvx; //vx speed
std::vector<float> pvy; //vy speed
std::vector<float> pvz; //vz speed
int w = 1024; //width resolution
int h = 648; //height resolution
float xc = -1.5; // x камеры
float yc = -1.5; // y камеры
float zc = 1.0; // z камеры
float dx = 0.0;
float dy = 0.0;
float dz = 0.0;
float xb = -1000000; // x пули
float yb = -1000000; // y пули
float zb = 1000000; // z пули
float vxb = -1.5; // vx пули
float vyb = -1.5; // vy пули
float vzb = 1.0; // vz пули
float yaw = 0.0; // рыскание
float pitch = 0.0; // тангаж
float dyaw = 0.0;
float dpitch = 0.0;
#define M_PI 3.14f
typedef unsigned char uchar;
float speed = 0.2;
const int np = 100; // Размер текстуры пола
GLUquadric* quadric; // quadric объекты - это геометрические фигуры 2-го порядка, т.е. сфера, цилиндр, диск, конус.
cudaGraphicsResource* res;
GLuint textures[2]; // Массив из текстурных номеров
GLuint vbo; // Номер буфера
__global__ void kernel(uchar4* data, int n,
float a, float k, float eps, float sz, int pn,
float* px, float* py, float *pz, float* pq,
float xb, float yb, float zb, float qb ) { // Генерация текстуры пола на GPU
int offset = blockDim.x * gridDim.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int size = n * n;
float x;
float y;
float e = 0.0;
int p;
while (i < size) {
x = a * (2.0 * static_cast<float>(i % n)
/ static_cast<float>(n) - 1);
y = a * (2.0 * static_cast<float>(i / n)
/ static_cast<float>(n) - 1);
e = qb / ( eps +
powf(xb - x, 2) +
powf(yb - y, 2) +
powf(zb - sz, 2)
);
for (p = 0; p < pn; ++p) {
e += pq[p] / (eps +
powf(px[p] - x, 2) +
powf(py[p] - y, 2) +
powf(pz[p] - sz, 2)
);
}
e *= k;
data[i].x = min(e, 255.0);
data[i].y = min(e, 255.0);
data[i].z = min(e, 255.0);
data[i].w = 1;
i += offset;
}
}
void display() {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// Задаем "объектив камеры"
gluPerspective(90.0f, (GLfloat)w / (GLfloat)h, 0.1f, 100.0f);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// Задаем позицию и направление камеры
gluLookAt(xc, yc, zc,
xc + cos(yaw) * cos(pitch),
yc + sin(yaw) * cos(pitch),
zc + sin(pitch),
0.0f, 0.0f, 1.0f);
glBindTexture(GL_TEXTURE_2D, textures[0]); // Задаем текстуру
static float angle = 0.0;
for (size_t i = 0; i < n; ++i) {
glPushMatrix();
glTranslatef(px[i], py[i], pz[i]); // Задаем координаты центра сферы
glRotatef(angle, 0.0, 0.0, 1.0); // Поворачиваем сферу на угол
gluSphere(quadric, 1.0f, 32, 32);
glPopMatrix();
}
angle += 0.15;
glPushMatrix();
glTranslatef(xb, yb, zb);
gluSphere(quadric, 1.0f, 32, 32);
glPopMatrix();
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, vbo); // Делаем активным буфер с номером vbo
glBindTexture(GL_TEXTURE_2D, textures[1]); // Делаем активной вторую текстуру
glTexImage2D(GL_TEXTURE_2D, 0, 3, (GLsizei)np, (GLsizei)np, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); // Деактивируем буфер
// Последний параметр NULL в glTexImage2D говорит о том что данные для текстуры нужно брать из активного буфера
glBegin(GL_QUADS); // Рисуем пол
glTexCoord2f(0.0, 0.0);
glVertex3f(-a, -a, 0.0);
glTexCoord2f(1.0, 0.0);
glVertex3f(a, -a, 0.0);
glTexCoord2f(1.0, 1.0);
glVertex3f(a, a, 0.0);
glTexCoord2f(0.0, 1.0);
glVertex3f(-a, a, 0.0);
glEnd();
glBindTexture(GL_TEXTURE_2D, 0); // Деактивируем текстуру
// Отрисовка каркаса куба
glLineWidth(2); // Толщина линий
glColor3f(0.5f, 0.5f, 0.5f); // Цвет линий
glBegin(GL_LINES); // Последующие пары вершин будут задавать линии
glVertex3f(-a, -a, 0.0);
glVertex3f(-a, -a, 2.0 * a);
glVertex3f(a, -a, 0.0);
glVertex3f(a, -a, 2.0 * a);
glVertex3f(a, a, 0.0);
glVertex3f(a, a, 2.0 * a);
glVertex3f(-a, a, 0.0);
glVertex3f(-a, a, 2.0 * a);
glEnd();
glBegin(GL_LINE_LOOP); // Все последующие точки будут соеденены замкнутой линией
glVertex3f(-a, -a, 0.0);
glVertex3f(a, -a, 0.0);
glVertex3f(a, a, 0.0);
glVertex3f(-a, a, 0.0);
glEnd();
glBegin(GL_LINE_LOOP);
glVertex3f(-a, -a, 2.0 * a);
glVertex3f(a, -a, 2.0 * a);
glVertex3f(a, a, 2.0 * a);
glVertex3f(-a, a, 2.0 * a);
glEnd();
glColor3f(1.0f, 1.0f, 1.0f);
glutSwapBuffers();
}
void update() {
for (size_t i = 0; i < n; ++i) {
float dvx = 0.0;
float dvy = 0.0;
float dvz = 0.0;
float l = 0.0;
// Отталкивание от частиц
for (size_t j = 0; j < n; ++j) {
l = std::pow(
std::pow(px[i] - px[j], 2) +
std::pow(py[i] - py[j], 2) +
std::pow(pz[i] - pz[j], 2),
1.5) + eps;
dvx += pq[j] * (px[i] - px[j]) / l;
dvy += pq[j] * (py[i] - py[j]) / l;
dvz += pq[j] * (pz[i] - pz[j]) / l;
}
// Отталкивание от стен
// правая стена
l = eps + std::pow(std::abs(px[i] - a), 3);
dvx += pq[i] * (px[i] - a) / l;
// передняя стена
l = eps + std::pow(std::abs(py[i] - a), 3);
dvy += pq[i] * (py[i] - a) / l;
// потолок
l = eps + std::pow(std::abs(pz[i] - 2 * a), 3);
dvz += pq[i] * (pz[i] - 2 * a) / l;
// левая стена
l = eps + std::pow(std::abs(px[i] + a), 3);
dvx += pq[i] * (px[i] + a) / l;
// задняя стена
l = eps + std::pow(std::abs(py[i] + a), 3);
dvy += pq[i] * (py[i] + a) / l;
//пол
l = eps + std::pow(std::abs(pz[i]), 3);
dvz += pq[i] * (pz[i]) / l;
// Отталкивание от камеры
l = std::pow( std::pow(px[i] - xc, 2) +
std::pow(py[i] - yc, 2) +
std::pow(pz[i] - zc, 2),
1.5) + eps;
dvx += qc * (px[i] - xc) / l;
dvy += qc * (py[i] - yc) / l;
dvz += qc * (pz[i] - zc) / l;
// Отталкивание от пули
l = std::pow( std::pow(px[i] - xb, 2) +
std::pow(py[i] - yb, 2) +
std::pow(pz[i] - zb, 2),
1.5) + eps;
dvx += qb * (px[i] - xb) / l;
dvy += qb * (py[i] - yb) / l;
dvz += qb * (pz[i] - zb) / l;
// интегрируем
dvx = k * pq[i] * dvx * dt;
dvy = k * pq[i] * dvy * dt;
dvz = k * pq[i] * dvz * dt -g * dt;
pvx[i] = wc * pvx[i] + dvx;
pvy[i] = wc * pvy[i] + dvy;
pvz[i] = wc * pvz[i] + dvz;
// сохранение импульса при ударе
if ((px[i] + pvx[i] * dt) >= a
|| (px[i] + pvx[i] * dt) <= -a) {
pvx[i] -= pvx[i];
}
if ((py[i] + pvy[i] * dt) >= a
|| (py[i] + pvy[i] * dt) <= -a) {
pvy[i] -= pvy[i];
}
if ((pz[i] + pvz[i] * dt) >= 2 * a
|| (pz[i] + pvz[i] * dt) <= 0) {
pvz[i] -= pvz[i];
}
// изменяем коодинаты
pxx[i] = px[i] + pvx[i] * dt;
pyy[i] = py[i] + pvy[i] * dt;
pzz[i] = pz[i] + pvz[i] * dt;
}
std::swap(px, pxx);
std::swap(py, pyy);
std::swap(pz, pzz);
// движение пули
xb += vxb * dt;
yb += vyb * dt;
zb += vzb * dt;
float v = std::sqrt(dx * dx + dy * dy + dz * dz);
if (v > speed) { // Ограничение максимальной скорости
dx *= speed / v;
dy *= speed / v;
dz *= speed / v;
}
xc += dx; dx *= 0.95;
yc += dy; dy *= 0.95;
zc += dz; dz *= 0.95;
if (std::abs(dpitch) + fabs(dyaw) > 0.0001) { // Вращение камеры
yaw += dyaw;
pitch += dpitch;
pitch = std::min(M_PI / 2.0f - 0.0001f, std::max(-M_PI / 2.0f + 0.0001f, pitch));
dyaw = dpitch = 0.0;
}
uchar4* dev_data;
size_t size;
cudaGraphicsMapResources(1, &res, 0); // Делаем буфер доступным для CUDA
cudaGraphicsResourceGetMappedPointer((void**)&dev_data, &size, res); // Получаем указатель на память буфера
cudaMemcpy(cpx, px.data(), n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cpy, py.data(), n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cpz, pz.data(), n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cpq, pq.data(), n * sizeof(float), cudaMemcpyHostToDevice);
kernel <<<256, 128>> > (dev_data, np, a, k, eps, shift_z, n, cpx, cpy, cpz, cpq, xb, yb, zb, qb);
cudaGraphicsUnmapResources(1, &res, 0); // Возращаем буфер OpenGL'ю что бы он мог его использовать
glutPostRedisplay(); // Перерисовка
}
void keys(unsigned char key, int x, int y) { // Обработка кнопок
switch (key) {
case 'w': // "W" Вперед
dx = cos(yaw) * cos(pitch) * speed;
dy = sin(yaw) * cos(pitch) * speed;
dz = sin(pitch) * speed;
break;
case 's': // "S" Назад
dx = -cos(yaw) * cos(pitch) * speed;
dy = -sin(yaw) * cos(pitch) * speed;
dz = -sin(pitch) * speed;
break;
case 'a': // "A" Влево
dx = -sin(yaw) * speed;
dy = cos(yaw) * speed;
break;
case 'd': // "D" Вправо
dx = sin(yaw) * speed;
dy = -cos(yaw) * speed;
break;
case 27: // "ESC" Выход
cudaGraphicsUnregisterResource(res);
glDeleteTextures(2, textures);
glDeleteBuffers(1, &vbo);
gluDeleteQuadric(quadric);
HANDLE_ERROR(cudaFree(cpx));
HANDLE_ERROR(cudaFree(cpy));
HANDLE_ERROR(cudaFree(cpz));
HANDLE_ERROR(cudaFree(cpq));
exit(0);
break;
}
}
void mouse(int x, int y) {
static int x_prev = w / 2;
static int y_prev = h / 2;
float dx = 0.005 * (x - x_prev);
float dy = 0.005 * (y - y_prev);
dyaw -= dx;
dpitch -= dy;
x_prev = x;
y_prev = y;
// Перемещаем указатель мышки в центр, когда он достиг границы
if ((x < 20) || (y < 20) || (x > w - 20) || (y > h - 20)) {
glutWarpPointer(w / 2, h / 2);
x_prev = w / 2;
y_prev = h / 2;
}
}
void mouseClicks(int button, int state, int x, int y) {
if (button == GLUT_LEFT_BUTTON && GLUT_DOWN == state) {
vxb = vb * cos(yaw) * cos(pitch);
vyb = vb * sin(yaw) * cos(pitch);
vzb = vb * sin(pitch);
xb = xc + vxb * 3 * dt;
yb = yc + vyb * 3 * dt;
zb = zc + vzb * 3 * dt;
}
}
void reshape(int w_new, int h_new) {
w = w_new;
h = h_new;
glViewport(0, 0, w, h); // Сброс текущей области вывода
glMatrixMode(GL_PROJECTION); // Выбор матрицы проекций
glLoadIdentity(); // Сброс матрицы проекции
}
int main(int argc, char** argv) {
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); // double buffering
    glutInitWindowSize(w, h);
    glutCreateWindow("OpenGL");
    // Register GLUT callbacks.
    glutIdleFunc(update);
    glutDisplayFunc(display);
    glutKeyboardFunc(keys);
    glutMouseFunc(mouseClicks);
    glutPassiveMotionFunc(mouse);
    glutReshapeFunc(reshape);
    glutSetCursor(GLUT_CURSOR_NONE); // hide the mouse cursor
    // Host-side particle state: positions, velocities, scratch copies, charges.
    px.resize(n);
    py.resize(n);
    pz.resize(n);
    pvx.resize(n);
    pvy.resize(n);
    pvz.resize(n);
    pxx.resize(n);
    pyy.resize(n);
    pzz.resize(n);
    pq.resize(n);
    for (size_t i = 0; i < n; ++i) {
        pq[i] = qi;
        pvx[i] = 0;
        pvy[i] = 0;
        pvz[i] = 0;
        px[i] = dist_x(mersenne);
        py[i] = dist_y(mersenne);
        pz[i] = dist_z(mersenne);
    }
    // Device-side copies of the particle positions and charges.
    HANDLE_ERROR(cudaMalloc(reinterpret_cast<void**>(&cpx), n * sizeof(float)));
    HANDLE_ERROR(cudaMalloc(reinterpret_cast<void**>(&cpy), n * sizeof(float)));
    HANDLE_ERROR(cudaMalloc(reinterpret_cast<void**>(&cpz), n * sizeof(float)));
    HANDLE_ERROR(cudaMalloc(reinterpret_cast<void**>(&cpq), n * sizeof(float)));
    int wt , ht;
    std::fstream in("in.data", std::ios::binary | std::ios::in);
    if (!in.is_open()) {
        std::cerr << "Could not open file\n";
        std::cerr << "Make sure in.data exists\n";
        // Bail out: the reads below would otherwise consume a closed
        // stream and leave wt/ht uninitialized.
        return 1;
    }
    in.read(reinterpret_cast<char*>(&wt), sizeof(int));
    in.read(reinterpret_cast<char*>(&ht), sizeof(int));
    std::cout << wt << " " << ht << std::endl;
    uchar* data = reinterpret_cast<uchar*>(operator new(wt * ht * sizeof(int)));
    in.read(reinterpret_cast<char*>(data), wt * ht * sizeof(int));
    in.close();
    glGenTextures(2, textures);
    glBindTexture(GL_TEXTURE_2D, textures[0]);
    // Internal format 3 (three components, legacy GL) with GL_RGBA source
    // data: the driver drops the alpha channel on upload.
    glTexImage2D(GL_TEXTURE_2D, 0, 3, (GLsizei)wt, (GLsizei)ht, 0, GL_RGBA, GL_UNSIGNED_BYTE, (void*)data);
    // Minification filter: polygon smaller than the texture.
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); //GL_LINEAR); // interpolation
    // Magnification filter: polygon larger than the texture.
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); //GL_LINEAR);
    //operator delete(data); // glTexImage2D copies the pixels, so freeing here would be safe
    quadric = gluNewQuadric();
    gluQuadricTexture(quadric, GL_TRUE);
    glBindTexture(GL_TEXTURE_2D, textures[1]);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // interpolation
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // interpolation
    glEnable(GL_TEXTURE_2D);              // enable texturing
    glShadeModel(GL_SMOOTH);              // smooth shading
    glClearColor(0.0f, 0.0f, 0.0f, 1.0f); // black background
    glClearDepth(1.0f);                   // depth-buffer clear value
    glDepthFunc(GL_LEQUAL);               // depth-test comparison
    glEnable(GL_DEPTH_TEST);              // enable depth testing
    glEnable(GL_CULL_FACE);               // texture only the front faces
    glewInit();
    glGenBuffers(1, &vbo);                     // allocate a buffer object id
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, vbo); // make it current
    glBufferData(GL_PIXEL_UNPACK_BUFFER, np * np * sizeof(uchar4), NULL, GL_DYNAMIC_DRAW); // reserve storage
    cudaGraphicsGLRegisterBuffer(&res, vbo, cudaGraphicsMapFlagsWriteDiscard); // share the buffer with CUDA
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);   // unbind
    glutMainLoop();
}
|
72375493e7b87b90e15523fc14f9120e82772cc8.hip | // !!! This is a file automatically generated by hipify!!!
/** \file ReducedMean.cu
* \author Tomasz Jakubczyk
* \brief liczenie rednich dla klatki przy zredukowanej liczbie punktw
*
*
*
*/
#define WIN32
#include "mex.h"
#include<stdio.h>
#include<stdlib.h>
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <hip/hip_vector_types.h>
#include "helper_math.h"
#include "ReducedMean_CUDA_kernel.cuh"
__host__
//Round a / b to nearest higher integer value
uint iDivUp(uint a, uint b)
{
    // Ceiling of a / b: bump the quotient by one when the division
    // leaves a remainder.  Avoids the (a + b - 1) form, which can overflow.
    uint q = a / b;
    return (a % b != 0) ? q + 1 : q;
}
__host__
/** \brief compute grid and thread block size for a given number of elements
*
* \param n uint
* \param blockSize uint
* \param numBlocks uint&
* \param numThreads uint&
* \return void
*
*/
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
    // Clamp the block to the element count, then cover n with
    // ceil(n / numThreads) blocks.
    numThreads = (n < blockSize) ? n : blockSize;
    numBlocks = iDivUp(n, numThreads);
}
/** \brief
* function [nTheta, I_] = ReducedMean(Theta_S, deltaT, I, I_S)
* \param nlhs int
* \param plhs[] mxArray*
* \param nrhs int
* \param prhs[] const mxArray*
* \return void
*
*/
/** \brief MEX entry point:
 *  [nTheta, I_] = ReducedMean(Theta_S, deltaT, I, I_S)
 *
 *  Bins the sorted angle vector Theta_S into buckets of width deltaT and
 *  averages, per bucket, both the angle and the frame intensity I
 *  (gathered through the sort index I_S) on the GPU via ReducedMeanD.
 *  Empty buckets inherit the values of the previous bucket.
 * \param nlhs int              number of outputs (must be 2)
 * \param plhs[] mxArray*       outputs: nTheta, I_
 * \param nrhs int              number of inputs (must be 4)
 * \param prhs[] const mxArray* inputs: Theta_S, deltaT, I, I_S (all single)
 * \return void
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    float* Theta_S;            /**< sorted theta vector */
    unsigned int Theta_S_size;
    float* deltaT;             /**< constant bucket width (scalar) */
    float* I;                  /**< corrected frame */
    unsigned int I_size;
    float* I_S;                /**< indices of the sorted frame */
    unsigned int I_S_size;
    /* argument validation */
    if(nlhs!=2)
    {
        printf("function returns [nTheta, I_] \n");
        return;
    }
    if(nrhs!=4)
    {
        printf("function arguments are (Theta_S, deltaT, I, I_S) \n");
        return;
    }
    if(!mxIsSingle(prhs[0]))
    {
        printf("1st argument needs to be single precision vector\n");
        return;
    }
    if(!mxIsSingle(prhs[1]))
    {
        printf("2nd argument needs to be single precision number\n");
        return;
    }
    if(!mxIsSingle(prhs[2]))
    {
        printf("3rd argument needs to be single precision vector\n");
        return;
    }
    if(!mxIsSingle(prhs[3]))
    {
        printf("4th argument needs to be single precision vector\n");
        return;
    }
    /* fetch the arguments from MATLAB */
    Theta_S=(float*)mxGetPr(prhs[0]);
    Theta_S_size=mxGetN(prhs[0])*mxGetM(prhs[0]);
    deltaT=(float*)mxGetPr(prhs[1]);
    if(mxGetN(prhs[1])*mxGetM(prhs[1])!=1)
    {
        printf("2nd argument (deltaT) must be a number\n");
        return;
    }
    I=(float*)mxGetPr(prhs[2]);
    I_size=mxGetN(prhs[2])*mxGetM(prhs[2]);
    I_S=(float*)mxGetPr(prhs[3]);
    I_S_size=mxGetN(prhs[3])*mxGetM(prhs[3]);
    /* number of buckets spanned by the angle range */
    unsigned int max_nom=floor((Theta_S[Theta_S_size-1]-Theta_S[0])/(float)*deltaT);
    float* dev_Theta_S=NULL;
    float* dev_I=NULL;
    float* dev_I_S=NULL;
    float* dev_nTheta=NULL;   /* per-bucket angle sums, turned into means below */
    float* dev_nI=NULL;       /* per-bucket intensity sums, turned into means below */
    float* dev_counter=NULL;  /* per-bucket sample counts */
    hipError_t err;
    checkCudaErrors(hipMalloc((void**)&dev_Theta_S, sizeof(float)*Theta_S_size));
    checkCudaErrors(hipMemcpy((void*)dev_Theta_S, Theta_S, sizeof(float)*Theta_S_size, hipMemcpyHostToDevice));
    checkCudaErrors(hipMalloc((void**)&dev_I, sizeof(float)*I_size));
    checkCudaErrors(hipMemcpy((void*)dev_I, I, sizeof(float)*I_size, hipMemcpyHostToDevice));
    checkCudaErrors(hipMalloc((void**)&dev_I_S, sizeof(float)*I_S_size));
    checkCudaErrors(hipMemcpy((void*)dev_I_S, I_S, sizeof(float)*I_S_size, hipMemcpyHostToDevice));
    checkCudaErrors(hipMalloc((void**)&dev_nTheta, sizeof(float)*max_nom));
    checkCudaErrors(hipMemset(dev_nTheta,0,sizeof(float)*max_nom));
    checkCudaErrors(hipMalloc((void**)&dev_nI, sizeof(float)*max_nom));
    checkCudaErrors(hipMemset(dev_nI,0,sizeof(float)*max_nom));
    checkCudaErrors(hipMalloc((void**)&dev_counter, sizeof(float)*max_nom));
    checkCudaErrors(hipMemset(dev_counter,0,sizeof(float)*max_nom));
    err = hipGetLastError();
    if (err != hipSuccess)
    {
        printf("hipError_t(Malloc,Memcpy): %s\n", hipGetErrorString(err));
    }
    /* one thread per Theta_S element; split the grid over two dimensions
     * to stay under the 65535 blocks-per-dimension limit */
    uint numThreads, numBlocks;
    computeGridSize(Theta_S_size, 512, numBlocks, numThreads);
    unsigned int dimGridX=numBlocks<65535?numBlocks:65535;
    unsigned int dimGridY=numBlocks/65535+1;
    dim3 dimGrid(dimGridX,dimGridY);
    hipLaunchKernelGGL(( ReducedMeanD), dim3(dimGrid), dim3(numThreads) , 0, 0, dev_Theta_S,Theta_S_size,(float)*deltaT,max_nom,dev_I,dev_I_S,dev_nTheta,dev_nI,dev_counter);
    err = hipGetLastError();
    if (err != hipSuccess)
    {
        printf("hipError_t(ReducedMeanD): %s\n", hipGetErrorString(err));
    }
    /* allocate the MATLAB outputs and a host copy of the counters */
    int dimsnTheta[1]={(int)max_nom};
    plhs[0]=mxCreateNumericArray(1,dimsnTheta,mxSINGLE_CLASS,mxREAL);
    float* nTheta=(float*)mxGetPr(plhs[0]);
    int dimsnI[1]={(int)max_nom};
    plhs[1]=mxCreateNumericArray(1,dimsnI,mxSINGLE_CLASS,mxREAL);
    float* nI=(float*)mxGetPr(plhs[1]);
    float* counter=(float*)malloc(sizeof(float)*max_nom);
    checkCudaErrors(hipMemcpy((void*)nTheta,dev_nTheta,sizeof(float)*max_nom,hipMemcpyDeviceToHost));
    checkCudaErrors(hipMemcpy((void*)nI,dev_nI,sizeof(float)*max_nom,hipMemcpyDeviceToHost));
    checkCudaErrors(hipMemcpy((void*)counter,dev_counter,sizeof(float)*max_nom,hipMemcpyDeviceToHost));
    err = hipGetLastError();
    if (err != hipSuccess)
    {
        printf("hipError_t(hipMemcpyDeviceToHost): %s\n", hipGetErrorString(err));
    }
    checkCudaErrors(hipFree(dev_counter));
    checkCudaErrors(hipFree(dev_nI));
    checkCudaErrors(hipFree(dev_nTheta));
    checkCudaErrors(hipFree(dev_I_S));
    checkCudaErrors(hipFree(dev_I));
    checkCudaErrors(hipFree(dev_Theta_S));
    err = hipGetLastError();
    if (err != hipSuccess)
    {
        printf("hipError_t(hipFree): %s\n", hipGetErrorString(err));
    }
    /* turn sums into means; forward-fill empty buckets */
    for(unsigned int i=0;i<max_nom;i++)
    {
        if(counter[i]==0.0f)
        {
            /* Empty bucket: repeat the previous bucket.  The first bucket
             * has no predecessor (with unsigned i, i-1 would wrap around
             * and read far out of bounds), so it falls back to zero. */
            nTheta[i]=(i>0)?nTheta[i-1]:0.0f;
            nI[i]=(i>0)?nI[i-1]:0.0f;
        }
        else
        {
            nTheta[i]/=counter[i];
            nI[i]/=counter[i];
        }
        if(isnan(nI[i]))
            nI[i]=0.0f;
    }
    free(counter);
}
| 72375493e7b87b90e15523fc14f9120e82772cc8.cu | /** \file ReducedMean.cu
* \author Tomasz Jakubczyk
* \brief liczenie œrednich dla klatki przy zredukowanej liczbie punktów
*
*
*
*/
#define WIN32
#include "mex.h"
#include<stdio.h>
#include<stdlib.h>
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <vector_types.h>
#include "helper_math.h"
#include "ReducedMean_CUDA_kernel.cuh"
__host__
//Round a / b to nearest higher integer value
uint iDivUp(uint a, uint b)
{
    // Ceiling of a / b: bump the quotient by one when the division
    // leaves a remainder.  Avoids the (a + b - 1) form, which can overflow.
    uint q = a / b;
    return (a % b != 0) ? q + 1 : q;
}
__host__
/** \brief compute grid and thread block size for a given number of elements
*
* \param n uint
* \param blockSize uint
* \param numBlocks uint&
* \param numThreads uint&
* \return void
*
*/
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
    // Clamp the block to the element count, then cover n with
    // ceil(n / numThreads) blocks.
    numThreads = (n < blockSize) ? n : blockSize;
    numBlocks = iDivUp(n, numThreads);
}
/** \brief
* function [nTheta, I_] = ReducedMean(Theta_S, deltaT, I, I_S)
* \param nlhs int
* \param plhs[] mxArray*
* \param nrhs int
* \param prhs[] const mxArray*
* \return void
*
*/
/** \brief MEX entry point:
 *  [nTheta, I_] = ReducedMean(Theta_S, deltaT, I, I_S)
 *
 *  Bins the sorted angle vector Theta_S into buckets of width deltaT and
 *  averages, per bucket, both the angle and the frame intensity I
 *  (gathered through the sort index I_S) on the GPU via ReducedMeanD.
 *  Empty buckets inherit the values of the previous bucket.
 * \param nlhs int              number of outputs (must be 2)
 * \param plhs[] mxArray*       outputs: nTheta, I_
 * \param nrhs int              number of inputs (must be 4)
 * \param prhs[] const mxArray* inputs: Theta_S, deltaT, I, I_S (all single)
 * \return void
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    float* Theta_S;            /**< sorted theta vector */
    unsigned int Theta_S_size;
    float* deltaT;             /**< constant bucket width (scalar) */
    float* I;                  /**< corrected frame */
    unsigned int I_size;
    float* I_S;                /**< indices of the sorted frame */
    unsigned int I_S_size;
    /* argument validation */
    if(nlhs!=2)
    {
        printf("function returns [nTheta, I_] \n");
        return;
    }
    if(nrhs!=4)
    {
        printf("function arguments are (Theta_S, deltaT, I, I_S) \n");
        return;
    }
    if(!mxIsSingle(prhs[0]))
    {
        printf("1st argument needs to be single precision vector\n");
        return;
    }
    if(!mxIsSingle(prhs[1]))
    {
        printf("2nd argument needs to be single precision number\n");
        return;
    }
    if(!mxIsSingle(prhs[2]))
    {
        printf("3rd argument needs to be single precision vector\n");
        return;
    }
    if(!mxIsSingle(prhs[3]))
    {
        printf("4th argument needs to be single precision vector\n");
        return;
    }
    /* fetch the arguments from MATLAB */
    Theta_S=(float*)mxGetPr(prhs[0]);
    Theta_S_size=mxGetN(prhs[0])*mxGetM(prhs[0]);
    deltaT=(float*)mxGetPr(prhs[1]);
    if(mxGetN(prhs[1])*mxGetM(prhs[1])!=1)
    {
        printf("2nd argument (deltaT) must be a number\n");
        return;
    }
    I=(float*)mxGetPr(prhs[2]);
    I_size=mxGetN(prhs[2])*mxGetM(prhs[2]);
    I_S=(float*)mxGetPr(prhs[3]);
    I_S_size=mxGetN(prhs[3])*mxGetM(prhs[3]);
    /* number of buckets spanned by the angle range */
    unsigned int max_nom=floor((Theta_S[Theta_S_size-1]-Theta_S[0])/(float)*deltaT);
    float* dev_Theta_S=NULL;
    float* dev_I=NULL;
    float* dev_I_S=NULL;
    float* dev_nTheta=NULL;   /* per-bucket angle sums, turned into means below */
    float* dev_nI=NULL;       /* per-bucket intensity sums, turned into means below */
    float* dev_counter=NULL;  /* per-bucket sample counts */
    cudaError_t err;
    checkCudaErrors(cudaMalloc((void**)&dev_Theta_S, sizeof(float)*Theta_S_size));
    checkCudaErrors(cudaMemcpy((void*)dev_Theta_S, Theta_S, sizeof(float)*Theta_S_size, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMalloc((void**)&dev_I, sizeof(float)*I_size));
    checkCudaErrors(cudaMemcpy((void*)dev_I, I, sizeof(float)*I_size, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMalloc((void**)&dev_I_S, sizeof(float)*I_S_size));
    checkCudaErrors(cudaMemcpy((void*)dev_I_S, I_S, sizeof(float)*I_S_size, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMalloc((void**)&dev_nTheta, sizeof(float)*max_nom));
    checkCudaErrors(cudaMemset(dev_nTheta,0,sizeof(float)*max_nom));
    checkCudaErrors(cudaMalloc((void**)&dev_nI, sizeof(float)*max_nom));
    checkCudaErrors(cudaMemset(dev_nI,0,sizeof(float)*max_nom));
    checkCudaErrors(cudaMalloc((void**)&dev_counter, sizeof(float)*max_nom));
    checkCudaErrors(cudaMemset(dev_counter,0,sizeof(float)*max_nom));
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        printf("cudaError(Malloc,Memcpy): %s\n", cudaGetErrorString(err));
    }
    /* one thread per Theta_S element; split the grid over two dimensions
     * to stay under the 65535 blocks-per-dimension limit */
    uint numThreads, numBlocks;
    computeGridSize(Theta_S_size, 512, numBlocks, numThreads);
    unsigned int dimGridX=numBlocks<65535?numBlocks:65535;
    unsigned int dimGridY=numBlocks/65535+1;
    dim3 dimGrid(dimGridX,dimGridY);
    ReducedMeanD<<< dimGrid, numThreads >>>(dev_Theta_S,Theta_S_size,(float)*deltaT,max_nom,dev_I,dev_I_S,dev_nTheta,dev_nI,dev_counter);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        printf("cudaError(ReducedMeanD): %s\n", cudaGetErrorString(err));
    }
    /* allocate the MATLAB outputs and a host copy of the counters */
    int dimsnTheta[1]={(int)max_nom};
    plhs[0]=mxCreateNumericArray(1,dimsnTheta,mxSINGLE_CLASS,mxREAL);
    float* nTheta=(float*)mxGetPr(plhs[0]);
    int dimsnI[1]={(int)max_nom};
    plhs[1]=mxCreateNumericArray(1,dimsnI,mxSINGLE_CLASS,mxREAL);
    float* nI=(float*)mxGetPr(plhs[1]);
    float* counter=(float*)malloc(sizeof(float)*max_nom);
    checkCudaErrors(cudaMemcpy((void*)nTheta,dev_nTheta,sizeof(float)*max_nom,cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaMemcpy((void*)nI,dev_nI,sizeof(float)*max_nom,cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaMemcpy((void*)counter,dev_counter,sizeof(float)*max_nom,cudaMemcpyDeviceToHost));
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        printf("cudaError(cudaMemcpyDeviceToHost): %s\n", cudaGetErrorString(err));
    }
    checkCudaErrors(cudaFree(dev_counter));
    checkCudaErrors(cudaFree(dev_nI));
    checkCudaErrors(cudaFree(dev_nTheta));
    checkCudaErrors(cudaFree(dev_I_S));
    checkCudaErrors(cudaFree(dev_I));
    checkCudaErrors(cudaFree(dev_Theta_S));
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        printf("cudaError(cudaFree): %s\n", cudaGetErrorString(err));
    }
    /* turn sums into means; forward-fill empty buckets */
    for(unsigned int i=0;i<max_nom;i++)
    {
        if(counter[i]==0.0f)
        {
            /* Empty bucket: repeat the previous bucket.  The first bucket
             * has no predecessor (with unsigned i, i-1 would wrap around
             * and read far out of bounds), so it falls back to zero. */
            nTheta[i]=(i>0)?nTheta[i-1]:0.0f;
            nI[i]=(i>0)?nI[i-1]:0.0f;
        }
        else
        {
            nTheta[i]/=counter[i];
            nI[i]/=counter[i];
        }
        if(isnan(nI[i]))
            nI[i]=0.0f;
    }
    free(counter);
}
|
66c3089745737259ce3c4197f4da905df1917d85.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <cutil.h>
#include "rocblas.h"
//=============================================================================
extern "C" int sgemm_(char *, char *, int *, int *, int *, float *, float *,
int *, float *, int *, float *, float *, int *);
extern "C" int sgesvd_(char *, char *, int *, int *, float *, int *, float *,
float *, int *, float *, int *, float *, int *, int *);
extern "C" void sgeqrf_(int*, int*, float*, int*, float*, float*, int*, int*);
extern "C" int scopy_(int *, float*, int *, float*, int *);
extern "C" int strmm_(char*, char *, char*, char *, int *, int *, float *,
float *, int *, float *, int *);
extern "C" int strsm_(char *, char *, char *, char *, int *, int *,
float *, float *, int *, float *, int *);
extern "C" int ssyrk_(char *, char *, int *, int *, float *, float *,
int *, float *, float *, int *);
//=============================================================================
/* Iterated Cholesky-QR orthonormalization (host BLAS/LAPACK version).
 * Orthonormalizes the columns of the m-by-n matrix A (column-major) in
 * place and accumulates the combined upper-triangular factor in R (n-by-n).
 * Each pass: G = A'A, SVD of G to build a well-conditioned triangular
 * square-root factor, QR of that factor, then A <- A * R_k^{-1}.  Passes
 * repeat until the estimated condition number drops to 100 or below.
 * NOTE(review): `lda` is accepted but the BLAS calls pass `m` as the
 * leading dimension, i.e. lda == m is assumed — confirm at call sites.
 * NOTE(review): allocations hard-code 4 as sizeof(float). */
void chol_qr_it(int m, int n, float *A, int lda, float *R){
    int i = 0, k, j, info, lwork = n*n, n2 = n*n, one = 1;
    float *G, *U, *S, *VT, *vt, *tau, *work;
    float cn = 200.f, alpha = 1.f, zero = 0.f, mins, maxs;
    G = (float*)malloc(n * n * 4);
    VT = (float*)malloc(n * n * 4);
    S = (float*)malloc( n * 4);
    work = (float*)malloc(lwork * 4);
    tau = (float*)malloc( n * 4);
    do {
        i++;
        /* G = A' * A (n-by-n Gram matrix) */
        sgemm_("t", "n", &n, &n, &m, &alpha, A, &m, A, &m, &zero, G, &n);
        //ssyrk_("l", "t", &n, &m, &alpha, A, &m, &zero, G, &n);
        //for(j=0; j<n; j++)
        //  for(k=0; k<j; k++)
        //    G[j*n+k] = G[k*n+j];
        /* SVD of G; jobu=="n", so LAPACK never references U and the
         * uninitialized pointer is not dereferenced */
        sgesvd_("n", "a", &n, &n, G, &n, S, U, &n, VT, &n, work, &lwork, &info);
        mins = 100.f, maxs = 0.f;
        /* singular values of A are the square roots of those of G;
         * track the extremes for the condition-number estimate */
        for(k=0; k<n; k++){
            S[k] = sqrt(S[k]);
            if (S[k] < mins) mins = S[k];
            if (S[k] > maxs) maxs = S[k];
        }
        /* form diag(S) * VT — a square-root factor of G */
        for(k=0; k<n;k++){
            vt = VT + k*n;
            for(j=0; j<n; j++)
                vt[j]*=S[j];
        }
        /* QR of the factor; its upper triangle is the pass factor R_k */
        sgeqrf_(&n, &n, VT, &n, tau, work, &lwork, &info);
        if (i==1)
            scopy_(&n2, VT, &one, R, &one);
        else
            /* accumulate: R <- R_k * R (both upper triangular) */
            strmm_("l", "u", "n", "n", &n, &n, &alpha, VT, &n, R, &n);
        /* A <- A * R_k^{-1}: apply the inverse factor to orthonormalize */
        strsm_("r", "u", "n", "n", &m, &n, &alpha, VT, &n, A, &m);
        /* refresh the condition estimate, guarding against tiny sigma */
        if (mins > 0.00001f)
            cn = maxs/mins;
        fprintf(stderr, "\nIteration %d, cond num = %f \n", i, cn);
    } while (cn > 100.f);
    free(G);
    free(VT);
    free(S);
    free(work);
    free(tau);
}
//=============================================================================
/** GPU variant of chol_qr_it: the Gram matrix G = A'A and the final
 *  triangular solve run on the device via hipBLAS, while the small n-by-n
 *  SVD/QR steps run on the host with LAPACK.
 *  d_A:  device matrix, m-by-n column-major, orthonormalized in place
 *  G:    n-by-n device scratch buffer
 *  R:    host n-by-n output, accumulated upper-triangular factor
 *  work/lwork: accepted for interface compatibility but unused — host
 *              scratch is allocated internally.
 *  NOTE(review): the BLAS calls mix `lda` and `m` as the leading
 *  dimension of d_A, so lda == m is assumed — confirm at call sites. */
void chol_qr_it_GPU(int m, int n, float *d_A, int lda, float *G, float *R,
                    float *work, int lwork){
    int i = 0, k, j, info, n2 = n*n, one = 1, lwork2 = n*n;
    float *U, *S, *VT, *vt, *tau, *work1, *work2;
    float cn = 200.f, alpha = 1.f, zero = 0.f, mins, maxs;
    work1 = (float*) malloc(n * n * 4);
    VT = (float*)malloc(n * n * 4);
    work2 = (float*)malloc(lwork2 * 4);
    S = (float*)malloc( n * 4);
    tau = (float*)malloc( n * 4);
    do {
        i++;
        /* G = A' * A on the device */
        hipblasSgemm('T', 'N', n, n, m, alpha, d_A, lda, d_A, lda, zero, G, n);
        /* Copy the n*n Gram matrix to the host.  The element size and the
         * increments are integer parameters; the original code passed the
         * float literals 4.f/1.f, which only worked via implicit narrowing. */
        hipblasGetVector(n2, sizeof(float), G, 1, work1, 1);
        /* jobu=="N": LAPACK never references U */
        sgesvd_("N", "A", &n, &n, work1, &n, S, U, &n, VT, &n, work2, &lwork2, &info);
        mins = 100.f, maxs = 0.f;
        /* singular values of A are sqrt of those of G; track extremes */
        for(k=0; k<n; k++){
            S[k] = sqrt(S[k]);
            if (S[k] < mins) mins = S[k];
            if (S[k] > maxs) maxs = S[k];
        }
        /* form diag(S) * VT — a square-root factor of G */
        for(k=0; k<n;k++){
            vt = VT + k*n;
            for(j=0; j<n; j++)
                vt[j]*=S[j];
        }
        sgeqrf_(&n, &n, VT, &n, tau, work2, &lwork2, &info);
        if (i==1)
            scopy_(&n2, VT, &one, R, &one);/*then for next loops, R keeps staying upper triangular*/
        else
            strmm_("l", "u", "n", "n", &n, &n, &alpha, VT, &n, R, &n);/*R is "u" from pre loop, so thing are right*/
        /* push the factor to the device and solve A <- A * R_k^{-1} there */
        hipblasSetVector(n2, sizeof(float), VT, 1, G, 1);
        hipblasStrsm('r', 'u', 'n', 'n', m, n, alpha, G, n, d_A, m);
        /* refresh the condition estimate, guarding against tiny sigma */
        if (mins > 0.00001f)
            cn = maxs/mins;
        fprintf(stderr, "\nIteration %d, cond num = %f \n", i, cn);
    } while (cn > 100.f);
    free(VT);
    free(S);
    free(tau);
    free(work1);
    free(work2);
}
//=============================================================================
| 66c3089745737259ce3c4197f4da905df1917d85.cu | #include <stdio.h>
#include <cutil.h>
#include "cublas.h"
//=============================================================================
extern "C" int sgemm_(char *, char *, int *, int *, int *, float *, float *,
int *, float *, int *, float *, float *, int *);
extern "C" int sgesvd_(char *, char *, int *, int *, float *, int *, float *,
float *, int *, float *, int *, float *, int *, int *);
extern "C" void sgeqrf_(int*, int*, float*, int*, float*, float*, int*, int*);
extern "C" int scopy_(int *, float*, int *, float*, int *);
extern "C" int strmm_(char*, char *, char*, char *, int *, int *, float *,
float *, int *, float *, int *);
extern "C" int strsm_(char *, char *, char *, char *, int *, int *,
float *, float *, int *, float *, int *);
extern "C" int ssyrk_(char *, char *, int *, int *, float *, float *,
int *, float *, float *, int *);
//=============================================================================
/* Iterated Cholesky-QR orthonormalization (host BLAS/LAPACK version).
 * Orthonormalizes the columns of the m-by-n matrix A (column-major) in
 * place and accumulates the combined upper-triangular factor in R (n-by-n).
 * Each pass: G = A'A, SVD of G to build a well-conditioned triangular
 * square-root factor, QR of that factor, then A <- A * R_k^{-1}.  Passes
 * repeat until the estimated condition number drops to 100 or below.
 * NOTE(review): `lda` is accepted but the BLAS calls pass `m` as the
 * leading dimension, i.e. lda == m is assumed — confirm at call sites.
 * NOTE(review): allocations hard-code 4 as sizeof(float). */
void chol_qr_it(int m, int n, float *A, int lda, float *R){
    int i = 0, k, j, info, lwork = n*n, n2 = n*n, one = 1;
    float *G, *U, *S, *VT, *vt, *tau, *work;
    float cn = 200.f, alpha = 1.f, zero = 0.f, mins, maxs;
    G = (float*)malloc(n * n * 4);
    VT = (float*)malloc(n * n * 4);
    S = (float*)malloc( n * 4);
    work = (float*)malloc(lwork * 4);
    tau = (float*)malloc( n * 4);
    do {
        i++;
        /* G = A' * A (n-by-n Gram matrix) */
        sgemm_("t", "n", &n, &n, &m, &alpha, A, &m, A, &m, &zero, G, &n);
        //ssyrk_("l", "t", &n, &m, &alpha, A, &m, &zero, G, &n);
        //for(j=0; j<n; j++)
        //  for(k=0; k<j; k++)
        //    G[j*n+k] = G[k*n+j];
        /* SVD of G; jobu=="n", so LAPACK never references U and the
         * uninitialized pointer is not dereferenced */
        sgesvd_("n", "a", &n, &n, G, &n, S, U, &n, VT, &n, work, &lwork, &info);
        mins = 100.f, maxs = 0.f;
        /* singular values of A are the square roots of those of G;
         * track the extremes for the condition-number estimate */
        for(k=0; k<n; k++){
            S[k] = sqrt(S[k]);
            if (S[k] < mins) mins = S[k];
            if (S[k] > maxs) maxs = S[k];
        }
        /* form diag(S) * VT — a square-root factor of G */
        for(k=0; k<n;k++){
            vt = VT + k*n;
            for(j=0; j<n; j++)
                vt[j]*=S[j];
        }
        /* QR of the factor; its upper triangle is the pass factor R_k */
        sgeqrf_(&n, &n, VT, &n, tau, work, &lwork, &info);
        if (i==1)
            scopy_(&n2, VT, &one, R, &one);
        else
            /* accumulate: R <- R_k * R (both upper triangular) */
            strmm_("l", "u", "n", "n", &n, &n, &alpha, VT, &n, R, &n);
        /* A <- A * R_k^{-1}: apply the inverse factor to orthonormalize */
        strsm_("r", "u", "n", "n", &m, &n, &alpha, VT, &n, A, &m);
        /* refresh the condition estimate, guarding against tiny sigma */
        if (mins > 0.00001f)
            cn = maxs/mins;
        fprintf(stderr, "\nIteration %d, cond num = %f \n", i, cn);
    } while (cn > 100.f);
    free(G);
    free(VT);
    free(S);
    free(work);
    free(tau);
}
//=============================================================================
/** GPU variant of chol_qr_it: the Gram matrix G = A'A and the final
 *  triangular solve run on the device via cuBLAS, while the small n-by-n
 *  SVD/QR steps run on the host with LAPACK.
 *  d_A:  device matrix, m-by-n column-major, orthonormalized in place
 *  G:    n-by-n device scratch buffer
 *  R:    host n-by-n output, accumulated upper-triangular factor
 *  work/lwork: accepted for interface compatibility but unused — host
 *              scratch is allocated internally.
 *  NOTE(review): the BLAS calls mix `lda` and `m` as the leading
 *  dimension of d_A, so lda == m is assumed — confirm at call sites. */
void chol_qr_it_GPU(int m, int n, float *d_A, int lda, float *G, float *R,
                    float *work, int lwork){
    int i = 0, k, j, info, n2 = n*n, one = 1, lwork2 = n*n;
    float *U, *S, *VT, *vt, *tau, *work1, *work2;
    float cn = 200.f, alpha = 1.f, zero = 0.f, mins, maxs;
    work1 = (float*) malloc(n * n * 4);
    VT = (float*)malloc(n * n * 4);
    work2 = (float*)malloc(lwork2 * 4);
    S = (float*)malloc( n * 4);
    tau = (float*)malloc( n * 4);
    do {
        i++;
        /* G = A' * A on the device */
        cublasSgemm('T', 'N', n, n, m, alpha, d_A, lda, d_A, lda, zero, G, n);
        /* Copy the n*n Gram matrix to the host.  The element size and the
         * increments are integer parameters; the original code passed the
         * float literals 4.f/1.f, which only worked via implicit narrowing. */
        cublasGetVector(n2, sizeof(float), G, 1, work1, 1);
        /* jobu=="N": LAPACK never references U */
        sgesvd_("N", "A", &n, &n, work1, &n, S, U, &n, VT, &n, work2, &lwork2, &info);
        mins = 100.f, maxs = 0.f;
        /* singular values of A are sqrt of those of G; track extremes */
        for(k=0; k<n; k++){
            S[k] = sqrt(S[k]);
            if (S[k] < mins) mins = S[k];
            if (S[k] > maxs) maxs = S[k];
        }
        /* form diag(S) * VT — a square-root factor of G */
        for(k=0; k<n;k++){
            vt = VT + k*n;
            for(j=0; j<n; j++)
                vt[j]*=S[j];
        }
        sgeqrf_(&n, &n, VT, &n, tau, work2, &lwork2, &info);
        if (i==1)
            scopy_(&n2, VT, &one, R, &one);/*then for next loops, R keeps staying upper triangular*/
        else
            strmm_("l", "u", "n", "n", &n, &n, &alpha, VT, &n, R, &n);/*R is "u" from pre loop, so thing are right*/
        /* push the factor to the device and solve A <- A * R_k^{-1} there */
        cublasSetVector(n2, sizeof(float), VT, 1, G, 1);
        cublasStrsm('r', 'u', 'n', 'n', m, n, alpha, G, n, d_A, m);
        /* refresh the condition estimate, guarding against tiny sigma */
        if (mins > 0.00001f)
            cn = maxs/mins;
        fprintf(stderr, "\nIteration %d, cond num = %f \n", i, cn);
    } while (cn > 100.f);
    free(VT);
    free(S);
    free(tau);
    free(work1);
    free(work2);
}
//=============================================================================
|
a0f52a375a8c2559eba1edc1c098149128865ba8.hip | // !!! This is a file automatically generated by hipify!!!
// Designed by: Amir Yazdanbakhsh
// Date: March 26th - 2015
// Alternative Computing Technologies Lab.
// Georgia Institute of Technology
#include "stdlib.h"
#include <fstream>
#include <iostream>
#include <cstddef>
// Cuda Libraries
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
//#include "../../../include/fast_math.cuh"
//#define SLOW_MATH
#ifdef SLOW_MATH
#include "../../../include/cuda_math.cuh"
#else //my approximate math lib
#include "../../../include/fast_math.cuh"
#endif
#define SCATTER
//#define SPEED 1024
#define MAX_LOOP 1000
#define MAX_DIFF 0.15f
#define NUM_JOINTS 3
#define PI 3.14159265358979f
#define NUM_JOINTS_P1 (NUM_JOINTS + 1)
using namespace std;
/* CCD-style inverse kinematics for a NUM_JOINTS-joint planar arm, one
 * target point per logical work item.  `speed` selects how many blocks
 * (out of every 100 when SCATTER is defined) take the approximate half2
 * path, in which each thread of the lower half-block solves two adjacent
 * targets at once; all other blocks take the exact float path, one
 * target per thread.
 * Assumptions visible from the launch in main(): 1-D grid, and the half2
 * path reads xTarget_in[idx+1]/yTarget_in[idx+1], so `size` is assumed
 * even — TODO confirm at the call site.
 * Outputs: NUM_JOINTS angles per target written to `angles`. */
__global__ void invkin_kernel(float *xTarget_in, float *yTarget_in, float *angles, int size, float err_thresh, int speed)
{
#ifdef SCATTER
    if(blockIdx.x %100 < speed){
#else
    if(blockIdx.x < speed){
#endif
//  if(blockIdx.x < speed){
    // Half-precision path: only the lower half of the block works; each
    // thread packs two consecutive targets into half2 lanes (.x / .y).
    if(threadIdx.x<blockDim.x/2){ //fuse threads
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    int idx = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + 2*threadIdx.x;
    if(idx < size)
    {
        half2 angle_out[NUM_JOINTS];
        half zero = 0.0f;
        half one = 1.f;
        half minus_one = -1.f;
        for(int i = 0; i < NUM_JOINTS; i++)
        {
            angle_out[i] = __float2half2_rn(0.0);
            //angle_out[i].x = 0.f;
            //angle_out[i].y = 0.f;
        }
        // NOTE(review): max_err/err are computed but never used — the
        // solver always runs a fixed MAX_LOOP iterations.
        half max_err = err_thresh * (float)(NUM_JOINTS);
        half err = max_err + one; // initialize error to something greater than error threshold
        // Initialize x and y data: joints start on the x axis at x = 0..NUM_JOINTS
        half2 xData[NUM_JOINTS_P1];
        half2 yData[NUM_JOINTS_P1];
        for (int i = 0 ; i < NUM_JOINTS_P1; i++)
        {
            xData[i] = __float2half2_rn((float)i);
            yData[i] = __float2half2_rn(0.f);
        }
        // Pack the two targets handled by this thread into half2 lanes.
        half2 xTarget_in_temp = __floats2half2_rn(xTarget_in[idx],xTarget_in[idx+1]);
        half2 yTarget_in_temp = __floats2half2_rn(yTarget_in[idx],yTarget_in[idx+1]);
        //half minus_one = -1.0f;
        half2 pe_x = xData[NUM_JOINTS]; // end-effector position
        half2 pe_y = yData[NUM_JOINTS];
        for(int curr_loop = 0; curr_loop < MAX_LOOP; curr_loop++)
        {
            // Cyclic coordinate descent: walk the joints from the tip inward.
            for (int iter = NUM_JOINTS; iter > 0; iter--)
            {
                half2 pc_x = xData[iter-1]; // current joint position
                half2 pc_y = yData[iter-1];
                half2 diff_pe_pc_x = pe_x - pc_x;
                half2 diff_pe_pc_y = pe_y - pc_y;
//              half2 diff_tgt_pc_x = xTarget_in[idx] - pc_x;
//              half2 diff_tgt_pc_y = yTarget_in[idx] - pc_y;
                half2 diff_tgt_pc_x = xTarget_in_temp - pc_x;
                half2 diff_tgt_pc_y = yTarget_in_temp - pc_y;
                half2 len_diff_pe_pc = fast_h2sqrt (diff_pe_pc_x * diff_pe_pc_x + diff_pe_pc_y * diff_pe_pc_y);
                half2 len_diff_tgt_pc = fast_h2sqrt (diff_tgt_pc_x * diff_tgt_pc_x + diff_tgt_pc_y * diff_tgt_pc_y);
                // Unit vectors joint->effector (a) and joint->target (b).
                half2 a_x = diff_pe_pc_x * fast_h2rcp(len_diff_pe_pc);
                half2 a_y = diff_pe_pc_y * fast_h2rcp(len_diff_pe_pc);
                half2 b_x = diff_tgt_pc_x * fast_h2rcp(len_diff_tgt_pc);
                half2 b_y = diff_tgt_pc_y * fast_h2rcp(len_diff_tgt_pc);
                half2 a_dot_b = a_x * b_x + a_y * b_y;
                //float2 a_dot_b_float = __half22float2(a_dot_b);
                // Clamp the cosine into [-1, 1] lane by lane before acos.
                if (a_dot_b.x > one) {
                    a_dot_b.x = one ;
                }
                if (a_dot_b.x < minus_one) {
                    a_dot_b.x = minus_one ;
                }
                if (a_dot_b.y > one) {
                    a_dot_b.y = one ;
                }
                if (a_dot_b.y < minus_one) {
                    a_dot_b.y = minus_one ;
                }
                /*
                if (a_dot_b > 1.f)
                    a_dot_b = 1.f;
                else if (a_dot_b < -1.f)
                    a_dot_b = -1.f;
                */
                //float2 a_dot_b_float = __half22float2(a_dot_b);
                //half2 angle =__floats2half2_rn (acosf(a_dot_b_float.x) * (180.f / PI), acosf(a_dot_b_float.x) * (180.f / PI));
                //angle.x = acosf(a_dot_b_float.x) * (180.f / PI);
                //angle.y = acosf(a_dot_b_float.y) * (180.f / PI);
                // Angle in degrees (57.29578 = 180 / pi).
                half2 angle = fast_h2acos(a_dot_b) * 57.29578;//(180.f / PI);
                // Determine angle direction
                half2 direction = a_x * b_y - a_y * b_x; // sign of the 2-D cross product
                if (direction.x < zero)
                    angle.x = -angle.x ;
                if (direction.y < zero)
                    angle.y = -angle.y;
                // Make the result look more natural (these checks may be omitted)
//              if (angle > 30.f)
//                  angle = 30.f;
//              else if (angle < -30.f)
//                  angle = -30.f;
                // Save angle
                angle_out[iter - 1] = angle;
                // Accumulate joint angles along the chain (prefix sum).
                for (int i = 0; i < NUM_JOINTS; i++)
                {
                    if(i < NUM_JOINTS - 1)
                    {
                        angle_out[i+1] += angle_out[i];
                        //angle_out[i+1].y += angle_out[i].y;
                    }
                }
            }// loop NUM_JOINTS
        }// loop 1k
        // Unpack the two lanes back to float and scatter the results for
        // both targets handled by this thread.
        float2 angle_0 = __half22float2(angle_out[0]);
        float2 angle_1 = __half22float2(angle_out[1]);
        float2 angle_2 = __half22float2(angle_out[2]);
        angles[idx * NUM_JOINTS + 0] = angle_0.x;
        angles[idx * NUM_JOINTS + 1] = angle_1.x;
        angles[idx * NUM_JOINTS + 2] = angle_2.x;
        angles[(idx+1) * NUM_JOINTS + 0] = angle_0.y;
        angles[(idx+1) * NUM_JOINTS + 1] = angle_1.y;
        angles[(idx+1) * NUM_JOINTS + 2] = angle_2.y;
    }
    } //end if(threadIdx.x<512/2)
    else return; // upper half-block idles: its targets were fused above
    }
    else {
    // Exact single-precision path: one target per thread.
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    int idx = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
    if(idx < size)
    {
//      float parrotInput[2];
//      float parrotOutput[3];
        float angle_out[NUM_JOINTS];
        for(int i = 0; i < NUM_JOINTS; i++)
        {
            angle_out[i] = 0.0;
        }
        // NOTE(review): max_err/err are unused here as well — fixed
        // MAX_LOOP iterations, no early exit.
        float max_err = err_thresh * (float)(NUM_JOINTS);
        float err = max_err + 1.f; // initialize error to something greater than error threshold
        /* parrot not used
        parrotInput[0] = xTarget_in[idx];
        parrotInput[1] = yTarget_in[idx];
#pragma parrot(input, "invkin_kernel", [2]<-1.0; 1.0>parrotInput)
        */
        //float max_err = err_thresh * (float)(NUM_JOINTS);
        //float err = max_err + 1.f;
        // Initialize x and y data: joints start on the x axis.
        float xData[NUM_JOINTS_P1];
        float yData[NUM_JOINTS_P1];
        for (int i = 0 ; i < NUM_JOINTS_P1; i++)
        {
            xData[i] = i;
            yData[i] = 0.f;
        }
        for(int curr_loop = 0; curr_loop < MAX_LOOP; curr_loop++)
        {
            // Cyclic coordinate descent from the tip inward.
            for (int iter = NUM_JOINTS; iter > 0; iter--)
            {
                float pe_x = xData[NUM_JOINTS];
                float pe_y = yData[NUM_JOINTS];
                float pc_x = xData[iter-1];
                float pc_y = yData[iter-1];
                float diff_pe_pc_x = pe_x - pc_x;
                float diff_pe_pc_y = pe_y - pc_y;
                float diff_tgt_pc_x = xTarget_in[idx] - pc_x;
                float diff_tgt_pc_y = yTarget_in[idx] - pc_y;
                float len_diff_pe_pc = sqrt(diff_pe_pc_x * diff_pe_pc_x + diff_pe_pc_y * diff_pe_pc_y);
                float len_diff_tgt_pc = sqrt(diff_tgt_pc_x * diff_tgt_pc_x + diff_tgt_pc_y * diff_tgt_pc_y);
                // Unit vectors joint->effector (a) and joint->target (b).
                float a_x = diff_pe_pc_x / len_diff_pe_pc;
                float a_y = diff_pe_pc_y / len_diff_pe_pc;
                float b_x = diff_tgt_pc_x / len_diff_tgt_pc;
                float b_y = diff_tgt_pc_y / len_diff_tgt_pc;
                float a_dot_b = a_x * b_x + a_y * b_y;
                // Clamp the cosine into [-1, 1] before acos.
                if (a_dot_b > 1.f)
                    a_dot_b = 1.f;
                else if (a_dot_b < -1.f)
                    a_dot_b = -1.f;
                float angle = acos(a_dot_b) * (180.f / PI);
                // Determine angle direction
                float direction = a_x * b_y - a_y * b_x; // sign of the 2-D cross product
                if (direction < 0.f)
                    angle = -angle;
                // Make the result look more natural (these checks may be omitted)
//              if (angle > 30.f)
//                  angle = 30.f;
//              else if (angle < -30.f)
//                  angle = -30.f;
                // Save angle
                angle_out[iter - 1] = angle;
                // Accumulate joint angles along the chain (prefix sum).
                for (int i = 0; i < NUM_JOINTS; i++)
                {
                    if(i < NUM_JOINTS - 1)
                    {
                        angle_out[i+1] += angle_out[i];
                    }
                }
            }
        }
        /* parrot : not used
        parrotOutput[0] = angle_out[0] / 30.0;
        parrotOutput[1] = angle_out[1] / 30.0;
        parrotOutput[2] = angle_out[2] / 30.0;
#pragma parrot(output, "invkin_kernel", [3]<-1.0; 1.0>parrotOutput)
        angle_out[0] = parrotOutput[0] * 30.0;
        angle_out[1] = parrotOutput[1] * 30.0;
        angle_out[2] = parrotOutput[2] * 30.0;
        */
        angles[idx * NUM_JOINTS + 0] = angle_out[0];
        angles[idx * NUM_JOINTS + 1] = angle_out[1];
        angles[idx * NUM_JOINTS + 2] = angle_out[2];
    }
    }
}
/** Driver: reads (x, y) target points from the input file, solves the
 *  inverse kinematics for all of them with invkin_kernel, and writes
 *  "x y angle0 angle1 angle2" per target to the output file.
 *  Usage: ./invkin.out <input file> <output file> <error threshold> <speed>
 *  NOTE(review): the launch uses dimGrid = data_size / 512, which both
 *  truncates (tail elements beyond a multiple of 512 would be skipped)
 *  and feeds the kernel's half2 path, which also reads idx+1 — the input
 *  size is assumed to be a multiple of 512.  Confirm with the data files. */
int main(int argc, char* argv[])
{
    if(argc != 5)
    {
        std::cerr << "Usage: ./invkin.out <input file coefficients> <output file> <error threshold> <speed>" << std::endl;
        exit(EXIT_FAILURE);
    }
    float* xTarget_in_h;
    float* yTarget_in_h;
    float* angle_out_h;
    hipError_t cudaStatus;
    int data_size = 0;
    // process the files
    ifstream coordinate_in_file (argv[1]);
    ofstream angle_out_file (argv[2]);
    float err_thresh = atof(argv[3]);
    int speed = atoi(argv[4]);
    if(coordinate_in_file.is_open())
    {
        coordinate_in_file >> data_size;
        std::cout << "# Data Size = " << data_size << std::endl;
    }
    else
    {
        // Previously execution fell through with data_size == 0 and
        // launched a zero-block grid; fail fast instead.
        std::cerr << "Could not open input file: " << argv[1] << std::endl;
        exit(EXIT_FAILURE);
    }
    std::cout << "# Speed = " << speed << std::endl;
    // allocate the host buffers
    xTarget_in_h = new (nothrow) float[data_size];
    if(xTarget_in_h == NULL)
    {
        std::cerr << "Memory allocation fails!!!" << std::endl;
        exit(EXIT_FAILURE);
    }
    yTarget_in_h = new (nothrow) float[data_size];
    if(yTarget_in_h == NULL)
    {
        std::cerr << "Memory allocation fails!!!" << std::endl;
        exit(EXIT_FAILURE);
    }
    angle_out_h = new (nothrow) float[data_size*NUM_JOINTS];
    if(angle_out_h == NULL)
    {
        std::cerr << "Memory allocation fails!!!" << std::endl;
        exit(EXIT_FAILURE);
    }
    // Prepare timing events
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // read the targets and zero the output buffer
    float xTarget_tmp, yTarget_tmp;
    int coeff_index = 0;
    while(coeff_index < data_size)
    {
        coordinate_in_file >> xTarget_tmp >> yTarget_tmp;
        for(int i = 0; i < NUM_JOINTS ; i++)
        {
            angle_out_h[coeff_index * NUM_JOINTS + i] = 0.0;
        }
        xTarget_in_h[coeff_index] = xTarget_tmp;
        yTarget_in_h[coeff_index++] = yTarget_tmp;
    }
    std::cout << "# Coordinates are read from file..." << std::endl;
    // device-side buffers
    float *xTarget_in_d,
          *yTarget_in_d;
    float *angle_out_d;
    hipMalloc((void**) &xTarget_in_d, data_size * sizeof(float));
    hipMalloc((void**) &yTarget_in_d, data_size * sizeof(float));
    hipMalloc((void**) &angle_out_d, data_size * NUM_JOINTS * sizeof(float));
    std::cout << "# Memory allocation on GPU is done..." << std::endl;
    hipMemcpy(xTarget_in_d, xTarget_in_h, data_size * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(yTarget_in_d, yTarget_in_h, data_size * sizeof(float), hipMemcpyHostToDevice);
    std::cout << "# Data are transfered to GPU..." << std::endl;
    dim3 dimBlock ( 512, 1 );
    dim3 dimGrid ( data_size / 512, 1 );
    hipEventRecord(start, 0);
#pragma parrot.start("invkin_kernel")
    hipLaunchKernelGGL(( invkin_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, xTarget_in_d, yTarget_in_d, angle_out_d, data_size, err_thresh,speed);
#pragma parrot.end("invkin_kernel")
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        std::cout << "Something was wrong! Error code: " << cudaStatus << std::endl;
    }
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    std::cout << "# Elapsed Time in `nrpoly3` kernel = " << elapsedTime << std::endl;
    std::cout << "# GPU computation is done ..." << std::endl;
    hipMemcpy(angle_out_h, angle_out_d, data_size * NUM_JOINTS * sizeof(float), hipMemcpyDeviceToHost);
    // write "x y angle0 angle1 angle2" per target
    for(int i = 0; i < data_size; i++)
    {
        angle_out_file << xTarget_in_h[i] << " " << yTarget_in_h[i] << " ";
        //compare output, not need to store this
        for(int j = 0 ; j < NUM_JOINTS; j++)
        {
            angle_out_file << angle_out_h[i * NUM_JOINTS + j] << " ";
        }
        angle_out_file << std::endl;
    }
    // close files
    coordinate_in_file.close();
    angle_out_file.close();
    // de-allocate host memory
    delete[] xTarget_in_h;
    delete[] yTarget_in_h;
    delete[] angle_out_h;
    // de-allocate device memory
    hipFree(xTarget_in_d);
    hipFree(yTarget_in_d);
    hipFree(angle_out_d);
    std::cout << "Thank you..." << std::endl;
}
| a0f52a375a8c2559eba1edc1c098149128865ba8.cu | // Designed by: Amir Yazdanbakhsh
// Date: March 26th - 2015
// Alternative Computing Technologies Lab.
// Georgia Institute of Technology
#include "stdlib.h"
#include <fstream>
#include <iostream>
#include <cstddef>
// Cuda Libraries
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <cuda_fp16.h>
//#include "../../../include/fast_math.cuh"
//#define SLOW_MATH
#ifdef SLOW_MATH
#include "../../../include/cuda_math.cuh"
#else //my approximate math lib
#include "../../../include/fast_math.cuh"
#endif
#define SCATTER
//#define SPEED 1024
#define MAX_LOOP 1000
#define MAX_DIFF 0.15f
#define NUM_JOINTS 3
#define PI 3.14159265358979f
#define NUM_JOINTS_P1 (NUM_JOINTS + 1)
using namespace std;
__global__ void invkin_kernel(float *xTarget_in, float *yTarget_in, float *angles, int size, float err_thresh, int speed)
{
#ifdef SCATTER
if(blockIdx.x %100 < speed){
#else
if(blockIdx.x < speed){
#endif
// if(blockIdx.x < speed){
if(threadIdx.x<blockDim.x/2){ //fuse threads
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int idx = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + 2*threadIdx.x;
if(idx < size)
{
half2 angle_out[NUM_JOINTS];
half zero = 0.0f;
half one = 1.f;
half minus_one = -1.f;
for(int i = 0; i < NUM_JOINTS; i++)
{
angle_out[i] = __float2half2_rn(0.0);
//angle_out[i].x = 0.f;
//angle_out[i].y = 0.f;
}
half max_err = err_thresh * (float)(NUM_JOINTS);
half err = max_err + one; // initialize error to something greater than error threshold
// Initialize x and y data
half2 xData[NUM_JOINTS_P1];
half2 yData[NUM_JOINTS_P1];
for (int i = 0 ; i < NUM_JOINTS_P1; i++)
{
xData[i] = __float2half2_rn((float)i);
yData[i] = __float2half2_rn(0.f);
}
half2 xTarget_in_temp = __floats2half2_rn(xTarget_in[idx],xTarget_in[idx+1]);
half2 yTarget_in_temp = __floats2half2_rn(yTarget_in[idx],yTarget_in[idx+1]);
//half minus_one = -1.0f;
half2 pe_x = xData[NUM_JOINTS];
half2 pe_y = yData[NUM_JOINTS];
for(int curr_loop = 0; curr_loop < MAX_LOOP; curr_loop++)
{
for (int iter = NUM_JOINTS; iter > 0; iter--)
{
half2 pc_x = xData[iter-1];
half2 pc_y = yData[iter-1];
half2 diff_pe_pc_x = pe_x - pc_x;
half2 diff_pe_pc_y = pe_y - pc_y;
// half2 diff_tgt_pc_x = xTarget_in[idx] - pc_x;
// half2 diff_tgt_pc_y = yTarget_in[idx] - pc_y;
half2 diff_tgt_pc_x = xTarget_in_temp - pc_x;
half2 diff_tgt_pc_y = yTarget_in_temp - pc_y;
half2 len_diff_pe_pc = fast_h2sqrt (diff_pe_pc_x * diff_pe_pc_x + diff_pe_pc_y * diff_pe_pc_y);
half2 len_diff_tgt_pc = fast_h2sqrt (diff_tgt_pc_x * diff_tgt_pc_x + diff_tgt_pc_y * diff_tgt_pc_y);
half2 a_x = diff_pe_pc_x * fast_h2rcp(len_diff_pe_pc);
half2 a_y = diff_pe_pc_y * fast_h2rcp(len_diff_pe_pc);
half2 b_x = diff_tgt_pc_x * fast_h2rcp(len_diff_tgt_pc);
half2 b_y = diff_tgt_pc_y * fast_h2rcp(len_diff_tgt_pc);
half2 a_dot_b = a_x * b_x + a_y * b_y;
//float2 a_dot_b_float = __half22float2(a_dot_b);
if (a_dot_b.x > one) {
a_dot_b.x = one ;
}
if (a_dot_b.x < minus_one) {
a_dot_b.x = minus_one ;
}
if (a_dot_b.y > one) {
a_dot_b.y = one ;
}
if (a_dot_b.y < minus_one) {
a_dot_b.y = minus_one ;
}
/*
if (a_dot_b > 1.f)
a_dot_b = 1.f;
else if (a_dot_b < -1.f)
a_dot_b = -1.f;
*/
//float2 a_dot_b_float = __half22float2(a_dot_b);
//half2 angle =__floats2half2_rn (acosf(a_dot_b_float.x) * (180.f / PI), acosf(a_dot_b_float.x) * (180.f / PI));
//angle.x = acosf(a_dot_b_float.x) * (180.f / PI);
//angle.y = acosf(a_dot_b_float.y) * (180.f / PI);
half2 angle = fast_h2acos(a_dot_b) * 57.29578;//(180.f / PI);
// Determine angle direction
half2 direction = a_x * b_y - a_y * b_x;
if (direction.x < zero)
angle.x = -angle.x ;
if (direction.y < zero)
angle.y = -angle.y;
// Make the result look more natural (these checks may be omitted)
// if (angle > 30.f)
// angle = 30.f;
// else if (angle < -30.f)
// angle = -30.f;
// Save angle
angle_out[iter - 1] = angle;
for (int i = 0; i < NUM_JOINTS; i++)
{
if(i < NUM_JOINTS - 1)
{
angle_out[i+1] += angle_out[i];
//angle_out[i+1].y += angle_out[i].y;
}
}
}// loop NUM_JOINTS
}// loop 1k
float2 angle_0 = __half22float2(angle_out[0]);
float2 angle_1 = __half22float2(angle_out[1]);
float2 angle_2 = __half22float2(angle_out[2]);
angles[idx * NUM_JOINTS + 0] = angle_0.x;
angles[idx * NUM_JOINTS + 1] = angle_1.x;
angles[idx * NUM_JOINTS + 2] = angle_2.x;
angles[(idx+1) * NUM_JOINTS + 0] = angle_0.y;
angles[(idx+1) * NUM_JOINTS + 1] = angle_1.y;
angles[(idx+1) * NUM_JOINTS + 2] = angle_2.y;
}
} //end if(threadIdx.x<512/2)
else return;
}
else {
//doing float computation
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int idx = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(idx < size)
{
// float parrotInput[2];
// float parrotOutput[3];
float angle_out[NUM_JOINTS];
for(int i = 0; i < NUM_JOINTS; i++)
{
angle_out[i] = 0.0;
}
float max_err = err_thresh * (float)(NUM_JOINTS);
float err = max_err + 1.f; // initialize error to something greater than error threshold
/* parrot not used
parrotInput[0] = xTarget_in[idx];
parrotInput[1] = yTarget_in[idx];
#pragma parrot(input, "invkin_kernel", [2]<-1.0; 1.0>parrotInput)
*/
//float max_err = err_thresh * (float)(NUM_JOINTS);
//float err = max_err + 1.f;
// Initialize x and y data
float xData[NUM_JOINTS_P1];
float yData[NUM_JOINTS_P1];
for (int i = 0 ; i < NUM_JOINTS_P1; i++)
{
xData[i] = i;
yData[i] = 0.f;
}
for(int curr_loop = 0; curr_loop < MAX_LOOP; curr_loop++)
{
for (int iter = NUM_JOINTS; iter > 0; iter--)
{
float pe_x = xData[NUM_JOINTS];
float pe_y = yData[NUM_JOINTS];
float pc_x = xData[iter-1];
float pc_y = yData[iter-1];
float diff_pe_pc_x = pe_x - pc_x;
float diff_pe_pc_y = pe_y - pc_y;
float diff_tgt_pc_x = xTarget_in[idx] - pc_x;
float diff_tgt_pc_y = yTarget_in[idx] - pc_y;
float len_diff_pe_pc = sqrt(diff_pe_pc_x * diff_pe_pc_x + diff_pe_pc_y * diff_pe_pc_y);
float len_diff_tgt_pc = sqrt(diff_tgt_pc_x * diff_tgt_pc_x + diff_tgt_pc_y * diff_tgt_pc_y);
float a_x = diff_pe_pc_x / len_diff_pe_pc;
float a_y = diff_pe_pc_y / len_diff_pe_pc;
float b_x = diff_tgt_pc_x / len_diff_tgt_pc;
float b_y = diff_tgt_pc_y / len_diff_tgt_pc;
float a_dot_b = a_x * b_x + a_y * b_y;
if (a_dot_b > 1.f)
a_dot_b = 1.f;
else if (a_dot_b < -1.f)
a_dot_b = -1.f;
float angle = acos(a_dot_b) * (180.f / PI);
// Determine angle direction
float direction = a_x * b_y - a_y * b_x;
if (direction < 0.f)
angle = -angle;
// Make the result look more natural (these checks may be omitted)
// if (angle > 30.f)
// angle = 30.f;
// else if (angle < -30.f)
// angle = -30.f;
// Save angle
angle_out[iter - 1] = angle;
for (int i = 0; i < NUM_JOINTS; i++)
{
if(i < NUM_JOINTS - 1)
{
angle_out[i+1] += angle_out[i];
}
}
}
}
/* parrot : not used
parrotOutput[0] = angle_out[0] / 30.0;
parrotOutput[1] = angle_out[1] / 30.0;
parrotOutput[2] = angle_out[2] / 30.0;
#pragma parrot(output, "invkin_kernel", [3]<-1.0; 1.0>parrotOutput)
angle_out[0] = parrotOutput[0] * 30.0;
angle_out[1] = parrotOutput[1] * 30.0;
angle_out[2] = parrotOutput[2] * 30.0;
*/
angles[idx * NUM_JOINTS + 0] = angle_out[0];
angles[idx * NUM_JOINTS + 1] = angle_out[1];
angles[idx * NUM_JOINTS + 2] = angle_out[2];
}
}
}
int main(int argc, char* argv[])
{
if(argc != 5)
{
std::cerr << "Usage: ./invkin.out <input file coefficients> <output file> <error threshold> <speed>" << std::endl;
exit(EXIT_FAILURE);
}
float* xTarget_in_h;
float* yTarget_in_h;
float* angle_out_h;
cudaError_t cudaStatus;
int data_size = 0;
// process the files
ifstream coordinate_in_file (argv[1]);
ofstream angle_out_file (argv[2]);
float err_thresh = atof(argv[3]);
int speed = atoi(argv[4]);
if(coordinate_in_file.is_open())
{
coordinate_in_file >> data_size;
std::cout << "# Data Size = " << data_size << std::endl;
}
std::cout << "# Speed = " << speed << std::endl;
// allocate the memory
xTarget_in_h = new (nothrow) float[data_size];
if(xTarget_in_h == NULL)
{
std::cerr << "Memory allocation fails!!!" << std::endl;
exit(EXIT_FAILURE);
}
yTarget_in_h = new (nothrow) float[data_size];
if(yTarget_in_h == NULL)
{
std::cerr << "Memory allocation fails!!!" << std::endl;
exit(EXIT_FAILURE);
}
angle_out_h = new (nothrow) float[data_size*NUM_JOINTS];
if(angle_out_h == NULL)
{
std::cerr << "Memory allocation fails!!!" << std::endl;
exit(EXIT_FAILURE);
}
// Prepare
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// add data to the arrays
float xTarget_tmp, yTarget_tmp;
int coeff_index = 0;
while(coeff_index < data_size)
{
coordinate_in_file >> xTarget_tmp >> yTarget_tmp;
for(int i = 0; i < NUM_JOINTS ; i++)
{
angle_out_h[coeff_index * NUM_JOINTS + i] = 0.0;
}
xTarget_in_h[coeff_index] = xTarget_tmp;
yTarget_in_h[coeff_index++] = yTarget_tmp;
}
std::cout << "# Coordinates are read from file..." << std::endl;
// memory allocations on the host
float *xTarget_in_d,
*yTarget_in_d;
float *angle_out_d;
cudaMalloc((void**) &xTarget_in_d, data_size * sizeof(float));
cudaMalloc((void**) &yTarget_in_d, data_size * sizeof(float));
cudaMalloc((void**) &angle_out_d, data_size * NUM_JOINTS * sizeof(float));
std::cout << "# Memory allocation on GPU is done..." << std::endl;
cudaMemcpy(xTarget_in_d, xTarget_in_h, data_size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(yTarget_in_d, yTarget_in_h, data_size * sizeof(float), cudaMemcpyHostToDevice);
std::cout << "# Data are transfered to GPU..." << std::endl;
dim3 dimBlock ( 512, 1 );
dim3 dimGrid ( data_size / 512, 1 );
cudaEventRecord(start, 0);
#pragma parrot.start("invkin_kernel")
invkin_kernel<<<dimGrid, dimBlock>>>(xTarget_in_d, yTarget_in_d, angle_out_d, data_size, err_thresh,speed);
#pragma parrot.end("invkin_kernel")
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
std::cout << "Something was wrong! Error code: " << cudaStatus << std::endl;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
std::cout << "# Elapsed Time in `nrpoly3` kernel = " << elapsedTime << std::endl;
std::cout << "# GPU computation is done ..." << std::endl;
cudaMemcpy(angle_out_h, angle_out_d, data_size * NUM_JOINTS * sizeof(float), cudaMemcpyDeviceToHost);
for(int i = 0; i < data_size; i++)
{
angle_out_file << xTarget_in_h[i] << " " << yTarget_in_h[i] << " ";
//compare output, not need to store this
for(int j = 0 ; j < NUM_JOINTS; j++)
{
angle_out_file << angle_out_h[i * NUM_JOINTS + j] << " ";
}
angle_out_file << std::endl;
}
// close files
coordinate_in_file.close();
angle_out_file.close();
// de-allocate the memory
delete[] xTarget_in_h;
delete[] yTarget_in_h;
delete[] angle_out_h;
// de-allocate cuda memory
cudaFree(xTarget_in_d);
cudaFree(yTarget_in_d);
cudaFree(angle_out_d);
std::cout << "Thank you..." << std::endl;
}
|
d06e9d92b4f2ffe954036bebf7d2a16f9b6d2a6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ##########################################################
// By Eugene Ch'ng | www.complexity.io
// Email: genechng@gmail.com
// ----------------------------------------------------------
// The ERC 'Lost Frontiers' Project
// Development for the Parallelisation of ABM Simulation
// ----------------------------------------------------------
// A Basic CUDA Application for ABM Development
//
// Filling arrays with block generated IDs
// This application uses both blocks and threads
// to generate arbitrarily unsigned long numbers
//
// Using a combination of blocks and threads for this GPU (GTX 750M)
// we are able to generate a number with 12 zeros - about 4.3 trillion
// This should be sufficient for a large population of agents!
// Even larger numbers can be generated using dim3 types for blocks and threads
// or by incrementing thread IDs after a kernel operation (discussed later)
//
// The GPU (GTX 750M) used for this development has a limit below:
// blockIdx = {0 ... 65535}
// blockDim = 65535
// threadIdx = {0 ... 1024}
// 65535 * 65535 * 1024 = 4,397,912,294,400
//
// 4 trillion thread IDs should be sufficient for ABM simulation
// But there are also alternate ways to increase IDs for arbitrarily long arrays
// Besides limits with blocks*threads, memory is also an issue (see below)
//
// ----------------------------------------------------------
// How to compile:
// nvcc <filename>.cu -o <outputfile>
// ##########################################################
#include <stdio.h>
#include <iostream>
using namespace std;
// N = 0.22 billion (1 billion = 9 zeros)
// here we are able to reach 220,000,000+ unsigned integers with the memory
// available (GTX 750M device global memory = 2,097,086,464 = 2GB)
// unsigned long int is 8 byte, 8 x 220,000,000 = 1,760,000,000 bytes = 1.76GB
// this leaves some memory for processing the kernel code.
// putting anything larger will yield invalid argument error using hipMemcpy
// #define N 500000000
const unsigned long int N = 220000000; //200000000
// THREADMAX is measured from using 01.DeviceInfo for GTX 750M
#define THREADMAX 1024
// --------------------- CUDA KERNELS
// Fill arrays with device thread IDs
__global__ void fillArray(unsigned long int *dev_arr)
{
// we allow blocks and threads to cooperate in generating unsigned long numbers
// the code below linearise the block and threads into tid used for unsigned long arrays
// we were previously limited by the thread (1024) and blocks (65535) available
// to the current GPU (NVIDIA GTX 750M) used for preparing this code
// using the code below, we can generate
// threadIdx.x and blockIdx.x is incremental
// blockDim.x is constant calculated with (N + (THREADMAX-1)/THREADMAX) = 488282.25
unsigned long int tid = threadIdx.x + blockIdx.x * blockDim.x;
// assign the dev_array element with tid
// until it reaches N
if(tid < N)
{
dev_arr[tid] = tid;
}
}
int main(int argc, const char * argv[])
{
cout << "------------ initialising device and host arrays" << endl;
// declaring the array on the stack would cause a segmentation fault
// as program stack has a limit
// otherwise, declare the array "unsigned long int arr[N]" outside main
// to assign it as a global variable - globals are in the heap
// unsigned long int arr[N]; // host variable
// here we are instantiating the array on the heap for long int within main
unsigned long int *arr;
arr = (unsigned long int*)malloc(N*sizeof(unsigned long int));
// cout << "-- passed malloc" << endl;
unsigned long int *dev_arr; // device variable
for(int i=0; i<N; i++)
{
// cout << "-- start: " << i << endl;
arr[i] = 0;
// commented so we don't need to print all the way up to large values
// printf("host arr[%d] = %d\n", i, arr[i]);
}
size_t s = sizeof(unsigned long int);
cout << "size of arr: " << s*N << " bytes" << endl;
cout << "** the last second item of arr[N-1] is:" << "" << arr[N-1] << endl;
cout << "------------ allocate device memory dev_arr" << endl;
// allocating a device array to copy to
// note the N * sizeof(int)
hipMalloc( (void**)&dev_arr, N * sizeof(unsigned long int) );
cout << "------------ copy arr to dev_arr" << endl;
// copying host array to device
// note the N * sizeof(int)
size_t size = N * sizeof(unsigned long int);
cout << size << endl;
hipMemcpy(dev_arr, arr, size, hipMemcpyHostToDevice);
cout << "------------ calling kernel fillArray" << endl;
// What's happening here?
// what we are doing here is to determine the number of blocks needed, in
// combination with the thread, to generate thread IDs larger than N
// Let's say we are using threads = 128, with N = 1000 elements
// (N + (threads-1)/threads = 8.8
// we will need 8.8 blocks to generate a number > N, adequate for the num ber of
// thread IDs needed for an array of N size (8.8 * 128 = 1126.4)
hipLaunchKernelGGL(( fillArray), dim3((unsigned long int)(N + (THREADMAX-1))/THREADMAX),dim3(THREADMAX), 0, 0, dev_arr);
cout << "------------ copy dev_arr to arr" << endl;
// note the N * sizeof(int)
hipMemcpy(arr, dev_arr, s, hipMemcpyDeviceToHost);
cout << "------------ printing changed host array" << endl;
for(unsigned long int i=0; i<N; i++)
{
// we want to print only 0-9 and the last 10 values of N
if(i < 10 || i > N-10)
printf("** changed host arr[%ld] = %ld\n", i, arr[i]);
}
// ---- FREE ALLOCATED KERNEL MEMORY
hipFree( dev_arr );
free(arr);
return 0;
}
| d06e9d92b4f2ffe954036bebf7d2a16f9b6d2a6c.cu | // ##########################################################
// By Eugene Ch'ng | www.complexity.io
// Email: genechng@gmail.com
// ----------------------------------------------------------
// The ERC 'Lost Frontiers' Project
// Development for the Parallelisation of ABM Simulation
// ----------------------------------------------------------
// A Basic CUDA Application for ABM Development
//
// Filling arrays with block generated IDs
// This application uses both blocks and threads
// to generate arbitrarily unsigned long numbers
//
// Using a combination of blocks and threads for this GPU (GTX 750M)
// we are able to generate a number with 12 zeros - about 4.3 trillion
// This should be sufficient for a large population of agents!
// Even larger numbers can be generated using dim3 types for blocks and threads
// or by incrementing thread IDs after a kernel operation (discussed later)
//
// The GPU (GTX 750M) used for this development has a limit below:
// blockIdx = {0 ... 65535}
// blockDim = 65535
// threadIdx = {0 ... 1024}
// 65535 * 65535 * 1024 = 4,397,912,294,400
//
// 4 trillion thread IDs should be sufficient for ABM simulation
// But there are also alternate ways to increase IDs for arbitrarily long arrays
// Besides limits with blocks*threads, memory is also an issue (see below)
//
// ----------------------------------------------------------
// How to compile:
// nvcc <filename>.cu -o <outputfile>
// ##########################################################
#include <stdio.h>
#include <iostream>
using namespace std;
// N = 0.22 billion (1 billion = 9 zeros)
// here we are able to reach 220,000,000+ unsigned integers with the memory
// available (GTX 750M device global memory = 2,097,086,464 = 2GB)
// unsigned long int is 8 byte, 8 x 220,000,000 = 1,760,000,000 bytes = 1.76GB
// this leaves some memory for processing the kernel code.
// putting anything larger will yield invalid argument error using cudaMemcpy
// #define N 500000000
const unsigned long int N = 220000000; //200000000
// THREADMAX is measured from using 01.DeviceInfo for GTX 750M
#define THREADMAX 1024
// --------------------- CUDA KERNELS
// Fill arrays with device thread IDs
__global__ void fillArray(unsigned long int *dev_arr)
{
// we allow blocks and threads to cooperate in generating unsigned long numbers
// the code below linearise the block and threads into tid used for unsigned long arrays
// we were previously limited by the thread (1024) and blocks (65535) available
// to the current GPU (NVIDIA GTX 750M) used for preparing this code
// using the code below, we can generate
// threadIdx.x and blockIdx.x is incremental
// blockDim.x is constant calculated with (N + (THREADMAX-1)/THREADMAX) = 488282.25
unsigned long int tid = threadIdx.x + blockIdx.x * blockDim.x;
// assign the dev_array element with tid
// until it reaches N
if(tid < N)
{
dev_arr[tid] = tid;
}
}
int main(int argc, const char * argv[])
{
cout << "------------ initialising device and host arrays" << endl;
// declaring the array on the stack would cause a segmentation fault
// as program stack has a limit
// otherwise, declare the array "unsigned long int arr[N]" outside main
// to assign it as a global variable - globals are in the heap
// unsigned long int arr[N]; // host variable
// here we are instantiating the array on the heap for long int within main
unsigned long int *arr;
arr = (unsigned long int*)malloc(N*sizeof(unsigned long int));
// cout << "-- passed malloc" << endl;
unsigned long int *dev_arr; // device variable
for(int i=0; i<N; i++)
{
// cout << "-- start: " << i << endl;
arr[i] = 0;
// commented so we don't need to print all the way up to large values
// printf("host arr[%d] = %d\n", i, arr[i]);
}
size_t s = sizeof(unsigned long int);
cout << "size of arr: " << s*N << " bytes" << endl;
cout << "** the last second item of arr[N-1] is:" << "" << arr[N-1] << endl;
cout << "------------ allocate device memory dev_arr" << endl;
// allocating a device array to copy to
// note the N * sizeof(int)
cudaMalloc( (void**)&dev_arr, N * sizeof(unsigned long int) );
cout << "------------ copy arr to dev_arr" << endl;
// copying host array to device
// note the N * sizeof(int)
size_t size = N * sizeof(unsigned long int);
cout << size << endl;
cudaMemcpy(dev_arr, arr, size, cudaMemcpyHostToDevice);
cout << "------------ calling kernel fillArray" << endl;
// What's happening here?
// what we are doing here is to determine the number of blocks needed, in
// combination with the thread, to generate thread IDs larger than N
// Let's say we are using threads = 128, with N = 1000 elements
// (N + (threads-1)/threads = 8.8
// we will need 8.8 blocks to generate a number > N, adequate for the num ber of
// thread IDs needed for an array of N size (8.8 * 128 = 1126.4)
fillArray<<<(unsigned long int)(N + (THREADMAX-1))/THREADMAX,THREADMAX>>>(dev_arr);
cout << "------------ copy dev_arr to arr" << endl;
// note the N * sizeof(int)
cudaMemcpy(arr, dev_arr, s, cudaMemcpyDeviceToHost);
cout << "------------ printing changed host array" << endl;
for(unsigned long int i=0; i<N; i++)
{
// we want to print only 0-9 and the last 10 values of N
if(i < 10 || i > N-10)
printf("** changed host arr[%ld] = %ld\n", i, arr[i]);
}
// ---- FREE ALLOCATED KERNEL MEMORY
cudaFree( dev_arr );
free(arr);
return 0;
}
|
818e638442f472e6c261c3cf6464f0863ab47a79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <petsc/private/cudavecimpl.h>
#include <../src/vec/is/sf/impls/basic/sfpack.h>
#include <mpi.h>
#include <nvshmem.h>
#include <nvshmemx.h>
PetscErrorCode PetscNvshmemInitializeCheck(void)
{
PetscFunctionBegin;
if (!PetscNvshmemInitialized) { /* Note NVSHMEM does not provide a routine to check whether it is initialized */
nvshmemx_init_attr_t attr;
attr.mpi_comm = &PETSC_COMM_WORLD;
PetscCall(PetscDeviceInitialize(PETSC_DEVICE_CUDA));
PetscCall(nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr));
PetscNvshmemInitialized = PETSC_TRUE;
PetscBeganNvshmem = PETSC_TRUE;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode PetscNvshmemMalloc(size_t size, void **ptr)
{
PetscFunctionBegin;
PetscCall(PetscNvshmemInitializeCheck());
*ptr = nvshmem_malloc(size);
PetscCheck(*ptr, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "nvshmem_malloc() failed to allocate %zu bytes", size);
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode PetscNvshmemCalloc(size_t size, void **ptr)
{
PetscFunctionBegin;
PetscCall(PetscNvshmemInitializeCheck());
*ptr = nvshmem_calloc(size, 1);
PetscCheck(*ptr, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "nvshmem_calloc() failed to allocate %zu bytes", size);
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode PetscNvshmemFree_Private(void *ptr)
{
PetscFunctionBegin;
nvshmem_free(ptr);
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode PetscNvshmemFinalize(void)
{
PetscFunctionBegin;
nvshmem_finalize();
PetscFunctionReturn(PETSC_SUCCESS);
}
/* Free nvshmem related fields in the SF */
PetscErrorCode PetscSFReset_Basic_NVSHMEM(PetscSF sf)
{
PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
PetscFunctionBegin;
PetscCall(PetscFree2(bas->leafsigdisp, bas->leafbufdisp));
PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, bas->leafbufdisp_d));
PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, bas->leafsigdisp_d));
PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, bas->iranks_d));
PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, bas->ioffset_d));
PetscCall(PetscFree2(sf->rootsigdisp, sf->rootbufdisp));
PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, sf->rootbufdisp_d));
PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, sf->rootsigdisp_d));
PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, sf->ranks_d));
PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, sf->roffset_d));
PetscFunctionReturn(PETSC_SUCCESS);
}
/* Set up NVSHMEM related fields for an SF of type SFBASIC (only after PetscSFSetup_Basic() already set up dependent fields) */
static PetscErrorCode PetscSFSetUp_Basic_NVSHMEM(PetscSF sf)
{
hipError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
PetscInt i, nRemoteRootRanks, nRemoteLeafRanks;
PetscMPIInt tag;
MPI_Comm comm;
MPI_Request *rootreqs, *leafreqs;
PetscInt tmp, stmp[4], rtmp[4]; /* tmps for send/recv buffers */
PetscFunctionBegin;
PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
PetscCall(PetscObjectGetNewTag((PetscObject)sf, &tag));
nRemoteRootRanks = sf->nranks - sf->ndranks;
nRemoteLeafRanks = bas->niranks - bas->ndiranks;
sf->nRemoteRootRanks = nRemoteRootRanks;
bas->nRemoteLeafRanks = nRemoteLeafRanks;
PetscCall(PetscMalloc2(nRemoteLeafRanks, &rootreqs, nRemoteRootRanks, &leafreqs));
stmp[0] = nRemoteRootRanks;
stmp[1] = sf->leafbuflen[PETSCSF_REMOTE];
stmp[2] = nRemoteLeafRanks;
stmp[3] = bas->rootbuflen[PETSCSF_REMOTE];
PetscCall(MPIU_Allreduce(stmp, rtmp, 4, MPIU_INT, MPI_MAX, comm));
sf->nRemoteRootRanksMax = rtmp[0];
sf->leafbuflen_rmax = rtmp[1];
bas->nRemoteLeafRanksMax = rtmp[2];
bas->rootbuflen_rmax = rtmp[3];
/* Total four rounds of MPI communications to set up the nvshmem fields */
/* Root ranks to leaf ranks: send info about rootsigdisp[] and rootbufdisp[] */
PetscCall(PetscMalloc2(nRemoteRootRanks, &sf->rootsigdisp, nRemoteRootRanks, &sf->rootbufdisp));
for (i = 0; i < nRemoteRootRanks; i++) PetscCallMPI(MPI_Irecv(&sf->rootsigdisp[i], 1, MPIU_INT, sf->ranks[i + sf->ndranks], tag, comm, &leafreqs[i])); /* Leaves recv */
for (i = 0; i < nRemoteLeafRanks; i++) PetscCallMPI(MPI_Send(&i, 1, MPIU_INT, bas->iranks[i + bas->ndiranks], tag, comm)); /* Roots send. Note i changes, so we use MPI_Send. */
PetscCallMPI(MPI_Waitall(nRemoteRootRanks, leafreqs, MPI_STATUSES_IGNORE));
for (i = 0; i < nRemoteRootRanks; i++) PetscCallMPI(MPI_Irecv(&sf->rootbufdisp[i], 1, MPIU_INT, sf->ranks[i + sf->ndranks], tag, comm, &leafreqs[i])); /* Leaves recv */
for (i = 0; i < nRemoteLeafRanks; i++) {
tmp = bas->ioffset[i + bas->ndiranks] - bas->ioffset[bas->ndiranks];
PetscCallMPI(MPI_Send(&tmp, 1, MPIU_INT, bas->iranks[i + bas->ndiranks], tag, comm)); /* Roots send. Note tmp changes, so we use MPI_Send. */
}
PetscCallMPI(MPI_Waitall(nRemoteRootRanks, leafreqs, MPI_STATUSES_IGNORE));
PetscCallCUDA(hipMalloc((void **)&sf->rootbufdisp_d, nRemoteRootRanks * sizeof(PetscInt)));
PetscCallCUDA(hipMalloc((void **)&sf->rootsigdisp_d, nRemoteRootRanks * sizeof(PetscInt)));
PetscCallCUDA(hipMalloc((void **)&sf->ranks_d, nRemoteRootRanks * sizeof(PetscMPIInt)));
PetscCallCUDA(hipMalloc((void **)&sf->roffset_d, (nRemoteRootRanks + 1) * sizeof(PetscInt)));
PetscCallCUDA(hipMemcpyAsync(sf->rootbufdisp_d, sf->rootbufdisp, nRemoteRootRanks * sizeof(PetscInt), hipMemcpyHostToDevice, PetscDefaultCudaStream));
PetscCallCUDA(hipMemcpyAsync(sf->rootsigdisp_d, sf->rootsigdisp, nRemoteRootRanks * sizeof(PetscInt), hipMemcpyHostToDevice, PetscDefaultCudaStream));
PetscCallCUDA(hipMemcpyAsync(sf->ranks_d, sf->ranks + sf->ndranks, nRemoteRootRanks * sizeof(PetscMPIInt), hipMemcpyHostToDevice, PetscDefaultCudaStream));
PetscCallCUDA(hipMemcpyAsync(sf->roffset_d, sf->roffset + sf->ndranks, (nRemoteRootRanks + 1) * sizeof(PetscInt), hipMemcpyHostToDevice, PetscDefaultCudaStream));
/* Leaf ranks to root ranks: send info about leafsigdisp[] and leafbufdisp[] */
PetscCall(PetscMalloc2(nRemoteLeafRanks, &bas->leafsigdisp, nRemoteLeafRanks, &bas->leafbufdisp));
for (i = 0; i < nRemoteLeafRanks; i++) PetscCallMPI(MPI_Irecv(&bas->leafsigdisp[i], 1, MPIU_INT, bas->iranks[i + bas->ndiranks], tag, comm, &rootreqs[i]));
for (i = 0; i < nRemoteRootRanks; i++) PetscCallMPI(MPI_Send(&i, 1, MPIU_INT, sf->ranks[i + sf->ndranks], tag, comm));
PetscCallMPI(MPI_Waitall(nRemoteLeafRanks, rootreqs, MPI_STATUSES_IGNORE));
for (i = 0; i < nRemoteLeafRanks; i++) PetscCallMPI(MPI_Irecv(&bas->leafbufdisp[i], 1, MPIU_INT, bas->iranks[i + bas->ndiranks], tag, comm, &rootreqs[i]));
for (i = 0; i < nRemoteRootRanks; i++) {
tmp = sf->roffset[i + sf->ndranks] - sf->roffset[sf->ndranks];
PetscCallMPI(MPI_Send(&tmp, 1, MPIU_INT, sf->ranks[i + sf->ndranks], tag, comm));
}
PetscCallMPI(MPI_Waitall(nRemoteLeafRanks, rootreqs, MPI_STATUSES_IGNORE));
PetscCallCUDA(hipMalloc((void **)&bas->leafbufdisp_d, nRemoteLeafRanks * sizeof(PetscInt)));
PetscCallCUDA(hipMalloc((void **)&bas->leafsigdisp_d, nRemoteLeafRanks * sizeof(PetscInt)));
PetscCallCUDA(hipMalloc((void **)&bas->iranks_d, nRemoteLeafRanks * sizeof(PetscMPIInt)));
PetscCallCUDA(hipMalloc((void **)&bas->ioffset_d, (nRemoteLeafRanks + 1) * sizeof(PetscInt)));
PetscCallCUDA(hipMemcpyAsync(bas->leafbufdisp_d, bas->leafbufdisp, nRemoteLeafRanks * sizeof(PetscInt), hipMemcpyHostToDevice, PetscDefaultCudaStream));
PetscCallCUDA(hipMemcpyAsync(bas->leafsigdisp_d, bas->leafsigdisp, nRemoteLeafRanks * sizeof(PetscInt), hipMemcpyHostToDevice, PetscDefaultCudaStream));
PetscCallCUDA(hipMemcpyAsync(bas->iranks_d, bas->iranks + bas->ndiranks, nRemoteLeafRanks * sizeof(PetscMPIInt), hipMemcpyHostToDevice, PetscDefaultCudaStream));
PetscCallCUDA(hipMemcpyAsync(bas->ioffset_d, bas->ioffset + bas->ndiranks, (nRemoteLeafRanks + 1) * sizeof(PetscInt), hipMemcpyHostToDevice, PetscDefaultCudaStream));
PetscCall(PetscFree2(rootreqs, leafreqs));
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode PetscSFLinkNvshmemCheck(PetscSF sf, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, const void *leafdata, PetscBool *use_nvshmem)
{
MPI_Comm comm;
PetscBool isBasic;
PetscMPIInt result = MPI_UNEQUAL;
PetscFunctionBegin;
PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
/* Check if the sf is eligible for NVSHMEM, if we have not checked yet.
Note the check result <use_nvshmem> must be the same over comm, since an SFLink must be collectively either NVSHMEM or MPI.
*/
sf->checked_nvshmem_eligibility = PETSC_TRUE;
if (sf->use_nvshmem && !sf->checked_nvshmem_eligibility) {
/* Only use NVSHMEM for SFBASIC on PETSC_COMM_WORLD */
PetscCall(PetscObjectTypeCompare((PetscObject)sf, PETSCSFBASIC, &isBasic));
if (isBasic) PetscCallMPI(MPI_Comm_compare(PETSC_COMM_WORLD, comm, &result));
if (!isBasic || (result != MPI_IDENT && result != MPI_CONGRUENT)) sf->use_nvshmem = PETSC_FALSE; /* If not eligible, clear the flag so that we don't try again */
/* Do further check: If on a rank, both rootdata and leafdata are NULL, we might think they are PETSC_MEMTYPE_CUDA (or HOST)
and then use NVSHMEM. But if root/leafmtypes on other ranks are PETSC_MEMTYPE_HOST (or DEVICE), this would lead to
inconsistency on the return value <use_nvshmem>. To be safe, we simply disable nvshmem on these rare SFs.
*/
if (sf->use_nvshmem) {
PetscInt hasNullRank = (!rootdata && !leafdata) ? 1 : 0;
PetscCall(MPIU_Allreduce(MPI_IN_PLACE, &hasNullRank, 1, MPIU_INT, MPI_LOR, comm));
if (hasNullRank) sf->use_nvshmem = PETSC_FALSE;
}
sf->checked_nvshmem_eligibility = PETSC_TRUE; /* If eligible, don't do above check again */
}
/* Check if rootmtype and leafmtype collectively are PETSC_MEMTYPE_CUDA */
if (sf->use_nvshmem) {
PetscInt oneCuda = (!rootdata || PetscMemTypeCUDA(rootmtype)) && (!leafdata || PetscMemTypeCUDA(leafmtype)) ? 1 : 0; /* Do I use cuda for both root&leafmtype? */
PetscInt allCuda = oneCuda; /* Assume the same for all ranks. But if not, in opt mode, return value <use_nvshmem> won't be collective! */
#if defined(PETSC_USE_DEBUG) /* Check in debug mode. Note MPI_Allreduce is expensive, so only in debug mode */
PetscCall(MPIU_Allreduce(&oneCuda, &allCuda, 1, MPIU_INT, MPI_LAND, comm));
PetscCheck(allCuda == oneCuda, comm, PETSC_ERR_SUP, "root/leaf mtypes are inconsistent among ranks, which may lead to SF nvshmem failure in opt mode. Add -use_nvshmem 0 to disable it.");
#endif
if (allCuda) {
PetscCall(PetscNvshmemInitializeCheck());
if (!sf->setup_nvshmem) { /* Set up nvshmem related fields on this SF on-demand */
PetscCall(PetscSFSetUp_Basic_NVSHMEM(sf));
sf->setup_nvshmem = PETSC_TRUE;
}
*use_nvshmem = PETSC_TRUE;
} else {
*use_nvshmem = PETSC_FALSE;
}
} else {
*use_nvshmem = PETSC_FALSE;
}
PetscFunctionReturn(PETSC_SUCCESS);
}
/* Build dependence between <stream> and <remoteCommStream> at the entry of NVSHMEM communication */
static PetscErrorCode PetscSFLinkBuildDependenceBegin(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
hipError_t cerr;
PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
PetscInt buflen = (direction == PETSCSF_ROOT2LEAF) ? bas->rootbuflen[PETSCSF_REMOTE] : sf->leafbuflen[PETSCSF_REMOTE];
PetscFunctionBegin;
if (buflen) {
PetscCallCUDA(hipEventRecord(link->dataReady, link->stream));
PetscCallCUDA(hipStreamWaitEvent(link->remoteCommStream, link->dataReady, 0));
}
PetscFunctionReturn(PETSC_SUCCESS);
}
/* Build dependence between <stream> and <remoteCommStream> at the exit of NVSHMEM communication:
   record an event on the communication stream and make the compute stream wait on it, so unpack
   kernels only run after the remote data has landed in the receive buffer. */
static PetscErrorCode PetscSFLinkBuildDependenceEnd(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  /* Receive-side remote buffer length: leaf buffer for root->leaf, root buffer for leaf->root */
  PetscInt buflen = (direction == PETSCSF_ROOT2LEAF) ? sf->leafbuflen[PETSCSF_REMOTE] : bas->rootbuflen[PETSCSF_REMOTE];
  PetscFunctionBegin;
  /* If unpack to non-null device buffer, build the endRemoteComm dependence */
  if (buflen) {
    PetscCallCUDA(hipEventRecord(link->endRemoteComm, link->remoteCommStream));
    PetscCallCUDA(hipStreamWaitEvent(link->stream, link->endRemoteComm, 0));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Send/Put signals to remote ranks
   Input parameters:
  + n       - Number of remote ranks
  . sig     - Signal address in symmetric heap
  . sigdisp - To i-th rank, use its signal at offset sigdisp[i]
  . ranks   - remote ranks
  - newval  - Set signals to this value
*/
__global__ static void NvshmemSendSignals(PetscInt n, uint64_t *sig, PetscInt *sigdisp, PetscMPIInt *ranks, uint64_t newval)
{
  const int tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid >= n) return;
  /* One thread per destination rank: write <newval> into that rank's signal slot */
  nvshmemx_uint64_signal(sig + sigdisp[tid], newval, ranks[tid]);
}
/* Wait until local signals equal to the expected value and then set them to a new value
   Input parameters:
  + n - Number of signals
  . sig - Local signal address
  . expval - expected value
  - newval - Set signals to this new value

  NOTE(review): this kernel is launched with a single thread (<<<1,1>>>) by its callers; the
  enabled branch below relies on that (one thread waits on all n signals, then resets them).
*/
__global__ static void NvshmemWaitSignals(PetscInt n, uint64_t *sig, uint64_t expval, uint64_t newval)
{
#if 0
  /* Akhil Langer@NVIDIA said using 1 thread and nvshmem_uint64_wait_until_all is better */
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n) {
    nvshmem_signal_wait_until(sig+i,NVSHMEM_CMP_EQ,expval);
    sig[i] = newval;
  }
#else
  /* Block until every one of the n signals equals expval, then overwrite all with newval */
  nvshmem_uint64_wait_until_all(sig, n, NULL /*no mask*/, NVSHMEM_CMP_EQ, expval);
  for (int i = 0; i < n; i++) sig[i] = newval;
#endif
}
/* ===========================================================================================================
A set of routines to support receiver initiated communication using the get method
The getting protocol is:
Sender has a send buf (sbuf) and a signal variable (ssig); Receiver has a recv buf (rbuf) and a signal variable (rsig);
All signal variables have an initial value 0.
Sender: | Receiver:
1. Wait ssig be 0, then set it to 1
2. Pack data into stand alone sbuf |
3. Put 1 to receiver's rsig | 1. Wait rsig to be 1, then set it 0
| 2. Get data from remote sbuf to local rbuf
| 3. Put 1 to sender's ssig
| 4. Unpack data from local rbuf
===========================================================================================================*/
/* PrePack operation (get protocol) -- the sender is about to overwrite its send buffer, which
   receivers may still be getting data from. Block on the send signals until every receiver has
   acknowledged (signal == 0), then mark the buffer busy again (signal = 1). */
PetscErrorCode PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  uint64_t      *sendsig;
  PetscInt       nranks;
  PetscFunctionBegin;
  if (direction == PETSCSF_ROOT2LEAF) {
    /* Leaf ranks get from my root buffer; they reset my rootSendSig when done */
    sendsig = link->rootSendSig;
    nranks  = bas->nRemoteLeafRanks;
  } else {
    /* LEAF2ROOT: root ranks get from my leaf buffer */
    sendsig = link->leafSendSig;
    nranks  = sf->nRemoteRootRanks;
  }
  if (nranks > 0) {
    /* Wait for the signals to be 0, then set them to 1 (single-thread kernel) */
    hipLaunchKernelGGL((NvshmemWaitSignals), dim3(1), dim3(1), 0, link->remoteCommStream, nranks, sendsig, 0, 1);
    PetscCallCUDA(hipGetLastError());
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* One thread block per remote source rank; block <b> fetches that rank's contribution.
   Locally accessible PEs (nvshmem_ptr != NULL) are skipped here — the host handles them
   with stream-based gets, which use the copy engines and are faster. */
__global__ static void GetDataFromRemotelyAccessible(PetscInt nsrcranks, PetscMPIInt *srcranks, const char *src, PetscInt *srcdisp, char *dst, PetscInt *dstdisp, PetscInt unitbytes)
{
  const int         b  = blockIdx.x;
  const PetscMPIInt pe = srcranks[b];
  if (nvshmem_ptr(src, pe)) return; /* locally accessible: handled on the host */
  {
    const PetscInt nbytes = (dstdisp[b + 1] - dstdisp[b]) * unitbytes;
    nvshmem_getmem_nbi(dst + (dstdisp[b] - dstdisp[0]) * unitbytes, src + srcdisp[b] * unitbytes, nbytes, pe);
  }
}
/* Start communication -- Get data in the given direction.
   Get protocol: senders signal "data is packed", receivers wait for that signal and then pull
   the data with non-blocking nvshmem gets (finished in PetscSFLinkGetDataEnd_NVSHMEM).
   Remotely accessible PEs are served by a device kernel; locally accessible PEs use the host
   stream API, which is faster (CUDA copy engines). */
PetscErrorCode PetscSFLinkGetDataBegin_NVSHMEM(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  PetscInt nsrcranks, ndstranks, nLocallyAccessible = 0;
  char *src, *dst;
  PetscInt *srcdisp_h, *dstdisp_h; /* host copies of the displacement arrays */
  PetscInt *srcdisp_d, *dstdisp_d; /* device copies */
  PetscMPIInt *srcranks_h;
  PetscMPIInt *srcranks_d, *dstranks_d;
  uint64_t *dstsig;
  PetscInt *dstsigdisp_d;
  PetscFunctionBegin;
  PetscCall(PetscSFLinkBuildDependenceBegin(sf, link, direction));
  if (direction == PETSCSF_ROOT2LEAF) { /* src is root, dst is leaf; we will move data from src to dst */
    nsrcranks = sf->nRemoteRootRanks;
    src = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* root buf is the send buf; it is in symmetric heap */
    srcdisp_h = sf->rootbufdisp; /* for my i-th remote root rank, I will access its buf at offset rootbufdisp[i] */
    srcdisp_d = sf->rootbufdisp_d;
    srcranks_h = sf->ranks + sf->ndranks; /* my (remote) root ranks */
    srcranks_d = sf->ranks_d;
    ndstranks = bas->nRemoteLeafRanks;
    dst = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* recv buf is the local leaf buf, also in symmetric heap */
    dstdisp_h = sf->roffset + sf->ndranks; /* offsets of the local leaf buf. Note dstdisp[0] is not necessarily 0 */
    dstdisp_d = sf->roffset_d;
    dstranks_d = bas->iranks_d; /* my (remote) leaf ranks */
    dstsig = link->leafRecvSig;
    dstsigdisp_d = bas->leafsigdisp_d;
  } else { /* src is leaf, dst is root; we will move data from src to dst */
    nsrcranks = bas->nRemoteLeafRanks;
    src = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* leaf buf is the send buf */
    srcdisp_h = bas->leafbufdisp; /* for my i-th remote leaf rank, I will access its buf at offset leafbufdisp[i] */
    srcdisp_d = bas->leafbufdisp_d;
    srcranks_h = bas->iranks + bas->ndiranks; /* my (remote) leaf ranks */
    srcranks_d = bas->iranks_d;
    ndstranks = sf->nRemoteRootRanks;
    dst = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* the local root buf is the recv buf */
    dstdisp_h = bas->ioffset + bas->ndiranks; /* offsets of the local root buf. Note dstdisp[0] is not necessarily 0 */
    dstdisp_d = bas->ioffset_d;
    dstranks_d = sf->ranks_d; /* my (remote) root ranks */
    dstsig = link->rootRecvSig;
    dstsigdisp_d = sf->rootsigdisp_d;
  }
  /* After Pack operation -- src tells dst ranks that they are allowed to get data */
  if (ndstranks) {
    hipLaunchKernelGGL((NvshmemSendSignals), dim3((ndstranks + 255) / 256), dim3(256), 0, link->remoteCommStream, ndstranks, dstsig, dstsigdisp_d, dstranks_d, 1); /* set signals to 1 */
    PetscCallCUDA(hipGetLastError());
  }
  /* dst waits for signals (permissions) from src ranks to start getting data */
  if (nsrcranks) {
    hipLaunchKernelGGL((NvshmemWaitSignals), dim3(1), dim3(1), 0, link->remoteCommStream, nsrcranks, dstsig, 1, 0); /* wait the signals to be 1, then set them to 0 */
    PetscCallCUDA(hipGetLastError());
  }
  /* dst gets data from src ranks using non-blocking nvshmem_gets, which are finished in PetscSFLinkGetDataEnd_NVSHMEM() */
  /* Count number of locally accessible src ranks, which should be a small number */
  for (int i = 0; i < nsrcranks; i++) {
    if (nvshmem_ptr(src, srcranks_h[i])) nLocallyAccessible++;
  }
  /* Get data from remotely accessible PEs (the kernel itself skips locally accessible ones) */
  if (nLocallyAccessible < nsrcranks) {
    hipLaunchKernelGGL((GetDataFromRemotelyAccessible), dim3(nsrcranks), dim3(1), 0, link->remoteCommStream, nsrcranks, srcranks_d, src, srcdisp_d, dst, dstdisp_d, link->unitbytes);
    PetscCallCUDA(hipGetLastError());
  }
  /* Get data from locally accessible PEs with the host stream API (uses the copy engines) */
  if (nLocallyAccessible) {
    for (int i = 0; i < nsrcranks; i++) {
      int pe = srcranks_h[i];
      if (nvshmem_ptr(src, pe)) {
        size_t nelems = (dstdisp_h[i + 1] - dstdisp_h[i]) * link->unitbytes;
        nvshmemx_getmem_nbi_on_stream(dst + (dstdisp_h[i] - dstdisp_h[0]) * link->unitbytes, src + srcdisp_h[i] * link->unitbytes, nelems, pe, link->remoteCommStream);
      }
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Finish the communication (can be done before Unpack)
   Quiet the non-blocking gets so the receive buffer is complete, then tell the senders that
   they may reuse their send buffers (reset their send signals to 0). */
PetscErrorCode PetscSFLinkGetDataEnd_NVSHMEM(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  uint64_t *srcsig;
  PetscInt nsrcranks, *srcsigdisp;
  PetscMPIInt *srcranks;
  PetscFunctionBegin;
  if (direction == PETSCSF_ROOT2LEAF) { /* leaf ranks are getting data */
    nsrcranks = sf->nRemoteRootRanks;
    srcsig = link->rootSendSig; /* I want to set their root signal */
    srcsigdisp = sf->rootsigdisp_d; /* offset of each root signal */
    srcranks = sf->ranks_d; /* ranks of the n root ranks */
  } else { /* LEAF2ROOT, root ranks are getting data */
    nsrcranks = bas->nRemoteLeafRanks;
    srcsig = link->leafSendSig;
    srcsigdisp = bas->leafsigdisp_d;
    srcranks = bas->iranks_d;
  }
  if (nsrcranks) {
    nvshmemx_quiet_on_stream(link->remoteCommStream); /* Finish the nonblocking get, so that we can unpack afterwards */
    PetscCallCUDA(hipGetLastError());
    hipLaunchKernelGGL((NvshmemSendSignals), dim3((nsrcranks + 511) / 512), dim3(512), 0, link->remoteCommStream, nsrcranks, srcsig, srcsigdisp, srcranks, 0); /* set signals to 0 */
    PetscCallCUDA(hipGetLastError());
  }
  PetscCall(PetscSFLinkBuildDependenceEnd(sf, link, direction));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* ===========================================================================================================
A set of routines to support sender initiated communication using the put-based method (the default)
The putting protocol is:
Sender has a send buf (sbuf) and a send signal var (ssig); Receiver has a stand-alone recv buf (rbuf)
and a recv signal var (rsig); All signal variables have an initial value 0. rbuf is allocated by SF and
is in nvshmem space.
Sender: | Receiver:
|
1. Pack data into sbuf |
2. Wait ssig be 0, then set it to 1 |
3. Put data to remote stand-alone rbuf |
4. Fence // make sure 5 happens after 3 |
5. Put 1 to receiver's rsig | 1. Wait rsig to be 1, then set it 0
| 2. Unpack data from local rbuf
| 3. Put 0 to sender's ssig
===========================================================================================================*/
/* One thread block per destination rank; block <b> serves that rank.
   Skips locally accessible PEs (those are handled on the host with stream puts). For a remote
   PE: wait until the receiver released our send slice (signal == 0), mark it busy (signal = 1),
   then start a non-blocking put of the slice. */
__global__ static void WaitAndPutDataToRemotelyAccessible(PetscInt ndstranks, PetscMPIInt *dstranks, char *dst, PetscInt *dstdisp, const char *src, PetscInt *srcdisp, uint64_t *srcsig, PetscInt unitbytes)
{
  const int         b  = blockIdx.x;
  const PetscMPIInt pe = dstranks[b];
  if (nvshmem_ptr(dst, pe)) return; /* locally accessible: handled on the host */
  nvshmem_uint64_wait_until(srcsig + b, NVSHMEM_CMP_EQ, 0); /* wait until the sig = 0 */
  srcsig[b] = 1;
  nvshmem_putmem_nbi(dst + dstdisp[b] * unitbytes, src + (srcdisp[b] - srcdisp[0]) * unitbytes, (srcdisp[b + 1] - srcdisp[b]) * unitbytes, pe);
}
/* One-thread kernel covering all locally accessible destination ranks: for each such rank,
   wait until its slot in srcsig drops to 0 (receiver released the buffer) and re-arm it to 1.
   The actual data movement for these ranks is done on the host with stream puts. */
__global__ static void WaitSignalsFromLocallyAccessible(PetscInt ndstranks, PetscMPIInt *dstranks, uint64_t *srcsig, const char *dst)
{
  for (PetscInt i = 0; i < ndstranks; i++) {
    if (!nvshmem_ptr(dst, dstranks[i])) continue; /* only locally accessible PEs */
    nvshmem_uint64_wait_until(srcsig + i, NVSHMEM_CMP_EQ, 0); /* wait until the sig = 0 */
    srcsig[i] = 1;
  }
}
/* Put data in the given direction (put protocol, the default).
   For each destination rank: wait until its send signal is 0 (receiver freed the buffer),
   set it to 1, then start a non-blocking put. Remotely accessible PEs are served by a device
   kernel; locally accessible PEs use the host stream API (CUDA copy engines, much faster).
   The puts are completed in PetscSFLinkPutDataEnd_NVSHMEM. */
PetscErrorCode PetscSFLinkPutDataBegin_NVSHMEM(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  PetscInt ndstranks, nLocallyAccessible = 0;
  char *src, *dst;
  PetscInt *srcdisp_h, *dstdisp_h; /* host copies of the displacement arrays */
  PetscInt *srcdisp_d, *dstdisp_d; /* device copies */
  PetscMPIInt *dstranks_h;
  PetscMPIInt *dstranks_d;
  uint64_t *srcsig;
  PetscFunctionBegin;
  PetscCall(PetscSFLinkBuildDependenceBegin(sf, link, direction));
  if (direction == PETSCSF_ROOT2LEAF) { /* put data in rootbuf to leafbuf */
    ndstranks = bas->nRemoteLeafRanks; /* number of (remote) leaf ranks */
    src = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* Both src & dst must be symmetric */
    dst = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
    srcdisp_h = bas->ioffset + bas->ndiranks; /* offsets of rootbuf. srcdisp[0] is not necessarily zero */
    srcdisp_d = bas->ioffset_d;
    srcsig = link->rootSendSig;
    dstdisp_h = bas->leafbufdisp; /* for my i-th remote leaf rank, I will access its leaf buf at offset leafbufdisp[i] */
    dstdisp_d = bas->leafbufdisp_d;
    dstranks_h = bas->iranks + bas->ndiranks; /* remote leaf ranks */
    dstranks_d = bas->iranks_d;
  } else { /* put data in leafbuf to rootbuf */
    ndstranks = sf->nRemoteRootRanks;
    src = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
    dst = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
    srcdisp_h = sf->roffset + sf->ndranks; /* offsets of leafbuf */
    srcdisp_d = sf->roffset_d;
    srcsig = link->leafSendSig;
    dstdisp_h = sf->rootbufdisp; /* for my i-th remote root rank, I will access its root buf at offset rootbufdisp[i] */
    dstdisp_d = sf->rootbufdisp_d;
    dstranks_h = sf->ranks + sf->ndranks; /* remote root ranks */
    dstranks_d = sf->ranks_d;
  }
  /* Wait for signals and then put data to dst ranks using non-blocking nvshmem_put, which are finished in PetscSFLinkPutDataEnd_NVSHMEM */
  /* Count number of locally accessible neighbors, which should be a small number */
  for (int i = 0; i < ndstranks; i++) {
    if (nvshmem_ptr(dst, dstranks_h[i])) nLocallyAccessible++;
  }
  /* For remotely accessible PEs, send data to them in one kernel call */
  if (nLocallyAccessible < ndstranks) {
    hipLaunchKernelGGL((WaitAndPutDataToRemotelyAccessible), dim3(ndstranks), dim3(1), 0, link->remoteCommStream, ndstranks, dstranks_d, dst, dstdisp_d, src, srcdisp_d, srcsig, link->unitbytes);
    PetscCallCUDA(hipGetLastError());
  }
  /* For locally accessible PEs, use host API, which uses CUDA copy-engines and is much faster than device API */
  if (nLocallyAccessible) {
    hipLaunchKernelGGL((WaitSignalsFromLocallyAccessible), dim3(1), dim3(1), 0, link->remoteCommStream, ndstranks, dstranks_d, srcsig, dst);
    PetscCallCUDA(hipGetLastError()); /* check the launch, consistent with every other launch in this file */
    for (int i = 0; i < ndstranks; i++) {
      int pe = dstranks_h[i];
      if (nvshmem_ptr(dst, pe)) { /* If return a non-null pointer, then <pe> is locally accessible */
        size_t nelems = (srcdisp_h[i + 1] - srcdisp_h[i]) * link->unitbytes;
        /* Initiate the nonblocking communication */
        nvshmemx_putmem_nbi_on_stream(dst + dstdisp_h[i] * link->unitbytes, src + (srcdisp_h[i] - srcdisp_h[0]) * link->unitbytes, nelems, pe, link->remoteCommStream);
      }
    }
  }
  if (nLocallyAccessible) { nvshmemx_quiet_on_stream(link->remoteCommStream); /* Calling nvshmem_fence/quiet() does not fence the above nvshmemx_putmem_nbi_on_stream! */ }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* A one-thread kernel finishing the put protocol for all remote PEs.
   TODO: Shall we finish the non-blocking remote puts first?
   According to Akhil@NVIDIA, IB delivery is ordered, so remote PEs need no fence; for local PEs
   we already called nvshmemx_quiet_on_stream(). Hence it is safe to signal all dst ranks now. */
__global__ static void PutDataEnd(PetscInt nsrcranks, PetscInt ndstranks, PetscMPIInt *dstranks, uint64_t *dstsig, PetscInt *dstsigdisp)
{
  PetscInt i;
  /* 1. Tell every destination rank its data has arrived (set its recv signal to 1) */
  for (i = 0; i < ndstranks; i++) nvshmemx_uint64_signal(dstsig + dstsigdisp[i], 1, dstranks[i]);
  /* 2. Wait until every source rank has signaled us (all sigs == 1), then reset them to 0 */
  if (nsrcranks > 0) {
    nvshmem_uint64_wait_until_all(dstsig, nsrcranks, NULL /*no mask*/, NVSHMEM_CMP_EQ, 1);
    for (i = 0; i < nsrcranks; i++) dstsig[i] = 0;
  }
}
/* Finish the communication -- A receiver waits until it can access its receive buffer.
   Signals all destination ranks that their data has arrived, then waits for the matching
   signals from all source ranks before allowing unpack. */
PetscErrorCode PetscSFLinkPutDataEnd_NVSHMEM(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  PetscMPIInt *dstranks;
  uint64_t *dstsig;
  PetscInt nsrcranks, ndstranks, *dstsigdisp;
  PetscFunctionBegin;
  if (direction == PETSCSF_ROOT2LEAF) { /* put root data to leaf */
    nsrcranks = sf->nRemoteRootRanks;
    ndstranks = bas->nRemoteLeafRanks;
    dstranks = bas->iranks_d; /* leaf ranks */
    dstsig = link->leafRecvSig; /* I will set my leaf ranks's RecvSig */
    dstsigdisp = bas->leafsigdisp_d; /* for my i-th remote leaf rank, I will access its signal at offset leafsigdisp[i] */
  } else { /* LEAF2ROOT */
    nsrcranks = bas->nRemoteLeafRanks;
    ndstranks = sf->nRemoteRootRanks;
    dstranks = sf->ranks_d;
    dstsig = link->rootRecvSig;
    dstsigdisp = sf->rootsigdisp_d;
  }
  if (nsrcranks || ndstranks) {
    hipLaunchKernelGGL((PutDataEnd), dim3(1), dim3(1), 0, link->remoteCommStream, nsrcranks, ndstranks, dstranks, dstsig, dstsigdisp);
    PetscCallCUDA(hipGetLastError());
  }
  PetscCall(PetscSFLinkBuildDependenceEnd(sf, link, direction));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* PostUnpack operation -- A receiver tells its senders that they are allowed to put data here
   again (i.e. the receive buffer is free to take new data): reset their send signals to 0. */
PetscErrorCode PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  uint64_t      *sig;
  PetscInt       n, *sigdisp;
  PetscMPIInt   *ranks;
  PetscFunctionBegin;
  if (direction == PETSCSF_ROOT2LEAF) {
    /* I allow my root ranks to put data to me */
    n       = sf->nRemoteRootRanks;
    sig     = link->rootSendSig;   /* I want to set their send signals */
    sigdisp = sf->rootsigdisp_d;   /* offset of each root signal */
    ranks   = sf->ranks_d;         /* ranks of the n root ranks */
  } else {
    /* LEAF2ROOT */
    n       = bas->nRemoteLeafRanks;
    sig     = link->leafSendSig;
    sigdisp = bas->leafsigdisp_d;
    ranks   = bas->iranks_d;
  }
  if (n) {
    /* Set the remote signals to 0, one thread per rank */
    hipLaunchKernelGGL((NvshmemSendSignals), dim3((n + 255) / 256), dim3(256), 0, link->remoteCommStream, n, sig, sigdisp, ranks, 0);
    PetscCallCUDA(hipGetLastError());
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Destructor when the link uses nvshmem for communication: release the events, the dedicated
   communication stream, and all symmetric-heap buffers/signals owned by the link. */
static PetscErrorCode PetscSFLinkDestroy_NVSHMEM(PetscSF sf, PetscSFLink link)
{
  PetscFunctionBegin;
  PetscCallCUDA(hipEventDestroy(link->dataReady));
  PetscCallCUDA(hipEventDestroy(link->endRemoteComm));
  PetscCallCUDA(hipStreamDestroy(link->remoteCommStream));
  /* nvshmem does not need buffers on host, which should be NULL */
  PetscCall(PetscNvshmemFree(link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]));
  PetscCall(PetscNvshmemFree(link->leafSendSig));
  PetscCall(PetscNvshmemFree(link->leafRecvSig));
  PetscCall(PetscNvshmemFree(link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]));
  PetscCall(PetscNvshmemFree(link->rootSendSig));
  PetscCall(PetscNvshmemFree(link->rootRecvSig));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Create (or fetch from cache) an SFLink that communicates with NVSHMEM.
   Decides whether root/leafdata can be used directly as send/recv buffers, looks for a cached
   link with a matching MPI datatype, and otherwise allocates signals, symmetric-heap buffers,
   a high-priority communication stream, and wires up the protocol callbacks. */
PetscErrorCode PetscSFLinkCreate_NVSHMEM(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, const void *leafdata, MPI_Op op, PetscSFOperation sfop, PetscSFLink *mylink)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  PetscSFLink *p, link;
  PetscBool match, rootdirect[2], leafdirect[2];
  int greatestPriority;
  PetscFunctionBegin;
  /* Check to see if we can directly send/recv root/leafdata with the given sf, sfop and op.
     We only care root/leafdirect[PETSCSF_REMOTE], since we never need intermediate buffers in local communication with NVSHMEM.
  */
  if (sfop == PETSCSF_BCAST) { /* Move data from rootbuf to leafbuf */
    if (sf->use_nvshmem_get) {
      rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* send buffer has to be stand-alone (can't be rootdata) */
      leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE;
    } else {
      rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE;
      leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* Our put-protocol always needs a nvshmem alloc'ed recv buffer */
    }
  } else if (sfop == PETSCSF_REDUCE) { /* Move data from leafbuf to rootbuf */
    if (sf->use_nvshmem_get) {
      rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE;
      leafdirect[PETSCSF_REMOTE] = PETSC_FALSE;
    } else {
      rootdirect[PETSCSF_REMOTE] = PETSC_FALSE;
      leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE;
    }
  } else { /* PETSCSF_FETCH */
    rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* FETCH always need a separate rootbuf */
    leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* We also force allocating a separate leafbuf so that leafdata and leafupdate can share mpi requests */
  }
  /* Look for free nvshmem links in cache */
  for (p = &bas->avail; (link = *p); p = &link->next) {
    if (link->use_nvshmem) {
      PetscCall(MPIPetsc_Type_compare(unit, link->unit, &match));
      if (match) {
        *p = link->next; /* Remove from available list */
        goto found;
      }
    }
  }
  PetscCall(PetscNew(&link));
  PetscCall(PetscSFLinkSetUp_Host(sf, link, unit)); /* Compute link->unitbytes, dup link->unit etc. */
  if (sf->backend == PETSCSF_BACKEND_CUDA) PetscCall(PetscSFLinkSetUp_CUDA(sf, link, unit)); /* Setup pack routines, streams etc */
#if defined(PETSC_HAVE_KOKKOS)
  else if (sf->backend == PETSCSF_BACKEND_KOKKOS) PetscCall(PetscSFLinkSetUp_Kokkos(sf, link, unit));
#endif
  link->rootdirect[PETSCSF_LOCAL] = PETSC_TRUE; /* For the local part we directly use root/leafdata */
  link->leafdirect[PETSCSF_LOCAL] = PETSC_TRUE;
  /* Init signals to zero */
  if (!link->rootSendSig) PetscCall(PetscNvshmemCalloc(bas->nRemoteLeafRanksMax * sizeof(uint64_t), (void **)&link->rootSendSig));
  if (!link->rootRecvSig) PetscCall(PetscNvshmemCalloc(bas->nRemoteLeafRanksMax * sizeof(uint64_t), (void **)&link->rootRecvSig));
  if (!link->leafSendSig) PetscCall(PetscNvshmemCalloc(sf->nRemoteRootRanksMax * sizeof(uint64_t), (void **)&link->leafSendSig));
  if (!link->leafRecvSig) PetscCall(PetscNvshmemCalloc(sf->nRemoteRootRanksMax * sizeof(uint64_t), (void **)&link->leafRecvSig));
  link->use_nvshmem = PETSC_TRUE;
  link->rootmtype = PETSC_MEMTYPE_DEVICE; /* Only need 0/1-based mtype from now on */
  link->leafmtype = PETSC_MEMTYPE_DEVICE;
  /* Overwrite some function pointers set by PetscSFLinkSetUp_CUDA */
  link->Destroy = PetscSFLinkDestroy_NVSHMEM;
  if (sf->use_nvshmem_get) { /* get-based protocol */
    link->PrePack = PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM;
    link->StartCommunication = PetscSFLinkGetDataBegin_NVSHMEM;
    link->FinishCommunication = PetscSFLinkGetDataEnd_NVSHMEM;
  } else { /* put-based protocol */
    link->StartCommunication = PetscSFLinkPutDataBegin_NVSHMEM;
    link->FinishCommunication = PetscSFLinkPutDataEnd_NVSHMEM;
    link->PostUnpack = PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM;
  }
  /* Communication runs on its own non-blocking, highest-priority stream so it can overlap compute */
  PetscCallCUDA(hipDeviceGetStreamPriorityRange(NULL, &greatestPriority));
  PetscCallCUDA(hipStreamCreateWithPriority(&link->remoteCommStream, hipStreamNonBlocking, greatestPriority));
  PetscCallCUDA(hipEventCreateWithFlags(&link->dataReady, hipEventDisableTiming));
  PetscCallCUDA(hipEventCreateWithFlags(&link->endRemoteComm, hipEventDisableTiming));
found:
  if (rootdirect[PETSCSF_REMOTE]) {
    link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char *)rootdata + bas->rootstart[PETSCSF_REMOTE] * link->unitbytes;
  } else {
    if (!link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) PetscCall(PetscNvshmemMalloc(bas->rootbuflen_rmax * link->unitbytes, (void **)&link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]));
    link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
  }
  if (leafdirect[PETSCSF_REMOTE]) {
    link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char *)leafdata + sf->leafstart[PETSCSF_REMOTE] * link->unitbytes;
  } else {
    if (!link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) PetscCall(PetscNvshmemMalloc(sf->leafbuflen_rmax * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]));
    link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
  }
  link->rootdirect[PETSCSF_REMOTE] = rootdirect[PETSCSF_REMOTE];
  link->leafdirect[PETSCSF_REMOTE] = leafdirect[PETSCSF_REMOTE];
  link->rootdata = rootdata; /* root/leafdata are keys to look up links in PetscSFXxxEnd */
  link->leafdata = leafdata;
  link->next = bas->inuse;
  bas->inuse = link;
  *mylink = link;
  PetscFunctionReturn(PETSC_SUCCESS);
}
#if defined(PETSC_USE_REAL_SINGLE)
/* Stream-ordered NVSHMEM sum-reduction over NVSHMEM_TEAM_WORLD (single-precision build).
   Both dst and src must be in the symmetric heap; the result lands in dst on every PE.
   Collective: every PE in the team must call this. */
PetscErrorCode PetscNvshmemSum(PetscInt count, float *dst, const float *src)
{
  PetscMPIInt num; /* Assume nvshmem's int is MPI's int */
  PetscFunctionBegin;
  PetscCall(PetscMPIIntCast(count, &num));
  nvshmemx_float_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD, dst, src, num, PetscDefaultCudaStream);
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Stream-ordered NVSHMEM max-reduction; same requirements as PetscNvshmemSum() */
PetscErrorCode PetscNvshmemMax(PetscInt count, float *dst, const float *src)
{
  PetscMPIInt num;
  PetscFunctionBegin;
  PetscCall(PetscMPIIntCast(count, &num));
  nvshmemx_float_max_reduce_on_stream(NVSHMEM_TEAM_WORLD, dst, src, num, PetscDefaultCudaStream);
  PetscFunctionReturn(PETSC_SUCCESS);
}
#elif defined(PETSC_USE_REAL_DOUBLE)
/* Double-precision variants of the reductions above */
PetscErrorCode PetscNvshmemSum(PetscInt count, double *dst, const double *src)
{
  PetscMPIInt num;
  PetscFunctionBegin;
  PetscCall(PetscMPIIntCast(count, &num));
  nvshmemx_double_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD, dst, src, num, PetscDefaultCudaStream);
  PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode PetscNvshmemMax(PetscInt count, double *dst, const double *src)
{
  PetscMPIInt num;
  PetscFunctionBegin;
  PetscCall(PetscMPIIntCast(count, &num));
  nvshmemx_double_max_reduce_on_stream(NVSHMEM_TEAM_WORLD, dst, src, num, PetscDefaultCudaStream);
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
#include <petsc/private/cudavecimpl.h>
#include <../src/vec/is/sf/impls/basic/sfpack.h>
#include <mpi.h>
#include <nvshmem.h>
#include <nvshmemx.h>
/* Lazily initialize NVSHMEM on top of PETSC_COMM_WORLD.
   NVSHMEM itself offers no "am I initialized?" query, so we track state in
   PetscNvshmemInitialized; PetscBeganNvshmem records that PETSc owns the finalize. */
PetscErrorCode PetscNvshmemInitializeCheck(void)
{
  PetscFunctionBegin;
  if (!PetscNvshmemInitialized) {
    nvshmemx_init_attr_t attr;

    attr.mpi_comm = &PETSC_COMM_WORLD;
    PetscCall(PetscDeviceInitialize(PETSC_DEVICE_CUDA)); /* the device must be set up before NVSHMEM */
    PetscCall(nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr));
    PetscNvshmemInitialized = PETSC_TRUE;
    PetscBeganNvshmem       = PETSC_TRUE;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Allocate <size> bytes in the NVSHMEM symmetric heap (collective across all PEs);
   errors out (rather than returning NULL) if the allocation fails */
PetscErrorCode PetscNvshmemMalloc(size_t size, void **ptr)
{
  PetscFunctionBegin;
  PetscCall(PetscNvshmemInitializeCheck()); /* nvshmem_malloc requires NVSHMEM to be initialized */
  *ptr = nvshmem_malloc(size);
  PetscCheck(*ptr, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "nvshmem_malloc() failed to allocate %zu bytes", size);
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Allocate <size> zero-initialized bytes in the NVSHMEM symmetric heap (collective);
   errors out if the allocation fails */
PetscErrorCode PetscNvshmemCalloc(size_t size, void **ptr)
{
  PetscFunctionBegin;
  PetscCall(PetscNvshmemInitializeCheck());
  *ptr = nvshmem_calloc(size, 1); /* <size> elements of 1 byte each */
  PetscCheck(*ptr, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "nvshmem_calloc() failed to allocate %zu bytes", size);
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Return a symmetric-heap allocation to NVSHMEM (collective, like nvshmem_free itself).
   Callers should use the PetscNvshmemFree() wrapper, which NULLs the pointer. */
PetscErrorCode PetscNvshmemFree_Private(void *ptr)
{
  PetscFunctionBegin;
  nvshmem_free(ptr);
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Shut down NVSHMEM; called during PETSc finalization when PetscBeganNvshmem is set */
PetscErrorCode PetscNvshmemFinalize(void)
{
  PetscFunctionBegin;
  nvshmem_finalize();
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Free nvshmem related fields in the SF (host-side displacement arrays and their device
   mirrors, for both the leaf side stored in bas and the root side stored in sf) */
PetscErrorCode PetscSFReset_Basic_NVSHMEM(PetscSF sf)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  PetscFunctionBegin;
  /* Leaf-side fields (set up in PetscSFSetUp_Basic_NVSHMEM) */
  PetscCall(PetscFree2(bas->leafsigdisp, bas->leafbufdisp));
  PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, bas->leafbufdisp_d));
  PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, bas->leafsigdisp_d));
  PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, bas->iranks_d));
  PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, bas->ioffset_d));
  /* Root-side fields */
  PetscCall(PetscFree2(sf->rootsigdisp, sf->rootbufdisp));
  PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, sf->rootbufdisp_d));
  PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, sf->rootsigdisp_d));
  PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, sf->ranks_d));
  PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_CUDA, sf->roffset_d));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Set up NVSHMEM related fields for an SF of type SFBASIC (only after PetscSFSetup_Basic() already set up dependent fields).
   Exchanges, via four rounds of point-to-point MPI, the signal and buffer displacements each
   rank must use when addressing its neighbors' symmetric-heap buffers, computes the global
   maxima needed for symmetric allocation sizes, and mirrors the resulting arrays on the device. */
static PetscErrorCode PetscSFSetUp_Basic_NVSHMEM(PetscSF sf)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  PetscInt i, nRemoteRootRanks, nRemoteLeafRanks;
  PetscMPIInt tag;
  MPI_Comm comm;
  MPI_Request *rootreqs, *leafreqs;
  PetscInt tmp, stmp[4], rtmp[4]; /* tmps for send/recv buffers */
  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscObjectGetNewTag((PetscObject)sf, &tag));
  nRemoteRootRanks = sf->nranks - sf->ndranks;
  nRemoteLeafRanks = bas->niranks - bas->ndiranks;
  sf->nRemoteRootRanks = nRemoteRootRanks;
  bas->nRemoteLeafRanks = nRemoteLeafRanks;
  PetscCall(PetscMalloc2(nRemoteLeafRanks, &rootreqs, nRemoteRootRanks, &leafreqs));
  /* Symmetric allocations must be the same size on every PE, so reduce the maxima */
  stmp[0] = nRemoteRootRanks;
  stmp[1] = sf->leafbuflen[PETSCSF_REMOTE];
  stmp[2] = nRemoteLeafRanks;
  stmp[3] = bas->rootbuflen[PETSCSF_REMOTE];
  PetscCall(MPIU_Allreduce(stmp, rtmp, 4, MPIU_INT, MPI_MAX, comm));
  sf->nRemoteRootRanksMax = rtmp[0];
  sf->leafbuflen_rmax = rtmp[1];
  bas->nRemoteLeafRanksMax = rtmp[2];
  bas->rootbuflen_rmax = rtmp[3];
  /* Total four rounds of MPI communications to set up the nvshmem fields */
  /* Root ranks to leaf ranks: send info about rootsigdisp[] and rootbufdisp[] */
  PetscCall(PetscMalloc2(nRemoteRootRanks, &sf->rootsigdisp, nRemoteRootRanks, &sf->rootbufdisp));
  for (i = 0; i < nRemoteRootRanks; i++) PetscCallMPI(MPI_Irecv(&sf->rootsigdisp[i], 1, MPIU_INT, sf->ranks[i + sf->ndranks], tag, comm, &leafreqs[i])); /* Leaves recv */
  for (i = 0; i < nRemoteLeafRanks; i++) PetscCallMPI(MPI_Send(&i, 1, MPIU_INT, bas->iranks[i + bas->ndiranks], tag, comm)); /* Roots send. Note i changes, so we use MPI_Send. */
  PetscCallMPI(MPI_Waitall(nRemoteRootRanks, leafreqs, MPI_STATUSES_IGNORE));
  for (i = 0; i < nRemoteRootRanks; i++) PetscCallMPI(MPI_Irecv(&sf->rootbufdisp[i], 1, MPIU_INT, sf->ranks[i + sf->ndranks], tag, comm, &leafreqs[i])); /* Leaves recv */
  for (i = 0; i < nRemoteLeafRanks; i++) {
    tmp = bas->ioffset[i + bas->ndiranks] - bas->ioffset[bas->ndiranks];
    PetscCallMPI(MPI_Send(&tmp, 1, MPIU_INT, bas->iranks[i + bas->ndiranks], tag, comm)); /* Roots send. Note tmp changes, so we use MPI_Send. */
  }
  PetscCallMPI(MPI_Waitall(nRemoteRootRanks, leafreqs, MPI_STATUSES_IGNORE));
  /* Mirror the root-side arrays on the device for use inside kernels */
  PetscCallCUDA(cudaMalloc((void **)&sf->rootbufdisp_d, nRemoteRootRanks * sizeof(PetscInt)));
  PetscCallCUDA(cudaMalloc((void **)&sf->rootsigdisp_d, nRemoteRootRanks * sizeof(PetscInt)));
  PetscCallCUDA(cudaMalloc((void **)&sf->ranks_d, nRemoteRootRanks * sizeof(PetscMPIInt)));
  PetscCallCUDA(cudaMalloc((void **)&sf->roffset_d, (nRemoteRootRanks + 1) * sizeof(PetscInt)));
  PetscCallCUDA(cudaMemcpyAsync(sf->rootbufdisp_d, sf->rootbufdisp, nRemoteRootRanks * sizeof(PetscInt), cudaMemcpyHostToDevice, PetscDefaultCudaStream));
  PetscCallCUDA(cudaMemcpyAsync(sf->rootsigdisp_d, sf->rootsigdisp, nRemoteRootRanks * sizeof(PetscInt), cudaMemcpyHostToDevice, PetscDefaultCudaStream));
  PetscCallCUDA(cudaMemcpyAsync(sf->ranks_d, sf->ranks + sf->ndranks, nRemoteRootRanks * sizeof(PetscMPIInt), cudaMemcpyHostToDevice, PetscDefaultCudaStream));
  PetscCallCUDA(cudaMemcpyAsync(sf->roffset_d, sf->roffset + sf->ndranks, (nRemoteRootRanks + 1) * sizeof(PetscInt), cudaMemcpyHostToDevice, PetscDefaultCudaStream));
  /* Leaf ranks to root ranks: send info about leafsigdisp[] and leafbufdisp[] */
  PetscCall(PetscMalloc2(nRemoteLeafRanks, &bas->leafsigdisp, nRemoteLeafRanks, &bas->leafbufdisp));
  for (i = 0; i < nRemoteLeafRanks; i++) PetscCallMPI(MPI_Irecv(&bas->leafsigdisp[i], 1, MPIU_INT, bas->iranks[i + bas->ndiranks], tag, comm, &rootreqs[i]));
  for (i = 0; i < nRemoteRootRanks; i++) PetscCallMPI(MPI_Send(&i, 1, MPIU_INT, sf->ranks[i + sf->ndranks], tag, comm));
  PetscCallMPI(MPI_Waitall(nRemoteLeafRanks, rootreqs, MPI_STATUSES_IGNORE));
  for (i = 0; i < nRemoteLeafRanks; i++) PetscCallMPI(MPI_Irecv(&bas->leafbufdisp[i], 1, MPIU_INT, bas->iranks[i + bas->ndiranks], tag, comm, &rootreqs[i]));
  for (i = 0; i < nRemoteRootRanks; i++) {
    tmp = sf->roffset[i + sf->ndranks] - sf->roffset[sf->ndranks];
    PetscCallMPI(MPI_Send(&tmp, 1, MPIU_INT, sf->ranks[i + sf->ndranks], tag, comm));
  }
  PetscCallMPI(MPI_Waitall(nRemoteLeafRanks, rootreqs, MPI_STATUSES_IGNORE));
  /* Mirror the leaf-side arrays on the device */
  PetscCallCUDA(cudaMalloc((void **)&bas->leafbufdisp_d, nRemoteLeafRanks * sizeof(PetscInt)));
  PetscCallCUDA(cudaMalloc((void **)&bas->leafsigdisp_d, nRemoteLeafRanks * sizeof(PetscInt)));
  PetscCallCUDA(cudaMalloc((void **)&bas->iranks_d, nRemoteLeafRanks * sizeof(PetscMPIInt)));
  PetscCallCUDA(cudaMalloc((void **)&bas->ioffset_d, (nRemoteLeafRanks + 1) * sizeof(PetscInt)));
  PetscCallCUDA(cudaMemcpyAsync(bas->leafbufdisp_d, bas->leafbufdisp, nRemoteLeafRanks * sizeof(PetscInt), cudaMemcpyHostToDevice, PetscDefaultCudaStream));
  PetscCallCUDA(cudaMemcpyAsync(bas->leafsigdisp_d, bas->leafsigdisp, nRemoteLeafRanks * sizeof(PetscInt), cudaMemcpyHostToDevice, PetscDefaultCudaStream));
  PetscCallCUDA(cudaMemcpyAsync(bas->iranks_d, bas->iranks + bas->ndiranks, nRemoteLeafRanks * sizeof(PetscMPIInt), cudaMemcpyHostToDevice, PetscDefaultCudaStream));
  PetscCallCUDA(cudaMemcpyAsync(bas->ioffset_d, bas->ioffset + bas->ndiranks, (nRemoteLeafRanks + 1) * sizeof(PetscInt), cudaMemcpyHostToDevice, PetscDefaultCudaStream));
  PetscCall(PetscFree2(rootreqs, leafreqs));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Decide whether this SF operation can use NVSHMEM. The one-time eligibility check (SFBASIC on
   a PETSC_COMM_WORLD-congruent comm, no rank with both data pointers NULL) is cached in
   sf->checked_nvshmem_eligibility; the per-call memtype check runs every time. */
PetscErrorCode PetscSFLinkNvshmemCheck(PetscSF sf, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, const void *leafdata, PetscBool *use_nvshmem)
{
  MPI_Comm comm;
  PetscBool isBasic;
  PetscMPIInt result = MPI_UNEQUAL;
  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  /* Check if the sf is eligible for NVSHMEM, if we have not checked yet.
     Note the check result <use_nvshmem> must be the same over comm, since an SFLink must be collectively either NVSHMEM or MPI.
  */
  if (sf->use_nvshmem && !sf->checked_nvshmem_eligibility) {
    /* Only use NVSHMEM for SFBASIC on PETSC_COMM_WORLD */
    PetscCall(PetscObjectTypeCompare((PetscObject)sf, PETSCSFBASIC, &isBasic));
    if (isBasic) PetscCallMPI(MPI_Comm_compare(PETSC_COMM_WORLD, comm, &result));
    if (!isBasic || (result != MPI_IDENT && result != MPI_CONGRUENT)) sf->use_nvshmem = PETSC_FALSE; /* If not eligible, clear the flag so that we don't try again */
    /* Do further check: If on a rank, both rootdata and leafdata are NULL, we might think they are PETSC_MEMTYPE_CUDA (or HOST)
       and then use NVSHMEM. But if root/leafmtypes on other ranks are PETSC_MEMTYPE_HOST (or DEVICE), this would lead to
       inconsistency on the return value <use_nvshmem>. To be safe, we simply disable nvshmem on these rare SFs.
    */
    if (sf->use_nvshmem) {
      PetscInt hasNullRank = (!rootdata && !leafdata) ? 1 : 0;
      PetscCall(MPIU_Allreduce(MPI_IN_PLACE, &hasNullRank, 1, MPIU_INT, MPI_LOR, comm));
      if (hasNullRank) sf->use_nvshmem = PETSC_FALSE;
    }
    sf->checked_nvshmem_eligibility = PETSC_TRUE; /* If eligible, don't do above check again */
  }
  /* Check if rootmtype and leafmtype collectively are PETSC_MEMTYPE_CUDA */
  if (sf->use_nvshmem) {
    PetscInt oneCuda = (!rootdata || PetscMemTypeCUDA(rootmtype)) && (!leafdata || PetscMemTypeCUDA(leafmtype)) ? 1 : 0; /* Do I use cuda for both root&leafmtype? */
    PetscInt allCuda = oneCuda; /* Assume the same for all ranks. But if not, in opt mode, return value <use_nvshmem> won't be collective! */
#if defined(PETSC_USE_DEBUG) /* Check in debug mode. Note MPI_Allreduce is expensive, so only in debug mode */
    PetscCall(MPIU_Allreduce(&oneCuda, &allCuda, 1, MPIU_INT, MPI_LAND, comm));
    PetscCheck(allCuda == oneCuda, comm, PETSC_ERR_SUP, "root/leaf mtypes are inconsistent among ranks, which may lead to SF nvshmem failure in opt mode. Add -use_nvshmem 0 to disable it.");
#endif
    if (allCuda) {
      PetscCall(PetscNvshmemInitializeCheck());
      if (!sf->setup_nvshmem) { /* Set up nvshmem related fields on this SF on-demand */
        PetscCall(PetscSFSetUp_Basic_NVSHMEM(sf));
        sf->setup_nvshmem = PETSC_TRUE;
      }
      *use_nvshmem = PETSC_TRUE;
    } else {
      *use_nvshmem = PETSC_FALSE;
    }
  } else {
    *use_nvshmem = PETSC_FALSE;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Build dependence between <stream> and <remoteCommStream> at the entry of NVSHMEM communication */
static PetscErrorCode PetscSFLinkBuildDependenceBegin(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic *bas    = (PetscSF_Basic *)sf->data;
  PetscInt       buflen = (direction == PETSCSF_ROOT2LEAF) ? bas->rootbuflen[PETSCSF_REMOTE] : sf->leafbuflen[PETSCSF_REMOTE];

  PetscFunctionBegin;
  /* Make <remoteCommStream> wait until work queued on <stream> (i.e., packing the send buffer)
     has finished, so NVSHMEM communication reads fully-packed data. Skip when there is nothing to send.
     (Removed an unused local `cudaError_t cerr;` left over from a pre-PetscCallCUDA error-checking style.) */
  if (buflen) {
    PetscCallCUDA(cudaEventRecord(link->dataReady, link->stream));
    PetscCallCUDA(cudaStreamWaitEvent(link->remoteCommStream, link->dataReady, 0));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Build dependence between <stream> and <remoteCommStream> at the exit of NVSHMEM communication */
static PetscErrorCode PetscSFLinkBuildDependenceEnd(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic *bas    = (PetscSF_Basic *)sf->data;
  PetscInt       buflen = (direction == PETSCSF_ROOT2LEAF) ? sf->leafbuflen[PETSCSF_REMOTE] : bas->rootbuflen[PETSCSF_REMOTE];

  PetscFunctionBegin;
  /* If we will unpack into a non-null device buffer, make <stream> wait until the NVSHMEM
     communication queued on <remoteCommStream> has completed.
     (Removed an unused local `cudaError_t cerr;` left over from a pre-PetscCallCUDA error-checking style.) */
  if (buflen) {
    PetscCallCUDA(cudaEventRecord(link->endRemoteComm, link->remoteCommStream));
    PetscCallCUDA(cudaStreamWaitEvent(link->stream, link->endRemoteComm, 0));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Send/Put signals to remote ranks
Input parameters:
+ n - Number of remote ranks
. sig - Signal address in symmetric heap
. sigdisp - To i-th rank, use its signal at offset sigdisp[i]
. ranks - remote ranks
- newval - Set signals to this value
*/
__global__ static void NvshmemSendSignals(PetscInt n, uint64_t *sig, PetscInt *sigdisp, PetscMPIInt *ranks, uint64_t newval)
{
  /* One thread per destination rank: write <newval> into rank ranks[tid]'s copy of <sig>,
     at that rank's offset sigdisp[tid] in the symmetric heap. */
  const int tid = blockDim.x * blockIdx.x + threadIdx.x;

  if (tid >= n) return;
  nvshmemx_uint64_signal(sig + sigdisp[tid], newval, ranks[tid]);
}
/* Wait until local signals equal to the expected value and then set them to a new value
Input parameters:
+ n - Number of signals
. sig - Local signal address
. expval - expected value
- newval - Set signals to this new value
*/
__global__ static void NvshmemWaitSignals(PetscInt n, uint64_t *sig, uint64_t expval, uint64_t newval)
{
  /* Launched with a single thread (see call sites). Per Akhil Langer@NVIDIA, one thread calling
     nvshmem_uint64_wait_until_all() performs better than one-thread-per-signal
     nvshmem_signal_wait_until() loops, so the per-thread variant was dropped. */
  nvshmem_uint64_wait_until_all(sig, n, NULL /*no mask*/, NVSHMEM_CMP_EQ, expval);
  for (int i = 0; i < n; i++) sig[i] = newval;
}
/* ===========================================================================================================
A set of routines to support receiver initiated communication using the get method
The getting protocol is:
Sender has a send buf (sbuf) and a signal variable (ssig); Receiver has a recv buf (rbuf) and a signal variable (rsig);
All signal variables have an initial value 0.
Sender: | Receiver:
1. Wait ssig be 0, then set it to 1
2. Pack data into stand alone sbuf |
3. Put 1 to receiver's rsig | 1. Wait rsig to be 1, then set it 0
| 2. Get data from remote sbuf to local rbuf
| 3. Put 1 to sender's ssig
| 4. Unpack data from local rbuf
===========================================================================================================*/
/* PrePack operation -- since sender will overwrite the send buffer which the receiver might be getting data from.
Sender waits for signals (from receivers) indicating receivers have finished getting data
*/
/* PrePack operation of the get-protocol: before the sender overwrites its send buffer (which
   receivers may still be getting from), wait for every receiver's "done getting" signal. */
PetscErrorCode PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic  *bas        = (PetscSF_Basic *)sf->data;
  const PetscBool rootToLeaf = (direction == PETSCSF_ROOT2LEAF) ? PETSC_TRUE : PETSC_FALSE;
  /* ROOT2LEAF: leaf ranks get my root data and set my rootSendSig; LEAF2ROOT: the mirror case */
  uint64_t *sig = rootToLeaf ? link->rootSendSig : link->leafSendSig;
  PetscInt  n   = rootToLeaf ? bas->nRemoteLeafRanks : sf->nRemoteRootRanks;

  PetscFunctionBegin;
  if (n) {
    /* Wait for the signals to become 0 (receivers done), then set them to 1 (buffer in use) */
    NvshmemWaitSignals<<<1, 1, 0, link->remoteCommStream>>>(n, sig, 0, 1);
    PetscCallCUDA(cudaGetLastError());
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* n thread blocks. Each takes in charge one remote rank */
/* Launched with <nsrcranks> single-thread blocks; block b serves src rank srcranks[b]. */
__global__ static void GetDataFromRemotelyAccessible(PetscInt nsrcranks, PetscMPIInt *srcranks, const char *src, PetscInt *srcdisp, char *dst, PetscInt *dstdisp, PetscInt unitbytes)
{
  const int         b  = blockIdx.x;
  const PetscMPIInt pe = srcranks[b];

  /* Locally accessible PEs are served on the host with nvshmemx_getmem_nbi_on_stream() instead */
  if (nvshmem_ptr(src, pe)) return;

  {
    const PetscInt nbytes = (dstdisp[b + 1] - dstdisp[b]) * unitbytes; /* byte count of this rank's chunk */
    nvshmem_getmem_nbi(dst + (dstdisp[b] - dstdisp[0]) * unitbytes, src + srcdisp[b] * unitbytes, nbytes, pe);
  }
}
/* Start communication -- Get data in the given direction */
/* Start communication -- Get data in the given direction.
   Steps on <remoteCommStream>: (1) tell dst ranks they may get; (2) wait for permission from my src ranks;
   (3) start non-blocking gets (finished in PetscSFLinkGetDataEnd_NVSHMEM()).
   (Removed an unused local `cudaError_t cerr;` left over from a pre-PetscCallCUDA error-checking style.) */
PetscErrorCode PetscSFLinkGetDataBegin_NVSHMEM(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  PetscInt       nsrcranks, ndstranks, nLocallyAccessible = 0;
  char          *src, *dst;
  PetscInt      *srcdisp_h, *dstdisp_h; /* host copies of the displacement arrays */
  PetscInt      *srcdisp_d, *dstdisp_d; /* device copies */
  PetscMPIInt   *srcranks_h;
  PetscMPIInt   *srcranks_d, *dstranks_d;
  uint64_t      *dstsig;
  PetscInt      *dstsigdisp_d;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkBuildDependenceBegin(sf, link, direction));
  if (direction == PETSCSF_ROOT2LEAF) { /* src is root, dst is leaf; we will move data from src to dst */
    nsrcranks  = sf->nRemoteRootRanks;
    src        = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* root buf is the send buf; it is in symmetric heap */
    srcdisp_h  = sf->rootbufdisp;                                     /* for my i-th remote root rank, I will access its buf at offset rootbufdisp[i] */
    srcdisp_d  = sf->rootbufdisp_d;
    srcranks_h = sf->ranks + sf->ndranks; /* my (remote) root ranks */
    srcranks_d = sf->ranks_d;

    ndstranks    = bas->nRemoteLeafRanks;
    dst          = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* recv buf is the local leaf buf, also in symmetric heap */
    dstdisp_h    = sf->roffset + sf->ndranks;                           /* offsets of the local leaf buf. Note dstdisp[0] is not necessarily 0 */
    dstdisp_d    = sf->roffset_d;
    dstranks_d   = bas->iranks_d; /* my (remote) leaf ranks */
    dstsig       = link->leafRecvSig;
    dstsigdisp_d = bas->leafsigdisp_d;
  } else { /* src is leaf, dst is root; we will move data from src to dst */
    nsrcranks  = bas->nRemoteLeafRanks;
    src        = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* leaf buf is the send buf */
    srcdisp_h  = bas->leafbufdisp;                                    /* for my i-th remote leaf rank, I will access its buf at offset leafbufdisp[i] */
    srcdisp_d  = bas->leafbufdisp_d;
    srcranks_h = bas->iranks + bas->ndiranks; /* my (remote) leaf ranks */
    srcranks_d = bas->iranks_d;

    ndstranks    = sf->nRemoteRootRanks;
    dst          = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* the local root buf is the recv buf */
    dstdisp_h    = bas->ioffset + bas->ndiranks;                        /* offsets of the local root buf. Note dstdisp[0] is not necessarily 0 */
    dstdisp_d    = bas->ioffset_d;
    dstranks_d   = sf->ranks_d; /* my (remote) root ranks */
    dstsig       = link->rootRecvSig;
    dstsigdisp_d = sf->rootsigdisp_d;
  }

  /* After Pack operation -- src tells dst ranks that they are allowed to get data */
  if (ndstranks) {
    NvshmemSendSignals<<<(ndstranks + 255) / 256, 256, 0, link->remoteCommStream>>>(ndstranks, dstsig, dstsigdisp_d, dstranks_d, 1); /* set signals to 1 */
    PetscCallCUDA(cudaGetLastError());
  }

  /* dst waits for signals (permissions) from src ranks to start getting data */
  if (nsrcranks) {
    NvshmemWaitSignals<<<1, 1, 0, link->remoteCommStream>>>(nsrcranks, dstsig, 1, 0); /* wait the signals to be 1, then set them to 0 */
    PetscCallCUDA(cudaGetLastError());
  }

  /* dst gets data from src ranks using non-blocking nvshmem_gets, which are finished in PetscSFLinkGetDataEnd_NVSHMEM() */

  /* Count number of locally accessible src ranks, which should be a small number */
  for (int i = 0; i < nsrcranks; i++) {
    if (nvshmem_ptr(src, srcranks_h[i])) nLocallyAccessible++;
  }

  /* Get data from remotely accessible PEs (one single-thread block per src rank; the kernel skips local PEs) */
  if (nLocallyAccessible < nsrcranks) {
    GetDataFromRemotelyAccessible<<<nsrcranks, 1, 0, link->remoteCommStream>>>(nsrcranks, srcranks_d, src, srcdisp_d, dst, dstdisp_d, link->unitbytes);
    PetscCallCUDA(cudaGetLastError());
  }

  /* Get data from locally accessible PEs via the host API, which can use CUDA copy engines */
  if (nLocallyAccessible) {
    for (int i = 0; i < nsrcranks; i++) {
      int pe = srcranks_h[i];
      if (nvshmem_ptr(src, pe)) {
        size_t nelems = (dstdisp_h[i + 1] - dstdisp_h[i]) * link->unitbytes;
        nvshmemx_getmem_nbi_on_stream(dst + (dstdisp_h[i] - dstdisp_h[0]) * link->unitbytes, src + srcdisp_h[i] * link->unitbytes, nelems, pe, link->remoteCommStream);
      }
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Finish the communication (can be done before Unpack)
Receiver tells its senders that they are allowed to reuse their send buffer (since receiver has got data from their send buffer)
*/
/* Finish the communication (can be done before Unpack).
   Receiver tells its senders that they are allowed to reuse their send buffer (since receiver has got data from their send buffer).
   (Removed an unused local `cudaError_t cerr;` left over from a pre-PetscCallCUDA error-checking style.) */
PetscErrorCode PetscSFLinkGetDataEnd_NVSHMEM(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  uint64_t      *srcsig;
  PetscInt       nsrcranks, *srcsigdisp;
  PetscMPIInt   *srcranks;

  PetscFunctionBegin;
  if (direction == PETSCSF_ROOT2LEAF) { /* leaf ranks are getting data */
    nsrcranks  = sf->nRemoteRootRanks;
    srcsig     = link->rootSendSig;  /* I want to set their root signal */
    srcsigdisp = sf->rootsigdisp_d;  /* offset of each root signal */
    srcranks   = sf->ranks_d;        /* ranks of the n root ranks */
  } else { /* LEAF2ROOT, root ranks are getting data */
    nsrcranks  = bas->nRemoteLeafRanks;
    srcsig     = link->leafSendSig;
    srcsigdisp = bas->leafsigdisp_d;
    srcranks   = bas->iranks_d;
  }

  if (nsrcranks) {
    nvshmemx_quiet_on_stream(link->remoteCommStream); /* Finish the nonblocking get, so that we can unpack afterwards */
    PetscCallCUDA(cudaGetLastError());
    NvshmemSendSignals<<<(nsrcranks + 511) / 512, 512, 0, link->remoteCommStream>>>(nsrcranks, srcsig, srcsigdisp, srcranks, 0); /* set signals to 0: senders may reuse their send buffers */
    PetscCallCUDA(cudaGetLastError());
  }
  PetscCall(PetscSFLinkBuildDependenceEnd(sf, link, direction));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* ===========================================================================================================
A set of routines to support sender initiated communication using the put-based method (the default)
The putting protocol is:
Sender has a send buf (sbuf) and a send signal var (ssig); Receiver has a stand-alone recv buf (rbuf)
and a recv signal var (rsig); All signal variables have an initial value 0. rbuf is allocated by SF and
is in nvshmem space.
Sender: | Receiver:
|
1. Pack data into sbuf |
2. Wait ssig be 0, then set it to 1 |
3. Put data to remote stand-alone rbuf |
4. Fence // make sure 5 happens after 3 |
5. Put 1 to receiver's rsig | 1. Wait rsig to be 1, then set it 0
| 2. Unpack data from local rbuf
| 3. Put 0 to sender's ssig
===========================================================================================================*/
/* n thread blocks. Each takes in charge one remote rank */
/* Launched with <ndstranks> single-thread blocks; block <bid> serves dst rank dstranks[bid].
   Handles ONLY dst PEs that are NOT locally accessible; locally-accessible PEs are served on the
   host by nvshmemx_putmem_nbi_on_stream() in PetscSFLinkPutDataBegin_NVSHMEM(). */
__global__ static void WaitAndPutDataToRemotelyAccessible(PetscInt ndstranks, PetscMPIInt *dstranks, char *dst, PetscInt *dstdisp, const char *src, PetscInt *srcdisp, uint64_t *srcsig, PetscInt unitbytes)
{
  int bid = blockIdx.x;
  PetscMPIInt pe = dstranks[bid];

  if (!nvshmem_ptr(dst, pe)) {
    PetscInt nelems = (srcdisp[bid + 1] - srcdisp[bid]) * unitbytes; /* NOTE: despite the name, this is a byte count (elements * unitbytes) */
    nvshmem_uint64_wait_until(srcsig + bid, NVSHMEM_CMP_EQ, 0); /* Wait until the sig = 0, i.e., receiver is done with our previous data */
    srcsig[bid] = 1; /* mark the send buffer as in flight */
    nvshmem_putmem_nbi(dst + dstdisp[bid] * unitbytes, src + (srcdisp[bid] - srcdisp[0]) * unitbytes, nelems, pe);
  }
}
/* one-thread kernel, which takes in charge all locally accessible */
/* One-thread kernel, which takes in charge all LOCALLY accessible dst PEs: wait for each
   corresponding send-signal to be 0 (receiver done), then set it to 1 (buffer in flight).
   The actual data puts for these PEs are issued afterwards on the host. */
__global__ static void WaitSignalsFromLocallyAccessible(PetscInt ndstranks, PetscMPIInt *dstranks, uint64_t *srcsig, const char *dst)
{
  for (int i = 0; i < ndstranks; i++) {
    int pe = dstranks[i];
    if (nvshmem_ptr(dst, pe)) { /* non-null => <pe> is locally accessible */
      nvshmem_uint64_wait_until(srcsig + i, NVSHMEM_CMP_EQ, 0); /* Wait until the sig = 0 */
      srcsig[i] = 1;
    }
  }
}
/* Put data in the given direction */
/* Put data in the given direction (put-based protocol, the default).
   Waits on per-destination send-signals, then issues non-blocking puts; puts are completed in
   PetscSFLinkPutDataEnd_NVSHMEM().
   (Removed an unused local `cudaError_t cerr;` left over from a pre-PetscCallCUDA error-checking style.) */
PetscErrorCode PetscSFLinkPutDataBegin_NVSHMEM(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  PetscInt       ndstranks, nLocallyAccessible = 0;
  char          *src, *dst;
  PetscInt      *srcdisp_h, *dstdisp_h; /* host copies of the displacement arrays */
  PetscInt      *srcdisp_d, *dstdisp_d; /* device copies */
  PetscMPIInt   *dstranks_h;
  PetscMPIInt   *dstranks_d;
  uint64_t      *srcsig;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkBuildDependenceBegin(sf, link, direction));
  if (direction == PETSCSF_ROOT2LEAF) {                          /* put data in rootbuf to leafbuf */
    ndstranks  = bas->nRemoteLeafRanks;                          /* number of (remote) leaf ranks */
    src        = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* Both src & dst must be symmetric */
    dst        = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
    srcdisp_h  = bas->ioffset + bas->ndiranks; /* offsets of rootbuf. srcdisp[0] is not necessarily zero */
    srcdisp_d  = bas->ioffset_d;
    srcsig     = link->rootSendSig;
    dstdisp_h  = bas->leafbufdisp; /* for my i-th remote leaf rank, I will access its leaf buf at offset leafbufdisp[i] */
    dstdisp_d  = bas->leafbufdisp_d;
    dstranks_h = bas->iranks + bas->ndiranks; /* remote leaf ranks */
    dstranks_d = bas->iranks_d;
  } else { /* put data in leafbuf to rootbuf */
    ndstranks  = sf->nRemoteRootRanks;
    src        = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
    dst        = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
    srcdisp_h  = sf->roffset + sf->ndranks; /* offsets of leafbuf */
    srcdisp_d  = sf->roffset_d;
    srcsig     = link->leafSendSig;
    dstdisp_h  = sf->rootbufdisp; /* for my i-th remote root rank, I will access its root buf at offset rootbufdisp[i] */
    dstdisp_d  = sf->rootbufdisp_d;
    dstranks_h = sf->ranks + sf->ndranks; /* remote root ranks */
    dstranks_d = sf->ranks_d;
  }

  /* Wait for signals and then put data to dst ranks using non-blocking nvshmem_put, which are finished in PetscSFLinkPutDataEnd_NVSHMEM */

  /* Count number of locally accessible neighbors, which should be a small number */
  for (int i = 0; i < ndstranks; i++) {
    if (nvshmem_ptr(dst, dstranks_h[i])) nLocallyAccessible++;
  }

  /* For remotely accessible PEs, send data to them in one kernel call (the kernel skips local PEs) */
  if (nLocallyAccessible < ndstranks) {
    WaitAndPutDataToRemotelyAccessible<<<ndstranks, 1, 0, link->remoteCommStream>>>(ndstranks, dstranks_d, dst, dstdisp_d, src, srcdisp_d, srcsig, link->unitbytes);
    PetscCallCUDA(cudaGetLastError());
  }

  /* For locally accessible PEs, use host API, which uses CUDA copy-engines and is much faster than device API */
  if (nLocallyAccessible) {
    WaitSignalsFromLocallyAccessible<<<1, 1, 0, link->remoteCommStream>>>(ndstranks, dstranks_d, srcsig, dst);
    for (int i = 0; i < ndstranks; i++) {
      int pe = dstranks_h[i];
      if (nvshmem_ptr(dst, pe)) { /* If return a non-null pointer, then <pe> is locally accessible */
        size_t nelems = (srcdisp_h[i + 1] - srcdisp_h[i]) * link->unitbytes;
        /* Initiate the nonblocking communication */
        nvshmemx_putmem_nbi_on_stream(dst + dstdisp_h[i] * link->unitbytes, src + (srcdisp_h[i] - srcdisp_h[0]) * link->unitbytes, nelems, pe, link->remoteCommStream);
      }
    }
  }

  if (nLocallyAccessible) { nvshmemx_quiet_on_stream(link->remoteCommStream); /* Calling nvshmem_fence/quiet() does not fence the above nvshmemx_putmem_nbi_on_stream! */ }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* A one-thread kernel. The thread takes in charge all remote PEs */
/* A one-thread kernel. The thread takes in charge all remote PEs. */
__global__ static void PutDataEnd(PetscInt nsrcranks, PetscInt ndstranks, PetscMPIInt *dstranks, uint64_t *dstsig, PetscInt *dstsigdisp)
{
  /* TODO: Shall we finish the non-blocking remote puts first? */
  /* 1. Send a signal to each dst rank */
  /* According to Akhil@NVIDIA, IB is ordered, so no fence is needed for remote PEs.
     For local PEs, we already called nvshmemx_quiet_on_stream(). Therefore, we are good to send signals to all dst ranks now.
  */
  for (int i = 0; i < ndstranks; i++) nvshmemx_uint64_signal(dstsig + dstsigdisp[i], 1, dstranks[i]); /* set sig to 1: data has landed at the receiver */

  /* 2. Wait for signals from src ranks (if any), i.e., until my own recv buffer holds all incoming data */
  if (nsrcranks) {
    nvshmem_uint64_wait_until_all(dstsig, nsrcranks, NULL /*no mask*/, NVSHMEM_CMP_EQ, 1); /* wait sigs to be 1, then set them to 0 */
    for (int i = 0; i < nsrcranks; i++) dstsig[i] = 0;
  }
}
/* Finish the communication -- A receiver waits until it can access its receive buffer */
/* Finish the communication -- A receiver waits until it can access its receive buffer.
   (Removed an unused local `cudaError_t cerr;` left over from a pre-PetscCallCUDA error-checking style.) */
PetscErrorCode PetscSFLinkPutDataEnd_NVSHMEM(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  PetscMPIInt   *dstranks;
  uint64_t      *dstsig;
  PetscInt       nsrcranks, ndstranks, *dstsigdisp;

  PetscFunctionBegin;
  if (direction == PETSCSF_ROOT2LEAF) { /* put root data to leaf */
    nsrcranks  = sf->nRemoteRootRanks;
    ndstranks  = bas->nRemoteLeafRanks;
    dstranks   = bas->iranks_d;        /* leaf ranks */
    dstsig     = link->leafRecvSig;    /* I will set my leaf ranks's RecvSig */
    dstsigdisp = bas->leafsigdisp_d;   /* for my i-th remote leaf rank, I will access its signal at offset leafsigdisp[i] */
  } else { /* LEAF2ROOT */
    nsrcranks  = bas->nRemoteLeafRanks;
    ndstranks  = sf->nRemoteRootRanks;
    dstranks   = sf->ranks_d;
    dstsig     = link->rootRecvSig;
    dstsigdisp = sf->rootsigdisp_d;
  }

  if (nsrcranks || ndstranks) {
    PutDataEnd<<<1, 1, 0, link->remoteCommStream>>>(nsrcranks, ndstranks, dstranks, dstsig, dstsigdisp);
    PetscCallCUDA(cudaGetLastError());
  }
  PetscCall(PetscSFLinkBuildDependenceEnd(sf, link, direction));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* PostUnpack operation -- A receiver tells its senders that they are allowed to put data to here (it implies recv buf is free to take new data) */
/* PostUnpack operation -- A receiver tells its senders that they are allowed to put data to here
   (it implies the recv buf is free to take new data): reset each sender's send-signal to 0. */
PetscErrorCode PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Basic  *bas        = (PetscSF_Basic *)sf->data;
  const PetscBool rootToLeaf = (direction == PETSCSF_ROOT2LEAF) ? PETSC_TRUE : PETSC_FALSE;
  /* ROOT2LEAF: I allow my root ranks to put; LEAF2ROOT: I allow my leaf ranks to put */
  uint64_t    *srcsig       = rootToLeaf ? link->rootSendSig : link->leafSendSig;      /* their send signals */
  PetscInt     nsrcranks    = rootToLeaf ? sf->nRemoteRootRanks : bas->nRemoteLeafRanks;
  PetscInt    *srcsigdisp_d = rootToLeaf ? sf->rootsigdisp_d : bas->leafsigdisp_d;     /* offset of each signal */
  PetscMPIInt *srcranks_d   = rootToLeaf ? sf->ranks_d : bas->iranks_d;                /* ranks owning those signals */

  PetscFunctionBegin;
  if (nsrcranks) {
    /* Set remote signals to 0 */
    NvshmemSendSignals<<<(nsrcranks + 255) / 256, 256, 0, link->remoteCommStream>>>(nsrcranks, srcsig, srcsigdisp_d, srcranks_d, 0);
    PetscCallCUDA(cudaGetLastError());
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Destructor when the link uses nvshmem for communication */
/* Destructor when the link uses nvshmem for communication.
   (Removed an unused local `cudaError_t cerr;` left over from a pre-PetscCallCUDA error-checking style.) */
static PetscErrorCode PetscSFLinkDestroy_NVSHMEM(PetscSF sf, PetscSFLink link)
{
  PetscFunctionBegin;
  PetscCallCUDA(cudaEventDestroy(link->dataReady));
  PetscCallCUDA(cudaEventDestroy(link->endRemoteComm));
  PetscCallCUDA(cudaStreamDestroy(link->remoteCommStream));

  /* nvshmem does not need buffers on host, which should be NULL */
  PetscCall(PetscNvshmemFree(link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]));
  PetscCall(PetscNvshmemFree(link->leafSendSig));
  PetscCall(PetscNvshmemFree(link->leafRecvSig));
  PetscCall(PetscNvshmemFree(link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]));
  PetscCall(PetscNvshmemFree(link->rootSendSig));
  PetscCall(PetscNvshmemFree(link->rootRecvSig));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Create (or fetch from cache) an SF link configured for NVSHMEM communication.
   Decides whether root/leafdata can be used directly as send/recv buffers, allocates symmetric-heap
   buffers and signal arrays on demand, and installs the get- or put-protocol callbacks.
   (Removed an unused local `cudaError_t cerr;` left over from a pre-PetscCallCUDA error-checking style.) */
PetscErrorCode PetscSFLinkCreate_NVSHMEM(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, const void *leafdata, MPI_Op op, PetscSFOperation sfop, PetscSFLink *mylink)
{
  PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
  PetscSFLink   *p, link;
  PetscBool      match, rootdirect[2], leafdirect[2];
  int            greatestPriority;

  PetscFunctionBegin;
  /* Check to see if we can directly send/recv root/leafdata with the given sf, sfop and op.
     We only care root/leafdirect[PETSCSF_REMOTE], since we never need intermediate buffers in local communication with NVSHMEM.
  */
  if (sfop == PETSCSF_BCAST) { /* Move data from rootbuf to leafbuf */
    if (sf->use_nvshmem_get) {
      rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* send buffer has to be stand-alone (can't be rootdata) */
      leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE;
    } else {
      rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE;
      leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* Our put-protocol always needs a nvshmem alloc'ed recv buffer */
    }
  } else if (sfop == PETSCSF_REDUCE) { /* Move data from leafbuf to rootbuf */
    if (sf->use_nvshmem_get) {
      rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE;
      leafdirect[PETSCSF_REMOTE] = PETSC_FALSE;
    } else {
      rootdirect[PETSCSF_REMOTE] = PETSC_FALSE;
      leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE;
    }
  } else { /* PETSCSF_FETCH */
    rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* FETCH always need a separate rootbuf */
    leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* We also force allocating a separate leafbuf so that leafdata and leafupdate can share mpi requests */
  }

  /* Look for free nvshmem links in cache */
  for (p = &bas->avail; (link = *p); p = &link->next) {
    if (link->use_nvshmem) {
      PetscCall(MPIPetsc_Type_compare(unit, link->unit, &match));
      if (match) {
        *p = link->next; /* Remove from available list */
        goto found;
      }
    }
  }

  /* No cached link: build a new one */
  PetscCall(PetscNew(&link));
  PetscCall(PetscSFLinkSetUp_Host(sf, link, unit)); /* Compute link->unitbytes, dup link->unit etc. */
  if (sf->backend == PETSCSF_BACKEND_CUDA) PetscCall(PetscSFLinkSetUp_CUDA(sf, link, unit)); /* Setup pack routines, streams etc */
#if defined(PETSC_HAVE_KOKKOS)
  else if (sf->backend == PETSCSF_BACKEND_KOKKOS) PetscCall(PetscSFLinkSetUp_Kokkos(sf, link, unit));
#endif

  link->rootdirect[PETSCSF_LOCAL] = PETSC_TRUE; /* For the local part we directly use root/leafdata */
  link->leafdirect[PETSCSF_LOCAL] = PETSC_TRUE;

  /* Init signals to zero */
  if (!link->rootSendSig) PetscCall(PetscNvshmemCalloc(bas->nRemoteLeafRanksMax * sizeof(uint64_t), (void **)&link->rootSendSig));
  if (!link->rootRecvSig) PetscCall(PetscNvshmemCalloc(bas->nRemoteLeafRanksMax * sizeof(uint64_t), (void **)&link->rootRecvSig));
  if (!link->leafSendSig) PetscCall(PetscNvshmemCalloc(sf->nRemoteRootRanksMax * sizeof(uint64_t), (void **)&link->leafSendSig));
  if (!link->leafRecvSig) PetscCall(PetscNvshmemCalloc(sf->nRemoteRootRanksMax * sizeof(uint64_t), (void **)&link->leafRecvSig));

  link->use_nvshmem = PETSC_TRUE;
  link->rootmtype   = PETSC_MEMTYPE_DEVICE; /* Only need 0/1-based mtype from now on */
  link->leafmtype   = PETSC_MEMTYPE_DEVICE;

  /* Overwrite some function pointers set by PetscSFLinkSetUp_CUDA */
  link->Destroy = PetscSFLinkDestroy_NVSHMEM;
  if (sf->use_nvshmem_get) { /* get-based protocol */
    link->PrePack             = PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM;
    link->StartCommunication  = PetscSFLinkGetDataBegin_NVSHMEM;
    link->FinishCommunication = PetscSFLinkGetDataEnd_NVSHMEM;
  } else { /* put-based protocol */
    link->StartCommunication  = PetscSFLinkPutDataBegin_NVSHMEM;
    link->FinishCommunication = PetscSFLinkPutDataEnd_NVSHMEM;
    link->PostUnpack          = PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM;
  }

  /* Communication runs on its own high-priority, non-blocking stream so it can overlap with packing */
  PetscCallCUDA(cudaDeviceGetStreamPriorityRange(NULL, &greatestPriority));
  PetscCallCUDA(cudaStreamCreateWithPriority(&link->remoteCommStream, cudaStreamNonBlocking, greatestPriority));
  PetscCallCUDA(cudaEventCreateWithFlags(&link->dataReady, cudaEventDisableTiming));
  PetscCallCUDA(cudaEventCreateWithFlags(&link->endRemoteComm, cudaEventDisableTiming));

found:
  /* Bind the actual buffers: either user data directly (when *direct) or symmetric-heap allocations */
  if (rootdirect[PETSCSF_REMOTE]) {
    link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char *)rootdata + bas->rootstart[PETSCSF_REMOTE] * link->unitbytes;
  } else {
    if (!link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) PetscCall(PetscNvshmemMalloc(bas->rootbuflen_rmax * link->unitbytes, (void **)&link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]));
    link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
  }

  if (leafdirect[PETSCSF_REMOTE]) {
    link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char *)leafdata + sf->leafstart[PETSCSF_REMOTE] * link->unitbytes;
  } else {
    if (!link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) PetscCall(PetscNvshmemMalloc(sf->leafbuflen_rmax * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]));
    link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
  }

  link->rootdirect[PETSCSF_REMOTE] = rootdirect[PETSCSF_REMOTE];
  link->leafdirect[PETSCSF_REMOTE] = leafdirect[PETSCSF_REMOTE];
  link->rootdata                   = rootdata; /* root/leafdata are keys to look up links in PetscSFXxxEnd */
  link->leafdata                   = leafdata;
  link->next                       = bas->inuse;
  bas->inuse                       = link;
  *mylink                          = link;
  PetscFunctionReturn(PETSC_SUCCESS);
}
#if defined(PETSC_USE_REAL_SINGLE)
/* Sum-reduce <count> floats across NVSHMEM_TEAM_WORLD on PetscDefaultCudaStream; result in dst */
PetscErrorCode PetscNvshmemSum(PetscInt count, float *dst, const float *src)
{
  PetscMPIInt num; /* Assume nvshmem's int is MPI's int */
  PetscFunctionBegin;
  PetscCall(PetscMPIIntCast(count, &num));
  nvshmemx_float_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD, dst, src, num, PetscDefaultCudaStream);
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Max-reduce <count> floats across NVSHMEM_TEAM_WORLD on PetscDefaultCudaStream; result in dst */
PetscErrorCode PetscNvshmemMax(PetscInt count, float *dst, const float *src)
{
  PetscMPIInt num; /* Assume nvshmem's int is MPI's int */
  PetscFunctionBegin;
  PetscCall(PetscMPIIntCast(count, &num));
  nvshmemx_float_max_reduce_on_stream(NVSHMEM_TEAM_WORLD, dst, src, num, PetscDefaultCudaStream);
  PetscFunctionReturn(PETSC_SUCCESS);
}
#elif defined(PETSC_USE_REAL_DOUBLE)
/* Sum-reduce <count> doubles across NVSHMEM_TEAM_WORLD on PetscDefaultCudaStream; result in dst */
PetscErrorCode PetscNvshmemSum(PetscInt count, double *dst, const double *src)
{
  PetscMPIInt num; /* Assume nvshmem's int is MPI's int */
  PetscFunctionBegin;
  PetscCall(PetscMPIIntCast(count, &num));
  nvshmemx_double_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD, dst, src, num, PetscDefaultCudaStream);
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Max-reduce <count> doubles across NVSHMEM_TEAM_WORLD on PetscDefaultCudaStream; result in dst */
PetscErrorCode PetscNvshmemMax(PetscInt count, double *dst, const double *src)
{
  PetscMPIInt num; /* Assume nvshmem's int is MPI's int */
  PetscFunctionBegin;
  PetscCall(PetscMPIIntCast(count, &num));
  nvshmemx_double_max_reduce_on_stream(NVSHMEM_TEAM_WORLD, dst, src, num, PetscDefaultCudaStream);
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
|
e0d830a42fdfe7928e5e9986af09ebce5bb3f538.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pbf_neighbor_search_util.h"
#include "pbf_grid.h"
#include "../../util/pbf_cuda_util.h"
#include <device_launch_parameters.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <sm_35_intrinsics.h>
#include <cub\cub.cuh>
#include <cub\device\device_radix_sort.cuh>
/* Device constant-memory simulation parameters (presumably set from host via hipMemcpyToSymbol — confirm).
   NOTE(review): c_h and c_max_pair_num are not referenced anywhere in this file's visible code;
   verify they are used by other translation units or remove them. */
__constant__ scalar_t c_h;
__constant__ uint32_t c_max_pair_num;
__constant__ scalar_t c_cell_width;
__constant__ uint3 c_grid_size;
namespace {
/* One thread per particle: compute the grid-cell hash of each particle's position and
   record the particle's pre-sort index alongside it. */
__global__ void calcHashCUDA(
	uint32_t* hash_particle,
	uint32_t* index_particle,
	const dom_dim* position,
	scalar_t cell_width,
	dom_udim grid_size,
	uint32_t num_particle)
{
	const uint32_t pid = blockDim.x * blockIdx.x + threadIdx.x;
	if (pid < num_particle) {
		const dom_dim pos = position[pid];
		hash_particle[pid] = pbf::cuda::calcGridHash(pbf::cuda::calcGridPos(pos, cell_width), grid_size);
		index_particle[pid] = pid;
	}
}
/* Given hashes sorted ascending, find for each cell hash the [start, end) range of particle
   indices belonging to that cell. A cell boundary is detected where a particle's hash differs
   from its predecessor's. Requires dynamic shared memory of (blockDim.x + 1) * sizeof(uint32_t). */
__global__ void findCellStartCUDA(
	uint32_t* cell_start, uint32_t* cell_end,
	const uint32_t* hash_particle, const uint32_t* index_particle,
	uint32_t num_particle
	)
{
	extern __shared__ uint32_t shared_hash[];	// blockSize + 1 elements
	uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
	uint32_t hash;
	// handle case when no. of particles not multiple of block size
	if (index < num_particle) {
		hash = hash_particle[index];
		// Load hash data into shared memory so that we can look at neighboring particle's hash value without loading
		// two hash values per thread
		shared_hash[threadIdx.x + 1] = hash;
		if (index > 0 && threadIdx.x == 0) {
			// first thread in block must load neighbor particle hash
			shared_hash[0] = hash_particle[index - 1];
		}
	}
	// Barrier is outside the guard: reached by every thread in the block, as required
	__syncthreads();
	if (index < num_particle) {
		// If this particle has a different cell index to the previous particle then it must be the first particle in the cell,
		// so store the index of this particle in the cell.
		// As it isn't the first particle, it must also be the cell end of the previous particle's cell
		if (index == 0 || hash != shared_hash[threadIdx.x]) {
			cell_start[hash] = index;
			if (index > 0)
				cell_end[shared_hash[threadIdx.x]] = index;
		}
		if (index == num_particle - 1) {
			// last particle closes its own cell (cell_end is exclusive)
			cell_end[hash] = index + 1;
		}
	}
}
/* 64-bit-hash overload of findCellStartCUDA (used with Z-order hashes; see calcZOrderHash).
   Same boundary-detection algorithm as the 32-bit version.
   Requires dynamic shared memory of (blockDim.x + 1) * sizeof(uint64_t). */
__global__ void findCellStartCUDA(
	uint32_t* cell_start, uint32_t* cell_end,
	const uint64_t* hash_particle, const uint32_t* index_particle,
	uint32_t num_particle
	)
{
	extern __shared__ uint64_t shared_hash64[];	// blockSize + 1 elements
	uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
	uint64_t hash;
	// handle case when no. of particles not multiple of block size
	if (index < num_particle) {
		hash = hash_particle[index];
		// Load hash data into shared memory so that we can look at neighboring particle's hash value without loading
		// two hash values per thread
		shared_hash64[threadIdx.x + 1] = hash;
		if (index > 0 && threadIdx.x == 0) {
			// first thread in block must load neighbor particle hash
			shared_hash64[0] = hash_particle[index - 1];
		}
	}
	// Barrier is outside the guard: reached by every thread in the block, as required
	__syncthreads();
	if (index < num_particle) {
		// If this particle has a different cell index to the previous particle then it must be the first particle in the cell,
		// so store the index of this particle in the cell.
		// As it isn't the first particle, it must also be the cell end of the previous particle's cell
		if (index == 0 || hash != shared_hash64[threadIdx.x]) {
			cell_start[hash] = index;
			if (index > 0)
				cell_end[shared_hash64[threadIdx.x]] = index;
		}
		if (index == num_particle - 1) {
			// last particle closes its own cell (cell_end is exclusive)
			cell_end[hash] = index + 1;
		}
	}
}
/* Gather positions and velocities into hash-sorted order:
   sorted[i] = old[index_particle[i]], one thread per output slot. */
__global__ void reorderDataCUDA(
	dom_dim* sorted_pos,
	dom_dim* sorted_vel,
	const dom_dim* old_pos,
	const dom_dim* old_vel,
	const uint32_t* index_particle,
	uint32_t num_particle)
{
	const uint32_t dst = blockDim.x * blockIdx.x + threadIdx.x;
	if (dst < num_particle) {
		const uint32_t src = index_particle[dst];
		sorted_pos[dst] = old_pos[src];
		sorted_vel[dst] = old_vel[src];
	}
}
/* Single-array overload: gather one vector field into hash-sorted order. */
__global__ void reorderDataCUDA(
	dom_dim* sorted,
	const dom_dim* old,
	const uint32_t* index_particle,
	uint32_t num)
{
	const uint32_t dst = blockDim.x * blockIdx.x + threadIdx.x;
	if (dst < num) {
		sorted[dst] = old[index_particle[dst]];
	}
}
/* Inverse of reorderDataCUDA: scatter sorted positions/velocities back to the
   original particle ordering, restored[index_particle[i]] = sorted[i]. */
__global__ void restoreOrderCUDA(
	dom_dim* restored_pos,
	dom_dim* restored_vel,
	const dom_dim* sorted_pos,
	const dom_dim* sorted_vel,
	const uint32_t* index_particle,
	uint32_t num_particle)
{
	const uint32_t src = blockDim.x * blockIdx.x + threadIdx.x;
	if (src < num_particle) {
		const uint32_t dst = index_particle[src];
		restored_pos[dst] = sorted_pos[src];
		restored_vel[dst] = sorted_vel[src];
	}
}
} // end of unnamed ns
namespace pbf {
namespace cuda {
/* Query the temporary-storage size CUB's radix sort needs for <num_item> key/value pairs.
   Passing a NULL temp-storage pointer makes SortPairs write the required byte count into
   temp_storage_size without sorting. The typed (uint32_t*) null pointers exist only so the
   SortPairs template can deduce key/value types. */
void getRadixSortStorageSize(
	size_t& temp_storage_size,
	uint32_t num_item
	)
{
	uint32_t* key_in = nullptr;
	uint32_t* key_out = nullptr;
	uint32_t* val_in = nullptr;
	uint32_t* val_out = nullptr;
	hipcub::DeviceRadixSort::SortPairs(NULL, temp_storage_size, key_in, key_out, val_in, val_out, num_item);
}
/// Sort (hash, index) pairs by hash (ascending) using hipCUB's device radix
/// sort. 'temp_storage' must be at least the size reported by
/// getRadixSortStorageSize(); results are written to sorted_hash /
/// sorted_index, leaving the input arrays as scratch for CUB.
void sortHashIndexCUB(
void* temp_storage, size_t temp_storage_size,
uint32_t* hash, uint32_t* sorted_hash,
uint32_t* index, uint32_t* sorted_index,
uint32_t num_particle
)
{
hipcub::DeviceRadixSort::SortPairs(temp_storage, temp_storage_size,
hash, sorted_hash, index, sorted_index, num_particle);
}
/// Host wrapper: compute a linear grid-cell hash and the identity index for
/// every particle (one thread per particle, 256 threads per block).
/// A zero particle count yields num_block == 0 and skips the launch.
void calcHash(
uint32_t* hash, uint32_t* index,
const dom_dim* position,
scalar_t cell_width,
dom_udim grid_size,
uint32_t num_particle)
{
uint32_t num_thread, num_block;
computeGridSize(num_particle, 256, num_block, num_thread);
if (num_block > 0)
calcHashCUDA << < num_block, num_thread >> >
(hash, index, position, cell_width, grid_size, num_particle);
#ifdef _DEBUG
// Debug builds surface launch/execution errors immediately.
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
// z-order hash
namespace {
/// Kernel: one thread per particle. Computes a 64-bit Z-order (Morton) cell
/// hash from the particle's grid cell and records the identity index.
/// Note: 'grid_size' is accepted for signature parity with calcHashCUDA but
/// is not referenced — the Z-order hash depends only on the grid position.
__global__ void calcZOrderHashCUDA(
uint64_t* hash_particle,
uint32_t* index_particle,
const dom_dim* position,
scalar_t cell_width,
dom_udim grid_size,
uint32_t num_particle)
{
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_particle) return;
auto p = position[index];
auto grid_pos = pbf::cuda::calcGridPos(p, cell_width);
auto hash = pbf::cuda::calcGridHashZOrder(grid_pos);
hash_particle[index] = hash;
index_particle[index] = index;
}
}
/// Host wrapper for calcZOrderHashCUDA: fills 'hash' with 64-bit Z-order cell
/// hashes and 'index' with 0..num_particle-1 (256 threads per block).
/// Skips the launch when num_particle is 0.
void calcZOrderHash(
uint64_t* hash, uint32_t* index,
const dom_dim* position,
scalar_t cell_width,
dom_udim grid_size,
uint32_t num_particle)
{
uint32_t num_thread, num_block;
computeGridSize(num_particle, 256, num_block, num_thread);
if (num_block > 0)
calcZOrderHashCUDA<< < num_block, num_thread >> >
(hash, index, position, cell_width, grid_size, num_particle);
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
/// Sort particle indices by their 32-bit cell hash (ascending), in place,
/// via thrust::sort_by_key over raw device pointers. Alternative to the
/// CUB path in sortHashIndexCUB (no caller-provided temp storage needed).
void sortHashIndex(
uint32_t* hash, uint32_t* index,
uint32_t num_particle)
{
if (num_particle > 0) {
thrust::sort_by_key(thrust::device_ptr<uint32_t>(hash), thrust::device_ptr<uint32_t>(hash + num_particle), thrust::device_ptr<uint32_t>(index));
}
//thrust::sort_by_key(hash, hash + num_particle, index);
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
/// Host wrapper: build per-cell [cell_start, cell_end) particle index ranges
/// from 32-bit hashes that are already sorted ascending.
/// Dynamic shared memory holds blockDim.x + 1 hashes — one extra slot so each
/// thread can compare against its predecessor's hash (see findCellStartCUDA).
void findCellStart(
uint32_t* cell_start, uint32_t* cell_end,
const uint32_t* hash, const uint32_t* index,
uint32_t num_particle)
{
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
uint32_t smem_size = sizeof(uint32_t) * (num_thread + 1);
if (num_block > 0)
findCellStartCUDA << < num_block, num_thread, smem_size >> >
(cell_start, cell_end, hash, index, num_particle);
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
/// 64-bit overload used by the Z-order hash path: same as the 32-bit version
/// but the shared-memory window is sized for uint64_t hashes
/// ((blockDim.x + 1) * sizeof(uint64_t)).
void findCellStart(
uint32_t* cell_start, uint32_t* cell_end,
const uint64_t* hash, const uint32_t* index,
uint32_t num_particle)
{
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
uint32_t smem_size = sizeof(uint64_t) * (num_thread + 1);
if (num_block > 0)
findCellStartCUDA << < num_block, num_thread, smem_size >> >
(cell_start, cell_end, hash, index, num_particle);
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
/// Host wrapper: reorder positions and velocities into hash order using the
/// sorted index map (192 threads per block). Skips the launch when empty.
void reorderData(
pbf_phase_array& sorted,
const pbf_phase_array& old,
const uint32_t* index,
uint32_t num_particle)
{
uint32_t num_thread, num_block;
computeGridSize(num_particle, 192, num_block, num_thread);
if (num_block > 0)
reorderDataCUDA << < num_block, num_thread >> >(sorted.x, sorted.v, old.x, old.v, index, num_particle);
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
/// Host wrapper: reorder a single dom_dim array into hash order using the
/// sorted index map (192 threads per block).
void reorderData(
dom_dim* sorted,
const dom_dim* old,
const uint32_t* index,
uint32_t num)
{
uint32_t num_thread, num_block;
computeGridSize(num, 192, num_block, num_thread);
if (num_block > 0)
reorderDataCUDA << < num_block, num_thread >> >(sorted, old, index, num);
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
/// Host wrapper: scatter sorted positions/velocities back to their original
/// (pre-sort) slots — the inverse of reorderData.
void restoreOrder(
pbf_phase_array& restored,
const pbf_phase_array& sorted,
const uint32_t* index,
uint32_t num_particle)
{
uint32_t num_thread, num_block;
computeGridSize(num_particle, 192, num_block, num_thread);
if (num_block > 0)
restoreOrderCUDA << < num_block, num_thread >> >(restored.x, restored.v, sorted.x, sorted.v, index, num_particle);
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
namespace {
/// Radius test for one candidate: load the candidate's position and append
/// 'pair_index' to this thread's neighbor list when it lies within the
/// smoothing radius c_h (constant memory). The list is warp-interleaved:
/// consecutive entries of one thread sit 32 elements apart (the base offset
/// is set up in detectNeighborsCUDA).
/// NOTE(review): the capacity guard is c_max_pair_num-1, so the final slot is
/// never used — confirm whether that slack is intentional (sentinel space?).
template<typename T>
__device__ void findPair(T* pair_indices, uint16_t& pair_cnt,
uint32_t pair_index, const dom_dim& self_pos, const dom_dim* __restrict__ other_pos)
{
const auto pair_pos = other_pos[pair_index];
if (pair_cnt < c_max_pair_num-1) {
#if 1
const auto h2 = c_h * c_h;
const auto pos_diff = self_pos - pair_pos;
const auto r2 = glm::dot(pos_diff, pos_diff);
if (r2 < h2) {
pair_indices[pair_cnt * 32] = pair_index; // global
//pair_indices[pair_cnt * 128] = pair_index; // shared
++pair_cnt;
//printf("here we come\n");
}
#else
// set pair anyway
pair_indices[pair_cnt * 32] = pair_index;
++pair_cnt;
#endif
}
}
/// Overload taking the candidate position directly (lets the caller prefetch
/// it). Squared distance is built with nested fmaf instead of glm::dot.
/// Same warp-interleaved stride-32 store and c_max_pair_num-1 capacity guard
/// as the array overload.
template<typename T>
__device__ void findPair(T* pair_indices, uint16_t& pair_cnt,
uint32_t pair_index, const dom_dim& self_pos, const dom_dim& pair_pos)
{
if (pair_cnt < c_max_pair_num - 1) {
#if 1
const auto h2 = c_h * c_h;
const auto pos_diff = self_pos - pair_pos;
//const auto r2 = glm::dot(pos_diff, pos_diff);
const auto r2 = fmaf(pos_diff.z, pos_diff.z, fmaf(pos_diff.y, pos_diff.y, pos_diff.x * pos_diff.x));
if (r2 < h2) {
pair_indices[pair_cnt * 32] = pair_index; // global
//pair_indices[pair_cnt * 128] = pair_index; // shared
++pair_cnt;
//printf("here we come\n");
}
#else
// set pair anyway
pair_indices[pair_cnt * 32] = pair_index;
++pair_cnt;
#endif
}
}
/// Scan one grid cell: run the radius test against every particle in the
/// cell's [start_index, end_index) range. A start_index of 0xFFFFFFFF is
/// treated as "empty cell" and skipped entirely.
template<typename T>
__device__ void searchCell(
T* pair_indices, uint16_t& pair_cnt,
uint32_t start_index, uint32_t end_index,
const dom_dim& self_pos, const dom_dim* __restrict__ other_pos)
{
if (start_index != 0xFFFFFFFF) {
// iterate over perticles in this cell
for (auto i = start_index; i < end_index; i += 1) {
// position loading
const dom_dim pair_pos_0 = other_pos[i];
findPair(pair_indices, pair_cnt, i, self_pos, pair_pos_0);
//findPair(pair_indices, pair_cnt, i, self_pos, other_pos);
}
}
}
/// Visit the 3x3x3 block of grid cells centered on this particle's cell and
/// collect neighbor candidates from each via searchCell.
/// Cell hashing uses the linear hash (c_grid_size) — cf. the zorder variant.
template<typename T>
__device__ void searchGrid(
T* pair_indices, uint16_t& pair_cnt,
const uint32_t* __restrict__ cell_start, const uint32_t* __restrict__ cell_end,
const dom_dim& self_pos, const dom_dim* __restrict__ other_pos)
{
auto grid = pbf::cuda::calcGridPos(self_pos, c_cell_width);
auto sum_prop = dom_dim(0.f); // NOTE(review): never used — leftover?
#pragma unroll
for (int z = -1; z <= 1; ++z) {
#pragma unroll
for (int y = -1; y <= 1; ++y) {
#pragma unroll
for (int x = -1; x <= 1; ++x) {
dom_idim neighbor_grid(grid.x + x, grid.y + y, grid.z + z);
auto neighbor_grid_hash = pbf::cuda::calcGridHash(neighbor_grid, c_grid_size);
auto start_index = cell_start[neighbor_grid_hash];
auto end_index = cell_end[neighbor_grid_hash];
searchCell(pair_indices, pair_cnt, start_index, end_index, self_pos, other_pos);
}
}
}
//printf("pair_cnt: %d\n", pair_cnt);
}
/// Kernel: one thread per particle; fills 'neighbor_list' with the indices of
/// all particles within c_h of this one.
/// List layout is warp-interleaved for coalescing: entry k of particle i is
/// stored at (i & 31) + ((i >> 5) * 32) * c_max_pair_num + k * 32
/// (equivalent to the commented-out %/ / form).
/// 'paricle_hash' and 'particle_index' are unused here — the host wrapper
/// passes nullptr for both.
template<typename T>
__global__ void detectNeighborsCUDA(
T* neighbor_list,
const dom_dim* __restrict__ position,
const uint32_t* __restrict__ paricle_hash, const uint32_t* __restrict__ particle_index,
const uint32_t* __restrict__ cell_start, const uint32_t* __restrict__ cell_end,
uint32_t num_particle
)
{
//__shared__ uint16_t pair_indices_shared[25 * 128]; // [max number of pair particle * thread number]
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_particle)
return;
const auto self_pos = position[index];
uint16_t pair_cnt = 0;
//T* neighbot_list_local = neighbor_list + (index % 32) + (index / 32) * 32 * c_max_pair_num;
T* neighbot_list_local = neighbor_list + (index & 31) + ((index >> 5) << 5) * c_max_pair_num;
searchGrid(neighbot_list_local, pair_cnt, cell_start, cell_end, self_pos, position);
//printf("pair_cnt: %d\n", pair_cnt);
//if (pair_cnt > 25)
// pair_cnt = 25;
//__syncthreads();
//for (uint32_t i = 0; i < pair_cnt; ++i) {
// neighbot_list_local[i * 32] = (pair_indices_shared + threadIdx.x)[i * 128];
//}
}
} // end of unnamed ns
namespace {
namespace opt {
__device__ void findPair(uint32_t* pair_indices, uint16_t& pair_cnt,
uint32_t pair_index, const float4& self_pos, const float4* other_pos)
{
const float4 pair_pos = other_pos[pair_index];
if (pair_cnt < c_max_pair_num - 1) {
#if 1
const auto h2 = c_h * c_h;
const auto pos_diff = glm::vec3(self_pos.x - pair_pos.x, self_pos.y - pair_pos.y, self_pos.z - pair_pos.z);
const auto r2 = glm::dot(pos_diff, pos_diff);
if (r2 < h2) {
pair_indices[pair_cnt * 32] = pair_index; // global
//pair_indices[pair_cnt * 128] = pair_index; // shared
++pair_cnt;
//printf("here we come\n");
}
#else
// set pair anyway
pair_indices[pair_cnt * 32] = pair_index;
++pair_cnt;
#endif
}
}
/// opt variant of the per-cell scan: same [start, end) iteration and
/// 0xFFFFFFFF empty-cell sentinel, but float4 positions and no prefetch —
/// findPair loads each candidate itself.
__device__ void searchCell(
uint32_t* pair_indices, uint16_t& pair_cnt,
uint32_t start_index, uint32_t end_index,
const float4& self_pos, const float4* other_pos)
{
if (start_index != 0xFFFFFFFF) {
// iterate over perticles in this cell
for (auto i = start_index; i < end_index; ++i) {
opt::findPair(pair_indices, pair_cnt, i, self_pos, other_pos);
}
//printf("particle in a cell: %d\n", end_index - start_index);
}
}
__device__ void searchGrid(
uint32_t* pair_indices, uint16_t& pair_cnt, uint32_t* cell_index_shared,
const uint32_t* cell_start, const uint32_t* cell_end,
const float4& self_pos, const float4* other_pos)
{
auto grid = pbf::cuda::calcGridPos(self_pos, c_cell_width);
auto sum_prop = dom_dim(0.f);
#pragma unroll
for (int z = -1; z <= 1; ++z) {
#pragma unroll
for (int y = -1; y <= 1; ++y) {
#if 1
#pragma unroll
for (int x = -1; x <= 1; ++x) {
dom_idim neighbor_grid(grid.x + x, grid.y + y, grid.z + z);
auto neighbor_grid_hash = pbf::cuda::calcGridHash(neighbor_grid, c_grid_size);
cell_index_shared[0 + 128 * (x + 1)] = cell_start[neighbor_grid_hash];
cell_index_shared[0 + 128 * (x + 1) + 128 * 3] = cell_end[neighbor_grid_hash];
}
#pragma unroll
for (int x = -1; x <= 1; ++x) {
auto start_index = cell_index_shared[0 + 128 * (x + 1)];
auto end_index = cell_index_shared[0 + 128 * (x + 1) + 128 * 3];
opt::searchCell(pair_indices, pair_cnt, start_index, end_index, self_pos, other_pos);
}
#else
#pragma unroll
// no shared
for (int x = -1; x <= 1; ++x) {
dom_idim neighbor_grid(grid.x + x, grid.y + y, grid.z + z);
auto neighbor_grid_hash = pbf::cuda::calcGridHash(neighbor_grid, c_grid_size);
//auto start_index = __ldg(&cell_start[neighbor_grid_hash]);
//auto end_index = __ldg(&cell_end[neighbor_grid_hash]);
auto start_index = cell_start[neighbor_grid_hash];
auto end_index = cell_end[neighbor_grid_hash];
opt::searchCell(pair_indices, pair_cnt, start_index, end_index, self_pos, other_pos);
}
#endif
}
}
//printf("pair_cnt: %d\n", pair_cnt);
}
/// opt variant of the neighbor-detection kernel: stages each row's three
/// cell_start/cell_end pairs in shared memory before scanning.
/// cell_index layout: [{start|end} x 3 cells x 128 threads], indexed per
/// thread via (cell_index + threadIdx.x) with hard-coded stride 128 —
/// i.e. this kernel requires blockDim.x == 128.
/// Output layout matches the template kernel (warp-interleaved, stride 32).
__global__ void detectNeighborsCUDA(
uint32_t* neighbor_list,
const float4* position,
const uint32_t* paricle_hash, const uint32_t* particle_index,
const uint32_t* cell_start, const uint32_t* cell_end,
uint32_t num_particle
)
{
__shared__ uint32_t cell_index[3 * 2 * 128];
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_particle)
return;
const auto self_pos = position[index];
uint16_t pair_cnt = 0;
uint32_t* neighbot_list_local = neighbor_list + (index % 32) + (index / 32) * 32 * c_max_pair_num;
opt::searchGrid(neighbot_list_local, pair_cnt, cell_index + threadIdx.x, cell_start, cell_end, self_pos, position);
}
} // end of opt ns
} // end of unnamed ns
namespace {
namespace zorder {
inline
__device__ void findPair(uint16_t* pair_indices, uint16_t& pair_cnt,
uint32_t pair_index, const float4& self_pos, const float4* other_pos)
{
const float4 pair_pos = other_pos[pair_index];
if (pair_cnt < c_max_pair_num - 1) {
#if 1
const float h2 = c_h * c_h;
const auto pos_diff = make_float3(self_pos.x - pair_pos.x, self_pos.y - pair_pos.y, self_pos.z - pair_pos.z);
const auto r2 = pos_diff.x * pos_diff.x + pos_diff.y * pos_diff.y + pos_diff.z * pos_diff.z;
if (r2 < h2) {
//pair_indices[pair_cnt * 32] = pair_index; // global
pair_indices[pair_cnt * 128] = pair_index; // shared
++pair_cnt;
//printf("here we come\n");
}
#else
// set pair anyway
pair_indices[pair_cnt * 32] = pair_index;
++pair_cnt;
#endif
}
}
inline
__device__ void findPair(uint16_t* pair_indices, uint16_t& pair_cnt,
uint32_t pair_index, const float4& self_pos, const float4& pair_pos)
{
if (pair_cnt < c_max_pair_num - 1) {
#if 1
const float h2 = c_h * c_h;
const auto pos_diff = glm::vec3(self_pos.x - pair_pos.x, self_pos.y - pair_pos.y, self_pos.z - pair_pos.z);
const auto r2 = glm::dot(pos_diff, pos_diff);
//const auto pos_diff = make_float3(self_pos.x - pair_pos.x, self_pos.y - pair_pos.y, self_pos.z - pair_pos.z);
//const auto r2 = pos_diff.x * pos_diff.x + pos_diff.y * pos_diff.y + pos_diff.z * pos_diff.z;
if (r2 < h2) {
pair_indices[pair_cnt * 32] = pair_index; // global
//pair_indices[pair_cnt * 128] = pair_index; // shared
++pair_cnt;
//printf("here we come\n");
}
#else
// set pair anyway
pair_indices[pair_cnt * 32] = pair_index;
++pair_cnt;
#endif
}
}
inline
__device__ void searchCell(
uint16_t* pair_indices, uint16_t& pair_cnt,
const uint32_t* cell_start, const uint32_t* cell_end,
dom_idim neighbor_grid,
const float4& self_pos, const float4* other_pos)
{
auto neighbor_grid_hash = pbf::cuda::calcGridHashZOrder(neighbor_grid);
//auto start_index = __ldg(&cell_start[neighbor_grid_hash]);
//auto end_index = __ldg(&cell_end[neighbor_grid_hash]);
auto start_index = cell_start[neighbor_grid_hash];
auto end_index = cell_end[neighbor_grid_hash];
if (start_index != 0xFFFFFFFF) {
// iterate over perticles in this cell
for (auto i = start_index; i < end_index; i += 1) {
// position loading
const float4 pair_pos_0 = other_pos[i];
//float4 pair_pos_1;
//if (i < end_index - 1)
// pair_pos_1 = other_pos[i + 1];
//float4 pair_pos_2;
//if (i < end_index - 2)
// pair_pos_2 = other_pos[i + 2];
//
zorder::findPair(pair_indices, pair_cnt, i, self_pos, pair_pos_0);
//if (i < end_index - 1)
// zorder::findPair(pair_indices, pair_cnt, i + 1, self_pos, pair_pos_1);
//if (i < end_index - 2)
// zorder::findPair(pair_indices, pair_cnt, i + 2, self_pos, pair_pos_2);
}
//printf("particle in a cell: %u\n", end_index - start_index);
}
}
inline
__device__ void searchGrid(
uint16_t* pair_indices, uint16_t& pair_cnt,
const uint32_t* cell_start, const uint32_t* cell_end,
const float4& self_pos, const float4* other_pos)
{
auto grid = pbf::cuda::calcGridPos(self_pos, c_cell_width);
//if (grid.x < 1 || grid.y < 1 || grid.z < 1)
//printf("%d, %d, %d\n", grid.x, grid.y, grid.z);
#if 1
#pragma unroll
for (int z = -1; z <= 1; ++z) {
#pragma unroll
for (int y = -1; y <= 1; ++y) {
#pragma unroll
for (int x = -1; x <= 1; ++x) {
dom_idim neighbor_grid(grid.x + x, grid.y + y, grid.z + z);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
}
}
#else
{ // 0
dom_idim neighbor_grid(grid.x - 1, grid.y - 1, grid.z - 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 1
dom_idim neighbor_grid(grid.x - 0, grid.y - 1, grid.z - 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 2
dom_idim neighbor_grid(grid.x - 1, grid.y - 0, grid.z - 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 3
dom_idim neighbor_grid(grid.x - 0, grid.y - 0, grid.z - 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 4
dom_idim neighbor_grid(grid.x - 1, grid.y - 1, grid.z - 0);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 5
dom_idim neighbor_grid(grid.x - 0, grid.y - 1, grid.z - 0);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 6
dom_idim neighbor_grid(grid.x - 1, grid.y - 0, grid.z - 0);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 7
dom_idim neighbor_grid(grid.x - 0, grid.y - 0, grid.z - 0);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 8
dom_idim neighbor_grid(grid.x + 1, grid.y - 1, grid.z - 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 9
dom_idim neighbor_grid(grid.x + 1, grid.y - 0, grid.z - 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 10
dom_idim neighbor_grid(grid.x + 1, grid.y - 1, grid.z - 0);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 11
dom_idim neighbor_grid(grid.x + 1, grid.y - 0, grid.z - 0);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 12
dom_idim neighbor_grid(grid.x - 1, grid.y + 1, grid.z - 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 13
dom_idim neighbor_grid(grid.x - 0, grid.y + 1, grid.z - 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 14
dom_idim neighbor_grid(grid.x - 1, grid.y + 1, grid.z - 0);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 15
dom_idim neighbor_grid(grid.x - 0, grid.y + 1, grid.z - 0);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 16
dom_idim neighbor_grid(grid.x + 1, grid.y + 1, grid.z - 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 17
dom_idim neighbor_grid(grid.x + 1, grid.y + 1, grid.z - 0);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 18
dom_idim neighbor_grid(grid.x - 1, grid.y - 1, grid.z + 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 19
dom_idim neighbor_grid(grid.x - 0, grid.y - 1, grid.z + 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 20
dom_idim neighbor_grid(grid.x - 1, grid.y - 0, grid.z + 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 21
dom_idim neighbor_grid(grid.x - 0, grid.y - 0, grid.z + 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 22
dom_idim neighbor_grid(grid.x + 1, grid.y - 1, grid.z + 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 23
dom_idim neighbor_grid(grid.x + 1, grid.y - 0, grid.z + 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 24
dom_idim neighbor_grid(grid.x - 1, grid.y + 1, grid.z + 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 25
dom_idim neighbor_grid(grid.x - 0, grid.y + 1, grid.z + 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
{ // 26
dom_idim neighbor_grid(grid.x + 1, grid.y + 1, grid.z + 1);
zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
}
#endif
//printf("pair_cnt: %d\n", pair_cnt);
}
/// zorder variant: neighbor indices are 16-bit and cells are hashed with the
/// Z-order curve (see zorder::searchGrid). Currently runs the "no shared"
/// path; the shared-memory staging path is commented out.
/// NOTE(review): pair_indices_shared is declared but unused by the active
/// path, yet still reserves 25*128*2 bytes of static shared memory per block.
__global__ void detectNeighborsCUDA(
uint16_t* neighbor_list,
const float4* position,
const uint32_t* cell_start, const uint32_t* cell_end,
uint32_t num_particle
)
{
__shared__ uint16_t pair_indices_shared[25 * 128]; // [max number of pair particle * thread number]
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_particle)
return;
const auto self_pos = position[index];
uint16_t pair_cnt = 0;
// shared
//uint16_t* pair_indices = &pair_indices_shared[threadIdx.x];
//zorder::searchGrid(pair_indices, pair_cnt, cell_start, cell_end, self_pos, position);
//__syncthreads();
uint16_t* neighbot_list_local = neighbor_list + (index % 32) + (index / 32) * 32 * c_max_pair_num; // warp-interleaved layout
// no shared
zorder::searchGrid(neighbot_list_local, pair_cnt, cell_start, cell_end, self_pos, position);
// store from shared
//for (uint16_t i = 0; i < pair_cnt; ++i) {
// neighbot_list_local[i * 32] = (pair_indices_shared)[i * 128 + threadIdx.x];
//}
//printf("pair cnt: %u\n", pair_cnt);
}
} // end of zorder ns
} // end of unnamed ns
/// Host entry point for neighbor detection: uploads the search parameters to
/// __constant__ memory, then launches the template kernel (uint32_t neighbor
/// indices, 128 threads per block). The #else branch is a dead debug path
/// (disabled by #if 1) that round-trips positions through float4 and the
/// 16-bit zorder kernel via the host.
/// NOTE(review): the hipMemcpyToSymbol calls are unchecked — consider
/// wrapping them in gpuErrchk like the launches below.
void detectNeighbors(
uint32_t* neighbor_list,
const dom_dim* position,
const uint32_t* cell_start, const uint32_t* cell_end,
scalar_t cell_width, dom_udim grid_size,
scalar_t smoothing_length, uint32_t num_particle,
uint32_t max_pair_particle_num
)
{
// constant memory
hipMemcpyToSymbol(c_h, &smoothing_length, sizeof(scalar_t));
hipMemcpyToSymbol(c_max_pair_num, &max_pair_particle_num, sizeof(uint32_t));
hipMemcpyToSymbol(c_cell_width, &cell_width, sizeof(scalar_t));
uint3 grid_size_ = make_uint3(grid_size.x, grid_size.y, grid_size.z);
hipMemcpyToSymbol(c_grid_size, &grid_size_, sizeof(uint3));
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
#if 1
// Hash/index kernel parameters are unused by the kernel; nullptr is fine.
if (num_block > 0)
detectNeighborsCUDA<uint32_t><< < num_block, num_thread >> >
(neighbor_list, position, nullptr, nullptr, cell_start, cell_end, num_particle);
#else
// dom_dim to float4
std::vector<dom_dim> pos_host(num_particle);
hipMemcpy(pos_host.data(), position, sizeof(dom_dim) * num_particle, hipMemcpyDeviceToHost);
std::vector<float4> pos4_host(num_particle);
for (uint32_t i = 0; i < num_particle; ++i) {
pos4_host[i] = make_float4(pos_host[i].x, pos_host[i].y, pos_host[i].z, 0.f);
}
float4* pos4;
hipMalloc(&pos4, sizeof(float4) * num_particle);
hipMemcpy(pos4, pos4_host.data(), sizeof(float4) * num_particle, hipMemcpyHostToDevice);
// uint16_t* neighbor_list
uint16_t* nl16;
hipMalloc(&nl16, sizeof(uint16_t) * num_particle * max_pair_particle_num);
// memset uses only the low byte (0xFF), which still yields 0xFFFF per uint16.
hipMemset(nl16, 0xFFFF, sizeof(uint16_t) * num_particle * max_pair_particle_num);
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
// kernel launch
if (num_block > 0)
zorder::detectNeighborsCUDA << < num_block, num_thread >> >
(nl16, pos4, cell_start, cell_end, num_particle);
// uint16_t to uint32_t
std::vector<uint32_t> nl_host(num_particle * max_pair_particle_num);
std::vector<uint16_t> nl16_host(num_particle * max_pair_particle_num);
hipMemcpy(nl16_host.data(), nl16, sizeof(uint16_t) * num_particle * max_pair_particle_num, hipMemcpyDeviceToHost);
for (uint32_t i = 0; i < num_particle * max_pair_particle_num; ++i) {
const auto nl_v = nl16_host[i];
if (nl_v != 0xFFFF)
nl_host[i] = nl16_host[i];
else
nl_host[i] = 0xFFFFFFFF;
}
hipMemcpy(neighbor_list, nl_host.data(), sizeof(uint32_t) * num_particle * max_pair_particle_num, hipMemcpyHostToDevice);
hipFree(nl16);
hipFree(pos4);
#endif
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
//exit(0);
}
} // end of cuda ns
} // end of pbf ns
| e0d830a42fdfe7928e5e9986af09ebce5bb3f538.cu | #include "pbf_neighbor_search_util.h"
#include "pbf_grid.h"
#include "../../util/pbf_cuda_util.h"
#include <device_launch_parameters.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <sm_35_intrinsics.h>
#include <cub\cub.cuh>
#include <cub\device\device_radix_sort.cuh>
__constant__ scalar_t c_h;
__constant__ uint32_t c_max_pair_num;
__constant__ scalar_t c_cell_width;
__constant__ uint3 c_grid_size;
namespace {
/// Kernel: one thread per particle. Maps the particle's position to a grid
/// cell, stores that cell's linear hash, and records the identity index
/// (0..num_particle-1) for the subsequent sort-by-hash.
__global__ void calcHashCUDA(
    uint32_t* hash_particle,
    uint32_t* index_particle,
    const dom_dim* position,
    scalar_t cell_width,
    dom_udim grid_size,
    uint32_t num_particle)
{
    const uint32_t gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= num_particle) return;
    const auto cell = pbf::cuda::calcGridPos(position[gid], cell_width);
    hash_particle[gid] = pbf::cuda::calcGridHash(cell, grid_size);
    index_particle[gid] = gid;
}
/// Kernel: given particle hashes sorted ascending, record for each occupied
/// cell the first particle index (cell_start[hash]) and one-past-last
/// (cell_end[hash]). A cell boundary exists wherever a particle's hash
/// differs from its predecessor's.
/// Requires dynamic shared memory of (blockDim.x + 1) * sizeof(uint32_t).
/// 'index_particle' is unused in this kernel.
__global__ void findCellStartCUDA(
uint32_t* cell_start, uint32_t* cell_end,
const uint32_t* hash_particle, const uint32_t* index_particle,
uint32_t num_particle
)
{
extern __shared__ uint32_t shared_hash[]; // blockSize + 1 elements
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t hash; // uninitialized for out-of-range threads, but never read then
// handle case when no. of particles not multiple of block size
if (index < num_particle) {
hash = hash_particle[index];
// Load hash data into shared memory so that we can look at neighboring particle's hash value without loading
// two hash values per thread
shared_hash[threadIdx.x + 1] = hash; // slot 0 is reserved for the predecessor
if (index > 0 && threadIdx.x == 0) {
// first thread in block must load neighbor particle hash
shared_hash[0] = hash_particle[index - 1];
}
}
__syncthreads(); // outside the guard: every thread in the block reaches it
if (index < num_particle) {
// If this particle has a different cell index to the previous particle then it must be the first particle in the cell,
// so store the index of this particle in the cell.
// As it isn't the first particle, it must also be the cell end of the previous particle's cell
if (index == 0 || hash != shared_hash[threadIdx.x]) {
cell_start[hash] = index;
if (index > 0)
cell_end[shared_hash[threadIdx.x]] = index;
}
if (index == num_particle - 1) {
cell_end[hash] = index + 1;
}
}
}
/// 64-bit overload for the Z-order hash path: identical boundary logic to the
/// 32-bit version but hashes are uint64_t, so the dynamic shared-memory
/// window must be (blockDim.x + 1) * sizeof(uint64_t) bytes.
/// 'index_particle' is unused in this kernel.
__global__ void findCellStartCUDA(
uint32_t* cell_start, uint32_t* cell_end,
const uint64_t* hash_particle, const uint32_t* index_particle,
uint32_t num_particle
)
{
extern __shared__ uint64_t shared_hash64[]; // blockSize + 1 elements
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t hash; // uninitialized for out-of-range threads, but never read then
// handle case when no. of particles not multiple of block size
if (index < num_particle) {
hash = hash_particle[index];
// Load hash data into shared memory so that we can look at neighboring particle's hash value without loading
// two hash values per thread
shared_hash64[threadIdx.x + 1] = hash; // slot 0 is reserved for the predecessor
if (index > 0 && threadIdx.x == 0) {
// first thread in block must load neighbor particle hash
shared_hash64[0] = hash_particle[index - 1];
}
}
__syncthreads(); // outside the guard: every thread in the block reaches it
if (index < num_particle) {
// If this particle has a different cell index to the previous particle then it must be the first particle in the cell,
// so store the index of this particle in the cell.
// As it isn't the first particle, it must also be the cell end of the previous particle's cell
if (index == 0 || hash != shared_hash64[threadIdx.x]) {
cell_start[hash] = index;
if (index > 0)
cell_end[shared_hash64[threadIdx.x]] = index;
}
if (index == num_particle - 1) {
cell_end[hash] = index + 1;
}
}
}
/// Gather reorder: thread i copies the particle that was at
/// index_particle[i] before the hash sort into slot i of the sorted arrays.
__global__ void reorderDataCUDA(
    dom_dim* sorted_pos,
    dom_dim* sorted_vel,
    const dom_dim* old_pos,
    const dom_dim* old_vel,
    const uint32_t* index_particle,
    uint32_t num_particle)
{
    const uint32_t dst = blockDim.x * blockIdx.x + threadIdx.x;
    if (dst < num_particle) {
        const uint32_t src = index_particle[dst];  // pre-sort location
        sorted_pos[dst] = old_pos[src];
        sorted_vel[dst] = old_vel[src];
    }
}
__global__ void reorderDataCUDA(
dom_dim* sorted,
const dom_dim* old,
const uint32_t* index_particle,
uint32_t num)
{
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num) return;
uint32_t index_before_sort = index_particle[index];
sorted[index] = old[index_before_sort];
}
__global__ void restoreOrderCUDA(
dom_dim* restored_pos,
dom_dim* restored_vel,
const dom_dim* sorted_pos,
const dom_dim* sorted_vel,
const uint32_t* index_particle,
uint32_t num_particle)
{
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_particle) return;
uint32_t index_before_sort = index_particle[index];
restored_pos[index_before_sort] = sorted_pos[index];
restored_vel[index_before_sort] = sorted_vel[index];
}
} // end of unnamed ns
namespace pbf {
namespace cuda {
/// Query the temporary-storage byte count required by
/// cub::DeviceRadixSort::SortPairs for 'num_item' (key, value) pairs.
/// Per the CUB convention, passing a null temp-storage pointer makes
/// SortPairs skip the sort and only write the needed size into
/// 'temp_storage_size'; the data pointers are never dereferenced.
/// @param temp_storage_size [out] receives the required size in bytes.
/// @param num_item number of pairs that will later be sorted.
void getRadixSortStorageSize(
    size_t& temp_storage_size,
    uint32_t num_item
    )
{
    // Null data pointers are fine: only the size-query path executes.
    uint32_t* key_in = nullptr;
    uint32_t* key_out = nullptr;
    uint32_t* val_in = nullptr;
    uint32_t* val_out = nullptr;
    // nullptr (not NULL) for consistency with the initializers above.
    cub::DeviceRadixSort::SortPairs(nullptr, temp_storage_size, key_in, key_out, val_in, val_out, num_item);
}
void sortHashIndexCUB(
void* temp_storage, size_t temp_storage_size,
uint32_t* hash, uint32_t* sorted_hash,
uint32_t* index, uint32_t* sorted_index,
uint32_t num_particle
)
{
cub::DeviceRadixSort::SortPairs(temp_storage, temp_storage_size,
hash, sorted_hash, index, sorted_index, num_particle);
}
void calcHash(
uint32_t* hash, uint32_t* index,
const dom_dim* position,
scalar_t cell_width,
dom_udim grid_size,
uint32_t num_particle)
{
uint32_t num_thread, num_block;
computeGridSize(num_particle, 256, num_block, num_thread);
if (num_block > 0)
calcHashCUDA << < num_block, num_thread >> >
(hash, index, position, cell_width, grid_size, num_particle);
#ifdef _DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
// z-order hash
namespace {
__global__ void calcZOrderHashCUDA(
uint64_t* hash_particle,
uint32_t* index_particle,
const dom_dim* position,
scalar_t cell_width,
dom_udim grid_size,
uint32_t num_particle)
{
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_particle) return;
auto p = position[index];
auto grid_pos = pbf::cuda::calcGridPos(p, cell_width);
auto hash = pbf::cuda::calcGridHashZOrder(grid_pos);
hash_particle[index] = hash;
index_particle[index] = index;
}
}
void calcZOrderHash(
uint64_t* hash, uint32_t* index,
const dom_dim* position,
scalar_t cell_width,
dom_udim grid_size,
uint32_t num_particle)
{
uint32_t num_thread, num_block;
computeGridSize(num_particle, 256, num_block, num_thread);
if (num_block > 0)
calcZOrderHashCUDA<< < num_block, num_thread >> >
(hash, index, position, cell_width, grid_size, num_particle);
#ifdef _DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
void sortHashIndex(
uint32_t* hash, uint32_t* index,
uint32_t num_particle)
{
if (num_particle > 0) {
thrust::sort_by_key(thrust::device_ptr<uint32_t>(hash), thrust::device_ptr<uint32_t>(hash + num_particle), thrust::device_ptr<uint32_t>(index));
}
//thrust::sort_by_key(hash, hash + num_particle, index);
#ifdef _DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
void findCellStart(
uint32_t* cell_start, uint32_t* cell_end,
const uint32_t* hash, const uint32_t* index,
uint32_t num_particle)
{
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
uint32_t smem_size = sizeof(uint32_t) * (num_thread + 1);
if (num_block > 0)
findCellStartCUDA << < num_block, num_thread, smem_size >> >
(cell_start, cell_end, hash, index, num_particle);
#ifdef _DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
void findCellStart(
uint32_t* cell_start, uint32_t* cell_end,
const uint64_t* hash, const uint32_t* index,
uint32_t num_particle)
{
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
uint32_t smem_size = sizeof(uint64_t) * (num_thread + 1);
if (num_block > 0)
findCellStartCUDA << < num_block, num_thread, smem_size >> >
(cell_start, cell_end, hash, index, num_particle);
#ifdef _DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
void reorderData(
pbf_phase_array& sorted,
const pbf_phase_array& old,
const uint32_t* index,
uint32_t num_particle)
{
uint32_t num_thread, num_block;
computeGridSize(num_particle, 192, num_block, num_thread);
if (num_block > 0)
reorderDataCUDA << < num_block, num_thread >> >(sorted.x, sorted.v, old.x, old.v, index, num_particle);
#ifdef _DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
// Overload for a single vector array (e.g. positions only): reorder `old`
// into `sorted` through the index permutation via reorderDataCUDA.
void reorderData(
    dom_dim* sorted,
    const dom_dim* old,
    const uint32_t* index,
    uint32_t num)
{
    uint32_t threads_per_block = 0;
    uint32_t block_count = 0;
    computeGridSize(num, 192, block_count, threads_per_block);
    if (block_count > 0) {
        reorderDataCUDA<<<block_count, threads_per_block>>>(sorted, old, index, num);
    }
#ifdef _DEBUG
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
#endif
}
// Inverse of reorderData: put the hash-sorted phase arrays back into their
// original particle order using the same index permutation (the direction of
// the permutation is implemented in restoreOrderCUDA).
void restoreOrder(
    pbf_phase_array& restored,
    const pbf_phase_array& sorted,
    const uint32_t* index,
    uint32_t num_particle)
{
    uint32_t threads_per_block = 0;
    uint32_t block_count = 0;
    computeGridSize(num_particle, 192, block_count, threads_per_block);
    if (block_count > 0) {
        restoreOrderCUDA<<<block_count, threads_per_block>>>(
            restored.x, restored.v, sorted.x, sorted.v, index, num_particle);
    }
#ifdef _DEBUG
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
#endif
}
namespace {
// Append `pair_index` to this particle's neighbor list if the candidate at
// other_pos[pair_index] lies within the smoothing radius c_h of self_pos.
// `pair_indices` points at this particle's first slot of a warp-interleaved
// list: consecutive neighbors of one particle are 32 elements apart, so the
// 32 lanes of a warp write slot k in one coalesced transaction (layout set
// up in detectNeighborsCUDA).
// NOTE(review): the capacity test uses c_max_pair_num-1, so the last slot is
// never written — presumably reserved for a terminator; confirm with readers.
template<typename T>
__device__ void findPair(T* pair_indices, uint16_t& pair_cnt,
uint32_t pair_index, const dom_dim& self_pos, const dom_dim* __restrict__ other_pos)
{
    // Candidate position loaded from global memory.
    const auto pair_pos = other_pos[pair_index];
    if (pair_cnt < c_max_pair_num-1) {
#if 1
        const auto h2 = c_h * c_h;
        const auto pos_diff = self_pos - pair_pos;
        const auto r2 = glm::dot(pos_diff, pos_diff);
        // Compare squared distances — avoids a sqrt per candidate.
        if (r2 < h2) {
            pair_indices[pair_cnt * 32] = pair_index; // global, warp-interleaved
            //pair_indices[pair_cnt * 128] = pair_index; // shared-memory variant
            ++pair_cnt;
        }
#else
        // Debug alternative: accept every candidate unconditionally.
        pair_indices[pair_cnt * 32] = pair_index;
        ++pair_cnt;
#endif
    }
}
// Overload taking the candidate position by value (caller already loaded it),
// letting the caller schedule the global load earlier. Same warp-interleaved
// output layout and same c_max_pair_num-1 capacity rule as the pointer
// overload above.
template<typename T>
__device__ void findPair(T* pair_indices, uint16_t& pair_cnt,
uint32_t pair_index, const dom_dim& self_pos, const dom_dim& pair_pos)
{
    if (pair_cnt < c_max_pair_num - 1) {
#if 1
        const auto h2 = c_h * c_h;
        const auto pos_diff = self_pos - pair_pos;
        //const auto r2 = glm::dot(pos_diff, pos_diff);
        // Explicit fused multiply-adds for the squared distance.
        const auto r2 = fmaf(pos_diff.z, pos_diff.z, fmaf(pos_diff.y, pos_diff.y, pos_diff.x * pos_diff.x));
        if (r2 < h2) {
            pair_indices[pair_cnt * 32] = pair_index; // global, warp-interleaved
            //pair_indices[pair_cnt * 128] = pair_index; // shared-memory variant
            ++pair_cnt;
        }
#else
        // Debug alternative: accept every candidate unconditionally.
        pair_indices[pair_cnt * 32] = pair_index;
        ++pair_cnt;
#endif
    }
}
// Scan all particles stored in one grid cell [start_index, end_index) and
// record those within range of self_pos. start_index == 0xFFFFFFFF marks an
// empty cell — presumably the sentinel written by the cell-start pass;
// confirm against findCellStartCUDA.
template<typename T>
__device__ void searchCell(
T* pair_indices, uint16_t& pair_cnt,
uint32_t start_index, uint32_t end_index,
const dom_dim& self_pos, const dom_dim* __restrict__ other_pos)
{
    if (start_index != 0xFFFFFFFF) {
        // Iterate over the particles in this cell.
        for (auto i = start_index; i < end_index; i += 1) {
            // Load the candidate position once and hand it to findPair by value.
            const dom_dim pair_pos_0 = other_pos[i];
            findPair(pair_indices, pair_cnt, i, self_pos, pair_pos_0);
            //findPair(pair_indices, pair_cnt, i, self_pos, other_pos);
        }
    }
}
// Visit the 3x3x3 block of grid cells around self_pos and collect every
// neighbor within the smoothing radius into pair_indices (warp-interleaved
// layout, see findPair).
// NOTE(review): neighbor cell coordinates are not clamped to the grid
// bounds; calcGridHash is assumed to handle out-of-range coordinates
// (e.g. by wrapping) — confirm before changing grid sizes.
// Change: removed the unused local `sum_prop` (dead code).
template<typename T>
__device__ void searchGrid(
    T* pair_indices, uint16_t& pair_cnt,
    const uint32_t* __restrict__ cell_start, const uint32_t* __restrict__ cell_end,
    const dom_dim& self_pos, const dom_dim* __restrict__ other_pos)
{
    auto grid = pbf::cuda::calcGridPos(self_pos, c_cell_width);
#pragma unroll
    for (int z = -1; z <= 1; ++z) {
#pragma unroll
        for (int y = -1; y <= 1; ++y) {
#pragma unroll
            for (int x = -1; x <= 1; ++x) {
                dom_idim neighbor_grid(grid.x + x, grid.y + y, grid.z + z);
                auto neighbor_grid_hash = pbf::cuda::calcGridHash(neighbor_grid, c_grid_size);
                auto start_index = cell_start[neighbor_grid_hash];
                auto end_index = cell_end[neighbor_grid_hash];
                searchCell(pair_indices, pair_cnt, start_index, end_index, self_pos, other_pos);
            }
        }
    }
}
// One thread per particle: find all neighbors of position[index] and write
// them into neighbor_list in a warp-interleaved layout — particle i's k-th
// neighbor lives at (i & 31) + ((i >> 5) << 5) * c_max_pair_num + k * 32,
// so the 32 lanes of a warp store slot k in one coalesced transaction.
// paricle_hash / particle_index are unused in this variant (kept for
// signature parity with the launch site).
template<typename T>
__global__ void detectNeighborsCUDA(
T* neighbor_list,
const dom_dim* __restrict__ position,
const uint32_t* __restrict__ paricle_hash, const uint32_t* __restrict__ particle_index,
const uint32_t* __restrict__ cell_start, const uint32_t* __restrict__ cell_end,
uint32_t num_particle
)
{
    uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
    // Tail guard: grid may overshoot num_particle.
    if (index >= num_particle)
        return;
    const auto self_pos = position[index];
    uint16_t pair_cnt = 0;
    // Warp-interleaved base slot; bit-twiddled form of
    //   (index % 32) + (index / 32) * 32 * c_max_pair_num.
    T* neighbot_list_local = neighbor_list + (index & 31) + ((index >> 5) << 5) * c_max_pair_num;
    searchGrid(neighbot_list_local, pair_cnt, cell_start, cell_end, self_pos, position);
}
} // end of unnamed ns
namespace {
namespace opt {
// float4 variant of findPair used by the opt:: kernel: loads the candidate
// position (w component ignored) and appends pair_index to the particle's
// warp-interleaved neighbor list when within the smoothing radius c_h.
// Same c_max_pair_num-1 capacity rule as the template version above.
__device__ void findPair(uint32_t* pair_indices, uint16_t& pair_cnt,
uint32_t pair_index, const float4& self_pos, const float4* other_pos)
{
    const float4 pair_pos = other_pos[pair_index];
    if (pair_cnt < c_max_pair_num - 1) {
#if 1
        const auto h2 = c_h * c_h;
        // Squared distance via glm; only xyz participate.
        const auto pos_diff = glm::vec3(self_pos.x - pair_pos.x, self_pos.y - pair_pos.y, self_pos.z - pair_pos.z);
        const auto r2 = glm::dot(pos_diff, pos_diff);
        if (r2 < h2) {
            pair_indices[pair_cnt * 32] = pair_index; // global, warp-interleaved
            //pair_indices[pair_cnt * 128] = pair_index; // shared-memory variant
            ++pair_cnt;
        }
#else
        // Debug alternative: accept every candidate unconditionally.
        pair_indices[pair_cnt * 32] = pair_index;
        ++pair_cnt;
#endif
    }
}
// Scan one grid cell [start_index, end_index); 0xFFFFFFFF in start_index
// marks an empty cell. Unlike the template searchCell, the candidate
// position is loaded inside opt::findPair.
__device__ void searchCell(
uint32_t* pair_indices, uint16_t& pair_cnt,
uint32_t start_index, uint32_t end_index,
const float4& self_pos, const float4* other_pos)
{
    if (start_index != 0xFFFFFFFF) {
        // Iterate over the particles in this cell.
        for (auto i = start_index; i < end_index; ++i) {
            opt::findPair(pair_indices, pair_cnt, i, self_pos, other_pos);
        }
    }
}
// searchGrid variant that stages each cell row's three (cell_start,
// cell_end) pairs in shared memory before scanning the cells, batching the
// global loads. cell_index_shared points at THIS thread's slot (the caller
// passes base + threadIdx.x); slots for the same datum are 128 apart, so
// this layout requires blockDim.x <= 128.
// Changes: removed the unused local `sum_prop` and the unreachable
// `#else` (non-shared) duplicate branch.
__device__ void searchGrid(
    uint32_t* pair_indices, uint16_t& pair_cnt, uint32_t* cell_index_shared,
    const uint32_t* cell_start, const uint32_t* cell_end,
    const float4& self_pos, const float4* other_pos)
{
    auto grid = pbf::cuda::calcGridPos(self_pos, c_cell_width);
#pragma unroll
    for (int z = -1; z <= 1; ++z) {
#pragma unroll
        for (int y = -1; y <= 1; ++y) {
            // Stage the 3 (start, end) pairs for this row of cells.
#pragma unroll
            for (int x = -1; x <= 1; ++x) {
                dom_idim neighbor_grid(grid.x + x, grid.y + y, grid.z + z);
                auto neighbor_grid_hash = pbf::cuda::calcGridHash(neighbor_grid, c_grid_size);
                cell_index_shared[0 + 128 * (x + 1)] = cell_start[neighbor_grid_hash];
                cell_index_shared[0 + 128 * (x + 1) + 128 * 3] = cell_end[neighbor_grid_hash];
            }
            // Scan the staged cells.
#pragma unroll
            for (int x = -1; x <= 1; ++x) {
                auto start_index = cell_index_shared[0 + 128 * (x + 1)];
                auto end_index = cell_index_shared[0 + 128 * (x + 1) + 128 * 3];
                opt::searchCell(pair_indices, pair_cnt, start_index, end_index, self_pos, other_pos);
            }
        }
    }
}
// One thread per particle, using a block-shared staging buffer for cell
// start/end indices (see opt::searchGrid). The shared layout assumes
// blockDim.x <= 128. paricle_hash / particle_index are unused here (kept
// for signature parity).
__global__ void detectNeighborsCUDA(
uint32_t* neighbor_list,
const float4* position,
const uint32_t* paricle_hash, const uint32_t* particle_index,
const uint32_t* cell_start, const uint32_t* cell_end,
uint32_t num_particle
)
{
    // 3 start + 3 end slots per thread, 128 threads per block.
    __shared__ uint32_t cell_index[3 * 2 * 128];
    uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
    // Tail guard: grid may overshoot num_particle.
    if (index >= num_particle)
        return;
    const auto self_pos = position[index];
    uint16_t pair_cnt = 0;
    // Warp-interleaved base slot for this particle's neighbor list.
    uint32_t* neighbot_list_local = neighbor_list + (index % 32) + (index / 32) * 32 * c_max_pair_num;
    opt::searchGrid(neighbot_list_local, pair_cnt, cell_index + threadIdx.x, cell_start, cell_end, self_pos, position);
}
} // end of opt ns
} // end of unnamed ns
namespace {
namespace zorder {
// zorder variant of findPair writing with stride 128 — the layout of the
// per-block shared staging buffer (one slot per thread, blockDim.x == 128).
// NOTE(review): this overload serves the commented-out shared-memory path in
// zorder::detectNeighborsCUDA; the active path uses the by-value overload
// below, which writes the stride-32 global layout. Confirm before enabling.
inline
__device__ void findPair(uint16_t* pair_indices, uint16_t& pair_cnt,
uint32_t pair_index, const float4& self_pos, const float4* other_pos)
{
    const float4 pair_pos = other_pos[pair_index];
    if (pair_cnt < c_max_pair_num - 1) {
#if 1
        const float h2 = c_h * c_h;
        // Squared distance on xyz only.
        const auto pos_diff = make_float3(self_pos.x - pair_pos.x, self_pos.y - pair_pos.y, self_pos.z - pair_pos.z);
        const auto r2 = pos_diff.x * pos_diff.x + pos_diff.y * pos_diff.y + pos_diff.z * pos_diff.z;
        if (r2 < h2) {
            //pair_indices[pair_cnt * 32] = pair_index; // global layout
            pair_indices[pair_cnt * 128] = pair_index; // shared-memory layout
            ++pair_cnt;
        }
#else
        // Debug alternative: accept every candidate unconditionally.
        pair_indices[pair_cnt * 32] = pair_index;
        ++pair_cnt;
#endif
    }
}
// By-value overload used by the active (non-shared) zorder path: candidate
// position already loaded by the caller; writes the stride-32 global
// warp-interleaved layout.
inline
__device__ void findPair(uint16_t* pair_indices, uint16_t& pair_cnt,
uint32_t pair_index, const float4& self_pos, const float4& pair_pos)
{
    if (pair_cnt < c_max_pair_num - 1) {
#if 1
        const float h2 = c_h * c_h;
        const auto pos_diff = glm::vec3(self_pos.x - pair_pos.x, self_pos.y - pair_pos.y, self_pos.z - pair_pos.z);
        const auto r2 = glm::dot(pos_diff, pos_diff);
        //const auto pos_diff = make_float3(self_pos.x - pair_pos.x, self_pos.y - pair_pos.y, self_pos.z - pair_pos.z);
        //const auto r2 = pos_diff.x * pos_diff.x + pos_diff.y * pos_diff.y + pos_diff.z * pos_diff.z;
        if (r2 < h2) {
            pair_indices[pair_cnt * 32] = pair_index; // global, warp-interleaved
            //pair_indices[pair_cnt * 128] = pair_index; // shared-memory layout
            ++pair_cnt;
        }
#else
        // Debug alternative: accept every candidate unconditionally.
        pair_indices[pair_cnt * 32] = pair_index;
        ++pair_cnt;
#endif
    }
}
// Z-order variant of searchCell: hashes the neighbor cell with the Z-order
// (Morton) hash, then scans its particle range. 0xFFFFFFFF in cell_start
// marks an empty cell.
inline
__device__ void searchCell(
uint16_t* pair_indices, uint16_t& pair_cnt,
const uint32_t* cell_start, const uint32_t* cell_end,
dom_idim neighbor_grid,
const float4& self_pos, const float4* other_pos)
{
    auto neighbor_grid_hash = pbf::cuda::calcGridHashZOrder(neighbor_grid);
    //auto start_index = __ldg(&cell_start[neighbor_grid_hash]);
    //auto end_index = __ldg(&cell_end[neighbor_grid_hash]);
    auto start_index = cell_start[neighbor_grid_hash];
    auto end_index = cell_end[neighbor_grid_hash];
    if (start_index != 0xFFFFFFFF) {
        // Iterate over the particles in this cell.
        for (auto i = start_index; i < end_index; i += 1) {
            // Load the candidate position and hand it to findPair by value.
            const float4 pair_pos_0 = other_pos[i];
            //float4 pair_pos_1;
            //if (i < end_index - 1)
            //  pair_pos_1 = other_pos[i + 1];
            //float4 pair_pos_2;
            //if (i < end_index - 2)
            //  pair_pos_2 = other_pos[i + 2];
            zorder::findPair(pair_indices, pair_cnt, i, self_pos, pair_pos_0);
            //if (i < end_index - 1)
            //  zorder::findPair(pair_indices, pair_cnt, i + 1, self_pos, pair_pos_1);
            //if (i < end_index - 2)
            //  zorder::findPair(pair_indices, pair_cnt, i + 2, self_pos, pair_pos_2);
        }
    }
}
// Z-order variant of searchGrid: visit the 3x3x3 neighborhood of grid cells
// around self_pos; each cell is hashed with calcGridHashZOrder inside
// zorder::searchCell.
// NOTE(review): neighbor coordinates are not clamped to the grid bounds;
// calcGridHashZOrder is assumed to tolerate out-of-range values — confirm.
// Change: removed the unreachable `#else` branch, ~100 lines of manually
// unrolled duplicates of the same 27 searchCell calls (dead code behind
// `#if 1`).
inline
__device__ void searchGrid(
    uint16_t* pair_indices, uint16_t& pair_cnt,
    const uint32_t* cell_start, const uint32_t* cell_end,
    const float4& self_pos, const float4* other_pos)
{
    auto grid = pbf::cuda::calcGridPos(self_pos, c_cell_width);
#pragma unroll
    for (int z = -1; z <= 1; ++z) {
#pragma unroll
        for (int y = -1; y <= 1; ++y) {
#pragma unroll
            for (int x = -1; x <= 1; ++x) {
                dom_idim neighbor_grid(grid.x + x, grid.y + y, grid.z + z);
                zorder::searchCell(pair_indices, pair_cnt, cell_start, cell_end, neighbor_grid, self_pos, other_pos);
            }
        }
    }
}
// One thread per particle: write each particle's neighbor indices
// (uint16_t) into the warp-interleaved list used throughout this file:
// particle i's k-th neighbor lives at
// (i % 32) + (i / 32) * 32 * c_max_pair_num + k * 32.
// Change: removed the unused 25*128 uint16_t __shared__ staging buffer
// (6.25 KB per block) that served only a commented-out path — if the
// compiler kept the allocation it cost occupancy for nothing.
__global__ void detectNeighborsCUDA(
    uint16_t* neighbor_list,
    const float4* position,
    const uint32_t* cell_start, const uint32_t* cell_end,
    uint32_t num_particle
    )
{
    uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
    // Tail guard: grid may overshoot num_particle.
    if (index >= num_particle)
        return;
    const auto self_pos = position[index];
    uint16_t pair_cnt = 0;
    // Warp-interleaved base slot for this particle's neighbor list.
    uint16_t* neighbot_list_local = neighbor_list + (index % 32) + (index / 32) * 32 * c_max_pair_num;
    zorder::searchGrid(neighbot_list_local, pair_cnt, cell_start, cell_end, self_pos, position);
}
} // end of zorder ns
} // end of unnamed ns
// Host entry point for neighbor detection: upload the search parameters to
// constant memory, then launch detectNeighborsCUDA (one thread per
// particle, 128 threads per block). neighbor_list uses the warp-interleaved
// layout documented on the kernel.
// Change: removed the unreachable `#else` debug path (dead behind `#if 1`),
// which did per-call cudaMalloc plus a full host round-trip converting
// dom_dim->float4 and uint16->uint32 to drive the zorder kernel.
void detectNeighbors(
    uint32_t* neighbor_list,
    const dom_dim* position,
    const uint32_t* cell_start, const uint32_t* cell_end,
    scalar_t cell_width, dom_udim grid_size,
    scalar_t smoothing_length, uint32_t num_particle,
    uint32_t max_pair_particle_num
    )
{
    // Constant-memory parameters consumed by the device code above.
    cudaMemcpyToSymbol(c_h, &smoothing_length, sizeof(scalar_t));
    cudaMemcpyToSymbol(c_max_pair_num, &max_pair_particle_num, sizeof(uint32_t));
    cudaMemcpyToSymbol(c_cell_width, &cell_width, sizeof(scalar_t));
    uint3 grid_size_ = make_uint3(grid_size.x, grid_size.y, grid_size.z);
    cudaMemcpyToSymbol(c_grid_size, &grid_size_, sizeof(uint3));
    uint32_t num_thread, num_block;
    computeGridSize(num_particle, 128, num_block, num_thread);
    if (num_block > 0)
        detectNeighborsCUDA<uint32_t><<<num_block, num_thread>>>
            (neighbor_list, position, nullptr, nullptr, cell_start, cell_end, num_particle);
#ifdef _DEBUG
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
#endif
}
} // end of cuda ns
} // end of pbf ns
|
8f1200f59059b977db191bd9d60044272d37196a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <join/join_common_utils.cuh>
#include <join/join_common_utils.hpp>
#include <join/mixed_join_common_utils.cuh>
#include <cudf/ast/detail/expression_evaluator.cuh>
#include <cudf/ast/detail/expression_parser.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/span.hpp>
#include <hipcub/hipcub.hpp>
namespace cudf {
namespace detail {
namespace cg = cooperative_groups;
// Count, per outer row, how many results a mixed semi/anti join will
// produce, and accumulate the grand total into *output_size.
// Launch: 1D grid of `block_size`-thread blocks; each thread walks outer
// rows in a grid-stride loop. Dynamic shared memory must hold
// blockDim.x * num_intermediates AST intermediates (allocated by the host).
template <int block_size, bool has_nulls>
__global__ void compute_mixed_join_output_size_semi(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
ast::detail::expression_device_view device_expression_data,
bool const swap_tables,
std::size_t* output_size,
cudf::device_span<cudf::size_type> matches_per_row)
{
  // The (required) extern storage of the shared memory array leads to
  // conflicting declarations between different templates. The easiest
  // workaround is to declare an arbitrary (here char) array type then cast it
  // after the fact to the appropriate type.
  extern __shared__ char raw_intermediate_storage[];
  cudf::ast::detail::IntermediateDataType<has_nulls>* intermediate_storage =
    reinterpret_cast<cudf::ast::detail::IntermediateDataType<has_nulls>*>(raw_intermediate_storage);
  // Each thread owns a disjoint slice of the shared intermediates.
  auto thread_intermediate_storage =
    intermediate_storage + (threadIdx.x * device_expression_data.num_intermediates);
  std::size_t thread_counter{0};
  cudf::size_type const start_idx = threadIdx.x + blockIdx.x * block_size;
  cudf::size_type const stride = block_size * gridDim.x;
  cudf::size_type const left_num_rows = left_table.num_rows();
  cudf::size_type const right_num_rows = right_table.num_rows();
  auto const outer_num_rows = (swap_tables ? right_num_rows : left_num_rows);
  auto evaluator = cudf::ast::detail::expression_evaluator<has_nulls>(
    left_table, right_table, device_expression_data);
  row_hash hash_probe{nullate::DYNAMIC{has_nulls}, probe};
  // TODO: Address asymmetry in operator.
  auto equality = single_expression_equality<has_nulls>{
    evaluator, thread_intermediate_storage, swap_tables, equality_probe};
  // Grid-stride loop over the outer table's rows.
  for (cudf::size_type outer_row_index = start_idx; outer_row_index < outer_num_rows;
       outer_row_index += stride) {
    // Semi join keeps rows found in the hash table; anti join inverts the
    // test — expressed here as (is_anti != contains), yielding 0 or 1.
    matches_per_row[outer_row_index] =
      ((join_type == join_kind::LEFT_ANTI_JOIN) !=
       (hash_table_view.contains(outer_row_index, hash_probe, equality)));
    thread_counter += matches_per_row[outer_row_index];
  }
  // Block-wide sum of the per-thread counters, then one atomic per block.
  using BlockReduce = hipcub::BlockReduce<cudf::size_type, block_size>;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  std::size_t block_counter = BlockReduce(temp_storage).Sum(thread_counter);
  // Add block counter to global counter
  if (threadIdx.x == 0) atomicAdd(output_size, block_counter);
}
// Explicit instantiations for the nullable and non-nullable variants at the
// default join block size.
template __global__ void compute_mixed_join_output_size_semi<DEFAULT_JOIN_BLOCK_SIZE, true>(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
ast::detail::expression_device_view device_expression_data,
bool const swap_tables,
std::size_t* output_size,
cudf::device_span<cudf::size_type> matches_per_row);
template __global__ void compute_mixed_join_output_size_semi<DEFAULT_JOIN_BLOCK_SIZE, false>(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
ast::detail::expression_device_view device_expression_data,
bool const swap_tables,
std::size_t* output_size,
cudf::device_span<cudf::size_type> matches_per_row);
} // namespace detail
} // namespace cudf
| 8f1200f59059b977db191bd9d60044272d37196a.cu | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <join/join_common_utils.cuh>
#include <join/join_common_utils.hpp>
#include <join/mixed_join_common_utils.cuh>
#include <cudf/ast/detail/expression_evaluator.cuh>
#include <cudf/ast/detail/expression_parser.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/span.hpp>
#include <cub/cub.cuh>
namespace cudf {
namespace detail {
namespace cg = cooperative_groups;
template <int block_size, bool has_nulls>
__global__ void compute_mixed_join_output_size_semi(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
ast::detail::expression_device_view device_expression_data,
bool const swap_tables,
std::size_t* output_size,
cudf::device_span<cudf::size_type> matches_per_row)
{
// The (required) extern storage of the shared memory array leads to
// conflicting declarations between different templates. The easiest
// workaround is to declare an arbitrary (here char) array type then cast it
// after the fact to the appropriate type.
extern __shared__ char raw_intermediate_storage[];
cudf::ast::detail::IntermediateDataType<has_nulls>* intermediate_storage =
reinterpret_cast<cudf::ast::detail::IntermediateDataType<has_nulls>*>(raw_intermediate_storage);
auto thread_intermediate_storage =
intermediate_storage + (threadIdx.x * device_expression_data.num_intermediates);
std::size_t thread_counter{0};
cudf::size_type const start_idx = threadIdx.x + blockIdx.x * block_size;
cudf::size_type const stride = block_size * gridDim.x;
cudf::size_type const left_num_rows = left_table.num_rows();
cudf::size_type const right_num_rows = right_table.num_rows();
auto const outer_num_rows = (swap_tables ? right_num_rows : left_num_rows);
auto evaluator = cudf::ast::detail::expression_evaluator<has_nulls>(
left_table, right_table, device_expression_data);
row_hash hash_probe{nullate::DYNAMIC{has_nulls}, probe};
// TODO: Address asymmetry in operator.
auto equality = single_expression_equality<has_nulls>{
evaluator, thread_intermediate_storage, swap_tables, equality_probe};
for (cudf::size_type outer_row_index = start_idx; outer_row_index < outer_num_rows;
outer_row_index += stride) {
matches_per_row[outer_row_index] =
((join_type == join_kind::LEFT_ANTI_JOIN) !=
(hash_table_view.contains(outer_row_index, hash_probe, equality)));
thread_counter += matches_per_row[outer_row_index];
}
using BlockReduce = cub::BlockReduce<cudf::size_type, block_size>;
__shared__ typename BlockReduce::TempStorage temp_storage;
std::size_t block_counter = BlockReduce(temp_storage).Sum(thread_counter);
// Add block counter to global counter
if (threadIdx.x == 0) atomicAdd(output_size, block_counter);
}
template __global__ void compute_mixed_join_output_size_semi<DEFAULT_JOIN_BLOCK_SIZE, true>(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
ast::detail::expression_device_view device_expression_data,
bool const swap_tables,
std::size_t* output_size,
cudf::device_span<cudf::size_type> matches_per_row);
template __global__ void compute_mixed_join_output_size_semi<DEFAULT_JOIN_BLOCK_SIZE, false>(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
ast::detail::expression_device_view device_expression_data,
bool const swap_tables,
std::size_t* output_size,
cudf::device_span<cudf::size_type> matches_per_row);
} // namespace detail
} // namespace cudf
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.