hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
8e96d2efa41c817fa96933b84e92a02169fbf8da.hip | // !!! This is a file automatically generated by hipify!!!
/**CDUA
* 1 CUDA CDUA eventsNVIDIA Visual Profiler.
* 2CUDA CUDA-GDB /NSight.
* 3CUDA warp .
* 4CUDA CUDA .
*/
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
/**CPUGPU kernel
* CUDA Events CUDA
*
* C/C++ API
*
* CUDA API
* C++ Python
*/
// = /
//
// Define the constant variables.
#define N 50000000 // The number of elements in array.
// Define kernel function.
__global__ void gpuAdd(int *device_a, int *device_b, int *device_c)
{
// Getting the thread index of current kernel.
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N)
{
device_c[tid] = device_a[tid] + device_b[tid];
//
tid += blockDim.x * gridDim.x;
}
}
int main(int argc, char **argv)
{
// Defining host arrays using Dynamic Memory Allocation.
int *host_a, *host_b, *host_c;
host_a = (int*)malloc(N * sizeof(int));
host_b = (int*)malloc(N * sizeof(int));
host_c = (int*)malloc(N * sizeof(int));
// Define device pointers.
int *device_a, *device_b, *device_c;
// CUDA Events.
// CUDA
hipEvent_t event_start, event_stop;
// CUDA
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
// CUDA
hipEventRecord(event_start, 0);
// Allocate thr memory on device.
hipMalloc((void**)&device_a, N * sizeof(int));
hipMalloc((void**)&device_b, N * sizeof(int));
hipMalloc((void**)&device_c, N * sizeof(int));
// Initialize arrays.
for (int i = 0; i < N; ++i)
{
host_a[i] = 2 * i * i;
host_b[i] = i;
}
// Copy input data from host to device memory.
hipMemcpy(device_a, host_a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(device_b, host_b, N * sizeof(int), hipMemcpyHostToDevice);
// Call kernel passing device pointers as parameters.
hipLaunchKernelGGL(( gpuAdd) , dim3(512), dim3(512) , 0, 0, device_a, device_b, device_c);
// Copy result back to host memory from device memory.
hipMemcpy(host_c, device_c, N * sizeof(int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// CUDA
hipEventRecord(event_stop, 0);
hipEventSynchronize(event_stop);
// CUDA
float event_lapsed_time;
hipEventElapsedTime(&event_lapsed_time, event_start, event_stop);
printf("Time to add %d numbers: %3.lf ms.\n", N, event_lapsed_time);
// GPU
int correct_flag = 1;
std::cout << "Vector addition on GPU.\n";
for (int i = 0; i < N; ++i)
{
if (host_a[i] + host_b[i] != host_c[i])
{
correct_flag = 0;
}
}
if (correct_flag == 1)
{
std::cout << "GPU has computed sum correctly.\n";
}
else
{
std::cout << "There is an error in GPU computation.\n";
}
// Free up host Dynamic Memory.
free(host_a);
free(host_b);
free(host_c);
// Free up memory on device.
hipFree(device_a);
hipFree(device_b);
hipFree(device_c);
return 0;
} | 8e96d2efa41c817fa96933b84e92a02169fbf8da.cu | /**CDUA 中高级编程概念
* 1、测量 CUDA 程序的性能:CDUA events、NVIDIA Visual Profiler.
* 2、CUDA 中错误处理:从代码中进行处理、CUDA-GDB 调试器/NSight.
* 3、CUDA 程序性能的提升:使用适当的块和线程数量、最大化数学运算效率、使用合并的或跨步式的访存、避免 warp 内分支、使用锁定页面的内存.
* 4、CUDA 流:使用多个 CUDA 流.
*/
#include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
/**CPU时间度量性能取决于高精度的定时器;GPU kernel 是异步运行的
* CUDA Events 是在 CUDA 应用运行的特定时刻被记录的时间戳。
*
* C/C++ 的 API 参数分为入参和出参,
* 入参就是函数所需要的使用的参数;出参就是函数所需要返回的参数。
* CUDA API 返回值都是用于标志该操作的成功或者错误;而将需要的返回参数作为参数列表传入。
* C++ 不支持返回多个返回值,故采用参数列表作为返回;而 Python 支持返回多个返回值。
*/
// 数学运算效率 = 数学运算操作 / 访存操作
// 提升程序性能,前提分析程序的瓶颈在哪里。
// Define the constant variables.
#define N 50000000 // The number of elements in array.
// Define kernel function.
__global__ void gpuAdd(int *device_a, int *device_b, int *device_c)
{
// Getting the thread index of current kernel.
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N)
{
device_c[tid] = device_a[tid] + device_b[tid];
// 偏移量
tid += blockDim.x * gridDim.x;
}
}
int main(int argc, char **argv)
{
// Defining host arrays using Dynamic Memory Allocation.
int *host_a, *host_b, *host_c;
host_a = (int*)malloc(N * sizeof(int));
host_b = (int*)malloc(N * sizeof(int));
host_c = (int*)malloc(N * sizeof(int));
// Define device pointers.
int *device_a, *device_b, *device_c;
// CUDA Events.
// 定义 CUDA 事件类型变量。
cudaEvent_t event_start, event_stop;
// 创建 CUDA 事件。
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
// 记录 CUDA 事件。
cudaEventRecord(event_start, 0);
// Allocate thr memory on device.
cudaMalloc((void**)&device_a, N * sizeof(int));
cudaMalloc((void**)&device_b, N * sizeof(int));
cudaMalloc((void**)&device_c, N * sizeof(int));
// Initialize arrays.
for (int i = 0; i < N; ++i)
{
host_a[i] = 2 * i * i;
host_b[i] = i;
}
// Copy input data from host to device memory.
cudaMemcpy(device_a, host_a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(device_b, host_b, N * sizeof(int), cudaMemcpyHostToDevice);
// Call kernel passing device pointers as parameters.
gpuAdd <<< 512, 512 >>> (device_a, device_b, device_c);
// Copy result back to host memory from device memory.
cudaMemcpy(host_c, device_c, N * sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// 记录 CUDA 事件。
cudaEventRecord(event_stop, 0);
cudaEventSynchronize(event_stop);
// 定义变量用于计算 CUDA 事件,度量性能。
float event_lapsed_time;
cudaEventElapsedTime(&event_lapsed_time, event_start, event_stop);
printf("Time to add %d numbers: %3.lf ms.\n", N, event_lapsed_time);
// 验证 GPU 计算结果。
int correct_flag = 1;
std::cout << "Vector addition on GPU.\n";
for (int i = 0; i < N; ++i)
{
if (host_a[i] + host_b[i] != host_c[i])
{
correct_flag = 0;
}
}
if (correct_flag == 1)
{
std::cout << "GPU has computed sum correctly.\n";
}
else
{
std::cout << "There is an error in GPU computation.\n";
}
// Free up host Dynamic Memory.
free(host_a);
free(host_b);
free(host_c);
// Free up memory on device.
cudaFree(device_a);
cudaFree(device_b);
cudaFree(device_c);
return 0;
} |
c52920b605464af65361c1653a3ff992c26749ab.hip | // !!! This is a file automatically generated by hipify!!!
/** \file "force.cu" : implements the kernel for the tidal force calculation
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
// BLOCK_X : in azimuth
//#define BLOCK_X DEF_BLOCK_X_FORCE
#define BLOCK_X 16
// BLOCK_Y : in radius
#define BLOCK_Y 8
#define GET_TAB(u,x,y,pitch) *(u + __mul24(y, pitch) + x)
// improve legibility
#define rmed CRadiiStuff[(nr+1)*6+ig]
#define surf CRadiiStuff[(nr+1)*9+ig]
//static double ForceX[MAX1D];
//static double ForceY[MAX1D];
//__constant__ double CRadiiStuff[8192];
__device__ double CRadiiStuff[32768];
__global__ void kernel_force (double *Rho,
// double *Rho2,
double *fx,
double *fy,
double eps2,
double xp, double yp,
int ns, int nr,
int pitch, double dphi) {
// jg & ig, g like 'global' (global memory <=> full grid)
// Below, we recompute x and y for each zone using cos/sin.
// This method turns out to be faster, on high-end platforms,
// than a coalesced read of tabulated values.
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int idg = __mul24(ig, pitch) + jg;
double phi= (double)jg*dphi;
double dx = rmed * cos(phi) - xp;
double dy = rmed * sin(phi) - yp;
// double cellmass = surf * (Rho[idg]+Rho2[idg]);
double cellmass = surf * Rho[idg];
double dist2 = dx*dx+dy*dy;
dist2 += eps2;
double invd3 = 1.0/dist2 * rsqrt(dist2);
fx[idg] = cellmass*dx*invd3;
fy[idg] = cellmass*dy*invd3;
}
// Gaussian function (original)
//--------------------------------------------------------------
__global__ void kernel_force_Gauss_cutoff (double *Rho,
double *fx,
double *fy,
double eps2,
double invrh2,
double xp, double yp,
int ns, int nr,
int pitch, double dphi) {
// jg & ig, g like 'global' (global memory <=> full grid)
// Below, we recompute x and y for each zone using cos/sin.
// This method turns out to be faster, on high-end platforms,
// than a coalesced read of tabulated values.
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int idg = __mul24(ig, pitch) + jg;
double phi= (double)jg*dphi;
double dx = rmed * cos(phi) - xp;
double dy = rmed * sin(phi) - yp;
double cellmass = surf * Rho[idg];
double dist2 = dx*dx+dy*dy;
// Gaussian toruq cut-off (original)
cellmass *= 1.0-exp(-dist2*invrh2);
dist2 += eps2;
double invd3 = 1.0/dist2 * rsqrt(dist2);
fx[idg] = cellmass*dx*invd3;
fy[idg] = cellmass*dy*invd3;
}
//--------------------------------------------------------------
// Heviside function (Crida et al. 2009)
//---------------------------------------------------------------
__global__ void kernel_force_Heaviside_cutoff (double *Rho,
double *fx,
double *fy,
double eps2,
double invrh2,
double heaviside_b,
double xp, double yp,
int ns, int nr,
int pitch, double dphi) {
// jg & ig, g like 'global' (global memory <=> full grid)
// Below, we recompute x and y for each zone using cos/sin.
// This method turns out to be faster, on high-end platforms,
// than a coalesced read of tabulated values.
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int idg = __mul24(ig, pitch) + jg;
double phi= (double)jg*dphi;
double dx = rmed * cos(phi) - xp;
double dy = rmed * sin(phi) - yp;
double cellmass = surf * Rho[idg];
double dist2 = dx*dx+dy*dy;
// Heaviside torque cut-off function (Crida et al. 2009)
cellmass *= 1.0/(exp(-10.0*(sqrt(dist2*invrh2)/heaviside_b-1.0))+1.0);
dist2 += eps2;
double invd3 = 1.0/dist2 * rsqrt(dist2);
fx[idg] = cellmass*dx*invd3;
fy[idg] = cellmass*dy*invd3;
}
//---------------------------------------------------------------
extern "C"
Force ComputeForce_gpu (PolarGrid *Rho, double x0, double y0, double smoothing, double mass, int exclude) {
int nr, ns;
Force result;
double fxi = 0.0, fxo = 0.0, fyi = 0.0, fyo = 0.0;
double a;
nr = Rho->Nrad;
ns = Rho->Nsec;
// planetary Hill radius
a = sqrt(x0*x0+y0*y0);
double rh = a*pow(mass/3.0, 1.0/3.0);
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(14*(nr+1))*sizeof(double), 0, hipMemcpyHostToDevice));
// no torque cut-off
if (exclude == 0) {
hipLaunchKernelGGL(( kernel_force) , dim3(grid), dim3(block) , 0, 0, Rho->gpu_field,
// dust_density[0]->gpu_field,
Work->gpu_field,
TemperInt->gpu_field,
smoothing*smoothing,
x0,
y0,
ns,
nr,
Rho->pitch/sizeof(double),
2.0*M_PI/(double)ns);
}
// Gaussian torqu cut-off
else if (exclude == 1) {
hipLaunchKernelGGL(( kernel_force_Gauss_cutoff) , dim3(grid), dim3(block) , 0, 0, Rho->gpu_field,
Work->gpu_field,
TemperInt->gpu_field,
smoothing*smoothing,
1.0/(rh*rh),
x0,
y0,
ns,
nr,
Rho->pitch/sizeof(double),
2.0*M_PI/(double)ns);
}
// Heaviside torque cut-off
else if (exclude == 2) {
hipLaunchKernelGGL(( kernel_force_Heaviside_cutoff) , dim3(grid), dim3(block) , 0, 0, Rho->gpu_field,
Work->gpu_field,
TemperInt->gpu_field,
smoothing*smoothing,
1.0/(rh*rh),
HEAVISIDEB,
x0,
y0,
ns,
nr,
Rho->pitch/sizeof(double),
2.0*M_PI/(double)ns);
}
hipDeviceSynchronize();
getLastCudaError ("ComputeForce_gpu: kernel failed");
AzimuthalAverage (Work, ForceX);
AzimuthalAverage (TemperInt, ForceY);
getLastCudaError ("grabuge dans les azimuthal average");
for (int i = 0; i < nr; i++) {
if (Rmed[i] < a) {
fxi += G*ForceX[i];
fyi += G*ForceY[i];
} else {
fxo += G*ForceX[i];
fyo += G*ForceY[i];
}
}
result.fx_inner = fxi;
result.fy_inner = fyi;
result.fx_outer = fxo;
result.fy_outer = fyo;
result.fx_ex_inner = fxi;
result.fy_ex_inner = fyi;
result.fx_ex_outer = fxo;
result.fy_ex_outer = fyo;
return result;
}
| c52920b605464af65361c1653a3ff992c26749ab.cu | /** \file "force.cu" : implements the kernel for the tidal force calculation
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <cuda.h>
// BLOCK_X : in azimuth
//#define BLOCK_X DEF_BLOCK_X_FORCE
#define BLOCK_X 16
// BLOCK_Y : in radius
#define BLOCK_Y 8
#define GET_TAB(u,x,y,pitch) *(u + __mul24(y, pitch) + x)
// improve legibility
#define rmed CRadiiStuff[(nr+1)*6+ig]
#define surf CRadiiStuff[(nr+1)*9+ig]
//static double ForceX[MAX1D];
//static double ForceY[MAX1D];
//__constant__ double CRadiiStuff[8192];
__device__ double CRadiiStuff[32768];
__global__ void kernel_force (double *Rho,
// double *Rho2,
double *fx,
double *fy,
double eps2,
double xp, double yp,
int ns, int nr,
int pitch, double dphi) {
// jg & ig, g like 'global' (global memory <=> full grid)
// Below, we recompute x and y for each zone using cos/sin.
// This method turns out to be faster, on high-end platforms,
// than a coalesced read of tabulated values.
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int idg = __mul24(ig, pitch) + jg;
double phi= (double)jg*dphi;
double dx = rmed * cos(phi) - xp;
double dy = rmed * sin(phi) - yp;
// double cellmass = surf * (Rho[idg]+Rho2[idg]);
double cellmass = surf * Rho[idg];
double dist2 = dx*dx+dy*dy;
dist2 += eps2;
double invd3 = 1.0/dist2 * rsqrt(dist2);
fx[idg] = cellmass*dx*invd3;
fy[idg] = cellmass*dy*invd3;
}
// Gaussian function (original)
//--------------------------------------------------------------
__global__ void kernel_force_Gauss_cutoff (double *Rho,
double *fx,
double *fy,
double eps2,
double invrh2,
double xp, double yp,
int ns, int nr,
int pitch, double dphi) {
// jg & ig, g like 'global' (global memory <=> full grid)
// Below, we recompute x and y for each zone using cos/sin.
// This method turns out to be faster, on high-end platforms,
// than a coalesced read of tabulated values.
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int idg = __mul24(ig, pitch) + jg;
double phi= (double)jg*dphi;
double dx = rmed * cos(phi) - xp;
double dy = rmed * sin(phi) - yp;
double cellmass = surf * Rho[idg];
double dist2 = dx*dx+dy*dy;
// Gaussian toruq cut-off (original)
cellmass *= 1.0-exp(-dist2*invrh2);
dist2 += eps2;
double invd3 = 1.0/dist2 * rsqrt(dist2);
fx[idg] = cellmass*dx*invd3;
fy[idg] = cellmass*dy*invd3;
}
//--------------------------------------------------------------
// Heviside function (Crida et al. 2009)
//---------------------------------------------------------------
__global__ void kernel_force_Heaviside_cutoff (double *Rho,
double *fx,
double *fy,
double eps2,
double invrh2,
double heaviside_b,
double xp, double yp,
int ns, int nr,
int pitch, double dphi) {
// jg & ig, g like 'global' (global memory <=> full grid)
// Below, we recompute x and y for each zone using cos/sin.
// This method turns out to be faster, on high-end platforms,
// than a coalesced read of tabulated values.
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
int idg = __mul24(ig, pitch) + jg;
double phi= (double)jg*dphi;
double dx = rmed * cos(phi) - xp;
double dy = rmed * sin(phi) - yp;
double cellmass = surf * Rho[idg];
double dist2 = dx*dx+dy*dy;
// Heaviside torque cut-off function (Crida et al. 2009)
cellmass *= 1.0/(exp(-10.0*(sqrt(dist2*invrh2)/heaviside_b-1.0))+1.0);
dist2 += eps2;
double invd3 = 1.0/dist2 * rsqrt(dist2);
fx[idg] = cellmass*dx*invd3;
fy[idg] = cellmass*dy*invd3;
}
//---------------------------------------------------------------
extern "C"
Force ComputeForce_gpu (PolarGrid *Rho, double x0, double y0, double smoothing, double mass, int exclude) {
int nr, ns;
Force result;
double fxi = 0.0, fxo = 0.0, fyi = 0.0, fyo = 0.0;
double a;
nr = Rho->Nrad;
ns = Rho->Nsec;
// planetary Hill radius
a = sqrt(x0*x0+y0*y0);
double rh = a*pow(mass/3.0, 1.0/3.0);
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(14*(nr+1))*sizeof(double), 0, cudaMemcpyHostToDevice));
// no torque cut-off
if (exclude == 0) {
kernel_force <<< grid, block >>> (Rho->gpu_field,
// dust_density[0]->gpu_field,
Work->gpu_field,
TemperInt->gpu_field,
smoothing*smoothing,
x0,
y0,
ns,
nr,
Rho->pitch/sizeof(double),
2.0*M_PI/(double)ns);
}
// Gaussian torqu cut-off
else if (exclude == 1) {
kernel_force_Gauss_cutoff <<< grid, block >>> (Rho->gpu_field,
Work->gpu_field,
TemperInt->gpu_field,
smoothing*smoothing,
1.0/(rh*rh),
x0,
y0,
ns,
nr,
Rho->pitch/sizeof(double),
2.0*M_PI/(double)ns);
}
// Heaviside torque cut-off
else if (exclude == 2) {
kernel_force_Heaviside_cutoff <<< grid, block >>> (Rho->gpu_field,
Work->gpu_field,
TemperInt->gpu_field,
smoothing*smoothing,
1.0/(rh*rh),
HEAVISIDEB,
x0,
y0,
ns,
nr,
Rho->pitch/sizeof(double),
2.0*M_PI/(double)ns);
}
cudaThreadSynchronize();
getLastCudaError ("ComputeForce_gpu: kernel failed");
AzimuthalAverage (Work, ForceX);
AzimuthalAverage (TemperInt, ForceY);
getLastCudaError ("grabuge dans les azimuthal average");
for (int i = 0; i < nr; i++) {
if (Rmed[i] < a) {
fxi += G*ForceX[i];
fyi += G*ForceY[i];
} else {
fxo += G*ForceX[i];
fyo += G*ForceY[i];
}
}
result.fx_inner = fxi;
result.fy_inner = fyi;
result.fx_outer = fxo;
result.fy_outer = fyo;
result.fx_ex_inner = fxi;
result.fy_ex_inner = fyi;
result.fx_ex_outer = fxo;
result.fy_ex_outer = fyo;
return result;
}
|
abada01d1b7dbe0a91d85775d886ede19ba84b24.hip | // !!! This is a file automatically generated by hipify!!!
// includes, cuda
#include <cstdint>
#include <climits>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <cudaDefs.h>
#include <imageManager.h>
#include "arrayUtils.cuh"
#define BENCHMARK_NUM_REPS 100 // number of repetitions for benchmarking
#define TPB_1D 32 // ThreadsPerBlock in one dimension
#define TPB_2D 1024 // ThreadsPerBlock = 16*16 (2D block)
#define TPB_REDUCTION 512 // ThreadsPerBlock (1D block)
hipError_t error = hipSuccess;
hipDeviceProp_t deviceProp = hipDeviceProp_t();
using DT = uint8_t; // Working data type
struct alignas(8) ResultType
{
float fitness;
uint32_t idx;
ResultType& operator=(ResultType&&) = default; //Forcing a move assignment operator to be generated by the compiler
__host__ __device__ volatile ResultType& operator=(volatile const ResultType& other) volatile
{
fitness = other.fitness;
idx = other.idx;
return *this;
}
};
struct Image
{
uint32_t width = 0;
uint32_t height = 0;
uint32_t pitch = 0;
DT* ptr = nullptr;
};
void prepareData(const char* imageFileName, Image& img)
{
FIBITMAP* tmpA = ImageManager::GenericLoader(imageFileName, 0);
img.width = FreeImage_GetWidth(tmpA);
img.height = FreeImage_GetHeight(tmpA);
img.pitch = FreeImage_GetPitch(tmpA); // FREEIMAGE align row data ... You have to use pitch instead of width
//Create a memory block using UNIFIED MEMORY to store original image. This is a redundant copy, however the data will be ready to use directly by GPU.
uint8_t* tmpB = nullptr;
size_t imageSize = static_cast<size_t>(img.pitch * img.height * FreeImage_GetBPP(tmpA)) >> 3;
checkCudaErrors(hipMallocManaged(&tmpB, imageSize));
checkCudaErrors(hipMemcpy(tmpB, FreeImage_GetBits(tmpA), imageSize, hipMemcpyHostToDevice));
//checkHostMatrix(tmpB, img.pitch, img.height, img.width, "%d ", "Reference");
FreeImage_Unload(tmpA);
//Create a memory block using UNIFIED MEMORY to store DT data and convert tmpB -> img.ptr
checkCudaErrors(hipMallocManaged(&img.ptr, img.width * img.height * sizeof(DT)));
dim3 block{ 256,1,1 };
dim3 grid{ getNumberOfParts(img.width * img.height, 256), 1, 1 };
arrayReshape<uint8_t, DT> <<<grid, block>> > (tmpB, img.width, img.height, img.pitch, img.width, img.height, img.width*sizeof(DT), img.ptr);
//From now, we have a new pitch of the final data.
img.pitch = img.width * sizeof(DT);
//Some synchronization must be called when using UNIFIED MEMORY in async. Previous kernel was called asynchronously!!!
hipDeviceSynchronize();
//checkHostMatrix(img.ptr, img.width * sizeof(DT), img.height, img.width, "%0.2f ", "Reference");
}
//Every THREAD of 2D block [16x16] computes one final fitness value for a single pixel of the reference image. One corner of the query image is "virtually" attached to this pixel position.
//A SINGLE THREAD compares the query image with the given region of the reference image.
__global__ void find(const DT* __restrict__ ref, const uint32_t rWidth, const uint32_t rHeight,
const DT* __restrict__ query, const uint32_t qWidth, const uint32_t qHeight,
ResultType* __restrict__ blockResults)
{
uint32_t tid = threadIdx.x + threadIdx.y * blockDim.x;
uint32_t rx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t ry = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t qx, qy;
const DT* r = nullptr;
const DT* q = nullptr;
__shared__ ResultType sData[TPB_2D];
sData[tid] = { FLT_MAX, ry * rWidth + rx };
if (ry > rHeight - qHeight) return;
if (rx > rWidth - qWidth) return;
r = &ref[ry * rWidth + rx]; //Pointer to starting ROW position in the reference image.
q = &query[0]; //Pointer to starting ROW position in the query image.
sData[tid].fitness = 0.0f;
for (qy=0; qy < qHeight; qy++) //Each thread will process the whole query image
{
for (qx = 0; qx < qWidth; qx++) //Each thread will process the whole query image
{
sData[tid].fitness += (r[qx] - q[qx]) * (r[qx] - q[qx]); //Cummulate the value
}
r += rWidth; //Move one row down in the reference image.
q += qWidth; //Move one row down in the query image.
}
__syncthreads(); //The parallel reduction will start here, all WARPS has to finish previous instructions.
for (uint32_t s = (TPB_2D >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application
{
if (tid < s)
{
if (sData[tid + s].fitness < sData[tid].fitness)
{
sData[tid] = sData[tid + s];
}
}
__syncthreads();
}
if (tid < 32) //Only one warm is active here, no sync is needed.
{
volatile ResultType* vsData = sData;
vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? vsData[tid] : vsData[tid + 1];
}
if (tid == 0) //0-th thread stores the final BEST result for a given block
{
blockResults[blockIdx.y * gridDim.x + blockIdx.x] = sData[0];
}
}
//One 1D block reduction
__global__ void getBest(ResultType* data, const uint32_t length)
{
__shared__ ResultType sData[TPB_REDUCTION];
uint32_t tid = threadIdx.x;
const uint32_t offset = blockDim.x;
sData[tid] = { FLT_MAX , tid }; //Initial fill of the shared memory
if (tid < length)
{
sData[tid] = data[tid];
}
uint32_t nextId = tid + offset;
ResultType* ptr = &data[nextId]; //Pointer to global mem;
while (nextId < length) //Compare rest of data from the global memory
{
if (ptr->fitness < sData[tid].fitness)
{
sData[tid] = *ptr;
}
ptr += offset;
nextId += offset;
}
__syncthreads(); //Start reduction from now
for (uint32_t s = (TPB_REDUCTION >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application
{
if (tid < s)
{
if (sData[tid + s].fitness < sData[tid].fitness)
{
sData[tid] = sData[tid + s];
}
}
__syncthreads();
}
if (tid < 32) //Only one warp is active here, no sync is needed.
{
volatile ResultType* vsData = sData;
vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? vsData[tid] : vsData[tid + 1];
}
if (tid == 0) //The zero thread saves the result into Global mem
{
data[0] = sData[0];
}
}
int main(int argc, char* argv[])
{
initializeCUDA(deviceProp);
Image ref;
Image query;
FreeImage_Initialise();
prepareData("./Data/reference.tif", ref);
prepareData("./Data/query.tif", query);
FreeImage_DeInitialise();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//How many block of the size of [16x16] will process the reference image?
//Too much to manage. That's we use a 1D grid of [16x16] blocks that will move down the image.
//This we need (((ref.width - query.width + 1) + 16 - 1)/16) blocks!!!
uint32_t noBlocksX = ((ref.width - query.width + 1) + TPB_1D - 1) / TPB_1D;
uint32_t noBlocksY = ((ref.height - query.height + 1) + TPB_1D - 1) / TPB_1D;
ResultType* blockResults = nullptr;
size_t blockResultsSize = static_cast<size_t>(noBlocksX * noBlocksY *sizeof(ResultType));
checkCudaErrors(hipMallocManaged(&blockResults, blockResultsSize));
checkCudaErrors(hipEventRecord(start, 0));
//uint32_t counter = 0;
//while (true)
//{
//1. Try to compute all possible matches.
dim3 block{ TPB_1D , TPB_1D ,1 };
dim3 grid{ noBlocksX, noBlocksY, 1 };
find << <grid, block >> > (ref.ptr, ref.width, ref.height, query.ptr, query.width, query.height, blockResults);
//2. Search for the best match
block = { TPB_REDUCTION ,1,1 };
grid = { 1, 1, 1 };
getBest << <grid, block >> > (blockResults, noBlocksX * noBlocksY);
hipDeviceSynchronize();
// printf("%d ", ++counter);
//}
checkCudaErrors(hipEventRecord(stop, 0));
checkCudaErrors(hipEventSynchronize(stop));
float elapsedTime;
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
hipEventDestroy(start);
hipEventDestroy(stop);
printf("Best fitness value: %f\n", blockResults[0].fitness);
printf("Winner index: %u\n", blockResults[0].idx);
printf("Winner's LEFT-TOP CORNER X: %u\n", blockResults[0].idx % ref.width);
printf("Winner's LEFT-TOP CORNER Y: %u\n", ref.height - (blockResults[0].idx / ref.width) - query.height);
printf("Computation time: %f ms\n", elapsedTime);
if (ref.ptr) hipFree(ref.ptr);
if (query.ptr) hipFree(query.ptr);
if (blockResults) hipFree(blockResults);
}
| abada01d1b7dbe0a91d85775d886ede19ba84b24.cu | // includes, cuda
#include <cstdint>
#include <climits>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <cudaDefs.h>
#include <imageManager.h>
#include "arrayUtils.cuh"
#define BENCHMARK_NUM_REPS 100 // number of repetitions for benchmarking
#define TPB_1D 32 // ThreadsPerBlock in one dimension
#define TPB_2D 1024 // ThreadsPerBlock = 16*16 (2D block)
#define TPB_REDUCTION 512 // ThreadsPerBlock (1D block)
cudaError_t error = cudaSuccess;
cudaDeviceProp deviceProp = cudaDeviceProp();
using DT = uint8_t; // Working data type
struct alignas(8) ResultType
{
float fitness;
uint32_t idx;
ResultType& operator=(ResultType&&) = default; //Forcing a move assignment operator to be generated by the compiler
__host__ __device__ volatile ResultType& operator=(volatile const ResultType& other) volatile
{
fitness = other.fitness;
idx = other.idx;
return *this;
}
};
struct Image
{
uint32_t width = 0;
uint32_t height = 0;
uint32_t pitch = 0;
DT* ptr = nullptr;
};
void prepareData(const char* imageFileName, Image& img)
{
FIBITMAP* tmpA = ImageManager::GenericLoader(imageFileName, 0);
img.width = FreeImage_GetWidth(tmpA);
img.height = FreeImage_GetHeight(tmpA);
img.pitch = FreeImage_GetPitch(tmpA); // FREEIMAGE align row data ... You have to use pitch instead of width
//Create a memory block using UNIFIED MEMORY to store original image. This is a redundant copy, however the data will be ready to use directly by GPU.
uint8_t* tmpB = nullptr;
size_t imageSize = static_cast<size_t>(img.pitch * img.height * FreeImage_GetBPP(tmpA)) >> 3;
checkCudaErrors(cudaMallocManaged(&tmpB, imageSize));
checkCudaErrors(cudaMemcpy(tmpB, FreeImage_GetBits(tmpA), imageSize, cudaMemcpyHostToDevice));
//checkHostMatrix(tmpB, img.pitch, img.height, img.width, "%d ", "Reference");
FreeImage_Unload(tmpA);
//Create a memory block using UNIFIED MEMORY to store DT data and convert tmpB -> img.ptr
checkCudaErrors(cudaMallocManaged(&img.ptr, img.width * img.height * sizeof(DT)));
dim3 block{ 256,1,1 };
dim3 grid{ getNumberOfParts(img.width * img.height, 256), 1, 1 };
arrayReshape<uint8_t, DT> <<<grid, block>> > (tmpB, img.width, img.height, img.pitch, img.width, img.height, img.width*sizeof(DT), img.ptr);
//From now, we have a new pitch of the final data.
img.pitch = img.width * sizeof(DT);
//Some synchronization must be called when using UNIFIED MEMORY in async. Previous kernel was called asynchronously!!!
cudaDeviceSynchronize();
//checkHostMatrix(img.ptr, img.width * sizeof(DT), img.height, img.width, "%0.2f ", "Reference");
}
//Every THREAD of 2D block [16x16] computes one final fitness value for a single pixel of the reference image. One corner of the query image is "virtually" attached to this pixel position.
//A SINGLE THREAD compares the query image with the given region of the reference image.
__global__ void find(const DT* __restrict__ ref, const uint32_t rWidth, const uint32_t rHeight,
const DT* __restrict__ query, const uint32_t qWidth, const uint32_t qHeight,
ResultType* __restrict__ blockResults)
{
uint32_t tid = threadIdx.x + threadIdx.y * blockDim.x;
uint32_t rx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t ry = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t qx, qy;
const DT* r = nullptr;
const DT* q = nullptr;
__shared__ ResultType sData[TPB_2D];
sData[tid] = { FLT_MAX, ry * rWidth + rx };
if (ry > rHeight - qHeight) return;
if (rx > rWidth - qWidth) return;
r = &ref[ry * rWidth + rx]; //Pointer to starting ROW position in the reference image.
q = &query[0]; //Pointer to starting ROW position in the query image.
sData[tid].fitness = 0.0f;
for (qy=0; qy < qHeight; qy++) //Each thread will process the whole query image
{
for (qx = 0; qx < qWidth; qx++) //Each thread will process the whole query image
{
sData[tid].fitness += (r[qx] - q[qx]) * (r[qx] - q[qx]); //Cummulate the value
}
r += rWidth; //Move one row down in the reference image.
q += qWidth; //Move one row down in the query image.
}
__syncthreads(); //The parallel reduction will start here, all WARPS has to finish previous instructions.
for (uint32_t s = (TPB_2D >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application
{
if (tid < s)
{
if (sData[tid + s].fitness < sData[tid].fitness)
{
sData[tid] = sData[tid + s];
}
}
__syncthreads();
}
if (tid < 32) //Only one warm is active here, no sync is needed.
{
volatile ResultType* vsData = sData;
vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? vsData[tid] : vsData[tid + 1];
}
if (tid == 0) //0-th thread stores the final BEST result for a given block
{
blockResults[blockIdx.y * gridDim.x + blockIdx.x] = sData[0];
}
}
//One 1D block reduction
// getBest: single-block k=1 selection. Finds the element with the smallest
// .fitness in data[0..length-1] and writes the winner back into data[0].
// Expected launch: <<<1, TPB_REDUCTION>>> (see main()); more than one block
// would race on data[0].
__global__ void getBest(ResultType* data, const uint32_t length)
{
__shared__ ResultType sData[TPB_REDUCTION];
uint32_t tid = threadIdx.x;
const uint32_t offset = blockDim.x; //Stride for the grid-style scan over global memory.
sData[tid] = { FLT_MAX , tid }; //Initial fill of the shared memory (FLT_MAX = sentinel when length < blockDim)
if (tid < length)
{
sData[tid] = data[tid];
}
uint32_t nextId = tid + offset;
ResultType* ptr = &data[nextId]; //Pointer to global mem;
while (nextId < length) //Compare rest of data from the global memory
{
if (ptr->fitness < sData[tid].fitness)
{
sData[tid] = *ptr;
}
ptr += offset;
nextId += offset;
}
__syncthreads(); //Start reduction from now
for (uint32_t s = (TPB_REDUCTION >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application
{
if (tid < s)
{
if (sData[tid + s].fitness < sData[tid].fitness)
{
sData[tid] = sData[tid + s];
}
}
__syncthreads();
}
// NOTE(review): the tail below is the classic implicit warp-synchronous idiom
// (volatile shared memory, no barriers). Independent thread scheduling on
// Volta+ does not guarantee this -- confirm the target SM or insert
// __syncwarp() between steps. It also reads sData[tid + 32], so it assumes
// TPB_REDUCTION >= 64.
if (tid < 32) //Only one warp is active here, no sync is needed.
{
volatile ResultType* vsData = sData;
vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2];
vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? vsData[tid] : vsData[tid + 1];
}
if (tid == 0) //The zero thread saves the result into Global mem
{
data[0] = sData[0];
}
}
// Template-matching driver:
//  1. Loads a reference and a query image via FreeImage (prepareData puts the
//     pixels in CUDA-accessible memory).
//  2. Launches `find` on a 2D grid so every valid top-left placement of the
//     query inside the reference gets an SSD fitness; each block keeps its best.
//  3. Reduces the per-block winners to one global best with `getBest`.
// Prints the winner and the GPU time measured with CUDA events.
int main(int argc, char* argv[])
{
    initializeCUDA(deviceProp);

    Image ref;
    Image query;
    FreeImage_Initialise();
    prepareData("./Data/reference.tif", ref);
    prepareData("./Data/query.tif", query);
    FreeImage_DeInitialise();

    cudaEvent_t start, stop;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));

    // Blocks of [TPB_1D x TPB_1D] threads must cover every valid top-left
    // position: (ref.width - query.width + 1) x (ref.height - query.height + 1).
    uint32_t noBlocksX = ((ref.width - query.width + 1) + TPB_1D - 1) / TPB_1D;
    uint32_t noBlocksY = ((ref.height - query.height + 1) + TPB_1D - 1) / TPB_1D;

    ResultType* blockResults = nullptr;
    // Widen BEFORE multiplying so the block-count product cannot overflow 32 bits.
    size_t blockResultsSize = static_cast<size_t>(noBlocksX) * noBlocksY * sizeof(ResultType);
    checkCudaErrors(cudaMallocManaged(&blockResults, blockResultsSize));

    checkCudaErrors(cudaEventRecord(start, 0));

    // 1. Compute the fitness of all possible matches; one best result per block.
    dim3 block{ TPB_1D, TPB_1D, 1 };
    dim3 grid{ noBlocksX, noBlocksY, 1 };
    find<<<grid, block>>>(ref.ptr, ref.width, ref.height, query.ptr, query.width, query.height, blockResults);
    checkCudaErrors(cudaGetLastError());    // catch launch-configuration errors

    // 2. Single-block reduction over the per-block winners.
    block = { TPB_REDUCTION, 1, 1 };
    grid = { 1, 1, 1 };
    getBest<<<grid, block>>>(blockResults, noBlocksX * noBlocksY);
    checkCudaErrors(cudaGetLastError());
    // blockResults is managed memory: synchronize before the host reads it.
    checkCudaErrors(cudaDeviceSynchronize());

    checkCudaErrors(cudaEventRecord(stop, 0));
    checkCudaErrors(cudaEventSynchronize(stop));
    float elapsedTime;
    checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));

    printf("Best fitness value: %f\n", blockResults[0].fitness);
    printf("Winner index: %u\n", blockResults[0].idx);
    printf("Winner's LEFT-TOP CORNER X: %u\n", blockResults[0].idx % ref.width);
    // NOTE(review): the Y conversion flips the row, presumably because FreeImage
    // stores scanlines bottom-up -- confirm against prepareData's row order.
    printf("Winner's LEFT-TOP CORNER Y: %u\n", ref.height - (blockResults[0].idx / ref.width) - query.height);
    printf("Computation time: %f ms\n", elapsedTime);

    if (ref.ptr) cudaFree(ref.ptr);
    if (query.ptr) cudaFree(query.ptr);
    if (blockResults) cudaFree(blockResults);
    return 0;
}
|
0d508631b152a286cf3dcc1d0440da8127c83a88.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
// reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <ceed/ceed.h>
#include <hip/hip_runtime.h>
// Maximum supported 1D basis dimension; c_B / c_G hold at most 16x16 entries.
const int sizeMax = 16;
// Constant-memory copies of the 1D interpolation (B) and gradient (G) matrices.
__constant__ CeedScalar c_B[sizeMax*sizeMax];
__constant__ CeedScalar c_G[sizeMax*sizeMax];
//------------------------------------------------------------------------------
// Interp device initialization
//------------------------------------------------------------------------------
// Copy the P1d*Q1d interpolation matrix d_B (device memory) into constant
// memory c_B and return its device address through *c_B_ptr.
// NOTE(review): the hip call results are ignored and 0 is always returned --
// confirm whether CEED's error convention should propagate them.
extern "C" int CeedCudaInitInterp(CeedScalar *d_B, CeedInt P1d, CeedInt Q1d,
CeedScalar **c_B_ptr) {
const int Bsize = P1d*Q1d*sizeof(CeedScalar);
hipMemcpyToSymbol(c_B, d_B, Bsize, 0, hipMemcpyDeviceToDevice);
hipGetSymbolAddress((void **)c_B_ptr, c_B);
return 0;
}
//------------------------------------------------------------------------------
// Grad device initialization
//------------------------------------------------------------------------------
// Copy both the interpolation matrix d_B and the gradient matrix d_G (each
// P1d*Q1d entries, device memory) into constant memory and return their device
// addresses through *c_B_ptr / *c_G_ptr.
// NOTE(review): hip call results are ignored; always returns 0.
extern "C" int CeedCudaInitInterpGrad(CeedScalar *d_B, CeedScalar *d_G,
CeedInt P1d, CeedInt Q1d, CeedScalar **c_B_ptr, CeedScalar **c_G_ptr) {
const int Bsize = P1d*Q1d*sizeof(CeedScalar); // same size for B and G
hipMemcpyToSymbol(c_B, d_B, Bsize, 0, hipMemcpyDeviceToDevice);
hipGetSymbolAddress((void **)c_B_ptr, c_B);
hipMemcpyToSymbol(c_G, d_G, Bsize, 0, hipMemcpyDeviceToDevice);
hipGetSymbolAddress((void **)c_G_ptr, c_G);
return 0;
}
//------------------------------------------------------------------------------
| 0d508631b152a286cf3dcc1d0440da8127c83a88.cu | // Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
// reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <ceed/ceed.h>
#include <cuda.h>
// Maximum supported 1D basis dimension; c_B / c_G hold at most 16x16 entries.
const int sizeMax = 16;
// Constant-memory copies of the 1D interpolation (B) and gradient (G) matrices.
__constant__ CeedScalar c_B[sizeMax*sizeMax];
__constant__ CeedScalar c_G[sizeMax*sizeMax];
//------------------------------------------------------------------------------
// Interp device initialization
//------------------------------------------------------------------------------
// Copy the P1d*Q1d interpolation matrix d_B (device memory) into constant
// memory c_B and return its device address through *c_B_ptr.
// NOTE(review): the CUDA call results are ignored and 0 is always returned --
// confirm whether CEED's error convention should propagate them.
extern "C" int CeedCudaInitInterp(CeedScalar *d_B, CeedInt P1d, CeedInt Q1d,
CeedScalar **c_B_ptr) {
const int Bsize = P1d*Q1d*sizeof(CeedScalar);
cudaMemcpyToSymbol(c_B, d_B, Bsize, 0, cudaMemcpyDeviceToDevice);
cudaGetSymbolAddress((void **)c_B_ptr, c_B);
return 0;
}
//------------------------------------------------------------------------------
// Grad device initialization
//------------------------------------------------------------------------------
// Copy both the interpolation matrix d_B and the gradient matrix d_G (each
// P1d*Q1d entries, device memory) into constant memory and return their device
// addresses through *c_B_ptr / *c_G_ptr.
// NOTE(review): CUDA call results are ignored; always returns 0.
extern "C" int CeedCudaInitInterpGrad(CeedScalar *d_B, CeedScalar *d_G,
CeedInt P1d, CeedInt Q1d, CeedScalar **c_B_ptr, CeedScalar **c_G_ptr) {
const int Bsize = P1d*Q1d*sizeof(CeedScalar); // same size for B and G
cudaMemcpyToSymbol(c_B, d_B, Bsize, 0, cudaMemcpyDeviceToDevice);
cudaGetSymbolAddress((void **)c_B_ptr, c_B);
cudaMemcpyToSymbol(c_G, d_G, Bsize, 0, cudaMemcpyDeviceToDevice);
cudaGetSymbolAddress((void **)c_G_ptr, c_G);
return 0;
}
//------------------------------------------------------------------------------
|
76aba64a3b7960a15f9b6094d3e524fd73e2e452.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaUtility.h"
// Clamp a scalar to the closed interval [min, max].
// (A NaN input fails both comparisons and is returned unchanged.)
static inline __device__ float clip( const float x, float min, float max )
{
    if (x > max)
        return max;
    if (x < min)
        return min;
    return x;
}
// Component-wise clamp of a float4 to [min, max].
static inline __device__ float4 clip( const float4& px, float min, float max )
{
    float4 result;
    result.x = clip(px.x, min, max);
    result.y = clip(px.y, min, max);
    result.z = clip(px.z, min, max);
    result.w = clip(px.w, min, max);
    return result;
}
// gpuPreSuperResNet
// Converts an interleaved image (T exposes .x/.y/.z channels) into planar CHW
// float output: nearest-neighbour resample from the iWidth-wide input to
// oWidth x oHeight, each channel multiplied by pixel_scale.
// Expected launch: 2D grid covering oWidth x oHeight, one thread per output pixel.
template<typename T>
__global__ void gpuPreSuperResNet( T* input, int iWidth, float* output, int oWidth, int oHeight, float2 res_scale, float pixel_scale )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int n = oWidth * oHeight; // pixels per output plane
if( x >= oWidth || y >= oHeight ) // guard: the grid may overshoot the image
return;
const int dx = ((float)x * res_scale.x); // nearest-neighbour source column (truncating)
const int dy = ((float)y * res_scale.y); // nearest-neighbour source row
const T px = input[ dy * iWidth + dx ];
const float3 rgb = make_float3(px.x * pixel_scale, px.y * pixel_scale, px.z * pixel_scale);
output[n * 0 + y * oWidth + x] = rgb.x;
output[n * 1 + y * oWidth + x] = rgb.y;
output[n * 2 + y * oWidth + x] = rgb.z;
}
// cudaPreSuperResNet
// Host wrapper: validates arguments, derives the output->input resample
// ratios, and launches gpuPreSuperResNet<float4> on `stream` with pixel
// values normalized by 1/maxPixelValue. Asynchronous; returns the launch status.
hipError_t cudaPreSuperResNet( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
float maxPixelValue, hipStream_t stream )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
const float2 res_scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
const float pixel_scale = 1.0f / maxPixelValue; // normalize channels to [0,1]
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuPreSuperResNet<float4>), dim3(gridDim), dim3(blockDim), 0, stream, input, inputWidth, output, outputWidth, outputHeight, res_scale, pixel_scale);
return CUDA(hipGetLastError());
}
// gpuPostSuperResNet
// Inverse of the pre-processing kernel: reads planar CHW float input
// (iWidth x iHeight per plane), nearest-neighbour resamples to
// oWidth x oHeight, rescales by pixel_scale, clips each channel to
// [0, pixel_scale] and stores interleaved output with alpha = pixel_scale.
template<typename T>
__global__ void gpuPostSuperResNet( float* input, int iWidth, int iHeight, T* output, int oWidth, int oHeight, float2 res_scale, float pixel_scale )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int n = iWidth * iHeight; // pixels per input plane
if( x >= oWidth || y >= oHeight ) // guard: the grid may overshoot the image
return;
const int dx = ((float)x * res_scale.x); // nearest-neighbour source column (truncating)
const int dy = ((float)y * res_scale.y); // nearest-neighbour source row
const float4 rgb = clip(make_float4(input[n * 0 + dy * iWidth + dx] * pixel_scale,
input[n * 1 + dy * iWidth + dx] * pixel_scale,
input[n * 2 + dy * iWidth + dx] * pixel_scale,
pixel_scale), 0.0f, pixel_scale);
output[y * oWidth + x] = rgb;
}
// cudaPostSuperResNet
// Host wrapper for the post-processing kernel. Unlike the pre path, the
// kernel's pixel_scale receives maxPixelValue directly, so channels are
// scaled back up to [0, maxPixelValue].
// NOTE(review): alpha is therefore also set to maxPixelValue -- confirm that
// is the intended alpha for the output format.
hipError_t cudaPostSuperResNet( float* input, size_t inputWidth, size_t inputHeight,
float4* output, size_t outputWidth, size_t outputHeight,
float maxPixelValue, hipStream_t stream )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
const float2 res_scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuPostSuperResNet<float4>), dim3(gridDim), dim3(blockDim), 0, stream, input, inputWidth, inputHeight, output, outputWidth, outputHeight, res_scale, maxPixelValue);
return CUDA(hipGetLastError());
}
| 76aba64a3b7960a15f9b6094d3e524fd73e2e452.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaUtility.h"
// Clamp a scalar to the closed interval [min, max].
// (A NaN input fails both comparisons and is returned unchanged.)
static inline __device__ float clip( const float x, float min, float max )
{
    if (x > max)
        return max;
    if (x < min)
        return min;
    return x;
}
// Component-wise clamp of a float4 to [min, max].
static inline __device__ float4 clip( const float4& px, float min, float max )
{
    float4 result;
    result.x = clip(px.x, min, max);
    result.y = clip(px.y, min, max);
    result.z = clip(px.z, min, max);
    result.w = clip(px.w, min, max);
    return result;
}
// gpuPreSuperResNet
// Converts an interleaved image (T exposes .x/.y/.z channels) into planar CHW
// float output: nearest-neighbour resample from the iWidth-wide input to
// oWidth x oHeight, each channel multiplied by pixel_scale.
// Expected launch: 2D grid covering oWidth x oHeight, one thread per output pixel.
template<typename T>
__global__ void gpuPreSuperResNet( T* input, int iWidth, float* output, int oWidth, int oHeight, float2 res_scale, float pixel_scale )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int n = oWidth * oHeight; // pixels per output plane
if( x >= oWidth || y >= oHeight ) // guard: the grid may overshoot the image
return;
const int dx = ((float)x * res_scale.x); // nearest-neighbour source column (truncating)
const int dy = ((float)y * res_scale.y); // nearest-neighbour source row
const T px = input[ dy * iWidth + dx ];
const float3 rgb = make_float3(px.x * pixel_scale, px.y * pixel_scale, px.z * pixel_scale);
output[n * 0 + y * oWidth + x] = rgb.x;
output[n * 1 + y * oWidth + x] = rgb.y;
output[n * 2 + y * oWidth + x] = rgb.z;
}
// cudaPreSuperResNet
// Host wrapper: validates arguments, derives the output->input resample
// ratios, and launches gpuPreSuperResNet<float4> on `stream` with pixel
// values normalized by 1/maxPixelValue. Asynchronous; returns the launch status.
cudaError_t cudaPreSuperResNet( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
float maxPixelValue, cudaStream_t stream )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
const float2 res_scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
const float pixel_scale = 1.0f / maxPixelValue; // normalize channels to [0,1]
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuPreSuperResNet<float4><<<gridDim, blockDim, 0, stream>>>(input, inputWidth, output, outputWidth, outputHeight, res_scale, pixel_scale);
return CUDA(cudaGetLastError());
}
// gpuPostSuperResNet
// Inverse of the pre-processing kernel: reads planar CHW float input
// (iWidth x iHeight per plane), nearest-neighbour resamples to
// oWidth x oHeight, rescales by pixel_scale, clips each channel to
// [0, pixel_scale] and stores interleaved output with alpha = pixel_scale.
template<typename T>
__global__ void gpuPostSuperResNet( float* input, int iWidth, int iHeight, T* output, int oWidth, int oHeight, float2 res_scale, float pixel_scale )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int n = iWidth * iHeight; // pixels per input plane
if( x >= oWidth || y >= oHeight ) // guard: the grid may overshoot the image
return;
const int dx = ((float)x * res_scale.x); // nearest-neighbour source column (truncating)
const int dy = ((float)y * res_scale.y); // nearest-neighbour source row
const float4 rgb = clip(make_float4(input[n * 0 + dy * iWidth + dx] * pixel_scale,
input[n * 1 + dy * iWidth + dx] * pixel_scale,
input[n * 2 + dy * iWidth + dx] * pixel_scale,
pixel_scale), 0.0f, pixel_scale);
output[y * oWidth + x] = rgb;
}
// cudaPostSuperResNet
// Host wrapper for the post-processing kernel. Unlike the pre path, the
// kernel's pixel_scale receives maxPixelValue directly, so channels are
// scaled back up to [0, maxPixelValue].
// NOTE(review): alpha is therefore also set to maxPixelValue -- confirm that
// is the intended alpha for the output format.
cudaError_t cudaPostSuperResNet( float* input, size_t inputWidth, size_t inputHeight,
float4* output, size_t outputWidth, size_t outputHeight,
float maxPixelValue, cudaStream_t stream )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
const float2 res_scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuPostSuperResNet<float4><<<gridDim, blockDim, 0, stream>>>(input, inputWidth, inputHeight, output, outputWidth, outputHeight, res_scale, maxPixelValue);
return CUDA(cudaGetLastError());
}
|
d06d7cddba034865939bba64c037c9c7d2a14205.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFInterleaved.cuh>
#include <faiss/gpu/impl/scan/IVFInterleavedImpl.cuh>
namespace faiss {
namespace gpu {
constexpr uint32_t kMaxUInt32 = std::numeric_limits<uint32_t>::max();
// Second-pass kernel to further k-select the results from the first pass across
// IVF lists and produce the final results
// Launch: one block per query (gridDim.x == number of queries), ThreadsPerBlock
// threads. Each block scans its query's nprobe*k first-pass candidates and
// writes the final top-k (distanceOut/indicesOut row). Candidate identity is
// packed as (probe << 16) | slot, which is why nprobe and k must fit in 16 bits.
template <int ThreadsPerBlock, int NumWarpQ, int NumThreadQ>
__global__ void ivfInterleavedScan2(
Tensor<float, 3, true> distanceIn,
Tensor<idx_t, 3, true> indicesIn,
Tensor<idx_t, 2, true> listIds,
int k,
void** listIndices,
IndicesOptions opt,
bool dir,
Tensor<float, 2, true> distanceOut,
Tensor<idx_t, 2, true> indicesOut) {
int queryId = blockIdx.x;
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ float smemK[kNumWarps * NumWarpQ];
// The BlockSelect value type is uint32_t, as we pack together which probe
// (up to nprobe - 1) and which k (up to k - 1) from each individual list
// together, and both nprobe and k are limited to GPU_MAX_SELECTION_K.
__shared__ uint32_t smemV[kNumWarps * NumWarpQ];
// To avoid creating excessive specializations, we combine direction
// kernels, selecting for the smallest element. If `dir` is true, we negate
// all values being selected (so that we are selecting the largest element).
BlockSelect<
float,
uint32_t,
false,
Comparator<float>,
NumWarpQ,
NumThreadQ,
ThreadsPerBlock>
heap(kFloatMax, kMaxUInt32, smemK, smemV, k);
// nprobe x k
idx_t num = distanceIn.getSize(1) * distanceIn.getSize(2);
const float* distanceBase = distanceIn[queryId].data();
idx_t limit = utils::roundDown(num, kWarpSize);
// This will keep our negation factor
float adj = dir ? -1 : 1;
idx_t i = threadIdx.x;
// Main loop: full warps only (i < limit), so heap bookkeeping stays
// warp-uniform; the ragged tail is handled after the loop.
for (; i < limit; i += blockDim.x) {
// We represent the index as (probe id)(k)
// Right now, both are limited to a maximum of 2048, but we will
// dedicate each to the high and low words of a uint32_t
static_assert(GPU_MAX_SELECTION_K <= 65536, "");
uint32_t curProbe = i / k;
uint32_t curK = i % k;
// Since nprobe and k are limited, we can pack both of these together
// into a uint32_t
uint32_t index = (curProbe << 16) | (curK & (uint32_t)0xffff);
// The IDs reported from the list may be -1, if a particular IVF list
// doesn't even have k entries in it
if (listIds[queryId][curProbe] != -1) {
// Adjust the value we are selecting based on the sorting order
heap.addThreadQ(distanceBase[i] * adj, index);
}
heap.checkThreadQ();
}
// Handle warp divergence separately
if (i < num) {
uint32_t curProbe = i / k;
uint32_t curK = i % k;
uint32_t index = (curProbe << 16) | (curK & (uint32_t)0xffff);
idx_t listId = listIds[queryId][curProbe];
if (listId != -1) {
heap.addThreadQ(distanceBase[i] * adj, index);
}
}
// Merge all final results
heap.reduce();
for (int i = threadIdx.x; i < k; i += blockDim.x) {
// Re-adjust the value we are selecting based on the sorting order
distanceOut[queryId][i] = smemK[i] * adj;
auto packedIndex = smemV[i];
// We need to remap to the user-provided indices
idx_t index = -1;
// We may not have at least k values to return; in this function, max
// uint32 is our sentinel value
if (packedIndex != kMaxUInt32) {
uint32_t curProbe = packedIndex >> 16;
uint32_t curK = packedIndex & 0xffff;
idx_t listId = listIds[queryId][curProbe];
idx_t listOffset = indicesIn[queryId][curProbe][curK];
if (opt == INDICES_32_BIT) {
index = (idx_t)((int*)listIndices[listId])[listOffset];
} else if (opt == INDICES_64_BIT) {
index = ((idx_t*)listIndices[listId])[listOffset];
} else {
// Inline mode: pack (list id, offset) into a single 64-bit id.
index = (listId << 32 | (idx_t)listOffset);
}
}
indicesOut[queryId][i] = index;
}
}
// Host-side dispatcher for ivfInterleavedScan2: picks the (threads, warp-queue,
// thread-queue) template specialization from k and launches one block per
// query row on `stream`. k <= GPU_MAX_SELECTION_K is asserted by the caller
// (runIVFInterleavedScan); a k above the largest compiled tier launches nothing.
void runIVFInterleavedScan2(
Tensor<float, 3, true>& distanceIn,
Tensor<idx_t, 3, true>& indicesIn,
Tensor<idx_t, 2, true>& listIds,
int k,
DeviceVector<void*>& listIndices,
IndicesOptions indicesOptions,
bool dir,
Tensor<float, 2, true>& distanceOut,
Tensor<idx_t, 2, true>& indicesOut,
hipStream_t stream) {
#define IVF_SCAN_2(THREADS, NUM_WARP_Q, NUM_THREAD_Q) \
hipLaunchKernelGGL(( ivfInterleavedScan2<THREADS, NUM_WARP_Q, NUM_THREAD_Q>) \
, dim3(distanceIn.getSize(0)), dim3(THREADS), 0, stream, \
distanceIn, \
indicesIn, \
listIds, \
k, \
listIndices.data(), \
indicesOptions, \
dir, \
distanceOut, \
indicesOut)
if (k == 1) {
IVF_SCAN_2(128, 1, 1);
} else if (k <= 32) {
IVF_SCAN_2(128, 32, 2);
} else if (k <= 64) {
IVF_SCAN_2(128, 64, 3);
} else if (k <= 128) {
IVF_SCAN_2(128, 128, 3);
} else if (k <= 256) {
IVF_SCAN_2(128, 256, 4);
} else if (k <= 512) {
IVF_SCAN_2(128, 512, 8);
} else if (k <= 1024) {
IVF_SCAN_2(128, 1024, 8);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
// Largest tier needs fewer threads to fit the bigger per-warp queues.
IVF_SCAN_2(64, 2048, 8);
}
#endif
}
// Entry point for the interleaved IVF list scan. Validates k against
// GPU_MAX_SELECTION_K, then dispatches to the ivfInterleavedScanImpl template
// specialization whose parameter pack matches the requested k tier; the
// lambda just forwards the (long) argument list to the chosen implementation.
void runIVFInterleavedScan(
Tensor<float, 2, true>& queries,
Tensor<idx_t, 2, true>& listIds,
DeviceVector<void*>& listData,
DeviceVector<void*>& listIndices,
IndicesOptions indicesOptions,
DeviceVector<idx_t>& listLengths,
int k,
faiss::MetricType metric,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<idx_t, 2, true>& outIndices,
GpuResources* res) {
// caught for exceptions at a higher level
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
const auto ivf_interleaved_call = [&](const auto func) {
func(queries,
listIds,
listData,
listIndices,
indicesOptions,
listLengths,
k,
metric,
useResidual,
residualBase,
scalarQ,
outDistances,
outIndices,
res);
};
if (k == 1) {
ivf_interleaved_call(ivfInterleavedScanImpl<IVFINTERLEAVED_1_PARAMS>);
} else if (k <= 32) {
ivf_interleaved_call(ivfInterleavedScanImpl<IVFINTERLEAVED_32_PARAMS>);
} else if (k <= 64) {
ivf_interleaved_call(ivfInterleavedScanImpl<IVFINTERLEAVED_64_PARAMS>);
} else if (k <= 128) {
ivf_interleaved_call(ivfInterleavedScanImpl<IVFINTERLEAVED_128_PARAMS>);
} else if (k <= 256) {
ivf_interleaved_call(ivfInterleavedScanImpl<IVFINTERLEAVED_256_PARAMS>);
} else if (k <= 512) {
ivf_interleaved_call(ivfInterleavedScanImpl<IVFINTERLEAVED_512_PARAMS>);
} else if (k <= 1024) {
ivf_interleaved_call(
ivfInterleavedScanImpl<IVFINTERLEAVED_1024_PARAMS>);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
ivf_interleaved_call(
ivfInterleavedScanImpl<IVFINTERLEAVED_2048_PARAMS>);
}
#endif
}
} // namespace gpu
} // namespace faiss
| d06d7cddba034865939bba64c037c9c7d2a14205.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFInterleaved.cuh>
#include <faiss/gpu/impl/scan/IVFInterleavedImpl.cuh>
namespace faiss {
namespace gpu {
constexpr uint32_t kMaxUInt32 = std::numeric_limits<uint32_t>::max();
// Second-pass kernel to further k-select the results from the first pass across
// IVF lists and produce the final results
// Launch: one block per query (gridDim.x == number of queries), ThreadsPerBlock
// threads. Each block scans its query's nprobe*k first-pass candidates and
// writes the final top-k (distanceOut/indicesOut row). Candidate identity is
// packed as (probe << 16) | slot, which is why nprobe and k must fit in 16 bits.
template <int ThreadsPerBlock, int NumWarpQ, int NumThreadQ>
__global__ void ivfInterleavedScan2(
Tensor<float, 3, true> distanceIn,
Tensor<idx_t, 3, true> indicesIn,
Tensor<idx_t, 2, true> listIds,
int k,
void** listIndices,
IndicesOptions opt,
bool dir,
Tensor<float, 2, true> distanceOut,
Tensor<idx_t, 2, true> indicesOut) {
int queryId = blockIdx.x;
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ float smemK[kNumWarps * NumWarpQ];
// The BlockSelect value type is uint32_t, as we pack together which probe
// (up to nprobe - 1) and which k (up to k - 1) from each individual list
// together, and both nprobe and k are limited to GPU_MAX_SELECTION_K.
__shared__ uint32_t smemV[kNumWarps * NumWarpQ];
// To avoid creating excessive specializations, we combine direction
// kernels, selecting for the smallest element. If `dir` is true, we negate
// all values being selected (so that we are selecting the largest element).
BlockSelect<
float,
uint32_t,
false,
Comparator<float>,
NumWarpQ,
NumThreadQ,
ThreadsPerBlock>
heap(kFloatMax, kMaxUInt32, smemK, smemV, k);
// nprobe x k
idx_t num = distanceIn.getSize(1) * distanceIn.getSize(2);
const float* distanceBase = distanceIn[queryId].data();
idx_t limit = utils::roundDown(num, kWarpSize);
// This will keep our negation factor
float adj = dir ? -1 : 1;
idx_t i = threadIdx.x;
// Main loop: full warps only (i < limit), so heap bookkeeping stays
// warp-uniform; the ragged tail is handled after the loop.
for (; i < limit; i += blockDim.x) {
// We represent the index as (probe id)(k)
// Right now, both are limited to a maximum of 2048, but we will
// dedicate each to the high and low words of a uint32_t
static_assert(GPU_MAX_SELECTION_K <= 65536, "");
uint32_t curProbe = i / k;
uint32_t curK = i % k;
// Since nprobe and k are limited, we can pack both of these together
// into a uint32_t
uint32_t index = (curProbe << 16) | (curK & (uint32_t)0xffff);
// The IDs reported from the list may be -1, if a particular IVF list
// doesn't even have k entries in it
if (listIds[queryId][curProbe] != -1) {
// Adjust the value we are selecting based on the sorting order
heap.addThreadQ(distanceBase[i] * adj, index);
}
heap.checkThreadQ();
}
// Handle warp divergence separately
if (i < num) {
uint32_t curProbe = i / k;
uint32_t curK = i % k;
uint32_t index = (curProbe << 16) | (curK & (uint32_t)0xffff);
idx_t listId = listIds[queryId][curProbe];
if (listId != -1) {
heap.addThreadQ(distanceBase[i] * adj, index);
}
}
// Merge all final results
heap.reduce();
for (int i = threadIdx.x; i < k; i += blockDim.x) {
// Re-adjust the value we are selecting based on the sorting order
distanceOut[queryId][i] = smemK[i] * adj;
auto packedIndex = smemV[i];
// We need to remap to the user-provided indices
idx_t index = -1;
// We may not have at least k values to return; in this function, max
// uint32 is our sentinel value
if (packedIndex != kMaxUInt32) {
uint32_t curProbe = packedIndex >> 16;
uint32_t curK = packedIndex & 0xffff;
idx_t listId = listIds[queryId][curProbe];
idx_t listOffset = indicesIn[queryId][curProbe][curK];
if (opt == INDICES_32_BIT) {
index = (idx_t)((int*)listIndices[listId])[listOffset];
} else if (opt == INDICES_64_BIT) {
index = ((idx_t*)listIndices[listId])[listOffset];
} else {
// Inline mode: pack (list id, offset) into a single 64-bit id.
index = (listId << 32 | (idx_t)listOffset);
}
}
indicesOut[queryId][i] = index;
}
}
// Host-side dispatcher for ivfInterleavedScan2: picks the (threads, warp-queue,
// thread-queue) template specialization from k and launches one block per
// query row on `stream`. k <= GPU_MAX_SELECTION_K is asserted by the caller
// (runIVFInterleavedScan); a k above the largest compiled tier launches nothing.
void runIVFInterleavedScan2(
Tensor<float, 3, true>& distanceIn,
Tensor<idx_t, 3, true>& indicesIn,
Tensor<idx_t, 2, true>& listIds,
int k,
DeviceVector<void*>& listIndices,
IndicesOptions indicesOptions,
bool dir,
Tensor<float, 2, true>& distanceOut,
Tensor<idx_t, 2, true>& indicesOut,
cudaStream_t stream) {
#define IVF_SCAN_2(THREADS, NUM_WARP_Q, NUM_THREAD_Q) \
ivfInterleavedScan2<THREADS, NUM_WARP_Q, NUM_THREAD_Q> \
<<<distanceIn.getSize(0), THREADS, 0, stream>>>( \
distanceIn, \
indicesIn, \
listIds, \
k, \
listIndices.data(), \
indicesOptions, \
dir, \
distanceOut, \
indicesOut)
if (k == 1) {
IVF_SCAN_2(128, 1, 1);
} else if (k <= 32) {
IVF_SCAN_2(128, 32, 2);
} else if (k <= 64) {
IVF_SCAN_2(128, 64, 3);
} else if (k <= 128) {
IVF_SCAN_2(128, 128, 3);
} else if (k <= 256) {
IVF_SCAN_2(128, 256, 4);
} else if (k <= 512) {
IVF_SCAN_2(128, 512, 8);
} else if (k <= 1024) {
IVF_SCAN_2(128, 1024, 8);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
// Largest tier needs fewer threads to fit the bigger per-warp queues.
IVF_SCAN_2(64, 2048, 8);
}
#endif
}
// Entry point for the interleaved IVF list scan. Validates k against
// GPU_MAX_SELECTION_K, then dispatches to the ivfInterleavedScanImpl template
// specialization whose parameter pack matches the requested k tier; the
// lambda just forwards the (long) argument list to the chosen implementation.
void runIVFInterleavedScan(
Tensor<float, 2, true>& queries,
Tensor<idx_t, 2, true>& listIds,
DeviceVector<void*>& listData,
DeviceVector<void*>& listIndices,
IndicesOptions indicesOptions,
DeviceVector<idx_t>& listLengths,
int k,
faiss::MetricType metric,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<idx_t, 2, true>& outIndices,
GpuResources* res) {
// caught for exceptions at a higher level
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
const auto ivf_interleaved_call = [&](const auto func) {
func(queries,
listIds,
listData,
listIndices,
indicesOptions,
listLengths,
k,
metric,
useResidual,
residualBase,
scalarQ,
outDistances,
outIndices,
res);
};
if (k == 1) {
ivf_interleaved_call(ivfInterleavedScanImpl<IVFINTERLEAVED_1_PARAMS>);
} else if (k <= 32) {
ivf_interleaved_call(ivfInterleavedScanImpl<IVFINTERLEAVED_32_PARAMS>);
} else if (k <= 64) {
ivf_interleaved_call(ivfInterleavedScanImpl<IVFINTERLEAVED_64_PARAMS>);
} else if (k <= 128) {
ivf_interleaved_call(ivfInterleavedScanImpl<IVFINTERLEAVED_128_PARAMS>);
} else if (k <= 256) {
ivf_interleaved_call(ivfInterleavedScanImpl<IVFINTERLEAVED_256_PARAMS>);
} else if (k <= 512) {
ivf_interleaved_call(ivfInterleavedScanImpl<IVFINTERLEAVED_512_PARAMS>);
} else if (k <= 1024) {
ivf_interleaved_call(
ivfInterleavedScanImpl<IVFINTERLEAVED_1024_PARAMS>);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
ivf_interleaved_call(
ivfInterleavedScanImpl<IVFINTERLEAVED_2048_PARAMS>);
}
#endif
}
} // namespace gpu
} // namespace faiss
|
c931bd47cf5ac6b7100568d658b32dd1fe41239b.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file louvain_app.cu
*
* @brief Community Detection (Louvain) application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definations
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// single-source shortest path includes
#include <gunrock/app/louvain/louvain_enactor.cuh>
#include <gunrock/app/louvain/louvain_test.cuh>
namespace gunrock {
namespace app {
namespace louvain {
// Register all Louvain command-line / runtime parameters on `parameters`
// (app-level, problem, enactor and test groups, in that order).
// NOTE(review): GUARD_CU is a macro defined elsewhere -- presumably it
// early-returns through `retval` on the first failing call; confirm before
// renaming `retval`.
hipError_t UseParameters(util::Parameters &parameters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(UseParameters_test(parameters));
return retval;
}
/**
* @brief Run Louvain tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[in] ref_distances Reference distances
* @param[in] target Whether to perform the Louvain
* \return hipError_t error message(s), if any
*/
// Run the Louvain algorithm `num-runs` times on `graph`, collecting per-run
// timing in the Info structure and optionally validating each run (or only
// the last one, depending on the "validation" parameter) against
// `ref_communities`.  Interface mirrors the other gunrock app drivers.
// NOTE: restored `&parameters` — the ampersand had been mangled into `¶`
// by HTML-entity decoding.
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
hipError_t RunTests(util::Parameters &parameters, GraphT &graph,
                    typename GraphT::VertexT *ref_communities = NULL,
                    util::Location target = util::DEVICE) {
  hipError_t retval = hipSuccess;
  typedef typename GraphT::VertexT VertexT;
  typedef typename GraphT::SizeT SizeT;
  typedef Problem<GraphT> ProblemT;
  typedef Enactor<ProblemT> EnactorT;
  util::CpuTimer cpu_timer, total_timer;
  cpu_timer.Start();
  total_timer.Start();
  // parse configurations from parameters
  bool quiet_mode = parameters.Get<bool>("quiet");
  int num_runs = parameters.Get<int>("num-runs");
  std::string validation = parameters.Get<std::string>("validation");
  util::Info info("Louvain", parameters, graph);  // initialize Info structure
  // allocate problem specific host data (community id per vertex)
  VertexT *h_communities = new VertexT[graph.nodes];
  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  GUARD_CU(problem.Init(graph, target));
  GUARD_CU(enactor.Init(problem, target));
  cpu_timer.Stop();
  parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
  // perform the algorithm, timing each run separately
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    GUARD_CU(problem.Reset(target));
    GUARD_CU(enactor.Reset(target));
    util::PrintMsg("__________________________", !quiet_mode);
    cpu_timer.Start();
    GUARD_CU(enactor.Enact());
    cpu_timer.Stop();
    info.CollectSingleRun(cpu_timer.ElapsedMillis());
    util::PrintMsg("--------------------------", !quiet_mode);
    if (validation == "each") {
      util::PrintMsg(
          "Run " + std::to_string(run_num) + " elapsed: " +
              std::to_string(cpu_timer.ElapsedMillis()) + " ms, #passes = " +
              std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
          !quiet_mode);
      // validate every run against the reference result
      GUARD_CU(problem.Extract(h_communities, NULL, target));
      SizeT num_errors = app::louvain::Validate_Results(
          parameters, graph, h_communities, ref_communities);
    } else {
      util::PrintMsg(
          "Run " + std::to_string(run_num) + " elapsed: " +
              std::to_string(cpu_timer.ElapsedMillis()) + " ms, #passes = " +
              std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
          !quiet_mode);
    }
  }
  cpu_timer.Start();
  // Copy out results
  GUARD_CU(problem.Extract(h_communities, NULL, target));
  if (validation == "last") {
    SizeT num_errors = app::louvain::Validate_Results(
        parameters, graph, h_communities, ref_communities);
  }
  // compute running statistics
  info.ComputeTraversalStats(enactor, (VertexT *)NULL);
  // Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
  // Display_Performance_Profiling(&enactor);
#endif
  // Clean up
  GUARD_CU(enactor.Release(target));
  GUARD_CU(problem.Release(target));
  delete[] h_communities;
  h_communities = NULL;
  cpu_timer.Stop();
  total_timer.Stop();
  info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
  return retval;
}
} // namespace louvain
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_template function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
// Library entry point: run Louvain `num-runs` times on an already-loaded
// graph and write the final community assignment into `communities`.
// Returns the accumulated enactment time (ms) over all runs.
// Unlike RunTests, return codes of Init/Reset/Enact are not checked here;
// this matches the other gunrock C-interface wrappers.
// NOTE: restored `&parameters` — the ampersand had been mangled into `¶`
// by HTML-entity decoding.
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_louvain(gunrock::util::Parameters &parameters, GraphT &graph,
                       typename GraphT::VertexT *communities) {
  typedef typename GraphT::VertexT VertexT;
  typedef gunrock::app::louvain::Problem<GraphT> ProblemT;
  typedef gunrock::app::louvain::Enactor<ProblemT> EnactorT;
  gunrock::util::CpuTimer cpu_timer;
  gunrock::util::Location target = gunrock::util::DEVICE;
  double total_time = 0;
  if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  problem.Init(graph, target);
  enactor.Init(problem, target);
  int num_runs = parameters.Get<int>("num-runs");
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    problem.Reset(target);
    enactor.Reset(target);
    cpu_timer.Start();
    enactor.Enact();
    cpu_timer.Stop();
    total_time += cpu_timer.ElapsedMillis();
    problem.Extract(communities, NULL, target);
  }
  enactor.Release(target);
  problem.Release(target);
  return total_time;
}
/*
* @brief Simple interface take in graph as CSR format
* @param[in] num_nodes Number of veritces in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] edge_values CSR-formatted graph input edge weights
* @param[in] num_runs Number of runs to perform Louvain
* @param[in] sources Sources to begin traverse, one for each run
* @param[in] mark_preds Whether to output predecessor info
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
// Simple CSR interface: wraps the caller's raw CSR arrays in a gunrock
// graph (zero-copy via SetPointer) and runs Louvain `num_runs` times.
// NOTE(review): returns float while gunrock_louvain returns double, so the
// elapsed time is silently narrowed on return.
template <typename VertexT = int, typename SizeT = int,
typename GValueT = unsigned int, typename TValueT = GValueT>
float louvain(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const GValueT *edge_values, const int num_runs,
VertexT *communities) {
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_EDGE_VALUES |
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("louvain");
gunrock::graphio::UseParameters(parameters);
gunrock::app::louvain::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
// NOTE(review): `quiet` is read but not used below — presumably left over
// from the commented-out FromCsr call.
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
// (the const casts are required by SetPointer; the arrays are not modified
// here — TODO confirm against the gunrock CsrT contract)
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1,
gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges,
gunrock::util::HOST);
graph.CsrT::edge_values.SetPointer((GValueT *)edge_values, num_edges,
gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the Louvain
double elapsed_time = gunrock_louvain(parameters, graph, communities);
// Cleanup
graph.Release();
return elapsed_time;
}
// Non-template convenience overload for int CSR data: forwards to the
// template version with a single run.
float louvain(const int num_nodes, const int num_edges, const int *row_offsets,
const int *col_indices, const int *edge_values,
int *communities) {
return louvain(num_nodes, num_edges, row_offsets, col_indices, edge_values,
1 /* num_runs */, communities);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| c931bd47cf5ac6b7100568d658b32dd1fe41239b.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file louvain_app.cu
*
* @brief Community Detection (Louvain) application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definations
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// single-source shortest path includes
#include <gunrock/app/louvain/louvain_enactor.cuh>
#include <gunrock/app/louvain/louvain_test.cuh>
namespace gunrock {
namespace app {
namespace louvain {
// Register all Louvain-related command-line parameters (app, problem,
// enactor and test parameter groups) on the given parameter container.
// Returns cudaSuccess, or the first error propagated by a GUARD_CU'd call.
// NOTE: restored `&parameters` — the reference ampersand had been mangled
// into the `¶` character by an HTML-entity decoding step.
cudaError_t UseParameters(util::Parameters &parameters) {
  cudaError_t retval = cudaSuccess;
  GUARD_CU(UseParameters_app(parameters));
  GUARD_CU(UseParameters_problem(parameters));
  GUARD_CU(UseParameters_enactor(parameters));
  GUARD_CU(UseParameters_test(parameters));
  return retval;
}
/**
* @brief Run Louvain tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[in] ref_distances Reference distances
* @param[in] target Whether to perform the Louvain
* \return cudaError_t error message(s), if any
*/
// Run the Louvain algorithm `num-runs` times on `graph`, collecting per-run
// timing in the Info structure and optionally validating each run (or only
// the last one, depending on the "validation" parameter) against
// `ref_communities`.  Interface mirrors the other gunrock app drivers.
// NOTE: restored `&parameters` — the ampersand had been mangled into `¶`
// by HTML-entity decoding.
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
cudaError_t RunTests(util::Parameters &parameters, GraphT &graph,
                     typename GraphT::VertexT *ref_communities = NULL,
                     util::Location target = util::DEVICE) {
  cudaError_t retval = cudaSuccess;
  typedef typename GraphT::VertexT VertexT;
  typedef typename GraphT::SizeT SizeT;
  typedef Problem<GraphT> ProblemT;
  typedef Enactor<ProblemT> EnactorT;
  util::CpuTimer cpu_timer, total_timer;
  cpu_timer.Start();
  total_timer.Start();
  // parse configurations from parameters
  bool quiet_mode = parameters.Get<bool>("quiet");
  int num_runs = parameters.Get<int>("num-runs");
  std::string validation = parameters.Get<std::string>("validation");
  util::Info info("Louvain", parameters, graph);  // initialize Info structure
  // allocate problem specific host data (community id per vertex)
  VertexT *h_communities = new VertexT[graph.nodes];
  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  GUARD_CU(problem.Init(graph, target));
  GUARD_CU(enactor.Init(problem, target));
  cpu_timer.Stop();
  parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
  // perform the algorithm, timing each run separately
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    GUARD_CU(problem.Reset(target));
    GUARD_CU(enactor.Reset(target));
    util::PrintMsg("__________________________", !quiet_mode);
    cpu_timer.Start();
    GUARD_CU(enactor.Enact());
    cpu_timer.Stop();
    info.CollectSingleRun(cpu_timer.ElapsedMillis());
    util::PrintMsg("--------------------------", !quiet_mode);
    if (validation == "each") {
      util::PrintMsg(
          "Run " + std::to_string(run_num) + " elapsed: " +
              std::to_string(cpu_timer.ElapsedMillis()) + " ms, #passes = " +
              std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
          !quiet_mode);
      // validate every run against the reference result
      GUARD_CU(problem.Extract(h_communities, NULL, target));
      SizeT num_errors = app::louvain::Validate_Results(
          parameters, graph, h_communities, ref_communities);
    } else {
      util::PrintMsg(
          "Run " + std::to_string(run_num) + " elapsed: " +
              std::to_string(cpu_timer.ElapsedMillis()) + " ms, #passes = " +
              std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
          !quiet_mode);
    }
  }
  cpu_timer.Start();
  // Copy out results
  GUARD_CU(problem.Extract(h_communities, NULL, target));
  if (validation == "last") {
    SizeT num_errors = app::louvain::Validate_Results(
        parameters, graph, h_communities, ref_communities);
  }
  // compute running statistics
  info.ComputeTraversalStats(enactor, (VertexT *)NULL);
  // Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
  // Display_Performance_Profiling(&enactor);
#endif
  // Clean up
  GUARD_CU(enactor.Release(target));
  GUARD_CU(problem.Release(target));
  delete[] h_communities;
  h_communities = NULL;
  cpu_timer.Stop();
  total_timer.Stop();
  info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
  return retval;
}
} // namespace louvain
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_template function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
// Library entry point: run Louvain `num-runs` times on an already-loaded
// graph and write the final community assignment into `communities`.
// Returns the accumulated enactment time (ms) over all runs.
// Unlike RunTests, return codes of Init/Reset/Enact are not checked here;
// this matches the other gunrock C-interface wrappers.
// NOTE: restored `&parameters` — the ampersand had been mangled into `¶`
// by HTML-entity decoding.
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_louvain(gunrock::util::Parameters &parameters, GraphT &graph,
                       typename GraphT::VertexT *communities) {
  typedef typename GraphT::VertexT VertexT;
  typedef gunrock::app::louvain::Problem<GraphT> ProblemT;
  typedef gunrock::app::louvain::Enactor<ProblemT> EnactorT;
  gunrock::util::CpuTimer cpu_timer;
  gunrock::util::Location target = gunrock::util::DEVICE;
  double total_time = 0;
  if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  problem.Init(graph, target);
  enactor.Init(problem, target);
  int num_runs = parameters.Get<int>("num-runs");
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    problem.Reset(target);
    enactor.Reset(target);
    cpu_timer.Start();
    enactor.Enact();
    cpu_timer.Stop();
    total_time += cpu_timer.ElapsedMillis();
    problem.Extract(communities, NULL, target);
  }
  enactor.Release(target);
  problem.Release(target);
  return total_time;
}
/*
* @brief Simple interface take in graph as CSR format
* @param[in] num_nodes Number of veritces in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] edge_values CSR-formatted graph input edge weights
* @param[in] num_runs Number of runs to perform Louvain
* @param[in] sources Sources to begin traverse, one for each run
* @param[in] mark_preds Whether to output predecessor info
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
// Simple CSR interface: wraps the caller's raw CSR arrays in a gunrock
// graph (zero-copy via SetPointer) and runs Louvain `num_runs` times.
// NOTE(review): returns float while gunrock_louvain returns double, so the
// elapsed time is silently narrowed on return.
template <typename VertexT = int, typename SizeT = int,
typename GValueT = unsigned int, typename TValueT = GValueT>
float louvain(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const GValueT *edge_values, const int num_runs,
VertexT *communities) {
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_EDGE_VALUES |
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("louvain");
gunrock::graphio::UseParameters(parameters);
gunrock::app::louvain::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
// NOTE(review): `quiet` is read but not used below — presumably left over
// from the commented-out FromCsr call.
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
// (the const casts are required by SetPointer; the arrays are not modified
// here — TODO confirm against the gunrock CsrT contract)
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1,
gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges,
gunrock::util::HOST);
graph.CsrT::edge_values.SetPointer((GValueT *)edge_values, num_edges,
gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the Louvain
double elapsed_time = gunrock_louvain(parameters, graph, communities);
// Cleanup
graph.Release();
return elapsed_time;
}
// Non-template convenience overload for int CSR data: forwards to the
// template version with a single run.
float louvain(const int num_nodes, const int num_edges, const int *row_offsets,
const int *col_indices, const int *edge_values,
int *communities) {
return louvain(num_nodes, num_edges, row_offsets, col_indices, edge_values,
1 /* num_runs */, communities);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
5733dfcb5d5b9a173df561634652397f117933f1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
// Kernel that deliberately performs an integer division by zero (x == 0).
// This is a test program probing device-side behavior of 1/0; the result
// written to *y and printed is whatever the hardware produces (integer
// division by zero does not trap on the GPU the way it does on the host).
__global__ void fun(int *y){
int x = 0;
*y = 1/x;
printf("%d\n", 1/x);
}
// Host driver: allocates one device int, launches the divide-by-zero kernel
// with a single thread, copies the result back and returns it as the exit
// code.  NOTE(review): no API return codes are checked, so any kernel/copy
// failure is silent and `y` may be unspecified in that case — intentional
// for this experiment, per the trailing comment.
int main(void)
{
int y;
int *dev_y;
hipMalloc((void**)&dev_y, sizeof(int));
hipLaunchKernelGGL(( fun), dim3(1),dim3(1), 0, 0, dev_y);
hipMemcpy(&y, dev_y, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_y);
// printf("%d\n", y);
return y;
}
// compiles successfully; returns 1
| 5733dfcb5d5b9a173df561634652397f117933f1.cu | #include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
// Kernel that deliberately performs an integer division by zero (x == 0).
// This is a test program probing device-side behavior of 1/0; the result
// written to *y and printed is whatever the hardware produces (integer
// division by zero does not trap on the GPU the way it does on the host).
__global__ void fun(int *y){
int x = 0;
*y = 1/x;
printf("%d\n", 1/x);
}
// Host driver: allocates one device int, launches the divide-by-zero kernel
// with a single thread, copies the result back and returns it as the exit
// code.  NOTE(review): no API return codes are checked, so any kernel/copy
// failure is silent and `y` may be unspecified in that case — intentional
// for this experiment, per the trailing comment.
int main(void)
{
int y;
int *dev_y;
cudaMalloc((void**)&dev_y, sizeof(int));
fun<<<1,1>>>(dev_y);
cudaMemcpy(&y, dev_y, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_y);
// printf("%d\n", y);
return y;
}
// compiles successfully; returns 1
|
21799bd1abef3796caab446e5cb88ad5692c0523.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// kernel splitting a CSR matrix (MAGMA format) into real and imaginary parts
// One thread per CSR row: for every nonzero in the row, write its real part
// into ReA and its imaginary part into ImA (both stored as complex values
// with zero imaginary component).  rowidx is the CSR row-pointer array;
// num_cols is accepted for interface symmetry but not used here.
__global__ void
zgecsrreimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
magmaDoubleComplex * A,
magmaDoubleComplex * ReA,
magmaDoubleComplex * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
// guard against the partial last block
if( row<num_rows ){
for( j=rowidx[row]; j<rowidx[row+1]; j++ ){
ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
This routine takes an input matrix A in CSR format and located on the GPU
and splits it into two matrixes ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_z_matrix
input matrix A.
@param[out]
ReA magma_z_matrix*
output matrix contaning real contributions.
@param[out]
ImA magma_z_matrix*
output matrix contaning complex contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
// Host wrapper: clones A into *ReA and *ImA (device-to-device transfer,
// which also allocates them), then overwrites their value arrays with the
// real/imaginary parts via the kernel above, on the queue's stream.
// NOTE(review): the return codes of the two magma_zmtransfer calls are
// ignored; a failed allocation would surface only as a kernel fault.
extern "C"
magma_int_t
magma_zgecsrreimsplit(
magma_z_matrix A,
magma_z_matrix *ReA,
magma_z_matrix *ImA,
magma_queue_t queue )
{
magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
// one thread per row, BLOCK_SIZE threads per block
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zgecsrreimsplit_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
| 21799bd1abef3796caab446e5cb88ad5692c0523.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// kernel splitting a CSR matrix (MAGMA format) into real and imaginary parts
// One thread per CSR row: for every nonzero in the row, write its real part
// into ReA and its imaginary part into ImA (both stored as complex values
// with zero imaginary component).  rowidx is the CSR row-pointer array;
// num_cols is accepted for interface symmetry but not used here.
__global__ void
zgecsrreimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
magmaDoubleComplex * A,
magmaDoubleComplex * ReA,
magmaDoubleComplex * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
// guard against the partial last block
if( row<num_rows ){
for( j=rowidx[row]; j<rowidx[row+1]; j++ ){
ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
This routine takes an input matrix A in CSR format and located on the GPU
and splits it into two matrixes ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_z_matrix
input matrix A.
@param[out]
ReA magma_z_matrix*
output matrix contaning real contributions.
@param[out]
ImA magma_z_matrix*
output matrix contaning complex contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
// Host wrapper: clones A into *ReA and *ImA (device-to-device transfer,
// which also allocates them), then overwrites their value arrays with the
// real/imaginary parts via the kernel above, on the queue's stream.
// NOTE(review): the return codes of the two magma_zmtransfer calls are
// ignored; a failed allocation would surface only as a kernel fault.
extern "C"
magma_int_t
magma_zgecsrreimsplit(
magma_z_matrix A,
magma_z_matrix *ReA,
magma_z_matrix *ImA,
magma_queue_t queue )
{
magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
// one thread per row, BLOCK_SIZE threads per block
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
zgecsrreimsplit_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
|
c672041a6fefc80b9e3f91340d6b4e83a66c2e1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "src/cuda/concat/concat.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
namespace concat {
// Concat forward: one thread per output element.  The flat output index is
// decomposed as (a, b, c) over an A x B x C view of dst; table_outer maps an
// output b-coordinate to the source tensor index i, table_inner maps it to
// the b-coordinate within that source, and Bv[i] is source i's B extent.
template <typename T>
__global__ void forward_kernel(
const T** srcs, T* dst, size_t nr_srcs, size_t A, size_t B, size_t C,
const size_t* Bv, const size_t* table_outer, const size_t* table_inner) {
size_t addr = threadIdx.x + blockIdx.x * blockDim.x;
// bounds check: the grid may overshoot the element count
if (addr < A * B * C) {
size_t c = addr % C;
size_t b = addr / C % B;
size_t a = addr / (B * C);
size_t i = table_outer[b];
size_t B_src = Bv[i];
size_t b_src = table_inner[b];
size_t addr_src = (a * B_src + b_src) * C + c;
dst[addr] = srcs[i][addr_src];
}
}
// Host-side launcher for forward_kernel: computes the block count from the
// total element count and runs on the given stream.  All pointer arguments
// (including the lookup tables) are expected to be device-accessible —
// TODO confirm against the caller in the concat opr.
template <typename T>
void forward_proxy(
const T** srcs, T* dst, size_t nr_srcs, size_t A, size_t B, size_t C,
const size_t* Bv, const size_t* table_outer, const size_t* table_inner,
hipStream_t stream) {
size_t total_nr_elem = A * B * C;
size_t NR_BLOCKS = DIVUP(total_nr_elem, NR_THREADS);
hipLaunchKernelGGL(( forward_kernel), dim3(NR_BLOCKS), dim3(NR_THREADS), 0, stream,
srcs, dst, nr_srcs, A, B, C, Bv, table_outer, table_inner);
// megdnn helper that checks for launch errors
after_kernel_launch();
}
#define INST(T) \
template void forward_proxy<T>( \
const T**, T*, size_t, size_t, size_t, size_t, const size_t*, \
const size_t*, const size_t*, hipStream_t);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#undef INST
} // namespace concat
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
| c672041a6fefc80b9e3f91340d6b4e83a66c2e1d.cu | #include "src/cuda/concat/concat.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
namespace concat {
// Concat forward: one thread per output element.  The flat output index is
// decomposed as (a, b, c) over an A x B x C view of dst; table_outer maps an
// output b-coordinate to the source tensor index i, table_inner maps it to
// the b-coordinate within that source, and Bv[i] is source i's B extent.
template <typename T>
__global__ void forward_kernel(
const T** srcs, T* dst, size_t nr_srcs, size_t A, size_t B, size_t C,
const size_t* Bv, const size_t* table_outer, const size_t* table_inner) {
size_t addr = threadIdx.x + blockIdx.x * blockDim.x;
// bounds check: the grid may overshoot the element count
if (addr < A * B * C) {
size_t c = addr % C;
size_t b = addr / C % B;
size_t a = addr / (B * C);
size_t i = table_outer[b];
size_t B_src = Bv[i];
size_t b_src = table_inner[b];
size_t addr_src = (a * B_src + b_src) * C + c;
dst[addr] = srcs[i][addr_src];
}
}
// Host-side launcher for forward_kernel: computes the block count from the
// total element count and runs on the given stream.  All pointer arguments
// (including the lookup tables) are expected to be device-accessible —
// TODO confirm against the caller in the concat opr.
template <typename T>
void forward_proxy(
const T** srcs, T* dst, size_t nr_srcs, size_t A, size_t B, size_t C,
const size_t* Bv, const size_t* table_outer, const size_t* table_inner,
cudaStream_t stream) {
size_t total_nr_elem = A * B * C;
size_t NR_BLOCKS = DIVUP(total_nr_elem, NR_THREADS);
forward_kernel<<<NR_BLOCKS, NR_THREADS, 0, stream>>>(
srcs, dst, nr_srcs, A, B, C, Bv, table_outer, table_inner);
// megdnn helper that checks for launch errors
after_kernel_launch();
}
#define INST(T) \
template void forward_proxy<T>( \
const T**, T*, size_t, size_t, size_t, size_t, const size_t*, \
const size_t*, const size_t*, cudaStream_t);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#undef INST
} // namespace concat
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
15c5c378a239f08ee56116971b777d3301462c3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB SP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include <math.h>
#include <assert.h>
#include <omp.h>
#include "header.h"
// NPB SP compute_rhs, stage 1: one thread per (i,j,k) grid point (gridOffset
// shifts thread coordinates into the global index range, presumably for
// multi-launch tiling — TODO confirm against the launch site).  Computes the
// per-point auxiliary fields (1/rho, velocities, kinetic-energy terms, speed
// of sound) over the whole grid including boundaries, then seeds rhs with
// the precomputed forcing term.
__global__ void compute_rhs_intro(
dim3 gridOffset,
int* grid_points,
double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
double (*us )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*ws )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*qs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*rho_i )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*speed )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*square )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
double (*forcing)/*[KMAX]*/[5][JMAXP+1][IMAXP+1]
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
int m;
double rho_inv, aux;
if (k >= 0 && k <= grid_points[2]-1) {
if (j >= 0 && j <= grid_points[1]-1) {
if (i >= 0 && i <= grid_points[0]-1) {
rho_inv = 1.0/u[k][0][j][i];
rho_i[k][j][i] = rho_inv;
us[k][j][i] = u[k][1][j][i] * rho_inv;
vs[k][j][i] = u[k][2][j][i] * rho_inv;
ws[k][j][i] = u[k][3][j][i] * rho_inv;
square[k][j][i] = 0.5* (
u[k][1][j][i]*u[k][1][j][i] +
u[k][2][j][i]*u[k][2][j][i] +
u[k][3][j][i]*u[k][3][j][i] ) * rho_inv;
qs[k][j][i] = square[k][j][i] * rho_inv;
//-------------------------------------------------------------------
// (don't need speed and ainx until the lhs computation)
//-------------------------------------------------------------------
aux = c1c2*rho_inv* (u[k][4][j][i] - square[k][j][i]);
speed[k][j][i] = sqrt(aux);
}
}
}
//---------------------------------------------------------------------
// copy the exact forcing term to the right hand side; because
// this forcing term is known, we can store it on the whole grid
// including the boundary
//---------------------------------------------------------------------
if (k >= 0 && k <= grid_points[2]-1) {
if (j >= 0 && j <= grid_points[1]-1) {
if (i >= 0 && i <= grid_points[0]-1) {
#pragma unroll
for (m = 0; m < 5; m++) {
rhs[k][m][j][i] = forcing[k][m][j][i];
}
}
}
}
}
// NPB SP compute_rhs, xi-direction stage: adds the xi (i-direction) flux
// differences to rhs on interior points (1..nx2 etc.), then applies
// fourth-order artificial dissipation.  The dissipation stencil is narrowed
// near the i boundaries (special cases i==1,2 and i==nx2-1,nx2).  Statement
// order and the exact stencil coefficients follow the NPB reference code.
__global__ void compute_rhs_xi(
dim3 gridOffset,
int nx2, int ny2, int nz2,
double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
double (*us )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*ws )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*qs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*rho_i )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*square )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
double dx1tx1, double dx2tx1, double dx3tx1, double dx4tx1, double dx5tx1, double tx2,
double xxcon2, double xxcon3, double xxcon4, double xxcon5
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
int m;
double uijk, up1, um1;
if (k >= 1 && k <= nz2) {
if (j >= 1 && j <= ny2) {
if (i >= 1 && i <= nx2) {
uijk = us[k][j][i];
up1 = us[k][j][i+1];
um1 = us[k][j][i-1];
rhs[k][0][j][i] = rhs[k][0][j][i] + dx1tx1 *
(u[k][0][j][i+1] - 2.0*u[k][0][j][i] + u[k][0][j][i-1]) -
tx2 * (u[k][1][j][i+1] - u[k][1][j][i-1]);
rhs[k][1][j][i] = rhs[k][1][j][i] + dx2tx1 *
(u[k][1][j][i+1] - 2.0*u[k][1][j][i] + u[k][1][j][i-1]) +
xxcon2*con43 * (up1 - 2.0*uijk + um1) -
tx2 * (u[k][1][j][i+1]*up1 - u[k][1][j][i-1]*um1 +
(u[k][4][j][i+1] - square[k][j][i+1] -
u[k][4][j][i-1] + square[k][j][i-1]) * c2);
rhs[k][2][j][i] = rhs[k][2][j][i] + dx3tx1 *
(u[k][2][j][i+1] - 2.0*u[k][2][j][i] + u[k][2][j][i-1]) +
xxcon2 * (vs[k][j][i+1] - 2.0*vs[k][j][i] + vs[k][j][i-1]) -
tx2 * (u[k][2][j][i+1]*up1 - u[k][2][j][i-1]*um1);
rhs[k][3][j][i] = rhs[k][3][j][i] + dx4tx1 *
(u[k][3][j][i+1] - 2.0*u[k][3][j][i] + u[k][3][j][i-1]) +
xxcon2 * (ws[k][j][i+1] - 2.0*ws[k][j][i] + ws[k][j][i-1]) -
tx2 * (u[k][3][j][i+1]*up1 - u[k][3][j][i-1]*um1);
rhs[k][4][j][i] = rhs[k][4][j][i] + dx5tx1 *
(u[k][4][j][i+1] - 2.0*u[k][4][j][i] + u[k][4][j][i-1]) +
xxcon3 * (qs[k][j][i+1] - 2.0*qs[k][j][i] + qs[k][j][i-1]) +
xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) +
xxcon5 * (u[k][4][j][i+1]*rho_i[k][j][i+1] -
2.0*u[k][4][j][i]*rho_i[k][j][i] +
u[k][4][j][i-1]*rho_i[k][j][i-1]) -
tx2 * ( (c1*u[k][4][j][i+1] - c2*square[k][j][i+1])*up1 -
(c1*u[k][4][j][i-1] - c2*square[k][j][i-1])*um1 );
}
}
//---------------------------------------------------------------------
// add fourth order xi-direction dissipation
//---------------------------------------------------------------------
// one-sided stencils at the low-i boundary
if (j >= 1 && j <= ny2) {
if (i == 1)
#pragma unroll
for (m = 0; m < 5; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i]- dssp *
(5.0*u[k][m][j][i] - 4.0*u[k][m][j][i+1] + u[k][m][j][i+2]);
}
if (i == 2)
#pragma unroll
for (m = 0; m < 5; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
(-4.0*u[k][m][j][i-1] + 6.0*u[k][m][j][i] -
4.0*u[k][m][j][i+1] + u[k][m][j][i+2]);
}
}
// full five-point stencil in the interior
if (j >= 1 && j <= ny2) {
if (i >= 3 && i <= nx2-2) {
#pragma unroll
for (m = 0; m < 5; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
( u[k][m][j][i-2] - 4.0*u[k][m][j][i-1] +
6.0*u[k][m][j][i] - 4.0*u[k][m][j][i+1] +
u[k][m][j][i+2] );
}
}
}
// one-sided stencils at the high-i boundary
if (j >= 1 && j <= ny2) {
if (i == nx2-1)
#pragma unroll
for (m = 0; m < 5; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
( u[k][m][j][i-2] - 4.0*u[k][m][j][i-1] +
6.0*u[k][m][j][i] - 4.0*u[k][m][j][i+1] );
}
if (i == nx2)
#pragma unroll
for (m = 0; m < 5; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
( u[k][m][j][i-2] - 4.0*u[k][m][j][i-1] + 5.0*u[k][m][j][i] );
}
}
}
}
// NPB SP compute_rhs, eta-direction stage: adds the eta (j-direction) flux
// differences to rhs on interior points, then applies fourth-order
// artificial dissipation with one-sided stencils near the j boundaries
// (j==1,2 and j==ny2-1,ny2).  Mirrors compute_rhs_xi with i/j roles and the
// d*ty1/yycon* coefficients swapped in.
__global__ void compute_rhs_eta(
dim3 gridOffset,
int nx2, int ny2, int nz2,
double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
double (*us )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*ws )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*qs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*rho_i )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*square )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
double dy1ty1, double dy2ty1, double dy3ty1, double dy4ty1, double dy5ty1, double ty2,
double yycon2, double yycon3, double yycon4, double yycon5
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
int m;
double vijk, vp1, vm1;
if (k >= 1 && k <= nz2) {
if (j >= 1 && j <= ny2) {
if (i >= 1 && i <= nx2) {
vijk = vs[k][j][i];
vp1 = vs[k][j+1][i];
vm1 = vs[k][j-1][i];
rhs[k][0][j][i] = rhs[k][0][j][i] + dy1ty1 *
(u[k][0][j+1][i] - 2.0*u[k][0][j][i] + u[k][0][j-1][i]) -
ty2 * (u[k][2][j+1][i] - u[k][2][j-1][i]);
rhs[k][1][j][i] = rhs[k][1][j][i] + dy2ty1 *
(u[k][1][j+1][i] - 2.0*u[k][1][j][i] + u[k][1][j-1][i]) +
yycon2 * (us[k][j+1][i] - 2.0*us[k][j][i] + us[k][j-1][i]) -
ty2 * (u[k][1][j+1][i]*vp1 - u[k][1][j-1][i]*vm1);
rhs[k][2][j][i] = rhs[k][2][j][i] + dy3ty1 *
(u[k][2][j+1][i] - 2.0*u[k][2][j][i] + u[k][2][j-1][i]) +
yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
ty2 * (u[k][2][j+1][i]*vp1 - u[k][2][j-1][i]*vm1 +
(u[k][4][j+1][i] - square[k][j+1][i] -
u[k][4][j-1][i] + square[k][j-1][i]) * c2);
rhs[k][3][j][i] = rhs[k][3][j][i] + dy4ty1 *
(u[k][3][j+1][i] - 2.0*u[k][3][j][i] + u[k][3][j-1][i]) +
yycon2 * (ws[k][j+1][i] - 2.0*ws[k][j][i] + ws[k][j-1][i]) -
ty2 * (u[k][3][j+1][i]*vp1 - u[k][3][j-1][i]*vm1);
rhs[k][4][j][i] = rhs[k][4][j][i] + dy5ty1 *
(u[k][4][j+1][i] - 2.0*u[k][4][j][i] + u[k][4][j-1][i]) +
yycon3 * (qs[k][j+1][i] - 2.0*qs[k][j][i] + qs[k][j-1][i]) +
yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) +
yycon5 * (u[k][4][j+1][i]*rho_i[k][j+1][i] -
2.0*u[k][4][j][i]*rho_i[k][j][i] +
u[k][4][j-1][i]*rho_i[k][j-1][i]) -
ty2 * ((c1*u[k][4][j+1][i] - c2*square[k][j+1][i]) * vp1 -
(c1*u[k][4][j-1][i] - c2*square[k][j-1][i]) * vm1);
}
}
//---------------------------------------------------------------------
// add fourth order eta-direction dissipation
//---------------------------------------------------------------------
// one-sided stencils at the low-j boundary
if (j == 1)
if (i >= 1 && i <= nx2) {
#pragma unroll
for (m = 0; m < 5; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i]- dssp *
( 5.0*u[k][m][j][i] - 4.0*u[k][m][j+1][i] + u[k][m][j+2][i]);
}
}
if (j == 2)
if (i >= 1 && i <= nx2) {
#pragma unroll
for (m = 0; m < 5; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
(-4.0*u[k][m][j-1][i] + 6.0*u[k][m][j][i] -
4.0*u[k][m][j+1][i] + u[k][m][j+2][i]);
}
}
// full five-point stencil in the interior
if (j >= 3 && j <= ny2-2) {
if (i >= 1 && i <= nx2) {
#pragma unroll
for (m = 0; m < 5; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
( u[k][m][j-2][i] - 4.0*u[k][m][j-1][i] +
6.0*u[k][m][j][i] - 4.0*u[k][m][j+1][i] +
u[k][m][j+2][i] );
}
}
}
// one-sided stencils at the high-j boundary
if (j == ny2-1)
if (i >= 1 && i <= nx2) {
#pragma unroll
for (m = 0; m < 5; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
( u[k][m][j-2][i] - 4.0*u[k][m][j-1][i] +
6.0*u[k][m][j][i] - 4.0*u[k][m][j+1][i] );
}
}
if (j == ny2)
if (i >= 1 && i <= nx2) {
#pragma unroll
for (m = 0; m < 5; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
( u[k][m][j-2][i] - 4.0*u[k][m][j-1][i] + 5.0*u[k][m][j][i] );
}
}
}
}
//---------------------------------------------------------------------
// compute_rhs_zeta: add the zeta (k) direction flux differences and the
// fourth-order zeta-direction artificial dissipation to the right hand
// side.  One thread per grid point (i,j,k); gridOffset shifts the thread
// coordinates for multi-device domain decomposition.  Reads u and the
// precomputed helper arrays (us/vs/ws/qs/rho_i/square) and updates rhs
// in place for the five conservation variables.
//---------------------------------------------------------------------
__global__ void compute_rhs_zeta(
  dim3 gridOffset,
  int nx2, int ny2, int nz2,
  double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double (*us )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*ws )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*qs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rho_i )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*square )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double dz1tz1, double dz2tz1, double dz3tz1, double dz4tz1, double dz5tz1, double tz2,
  double zzcon2, double zzcon3, double zzcon4, double zzcon5
) {
  // Global grid coordinates of the point handled by this thread.
  int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
  int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
  int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
  int m;
  // w velocity at k and its k+1 / k-1 neighbors.
  double wijk, wp1, wm1;
  // Central differences in k (diffusive + convective terms) on the
  // interior points 1..n?2 only.
  if (k >= 1 && k <= nz2) {
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        wijk = ws[k][j][i];
        wp1 = ws[k+1][j][i];
        wm1 = ws[k-1][j][i];
        rhs[k][0][j][i] = rhs[k][0][j][i] + dz1tz1 *
          (u[k+1][0][j][i] - 2.0*u[k][0][j][i] + u[k-1][0][j][i]) -
          tz2 * (u[k+1][3][j][i] - u[k-1][3][j][i]);
        rhs[k][1][j][i] = rhs[k][1][j][i] + dz2tz1 *
          (u[k+1][1][j][i] - 2.0*u[k][1][j][i] + u[k-1][1][j][i]) +
          zzcon2 * (us[k+1][j][i] - 2.0*us[k][j][i] + us[k-1][j][i]) -
          tz2 * (u[k+1][1][j][i]*wp1 - u[k-1][1][j][i]*wm1);
        rhs[k][2][j][i] = rhs[k][2][j][i] + dz3tz1 *
          (u[k+1][2][j][i] - 2.0*u[k][2][j][i] + u[k-1][2][j][i]) +
          zzcon2 * (vs[k+1][j][i] - 2.0*vs[k][j][i] + vs[k-1][j][i]) -
          tz2 * (u[k+1][2][j][i]*wp1 - u[k-1][2][j][i]*wm1);
        rhs[k][3][j][i] = rhs[k][3][j][i] + dz4tz1 *
          (u[k+1][3][j][i] - 2.0*u[k][3][j][i] + u[k-1][3][j][i]) +
          zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
          tz2 * (u[k+1][3][j][i]*wp1 - u[k-1][3][j][i]*wm1 +
                (u[k+1][4][j][i] - square[k+1][j][i] -
                 u[k-1][4][j][i] + square[k-1][j][i]) * c2);
        rhs[k][4][j][i] = rhs[k][4][j][i] + dz5tz1 *
          (u[k+1][4][j][i] - 2.0*u[k][4][j][i] + u[k-1][4][j][i]) +
          zzcon3 * (qs[k+1][j][i] - 2.0*qs[k][j][i] + qs[k-1][j][i]) +
          zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) +
          zzcon5 * (u[k+1][4][j][i]*rho_i[k+1][j][i] -
                    2.0*u[k][4][j][i]*rho_i[k][j][i] +
                    u[k-1][4][j][i]*rho_i[k-1][j][i]) -
          tz2 * ((c1*u[k+1][4][j][i] - c2*square[k+1][j][i])*wp1 -
                 (c1*u[k-1][4][j][i] - c2*square[k-1][j][i])*wm1);
      }
    }
  }
  //---------------------------------------------------------------------
  // add fourth order zeta-direction dissipation
  //---------------------------------------------------------------------
  // k == 1: one-sided 3-point stencil next to the lower k boundary.
  if (k == 1)
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i]- dssp *
            (5.0*u[k][m][j][i] - 4.0*u[k+1][m][j][i] + u[k+2][m][j][i]);
        }
      }
    }
  // k == 2: 4-point stencil one layer in from the lower boundary.
  if (k == 2)
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            (-4.0*u[k-1][m][j][i] + 6.0*u[k][m][j][i] -
              4.0*u[k+1][m][j][i] + u[k+2][m][j][i]);
        }
      }
    }
  // Interior k: full symmetric 5-point dissipation stencil.
  if (k >= 3 && k <= nz2-2) {
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            ( u[k-2][m][j][i] - 4.0*u[k-1][m][j][i] +
              6.0*u[k][m][j][i] - 4.0*u[k+1][m][j][i] +
              u[k+2][m][j][i] );
        }
      }
    }
  }
  // k == nz2-1: 4-point stencil one layer in from the upper boundary.
  if (k == nz2-1)
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            ( u[k-2][m][j][i] - 4.0*u[k-1][m][j][i] +
              6.0*u[k][m][j][i] - 4.0*u[k+1][m][j][i] );
        }
      }
    }
  // k == nz2: one-sided 3-point stencil next to the upper k boundary.
  if (k == nz2)
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            ( u[k-2][m][j][i] - 4.0*u[k-1][m][j][i] + 5.0*u[k][m][j][i] );
        }
      }
    }
}
//---------------------------------------------------------------------
// compute_rhs_tail: final RHS stage -- scale every interior right hand
// side value by the time step dt.  One thread per grid point; gridOffset
// shifts the thread coordinates for multi-device decomposition.  Only
// rhs is written; the other array parameters are kept so all
// compute_rhs_* kernels share a uniform launch signature.
//---------------------------------------------------------------------
__global__ void compute_rhs_tail(
  dim3 gridOffset,
  int nx2, int ny2, int nz2,
  double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double (*us )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*ws )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*qs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rho_i )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*square )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double dt
) {
  const int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
  const int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
  const int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
  // Skip boundary and out-of-range threads; interior is 1..n?2 inclusive.
  if (i < 1 || i > nx2 || j < 1 || j > ny2 || k < 1 || k > nz2)
    return;
  #pragma unroll
  for (int m = 0; m < 5; m++) {
    rhs[k][m][j][i] *= dt;
  }
}
//---------------------------------------------------------------------
// compute_rhs: host driver that rebuilds the right hand side on the
// current device by launching the five RHS kernels in sequence on the
// default stream (launches on one stream execute in order, so no
// explicit synchronization is needed between them).
// NOTE(review): no error check (e.g. hipGetLastError) after the kernel
// launches -- a failed launch would only surface at a later sync;
// consider adding checks.
//---------------------------------------------------------------------
void compute_rhs()
{
  if (timeron) timer_start(t_rhs);
  // Precompute rho_i/us/vs/ws/qs/square/speed and copy forcing into rhs.
  hipLaunchKernelGGL(( compute_rhs_intro) , dim3(gridDim_), dim3(blockDim_) , 0, 0,
    gridOffset, dev_grid_points[device], dev_u[device], dev_us[device], dev_vs[device], dev_ws[device], dev_qs[device], dev_rho_i[device], dev_speed[device], dev_square[device], dev_rhs[device], dev_forcing[device]
  );
  //---------------------------------------------------------------------
  // compute xi-direction fluxes
  //---------------------------------------------------------------------
  if (timeron) timer_start(t_rhsx);
  hipLaunchKernelGGL(( compute_rhs_xi) , dim3(gridDim_), dim3(blockDim_) , 0, 0,
    gridOffset, nx2, ny2, nz2, dev_u[device], dev_us[device], dev_vs[device], dev_ws[device], dev_qs[device], dev_rho_i[device], dev_square[device], dev_rhs[device], dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1, tx2, xxcon2, xxcon3, xxcon4, xxcon5
  );
  if (timeron) {
    timer_stop(t_rhsx);
  }
  //---------------------------------------------------------------------
  // compute eta-direction fluxes
  //---------------------------------------------------------------------
  if (timeron) timer_start(t_rhsy);
  hipLaunchKernelGGL(( compute_rhs_eta) , dim3(gridDim_), dim3(blockDim_) , 0, 0,
    gridOffset, nx2, ny2, nz2, dev_u[device], dev_us[device], dev_vs[device], dev_ws[device], dev_qs[device], dev_rho_i[device], dev_square[device], dev_rhs[device], dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1, ty2, yycon2, yycon3, yycon4, yycon5
  );
  if (timeron) {
    timer_stop(t_rhsy);
  }
  //---------------------------------------------------------------------
  // compute zeta-direction fluxes
  //---------------------------------------------------------------------
  // Multi-device run: the zeta sweep reads k-neighbors, so synchronize
  // with the other devices first (presumably a halo exchange in k --
  // confirm against cuda_sync_rhs's definition).
  if (omp_get_num_threads() > 1)
    cuda_sync_rhs();
  if (timeron) timer_start(t_rhsz);
  hipLaunchKernelGGL(( compute_rhs_zeta) , dim3(gridDim_), dim3(blockDim_) , 0, 0,
    gridOffset, nx2, ny2, nz2, dev_u[device], dev_us[device], dev_vs[device], dev_ws[device], dev_qs[device], dev_rho_i[device], dev_square[device], dev_rhs[device], dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1, tz2, zzcon2, zzcon3, zzcon4, zzcon5
  );
  if (timeron) {
    timer_stop(t_rhsz);
  }
  // Final stage: multiply the assembled rhs by the time step dt.
  hipLaunchKernelGGL(( compute_rhs_tail) , dim3(gridDim_), dim3(blockDim_) , 0, 0,
    gridOffset, nx2, ny2, nz2, dev_u[device], dev_us[device], dev_vs[device], dev_ws[device], dev_qs[device], dev_rho_i[device], dev_square[device], dev_rhs[device], dt
  );
  if (timeron) timer_stop(t_rhs);
}
| 15c5c378a239f08ee56116971b777d3301462c3c.cu | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB SP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include <math.h>
#include <assert.h>
#include <omp.h>
#include "header.h"
//---------------------------------------------------------------------
// compute_rhs_intro: per-point precomputation for the RHS assembly.
// From the conservative variables u = (rho, rho*u, rho*v, rho*w, E)
// derive 1/rho, the primitive velocities, the kinetic-energy helper
// arrays square/qs and the local sound-speed array, then initialize
// rhs with the known forcing term over the whole grid (boundaries
// included).  One thread per grid point; gridOffset shifts the thread
// coordinates for multi-device decomposition.
//---------------------------------------------------------------------
__global__ void compute_rhs_intro(
  dim3 gridOffset,
  int* grid_points,
  double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double (*us )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*ws )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*qs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rho_i )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*speed )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*square )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double (*forcing)/*[KMAX]*/[5][JMAXP+1][IMAXP+1]
) {
  // Global grid coordinates of the point handled by this thread.
  int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
  int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
  int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
  int m;
  double rho_inv, aux;
  // Derived quantities on the full grid (0..grid_points[d]-1).
  if (k >= 0 && k <= grid_points[2]-1) {
    if (j >= 0 && j <= grid_points[1]-1) {
      if (i >= 0 && i <= grid_points[0]-1) {
        rho_inv = 1.0/u[k][0][j][i];
        rho_i[k][j][i] = rho_inv;
        // Primitive velocities: momentum components divided by density.
        us[k][j][i] = u[k][1][j][i] * rho_inv;
        vs[k][j][i] = u[k][2][j][i] * rho_inv;
        ws[k][j][i] = u[k][3][j][i] * rho_inv;
        // square = 0.5 * |rho*V|^2 / rho, qs = square / rho.
        square[k][j][i] = 0.5* (
            u[k][1][j][i]*u[k][1][j][i] +
            u[k][2][j][i]*u[k][2][j][i] +
            u[k][3][j][i]*u[k][3][j][i] ) * rho_inv;
        qs[k][j][i] = square[k][j][i] * rho_inv;
        //-------------------------------------------------------------------
        // (don't need speed and ainx until the lhs computation)
        //-------------------------------------------------------------------
        aux = c1c2*rho_inv* (u[k][4][j][i] - square[k][j][i]);
        speed[k][j][i] = sqrt(aux);
      }
    }
  }
  //---------------------------------------------------------------------
  // copy the exact forcing term to the right hand side; because
  // this forcing term is known, we can store it on the whole grid
  // including the boundary
  //---------------------------------------------------------------------
  if (k >= 0 && k <= grid_points[2]-1) {
    if (j >= 0 && j <= grid_points[1]-1) {
      if (i >= 0 && i <= grid_points[0]-1) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = forcing[k][m][j][i];
        }
      }
    }
  }
}
//---------------------------------------------------------------------
// compute_rhs_xi: add the xi (i) direction flux differences and the
// fourth-order xi-direction artificial dissipation to the right hand
// side.  One thread per grid point (i,j,k); gridOffset shifts the
// thread coordinates for multi-device decomposition.  Reads u and the
// precomputed helper arrays and updates rhs in place for the five
// conservation variables.
//---------------------------------------------------------------------
__global__ void compute_rhs_xi(
  dim3 gridOffset,
  int nx2, int ny2, int nz2,
  double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double (*us )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*ws )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*qs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rho_i )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*square )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double dx1tx1, double dx2tx1, double dx3tx1, double dx4tx1, double dx5tx1, double tx2,
  double xxcon2, double xxcon3, double xxcon4, double xxcon5
) {
  // Global grid coordinates of the point handled by this thread.
  int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
  int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
  int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
  int m;
  // u velocity at i and its i+1 / i-1 neighbors.
  double uijk, up1, um1;
  if (k >= 1 && k <= nz2) {
    // Central differences in i (diffusive + convective terms) on the
    // interior points 1..n?2 only.
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        uijk = us[k][j][i];
        up1 = us[k][j][i+1];
        um1 = us[k][j][i-1];
        rhs[k][0][j][i] = rhs[k][0][j][i] + dx1tx1 *
          (u[k][0][j][i+1] - 2.0*u[k][0][j][i] + u[k][0][j][i-1]) -
          tx2 * (u[k][1][j][i+1] - u[k][1][j][i-1]);
        rhs[k][1][j][i] = rhs[k][1][j][i] + dx2tx1 *
          (u[k][1][j][i+1] - 2.0*u[k][1][j][i] + u[k][1][j][i-1]) +
          xxcon2*con43 * (up1 - 2.0*uijk + um1) -
          tx2 * (u[k][1][j][i+1]*up1 - u[k][1][j][i-1]*um1 +
                (u[k][4][j][i+1] - square[k][j][i+1] -
                 u[k][4][j][i-1] + square[k][j][i-1]) * c2);
        rhs[k][2][j][i] = rhs[k][2][j][i] + dx3tx1 *
          (u[k][2][j][i+1] - 2.0*u[k][2][j][i] + u[k][2][j][i-1]) +
          xxcon2 * (vs[k][j][i+1] - 2.0*vs[k][j][i] + vs[k][j][i-1]) -
          tx2 * (u[k][2][j][i+1]*up1 - u[k][2][j][i-1]*um1);
        rhs[k][3][j][i] = rhs[k][3][j][i] + dx4tx1 *
          (u[k][3][j][i+1] - 2.0*u[k][3][j][i] + u[k][3][j][i-1]) +
          xxcon2 * (ws[k][j][i+1] - 2.0*ws[k][j][i] + ws[k][j][i-1]) -
          tx2 * (u[k][3][j][i+1]*up1 - u[k][3][j][i-1]*um1);
        rhs[k][4][j][i] = rhs[k][4][j][i] + dx5tx1 *
          (u[k][4][j][i+1] - 2.0*u[k][4][j][i] + u[k][4][j][i-1]) +
          xxcon3 * (qs[k][j][i+1] - 2.0*qs[k][j][i] + qs[k][j][i-1]) +
          xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) +
          xxcon5 * (u[k][4][j][i+1]*rho_i[k][j][i+1] -
                    2.0*u[k][4][j][i]*rho_i[k][j][i] +
                    u[k][4][j][i-1]*rho_i[k][j][i-1]) -
          tx2 * ( (c1*u[k][4][j][i+1] - c2*square[k][j][i+1])*up1 -
                  (c1*u[k][4][j][i-1] - c2*square[k][j][i-1])*um1 );
      }
    }
    //---------------------------------------------------------------------
    // add fourth order xi-direction dissipation
    //---------------------------------------------------------------------
    // i == 1 / i == 2: one-sided stencils next to the lower i boundary.
    if (j >= 1 && j <= ny2) {
      if (i == 1)
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i]- dssp *
            (5.0*u[k][m][j][i] - 4.0*u[k][m][j][i+1] + u[k][m][j][i+2]);
        }
      if (i == 2)
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            (-4.0*u[k][m][j][i-1] + 6.0*u[k][m][j][i] -
              4.0*u[k][m][j][i+1] + u[k][m][j][i+2]);
        }
    }
    // Interior i: full symmetric 5-point dissipation stencil.
    if (j >= 1 && j <= ny2) {
      if (i >= 3 && i <= nx2-2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            ( u[k][m][j][i-2] - 4.0*u[k][m][j][i-1] +
              6.0*u[k][m][j][i] - 4.0*u[k][m][j][i+1] +
              u[k][m][j][i+2] );
        }
      }
    }
    // i == nx2-1 / i == nx2: one-sided stencils next to the upper i boundary.
    if (j >= 1 && j <= ny2) {
      if (i == nx2-1)
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            ( u[k][m][j][i-2] - 4.0*u[k][m][j][i-1] +
              6.0*u[k][m][j][i] - 4.0*u[k][m][j][i+1] );
        }
      if (i == nx2)
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            ( u[k][m][j][i-2] - 4.0*u[k][m][j][i-1] + 5.0*u[k][m][j][i] );
        }
    }
  }
}
//---------------------------------------------------------------------
// compute_rhs_eta: add the eta (j) direction flux differences and the
// fourth-order eta-direction artificial dissipation to the right hand
// side.  One thread per grid point (i,j,k); gridOffset shifts the
// thread coordinates for multi-device decomposition.  Reads u and the
// precomputed helper arrays and updates rhs in place for the five
// conservation variables.
//---------------------------------------------------------------------
__global__ void compute_rhs_eta(
  dim3 gridOffset,
  int nx2, int ny2, int nz2,
  double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double (*us )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*ws )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*qs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rho_i )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*square )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double dy1ty1, double dy2ty1, double dy3ty1, double dy4ty1, double dy5ty1, double ty2,
  double yycon2, double yycon3, double yycon4, double yycon5
) {
  // Global grid coordinates of the point handled by this thread.
  int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
  int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
  int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
  int m;
  // v velocity at j and its j+1 / j-1 neighbors.
  double vijk, vp1, vm1;
  if (k >= 1 && k <= nz2) {
    // Central differences in j (diffusive + convective terms) on the
    // interior points 1..n?2 only.
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        vijk = vs[k][j][i];
        vp1 = vs[k][j+1][i];
        vm1 = vs[k][j-1][i];
        rhs[k][0][j][i] = rhs[k][0][j][i] + dy1ty1 *
          (u[k][0][j+1][i] - 2.0*u[k][0][j][i] + u[k][0][j-1][i]) -
          ty2 * (u[k][2][j+1][i] - u[k][2][j-1][i]);
        rhs[k][1][j][i] = rhs[k][1][j][i] + dy2ty1 *
          (u[k][1][j+1][i] - 2.0*u[k][1][j][i] + u[k][1][j-1][i]) +
          yycon2 * (us[k][j+1][i] - 2.0*us[k][j][i] + us[k][j-1][i]) -
          ty2 * (u[k][1][j+1][i]*vp1 - u[k][1][j-1][i]*vm1);
        rhs[k][2][j][i] = rhs[k][2][j][i] + dy3ty1 *
          (u[k][2][j+1][i] - 2.0*u[k][2][j][i] + u[k][2][j-1][i]) +
          yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
          ty2 * (u[k][2][j+1][i]*vp1 - u[k][2][j-1][i]*vm1 +
                (u[k][4][j+1][i] - square[k][j+1][i] -
                 u[k][4][j-1][i] + square[k][j-1][i]) * c2);
        rhs[k][3][j][i] = rhs[k][3][j][i] + dy4ty1 *
          (u[k][3][j+1][i] - 2.0*u[k][3][j][i] + u[k][3][j-1][i]) +
          yycon2 * (ws[k][j+1][i] - 2.0*ws[k][j][i] + ws[k][j-1][i]) -
          ty2 * (u[k][3][j+1][i]*vp1 - u[k][3][j-1][i]*vm1);
        rhs[k][4][j][i] = rhs[k][4][j][i] + dy5ty1 *
          (u[k][4][j+1][i] - 2.0*u[k][4][j][i] + u[k][4][j-1][i]) +
          yycon3 * (qs[k][j+1][i] - 2.0*qs[k][j][i] + qs[k][j-1][i]) +
          yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) +
          yycon5 * (u[k][4][j+1][i]*rho_i[k][j+1][i] -
                    2.0*u[k][4][j][i]*rho_i[k][j][i] +
                    u[k][4][j-1][i]*rho_i[k][j-1][i]) -
          ty2 * ((c1*u[k][4][j+1][i] - c2*square[k][j+1][i]) * vp1 -
                 (c1*u[k][4][j-1][i] - c2*square[k][j-1][i]) * vm1);
      }
    }
    //---------------------------------------------------------------------
    // add fourth order eta-direction dissipation
    //---------------------------------------------------------------------
    // j == 1: one-sided 3-point stencil next to the lower j boundary.
    if (j == 1)
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i]- dssp *
            ( 5.0*u[k][m][j][i] - 4.0*u[k][m][j+1][i] + u[k][m][j+2][i]);
        }
      }
    // j == 2: 4-point stencil one layer in from the lower boundary.
    if (j == 2)
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            (-4.0*u[k][m][j-1][i] + 6.0*u[k][m][j][i] -
              4.0*u[k][m][j+1][i] + u[k][m][j+2][i]);
        }
      }
    // Interior j: full symmetric 5-point dissipation stencil.
    if (j >= 3 && j <= ny2-2) {
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            ( u[k][m][j-2][i] - 4.0*u[k][m][j-1][i] +
              6.0*u[k][m][j][i] - 4.0*u[k][m][j+1][i] +
              u[k][m][j+2][i] );
        }
      }
    }
    // j == ny2-1: 4-point stencil one layer in from the upper boundary.
    if (j == ny2-1)
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            ( u[k][m][j-2][i] - 4.0*u[k][m][j-1][i] +
              6.0*u[k][m][j][i] - 4.0*u[k][m][j+1][i] );
        }
      }
    // j == ny2: one-sided 3-point stencil next to the upper j boundary.
    if (j == ny2)
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            ( u[k][m][j-2][i] - 4.0*u[k][m][j-1][i] + 5.0*u[k][m][j][i] );
        }
      }
  }
}
//---------------------------------------------------------------------
// compute_rhs_zeta: add the zeta (k) direction flux differences and the
// fourth-order zeta-direction artificial dissipation to the right hand
// side.  One thread per grid point (i,j,k); gridOffset shifts the thread
// coordinates for multi-device domain decomposition.  Reads u and the
// precomputed helper arrays (us/vs/ws/qs/rho_i/square) and updates rhs
// in place for the five conservation variables.
//---------------------------------------------------------------------
__global__ void compute_rhs_zeta(
  dim3 gridOffset,
  int nx2, int ny2, int nz2,
  double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double (*us )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*ws )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*qs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rho_i )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*square )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double dz1tz1, double dz2tz1, double dz3tz1, double dz4tz1, double dz5tz1, double tz2,
  double zzcon2, double zzcon3, double zzcon4, double zzcon5
) {
  // Global grid coordinates of the point handled by this thread.
  int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
  int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
  int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
  int m;
  // w velocity at k and its k+1 / k-1 neighbors.
  double wijk, wp1, wm1;
  // Central differences in k (diffusive + convective terms) on the
  // interior points 1..n?2 only.
  if (k >= 1 && k <= nz2) {
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        wijk = ws[k][j][i];
        wp1 = ws[k+1][j][i];
        wm1 = ws[k-1][j][i];
        rhs[k][0][j][i] = rhs[k][0][j][i] + dz1tz1 *
          (u[k+1][0][j][i] - 2.0*u[k][0][j][i] + u[k-1][0][j][i]) -
          tz2 * (u[k+1][3][j][i] - u[k-1][3][j][i]);
        rhs[k][1][j][i] = rhs[k][1][j][i] + dz2tz1 *
          (u[k+1][1][j][i] - 2.0*u[k][1][j][i] + u[k-1][1][j][i]) +
          zzcon2 * (us[k+1][j][i] - 2.0*us[k][j][i] + us[k-1][j][i]) -
          tz2 * (u[k+1][1][j][i]*wp1 - u[k-1][1][j][i]*wm1);
        rhs[k][2][j][i] = rhs[k][2][j][i] + dz3tz1 *
          (u[k+1][2][j][i] - 2.0*u[k][2][j][i] + u[k-1][2][j][i]) +
          zzcon2 * (vs[k+1][j][i] - 2.0*vs[k][j][i] + vs[k-1][j][i]) -
          tz2 * (u[k+1][2][j][i]*wp1 - u[k-1][2][j][i]*wm1);
        rhs[k][3][j][i] = rhs[k][3][j][i] + dz4tz1 *
          (u[k+1][3][j][i] - 2.0*u[k][3][j][i] + u[k-1][3][j][i]) +
          zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
          tz2 * (u[k+1][3][j][i]*wp1 - u[k-1][3][j][i]*wm1 +
                (u[k+1][4][j][i] - square[k+1][j][i] -
                 u[k-1][4][j][i] + square[k-1][j][i]) * c2);
        rhs[k][4][j][i] = rhs[k][4][j][i] + dz5tz1 *
          (u[k+1][4][j][i] - 2.0*u[k][4][j][i] + u[k-1][4][j][i]) +
          zzcon3 * (qs[k+1][j][i] - 2.0*qs[k][j][i] + qs[k-1][j][i]) +
          zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) +
          zzcon5 * (u[k+1][4][j][i]*rho_i[k+1][j][i] -
                    2.0*u[k][4][j][i]*rho_i[k][j][i] +
                    u[k-1][4][j][i]*rho_i[k-1][j][i]) -
          tz2 * ((c1*u[k+1][4][j][i] - c2*square[k+1][j][i])*wp1 -
                 (c1*u[k-1][4][j][i] - c2*square[k-1][j][i])*wm1);
      }
    }
  }
  //---------------------------------------------------------------------
  // add fourth order zeta-direction dissipation
  //---------------------------------------------------------------------
  // k == 1: one-sided 3-point stencil next to the lower k boundary.
  if (k == 1)
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i]- dssp *
            (5.0*u[k][m][j][i] - 4.0*u[k+1][m][j][i] + u[k+2][m][j][i]);
        }
      }
    }
  // k == 2: 4-point stencil one layer in from the lower boundary.
  if (k == 2)
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            (-4.0*u[k-1][m][j][i] + 6.0*u[k][m][j][i] -
              4.0*u[k+1][m][j][i] + u[k+2][m][j][i]);
        }
      }
    }
  // Interior k: full symmetric 5-point dissipation stencil.
  if (k >= 3 && k <= nz2-2) {
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            ( u[k-2][m][j][i] - 4.0*u[k-1][m][j][i] +
              6.0*u[k][m][j][i] - 4.0*u[k+1][m][j][i] +
              u[k+2][m][j][i] );
        }
      }
    }
  }
  // k == nz2-1: 4-point stencil one layer in from the upper boundary.
  if (k == nz2-1)
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            ( u[k-2][m][j][i] - 4.0*u[k-1][m][j][i] +
              6.0*u[k][m][j][i] - 4.0*u[k+1][m][j][i] );
        }
      }
    }
  // k == nz2: one-sided 3-point stencil next to the upper k boundary.
  if (k == nz2)
    if (j >= 1 && j <= ny2) {
      if (i >= 1 && i <= nx2) {
        #pragma unroll
        for (m = 0; m < 5; m++) {
          rhs[k][m][j][i] = rhs[k][m][j][i] - dssp *
            ( u[k-2][m][j][i] - 4.0*u[k-1][m][j][i] + 5.0*u[k][m][j][i] );
        }
      }
    }
}
//---------------------------------------------------------------------
// compute_rhs_tail: final RHS stage -- multiply every interior right
// hand side value by the time step dt.  One thread per grid point;
// gridOffset shifts the thread coordinates for multi-device
// decomposition.  Only rhs is written; the remaining array parameters
// exist so that all compute_rhs_* kernels share one launch signature.
//---------------------------------------------------------------------
__global__ void compute_rhs_tail(
  dim3 gridOffset,
  int nx2, int ny2, int nz2,
  double (*u )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double (*us )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*ws )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*qs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rho_i )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*square )/*[KMAX]*/[JMAXP+1][IMAXP+1],
  double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
  double dt
) {
  const int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
  const int j = blockDim.y * blockIdx.y + threadIdx.y + gridOffset.y;
  const int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
  // Only interior points (1..n?2 in each direction) are scaled.
  const bool interior =
      (k >= 1 && k <= nz2) && (j >= 1 && j <= ny2) && (i >= 1 && i <= nx2);
  if (interior) {
    #pragma unroll
    for (int m = 0; m < 5; m++)
      rhs[k][m][j][i] = dt * rhs[k][m][j][i];
  }
}
//---------------------------------------------------------------------
// compute_rhs: host driver that rebuilds the right hand side on the
// current device by launching the five RHS kernels in sequence on the
// default stream (launches on one stream execute in order, so no
// explicit synchronization is needed between them).
// NOTE(review): no cudaGetLastError() after the kernel launches -- a
// failed launch would only surface at a later sync; consider adding
// checks.
//---------------------------------------------------------------------
void compute_rhs()
{
  if (timeron) timer_start(t_rhs);
  // Precompute rho_i/us/vs/ws/qs/square/speed and copy forcing into rhs.
  compute_rhs_intro <<< gridDim_, blockDim_ >>> (
    gridOffset, dev_grid_points[device], dev_u[device], dev_us[device], dev_vs[device], dev_ws[device], dev_qs[device], dev_rho_i[device], dev_speed[device], dev_square[device], dev_rhs[device], dev_forcing[device]
  );
  //---------------------------------------------------------------------
  // compute xi-direction fluxes
  //---------------------------------------------------------------------
  if (timeron) timer_start(t_rhsx);
  compute_rhs_xi <<< gridDim_, blockDim_ >>> (
    gridOffset, nx2, ny2, nz2, dev_u[device], dev_us[device], dev_vs[device], dev_ws[device], dev_qs[device], dev_rho_i[device], dev_square[device], dev_rhs[device], dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1, tx2, xxcon2, xxcon3, xxcon4, xxcon5
  );
  if (timeron) {
    timer_stop(t_rhsx);
  }
  //---------------------------------------------------------------------
  // compute eta-direction fluxes
  //---------------------------------------------------------------------
  if (timeron) timer_start(t_rhsy);
  compute_rhs_eta <<< gridDim_, blockDim_ >>> (
    gridOffset, nx2, ny2, nz2, dev_u[device], dev_us[device], dev_vs[device], dev_ws[device], dev_qs[device], dev_rho_i[device], dev_square[device], dev_rhs[device], dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1, ty2, yycon2, yycon3, yycon4, yycon5
  );
  if (timeron) {
    timer_stop(t_rhsy);
  }
  //---------------------------------------------------------------------
  // compute zeta-direction fluxes
  //---------------------------------------------------------------------
  // Multi-device run: the zeta sweep reads k-neighbors, so synchronize
  // with the other devices first (presumably a halo exchange in k --
  // confirm against cuda_sync_rhs's definition).
  if (omp_get_num_threads() > 1)
    cuda_sync_rhs();
  if (timeron) timer_start(t_rhsz);
  compute_rhs_zeta <<< gridDim_, blockDim_ >>> (
    gridOffset, nx2, ny2, nz2, dev_u[device], dev_us[device], dev_vs[device], dev_ws[device], dev_qs[device], dev_rho_i[device], dev_square[device], dev_rhs[device], dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1, tz2, zzcon2, zzcon3, zzcon4, zzcon5
  );
  if (timeron) {
    timer_stop(t_rhsz);
  }
  // Final stage: multiply the assembled rhs by the time step dt.
  compute_rhs_tail <<< gridDim_, blockDim_ >>> (
    gridOffset, nx2, ny2, nz2, dev_u[device], dev_us[device], dev_vs[device], dev_ws[device], dev_qs[device], dev_rho_i[device], dev_square[device], dev_rhs[device], dt
  );
  if (timeron) timer_stop(t_rhs);
}
|
40546daa75bce4899e3267c05f99ca48546fc16c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Elementwise logistic sigmoid: target[i] = 1 / (1 + e^(-mat[i])).
// Grid-stride loop, so any launch configuration covers all `len`
// elements.  Uses the fast-math intrinsic __expf, keeping the original
// precision trade-off.
__global__ void kApplySigmoid(float* mat, float* target, unsigned int len) {
    const unsigned int stride = blockDim.x * gridDim.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < len) {
        const float e = __expf(-mat[i]);
        target[i] = 1 / (1 + e);
        i += stride;
    }
}
// Applies the logistic sigmoid to each of the `len` entries of `mat`,
// writing the results to `target`.  Grid-stride traversal decouples
// correctness from the launch configuration; __expf is the fast-math
// exponential, as in the original.
__global__ void kApplySigmoid(float* mat, float* target, unsigned int len) {
    const unsigned int firstIdx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int pos = firstIdx; pos < len; pos += step) {
        target[pos] = 1 / (1 + __expf(-mat[pos]));
    }
}
fdc3ac3459e77048cbd68fb6025647d2e9fb41f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http ://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <libsgm.h>
#include "internal.h"
#include "utility.hpp"
namespace {
	// Left-right consistency check on the left disparity map.
	//
	// SRC_T: sample type of the source image (used only as a validity mask);
	// DST_T: sample type of the disparity maps.  One thread per pixel:
	// j = column (x), i = row (y).  A pixel is invalidated when the source
	// mask is 0, the disparity is already invalid, or the corresponding
	// right-image disparity disagrees by more than 1.
	template<typename SRC_T, typename DST_T>
	__global__ void check_consistency_kernel(DST_T* d_leftDisp, const DST_T* d_rightDisp, const SRC_T* d_left, int width, int height, int src_pitch, int dst_pitch, bool subpixel) {
		const int j = blockIdx.x * blockDim.x + threadIdx.x;
		const int i = blockIdx.y * blockDim.y + threadIdx.y;
		// Bounds guard: the current launcher uses a truncated width/16 x
		// height/16 grid (so this never fires today), but without the guard
		// any ceil-div launch would read/write out of bounds.
		if (j >= width || i >= height) return;
		// left-right consistency check, only on leftDisp, but could be done for rightDisp too
		SRC_T mask = d_left[i * src_pitch + j];
		DST_T org = d_leftDisp[i * dst_pitch + j];
		int d = org;
		if (subpixel) {
			// Disparities are stored in fixed point; recover the integer part.
			d >>= sgm::StereoSGM::SUBPIXEL_SHIFT;
		}
		// Pixel in the right image that this disparity points at.
		int k = j - d;
		if (mask == 0 || org == sgm::INVALID_DISP || (k >= 0 && k < width && abs(d_rightDisp[i * dst_pitch + k] - d) > 1)) {
			// masked or left-right inconsistent pixel -> invalid
			d_leftDisp[i * dst_pitch + j] = static_cast<DST_T>(sgm::INVALID_DISP);
		}
	}
}
namespace sgm {
	namespace details {
		// Launch the left-right consistency check for 8-bit disparity maps.
		// depth_bits selects the source image sample type (16 or 8 bit); any
		// other value launches nothing.  CudaKernelCheck() validates the
		// launch afterwards.
		// NOTE(review): the grid is width/16 x height/16 with integer
		// truncation, so when width or height is not a multiple of 16 the
		// rightmost/bottom edge pixels are never processed -- confirm this
		// is intended.
		void check_consistency(uint8_t* d_left_disp, const uint8_t* d_right_disp, const void* d_src_left, int width, int height, int depth_bits, int src_pitch, int dst_pitch, bool subpixel) {
			const dim3 blocks(width / 16, height / 16);
			const dim3 threads(16, 16);
			if (depth_bits == 16) {
				check_consistency_kernel<uint16_t> << < blocks, threads >> > (d_left_disp, d_right_disp, (uint16_t*)d_src_left, width, height, src_pitch, dst_pitch, subpixel);
			}
			else if (depth_bits == 8) {
				check_consistency_kernel<uint8_t> << < blocks, threads >> > (d_left_disp, d_right_disp, (uint8_t*)d_src_left, width, height, src_pitch, dst_pitch, subpixel);
			}
			CudaKernelCheck();
		}
		// Same as above for 16-bit disparity maps (the kernel's DST_T is
		// deduced from the disparity pointer type).
		void check_consistency(uint16_t* d_left_disp, const uint16_t* d_right_disp, const void* d_src_left, int width, int height, int depth_bits, int src_pitch, int dst_pitch, bool subpixel) {
			const dim3 blocks(width / 16, height / 16);
			const dim3 threads(16, 16);
			if (depth_bits == 16) {
				check_consistency_kernel<uint16_t> << < blocks, threads >> > (d_left_disp, d_right_disp, (uint16_t*)d_src_left, width, height, src_pitch, dst_pitch, subpixel);
			}
			else if (depth_bits == 8) {
				check_consistency_kernel<uint8_t> << < blocks, threads >> > (d_left_disp, d_right_disp, (uint8_t*)d_src_left, width, height, src_pitch, dst_pitch, subpixel);
			}
			CudaKernelCheck();
		}
	}
}
| fdc3ac3459e77048cbd68fb6025647d2e9fb41f4.cu | /*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http ://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <libsgm.h>
#include "internal.h"
#include "utility.hpp"
namespace {
	// Left-right consistency check on the left disparity map.
	//
	// SRC_T: sample type of the source image (used only as a validity mask);
	// DST_T: sample type of the disparity maps.  One thread per pixel:
	// j = column (x), i = row (y).  A pixel is invalidated when the source
	// mask is 0, the disparity is already invalid, or the corresponding
	// right-image disparity disagrees by more than 1.
	template<typename SRC_T, typename DST_T>
	__global__ void check_consistency_kernel(DST_T* d_leftDisp, const DST_T* d_rightDisp, const SRC_T* d_left, int width, int height, int src_pitch, int dst_pitch, bool subpixel) {
		const int j = blockIdx.x * blockDim.x + threadIdx.x;
		const int i = blockIdx.y * blockDim.y + threadIdx.y;
		// Bounds guard: the current launcher uses a truncated width/16 x
		// height/16 grid (so this never fires today), but without the guard
		// any ceil-div launch would read/write out of bounds.
		if (j >= width || i >= height) return;
		// left-right consistency check, only on leftDisp, but could be done for rightDisp too
		SRC_T mask = d_left[i * src_pitch + j];
		DST_T org = d_leftDisp[i * dst_pitch + j];
		int d = org;
		if (subpixel) {
			// Disparities are stored in fixed point; recover the integer part.
			d >>= sgm::StereoSGM::SUBPIXEL_SHIFT;
		}
		// Pixel in the right image that this disparity points at.
		int k = j - d;
		if (mask == 0 || org == sgm::INVALID_DISP || (k >= 0 && k < width && abs(d_rightDisp[i * dst_pitch + k] - d) > 1)) {
			// masked or left-right inconsistent pixel -> invalid
			d_leftDisp[i * dst_pitch + j] = static_cast<DST_T>(sgm::INVALID_DISP);
		}
	}
}
namespace sgm {
	namespace details {
		// Launch the left-right consistency check for 8-bit disparity maps.
		// depth_bits selects the source image sample type (16 or 8 bit); any
		// other value launches nothing.  CudaKernelCheck() validates the
		// launch afterwards.
		// NOTE(review): the grid is width/16 x height/16 with integer
		// truncation, so when width or height is not a multiple of 16 the
		// rightmost/bottom edge pixels are never processed -- confirm this
		// is intended.
		void check_consistency(uint8_t* d_left_disp, const uint8_t* d_right_disp, const void* d_src_left, int width, int height, int depth_bits, int src_pitch, int dst_pitch, bool subpixel) {
			const dim3 blocks(width / 16, height / 16);
			const dim3 threads(16, 16);
			if (depth_bits == 16) {
				check_consistency_kernel<uint16_t> << < blocks, threads >> > (d_left_disp, d_right_disp, (uint16_t*)d_src_left, width, height, src_pitch, dst_pitch, subpixel);
			}
			else if (depth_bits == 8) {
				check_consistency_kernel<uint8_t> << < blocks, threads >> > (d_left_disp, d_right_disp, (uint8_t*)d_src_left, width, height, src_pitch, dst_pitch, subpixel);
			}
			CudaKernelCheck();
		}
		// Same as above for 16-bit disparity maps (the kernel's DST_T is
		// deduced from the disparity pointer type).
		void check_consistency(uint16_t* d_left_disp, const uint16_t* d_right_disp, const void* d_src_left, int width, int height, int depth_bits, int src_pitch, int dst_pitch, bool subpixel) {
			const dim3 blocks(width / 16, height / 16);
			const dim3 threads(16, 16);
			if (depth_bits == 16) {
				check_consistency_kernel<uint16_t> << < blocks, threads >> > (d_left_disp, d_right_disp, (uint16_t*)d_src_left, width, height, src_pitch, dst_pitch, subpixel);
			}
			else if (depth_bits == 8) {
				check_consistency_kernel<uint8_t> << < blocks, threads >> > (d_left_disp, d_right_disp, (uint8_t*)d_src_left, width, height, src_pitch, dst_pitch, subpixel);
			}
			CudaKernelCheck();
		}
	}
}
|
6655b4734b3be0d27cbdaf0274283c2d1b04eb3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SF_CUDA.cuh"
#include <chrono>
#include <iostream>
#include "Math_Helper.cuh"
namespace SF_CUDA
{
// Host variables
Person* cells;
// Device variables
Person* deviceCells;
// 1 (thread) block = 1 cell
dim3 blocksPerGrid(CELLS_PER_AXIS, CELLS_PER_AXIS, 1);
// Per space: 9 threads, 1 for each influencing cell
dim3 threadsPerBlock(MAX_OCCUPATION, 3, 3);
__device__ float2 calculateSF(Person* personA, Person* personB)
{
float v_a0 = magnitude(personA->velocity);
float v_b0 = magnitude(personB->velocity);
if (v_a0 * v_a0 < 0.001f || v_b0 * v_b0 < 0.001f)
{
return make_float2(0.f, 0.f);
}
float2 dir_a = personA->goal - personA->position;
float2 e_a = dir_a / magnitude(dir_a);
float2 dir_b = personB->goal - personB->position;
float2 e_b = dir_b / magnitude(dir_b);
float2 e2 = EPSILON * v_a0 * e_a - v_b0 * e_b;
e2 = normalize(e2);
float2 e1 = make_float2(e2.y, -e2.x);
const float2 r_ab = personA->position - personB->position;
float e1_result = dot(r_ab, e1);
e1_result *= e1_result;
float e2_result = dot(r_ab, e2);
e2_result *= e2_result;
float gamma_a = dot(r_ab, e2) >= 0.f ? THETA : 1 + DELTA * v_a0;
float V_ab = S * std::powf(EULER, -std::sqrtf(e1_result + e2_result / (gamma_a * gamma_a)) / R);
float2 f_ab = make_float2(-r_ab.x * V_ab, -r_ab.y * V_ab);
return f_ab;
}
__global__ void calculateCellForce(Person* device_grid)
{
// Saves forces of each cell on influenced person
__shared__ float2 totalForces[MAX_OCCUPATION][9];
short2 cellAPos = make_short2(blockIdx.x, blockIdx.y);
int cellA = cellPosToIndex(cellAPos);
// Influenced person/space
Person* personA = &device_grid[cellA * MAX_OCCUPATION + threadIdx.x];
// If space is empty, thread can terminate early
if (personA->state != OCCUPIED)
return;
short2 cellBPos = make_short2(cellAPos.x - 1 + threadIdx.y, cellAPos.y - 1 + threadIdx.z);
int cellB = cellPosToIndex(cellBPos);
float2 forceVector = make_float2(0.f, 0.f);
if (cellB >= 0 && cellB < CELLS_PER_AXIS * CELLS_PER_AXIS)
{
// Number of people in influencing cell, important for congestion avoidance
int blockppl = 0;
// Iterate over space in neighbor cell
for (int i = 0; i < MAX_OCCUPATION; i++)
{
// Ignore yourself
if (threadIdx.y == 1 && threadIdx.z == 1 && threadIdx.x % MAX_OCCUPATION == i)
continue;
Person* other = &device_grid[cellB * MAX_OCCUPATION + i];
if (other->state != OCCUPIED)
continue;
forceVector = forceVector + calculateSF(personA, other);
blockppl++;
}
// Number of people in influenced cell
int ppl = mask_to_int(__ballot_sync(0xFFFFFFFF, personA->state == OCCUPIED));
// Only calculate avoidance force if influencing cell =/= influenced cell
if ((threadIdx.y != 1 || threadIdx.z != 1) && (blockppl > 20 || ppl > 26))
{
forceVector.x -= (threadIdx.y - 1.f) * (blockppl - 20) * AVOIDANCE_FORCE;
forceVector.y -= (threadIdx.z - 1.f) * (blockppl - 20) * AVOIDANCE_FORCE;
}
}
// Save calculated force in shared memory
totalForces[threadIdx.x][threadIdx.y + threadIdx.z * 3] = forceVector;
// Wait for all threads to complete calculation
__syncthreads();
// Only center cell sums and applies all social forces
if (threadIdx.y == 1 && threadIdx.z == 1)
{
float2 resultForce = make_float2(0.f, 0.f);
for (int i = 0; i < 9; i++)
{
if (float2_isnan(totalForces[threadIdx.x][i]))
continue;
resultForce = resultForce + totalForces[threadIdx.x][i];
}
personA->updateVelocity(personA->velocity - resultForce * DELTA);
float2 newPos = personA->position + personA->velocity * DELTA;
// Check if person moves to other cell
int oldCell = personPosToCellIndex(personA->position.x, personA->position.y);
int newCell = personPosToCellIndex(newPos.x, newPos.y);
if (oldCell != newCell)
{
bool reservedSpace = false;
if (newCell >= 0 && newCell < CELLS_PER_AXIS * CELLS_PER_AXIS)
{
// Look for space in new cell
for (int i = newCell * MAX_OCCUPATION; i < (newCell + 1) * MAX_OCCUPATION; i++)
{
if (atomicCAS(&device_grid[i].state, FREE, RESERVED) == FREE)
{
device_grid[cellA * MAX_OCCUPATION + threadIdx.x].state = LEAVING;
device_grid[i] = Person(device_grid[cellA * MAX_OCCUPATION + threadIdx.x]);
device_grid[i].state = RESERVED;
reservedSpace = true;
break;
}
}
}
// If entry to other cell was denied, block movement
if (!reservedSpace)
{
personA->velocity = make_float2(0.f, 0.f);
}
}
}
}
__global__ void completeMove(Person* device_grid)
{
int cell = cellPosToIndex(blockIdx.x, blockIdx.y);
Person* person = &device_grid[cell * MAX_OCCUPATION + threadIdx.x];
// Terminate early if space is empty
if (person->state == FREE)
return;
// Mark space as FREE again and terminate
if (person->state == LEAVING)
{
person->state = FREE;
return;
}
// If person moved to other cell, mark space as OCCUPIED
if (person->state == RESERVED)
{
person->state = OCCUPIED;
}
// Update position
person->position = person->position + person->velocity * DELTA;
// Update direction to goal
float2 goalDir = make_float2(
person->goal.x - person->position.x,
person->goal.y - person->position.y);
goalDir = normalize(goalDir);
person->direction = goalDir;
person->updateVelocity(goalDir * SPEED);
}
bool add_to_grid(const Person& p)
{
int cell = cellPosToIndex(p.position / CELL_SIZE);
for (int i = 0; i < MAX_OCCUPATION; i++)
{
int index = cell * MAX_OCCUPATION + i;
if (cells[index].state != FREE)
continue;
cells[index] = Person(p);
return true;
}
return false;
}
void init()
{
cells = static_cast<Person*>(malloc(sizeof(Person) * CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION));
for (int i = 0; i < CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION; i++)
{
cells[i] = Person();
}
int totallySpawned = 0;
for (int i = 0; i < SPAWNED_ACTORS; i++)
{
bool spawned = false;
while (!spawned)
{
spawned = add_to_grid(Person(getRandomPos(), getRandomPos()));
}
totallySpawned++;
}
std::cout << "Spawned " << totallySpawned << " people.\n";
hipError_t error = hipMalloc((void**)&deviceCells, CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION * sizeof(Person));
if (error)
std::cout << "Error while allocating CUDA memory\n";
std::cout << "Allocated " << CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION * sizeof(Person) / 1024 / 1024 << " MB on GPU\n";
hipMemcpy(deviceCells, cells, CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION * sizeof(Person), hipMemcpyHostToDevice);
}
double timeH2D = 0.f;
double timeD2H = 0.f;
int transfersMeasured = 0;
void simulate()
{
auto t1 = std::chrono::high_resolution_clock::now();
hipMemcpy(deviceCells, cells, CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION * sizeof(Person), hipMemcpyHostToDevice);
auto t2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> ms_double = t2 - t1;
timeH2D += ms_double.count();
calculateCellForce << < blocksPerGrid, threadsPerBlock >> > (deviceCells);
hipError_t error = hipDeviceSynchronize();
if (error)
{
std::cout << "CalculateForce: " << hipGetErrorName << ": " << hipGetErrorString(error) << "\n";
}
completeMove << < blocksPerGrid, MAX_OCCUPATION >> > (deviceCells);
hipDeviceSynchronize();
error = hipDeviceSynchronize();
if (error)
{
std::cout << "CompleteMove: " << hipGetErrorName << ": " << hipGetErrorString(error) << "\n";
}
t1 = std::chrono::high_resolution_clock::now();
hipMemcpy(cells, deviceCells, CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION * sizeof(Person), hipMemcpyDeviceToHost);
t2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> ms_double2 = t2 - t1;
timeD2H += ms_double2.count();
transfersMeasured++;
}
void printTransferTime()
{
std::cout << "Avg. Host to Device = " << timeH2D / transfersMeasured << "\n";
std::cout << "Avg. Device to Host = " << timeD2H / transfersMeasured << "\n";
}
std::vector<PersonVisuals> convertToVisual()
{
std::vector<PersonVisuals> persons;
for (int i = 0; i < CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION; i++)
{
Person& p = cells[i];
if (p.state != FREE)
{
if (dist(p.position, p.goal) < MIN_DIST)
{
p.goal = getRandomPos();
}
float2 dir = p.direction;
dir.y = -dir.y;
persons.emplace_back(simCoordToGL(p.position), dir);
}
}
return persons;
}
} | 6655b4734b3be0d27cbdaf0274283c2d1b04eb3f.cu | #include "SF_CUDA.cuh"
#include <chrono>
#include <iostream>
#include "Math_Helper.cuh"
namespace SF_CUDA
{
// Host variables
Person* cells;
// Device variables
Person* deviceCells;
// 1 (thread) block = 1 cell
dim3 blocksPerGrid(CELLS_PER_AXIS, CELLS_PER_AXIS, 1);
// Per space: 9 threads, 1 for each influencing cell
dim3 threadsPerBlock(MAX_OCCUPATION, 3, 3);
__device__ float2 calculateSF(Person* personA, Person* personB)
{
float v_a0 = magnitude(personA->velocity);
float v_b0 = magnitude(personB->velocity);
if (v_a0 * v_a0 < 0.001f || v_b0 * v_b0 < 0.001f)
{
return make_float2(0.f, 0.f);
}
float2 dir_a = personA->goal - personA->position;
float2 e_a = dir_a / magnitude(dir_a);
float2 dir_b = personB->goal - personB->position;
float2 e_b = dir_b / magnitude(dir_b);
float2 e2 = EPSILON * v_a0 * e_a - v_b0 * e_b;
e2 = normalize(e2);
float2 e1 = make_float2(e2.y, -e2.x);
const float2 r_ab = personA->position - personB->position;
float e1_result = dot(r_ab, e1);
e1_result *= e1_result;
float e2_result = dot(r_ab, e2);
e2_result *= e2_result;
float gamma_a = dot(r_ab, e2) >= 0.f ? THETA : 1 + DELTA * v_a0;
float V_ab = S * std::powf(EULER, -std::sqrtf(e1_result + e2_result / (gamma_a * gamma_a)) / R);
float2 f_ab = make_float2(-r_ab.x * V_ab, -r_ab.y * V_ab);
return f_ab;
}
__global__ void calculateCellForce(Person* device_grid)
{
// Saves forces of each cell on influenced person
__shared__ float2 totalForces[MAX_OCCUPATION][9];
short2 cellAPos = make_short2(blockIdx.x, blockIdx.y);
int cellA = cellPosToIndex(cellAPos);
// Influenced person/space
Person* personA = &device_grid[cellA * MAX_OCCUPATION + threadIdx.x];
// If space is empty, thread can terminate early
if (personA->state != OCCUPIED)
return;
short2 cellBPos = make_short2(cellAPos.x - 1 + threadIdx.y, cellAPos.y - 1 + threadIdx.z);
int cellB = cellPosToIndex(cellBPos);
float2 forceVector = make_float2(0.f, 0.f);
if (cellB >= 0 && cellB < CELLS_PER_AXIS * CELLS_PER_AXIS)
{
// Number of people in influencing cell, important for congestion avoidance
int blockppl = 0;
// Iterate over space in neighbor cell
for (int i = 0; i < MAX_OCCUPATION; i++)
{
// Ignore yourself
if (threadIdx.y == 1 && threadIdx.z == 1 && threadIdx.x % MAX_OCCUPATION == i)
continue;
Person* other = &device_grid[cellB * MAX_OCCUPATION + i];
if (other->state != OCCUPIED)
continue;
forceVector = forceVector + calculateSF(personA, other);
blockppl++;
}
// Number of people in influenced cell
int ppl = mask_to_int(__ballot_sync(0xFFFFFFFF, personA->state == OCCUPIED));
// Only calculate avoidance force if influencing cell =/= influenced cell
if ((threadIdx.y != 1 || threadIdx.z != 1) && (blockppl > 20 || ppl > 26))
{
forceVector.x -= (threadIdx.y - 1.f) * (blockppl - 20) * AVOIDANCE_FORCE;
forceVector.y -= (threadIdx.z - 1.f) * (blockppl - 20) * AVOIDANCE_FORCE;
}
}
// Save calculated force in shared memory
totalForces[threadIdx.x][threadIdx.y + threadIdx.z * 3] = forceVector;
// Wait for all threads to complete calculation
__syncthreads();
// Only center cell sums and applies all social forces
if (threadIdx.y == 1 && threadIdx.z == 1)
{
float2 resultForce = make_float2(0.f, 0.f);
for (int i = 0; i < 9; i++)
{
if (float2_isnan(totalForces[threadIdx.x][i]))
continue;
resultForce = resultForce + totalForces[threadIdx.x][i];
}
personA->updateVelocity(personA->velocity - resultForce * DELTA);
float2 newPos = personA->position + personA->velocity * DELTA;
// Check if person moves to other cell
int oldCell = personPosToCellIndex(personA->position.x, personA->position.y);
int newCell = personPosToCellIndex(newPos.x, newPos.y);
if (oldCell != newCell)
{
bool reservedSpace = false;
if (newCell >= 0 && newCell < CELLS_PER_AXIS * CELLS_PER_AXIS)
{
// Look for space in new cell
for (int i = newCell * MAX_OCCUPATION; i < (newCell + 1) * MAX_OCCUPATION; i++)
{
if (atomicCAS(&device_grid[i].state, FREE, RESERVED) == FREE)
{
device_grid[cellA * MAX_OCCUPATION + threadIdx.x].state = LEAVING;
device_grid[i] = Person(device_grid[cellA * MAX_OCCUPATION + threadIdx.x]);
device_grid[i].state = RESERVED;
reservedSpace = true;
break;
}
}
}
// If entry to other cell was denied, block movement
if (!reservedSpace)
{
personA->velocity = make_float2(0.f, 0.f);
}
}
}
}
__global__ void completeMove(Person* device_grid)
{
int cell = cellPosToIndex(blockIdx.x, blockIdx.y);
Person* person = &device_grid[cell * MAX_OCCUPATION + threadIdx.x];
// Terminate early if space is empty
if (person->state == FREE)
return;
// Mark space as FREE again and terminate
if (person->state == LEAVING)
{
person->state = FREE;
return;
}
// If person moved to other cell, mark space as OCCUPIED
if (person->state == RESERVED)
{
person->state = OCCUPIED;
}
// Update position
person->position = person->position + person->velocity * DELTA;
// Update direction to goal
float2 goalDir = make_float2(
person->goal.x - person->position.x,
person->goal.y - person->position.y);
goalDir = normalize(goalDir);
person->direction = goalDir;
person->updateVelocity(goalDir * SPEED);
}
bool add_to_grid(const Person& p)
{
int cell = cellPosToIndex(p.position / CELL_SIZE);
for (int i = 0; i < MAX_OCCUPATION; i++)
{
int index = cell * MAX_OCCUPATION + i;
if (cells[index].state != FREE)
continue;
cells[index] = Person(p);
return true;
}
return false;
}
void init()
{
cells = static_cast<Person*>(malloc(sizeof(Person) * CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION));
for (int i = 0; i < CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION; i++)
{
cells[i] = Person();
}
int totallySpawned = 0;
for (int i = 0; i < SPAWNED_ACTORS; i++)
{
bool spawned = false;
while (!spawned)
{
spawned = add_to_grid(Person(getRandomPos(), getRandomPos()));
}
totallySpawned++;
}
std::cout << "Spawned " << totallySpawned << " people.\n";
cudaError_t error = cudaMalloc((void**)&deviceCells, CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION * sizeof(Person));
if (error)
std::cout << "Error while allocating CUDA memory\n";
std::cout << "Allocated " << CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION * sizeof(Person) / 1024 / 1024 << " MB on GPU\n";
cudaMemcpy(deviceCells, cells, CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION * sizeof(Person), cudaMemcpyHostToDevice);
}
double timeH2D = 0.f;
double timeD2H = 0.f;
int transfersMeasured = 0;
void simulate()
{
auto t1 = std::chrono::high_resolution_clock::now();
cudaMemcpy(deviceCells, cells, CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION * sizeof(Person), cudaMemcpyHostToDevice);
auto t2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> ms_double = t2 - t1;
timeH2D += ms_double.count();
calculateCellForce << < blocksPerGrid, threadsPerBlock >> > (deviceCells);
cudaError_t error = cudaDeviceSynchronize();
if (error)
{
std::cout << "CalculateForce: " << cudaGetErrorName << ": " << cudaGetErrorString(error) << "\n";
}
completeMove << < blocksPerGrid, MAX_OCCUPATION >> > (deviceCells);
cudaDeviceSynchronize();
error = cudaDeviceSynchronize();
if (error)
{
std::cout << "CompleteMove: " << cudaGetErrorName << ": " << cudaGetErrorString(error) << "\n";
}
t1 = std::chrono::high_resolution_clock::now();
cudaMemcpy(cells, deviceCells, CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION * sizeof(Person), cudaMemcpyDeviceToHost);
t2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> ms_double2 = t2 - t1;
timeD2H += ms_double2.count();
transfersMeasured++;
}
void printTransferTime()
{
std::cout << "Avg. Host to Device = " << timeH2D / transfersMeasured << "\n";
std::cout << "Avg. Device to Host = " << timeD2H / transfersMeasured << "\n";
}
std::vector<PersonVisuals> convertToVisual()
{
std::vector<PersonVisuals> persons;
for (int i = 0; i < CELLS_PER_AXIS * CELLS_PER_AXIS * MAX_OCCUPATION; i++)
{
Person& p = cells[i];
if (p.state != FREE)
{
if (dist(p.position, p.goal) < MIN_DIST)
{
p.goal = getRandomPos();
}
float2 dir = p.direction;
dir.y = -dir.y;
persons.emplace_back(simCoordToGL(p.position), dir);
}
}
return persons;
}
} |
296b8dbdda84df1dd20b1e812b168e3461556f22.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <raft/cuda_utils.cuh>
#include <raft/random/rng.cuh>
#include <stats/weighted_mean.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
template <typename T>
struct WeightedMeanInputs {
T tolerance;
int M, N;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const WeightedMeanInputs<T>& I) {
return os << "{ " << I.tolerance << ", " << I.M << ", " << I.N << ", "
<< I.seed << "}" << std::endl;
}
///// weighted row-wise mean test and support functions
template <typename T>
void naiveRowWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor) {
int istr = rowMajor ? 1 : M;
int jstr = rowMajor ? N : 1;
//sum the weights
T WS = 0;
for (int i = 0; i < N; i++) WS += W[i];
for (int j = 0; j < M; j++) {
R[j] = (T)0;
for (int i = 0; i < N; i++) {
//R[j] += (W[i]*D[i*istr + j*jstr] - R[j])/(T)(i+1);
R[j] += (W[i] * D[i * istr + j * jstr]) / WS;
}
}
}
template <typename T>
class RowWeightedMeanTest
: public ::testing::TestWithParam<WeightedMeanInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int rows = params.M, cols = params.N, len = rows * cols;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
//device-side data
din.resize(len);
dweights.resize(cols);
dexp.resize(rows);
dact.resize(rows);
//create random matrix and weights
r.uniform(din.data().get(), len, T(-1.0), T(1.0), stream);
r.uniform(dweights.data().get(), cols, T(-1.0), T(1.0), stream);
//host-side data
thrust::host_vector<T> hin = din;
thrust::host_vector<T> hweights = dweights;
thrust::host_vector<T> hexp(rows);
//compute naive result & copy to GPU
naiveRowWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols,
true);
dexp = hexp;
//compute ml-prims result
rowWeightedMean(dact.data().get(), din.data().get(), dweights.data().get(),
cols, rows, stream);
//adjust tolerance to account for round-off accumulation
params.tolerance *= params.N;
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {}
protected:
WeightedMeanInputs<T> params;
thrust::host_vector<T> hin, hweights;
thrust::device_vector<T> din, dweights, dexp, dact;
};
///// weighted column-wise mean test and support functions
template <typename T>
void naiveColWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor) {
int istr = rowMajor ? 1 : M;
int jstr = rowMajor ? N : 1;
//sum the weights
T WS = 0;
for (int j = 0; j < M; j++) WS += W[j];
for (int i = 0; i < N; i++) {
R[i] = (T)0;
for (int j = 0; j < M; j++) {
//R[i] += (W[j]*D[i*istr + j*jstr] - R[i])/(T)(j+1);
R[i] += (W[j] * D[i * istr + j * jstr]) / WS;
}
}
}
template <typename T>
class ColWeightedMeanTest
: public ::testing::TestWithParam<WeightedMeanInputs<T>> {
void SetUp() override {
params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int rows = params.M, cols = params.N, len = rows * cols;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
//device-side data
din.resize(len);
dweights.resize(rows);
dexp.resize(cols);
dact.resize(cols);
//create random matrix and weights
r.uniform(din.data().get(), len, T(-1.0), T(1.0), stream);
r.uniform(dweights.data().get(), rows, T(-1.0), T(1.0), stream);
//host-side data
thrust::host_vector<T> hin = din;
thrust::host_vector<T> hweights = dweights;
thrust::host_vector<T> hexp(cols);
//compute naive result & copy to GPU
naiveColWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols,
true);
dexp = hexp;
//compute ml-prims result
colWeightedMean(dact.data().get(), din.data().get(), dweights.data().get(),
cols, rows, stream);
//adjust tolerance to account for round-off accumulation
params.tolerance *= params.M;
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {}
protected:
WeightedMeanInputs<T> params;
thrust::host_vector<T> hin, hweights;
thrust::device_vector<T> din, dweights, dexp, dact;
};
////// Parameter sets and test instantiation
static const float tolF = 128 * std::numeric_limits<float>::epsilon();
static const double tolD = 256 * std::numeric_limits<double>::epsilon();
const std::vector<WeightedMeanInputs<float>> inputsf = {
{tolF, 4, 4, 1234}, {tolF, 1024, 32, 1234}, {tolF, 1024, 64, 1234},
{tolF, 1024, 128, 1234}, {tolF, 1024, 256, 1234}, {tolF, 1024, 32, 1234},
{tolF, 1024, 64, 1234}, {tolF, 1024, 128, 1234}, {tolF, 1024, 256, 1234}};
const std::vector<WeightedMeanInputs<double>> inputsd = {
{tolD, 4, 4, 1234}, {tolD, 1024, 32, 1234}, {tolD, 1024, 64, 1234},
{tolD, 1024, 128, 1234}, {tolD, 1024, 256, 1234}, {tolD, 1024, 32, 1234},
{tolD, 1024, 64, 1234}, {tolD, 1024, 128, 1234}, {tolD, 1024, 256, 1234}};
using RowWeightedMeanTestF = RowWeightedMeanTest<float>;
TEST_P(RowWeightedMeanTestF, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.M,
raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestF,
::testing::ValuesIn(inputsf));
using RowWeightedMeanTestD = RowWeightedMeanTest<double>;
TEST_P(RowWeightedMeanTestD, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.M,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestD,
::testing::ValuesIn(inputsd));
using ColWeightedMeanTestF = ColWeightedMeanTest<float>;
TEST_P(ColWeightedMeanTestF, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.N,
raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestF,
::testing::ValuesIn(inputsf));
using ColWeightedMeanTestD = ColWeightedMeanTest<double>;
TEST_P(ColWeightedMeanTestD, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.N,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestD,
::testing::ValuesIn(inputsd));
}; // end namespace Stats
}; // end namespace MLCommon
| 296b8dbdda84df1dd20b1e812b168e3461556f22.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <raft/cuda_utils.cuh>
#include <raft/random/rng.cuh>
#include <stats/weighted_mean.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
template <typename T>
struct WeightedMeanInputs {
T tolerance;
int M, N;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const WeightedMeanInputs<T>& I) {
return os << "{ " << I.tolerance << ", " << I.M << ", " << I.N << ", "
<< I.seed << "}" << std::endl;
}
///// weighted row-wise mean test and support functions
template <typename T>
void naiveRowWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor) {
int istr = rowMajor ? 1 : M;
int jstr = rowMajor ? N : 1;
//sum the weights
T WS = 0;
for (int i = 0; i < N; i++) WS += W[i];
for (int j = 0; j < M; j++) {
R[j] = (T)0;
for (int i = 0; i < N; i++) {
//R[j] += (W[i]*D[i*istr + j*jstr] - R[j])/(T)(i+1);
R[j] += (W[i] * D[i * istr + j * jstr]) / WS;
}
}
}
template <typename T>
class RowWeightedMeanTest
: public ::testing::TestWithParam<WeightedMeanInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int rows = params.M, cols = params.N, len = rows * cols;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
//device-side data
din.resize(len);
dweights.resize(cols);
dexp.resize(rows);
dact.resize(rows);
//create random matrix and weights
r.uniform(din.data().get(), len, T(-1.0), T(1.0), stream);
r.uniform(dweights.data().get(), cols, T(-1.0), T(1.0), stream);
//host-side data
thrust::host_vector<T> hin = din;
thrust::host_vector<T> hweights = dweights;
thrust::host_vector<T> hexp(rows);
//compute naive result & copy to GPU
naiveRowWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols,
true);
dexp = hexp;
//compute ml-prims result
rowWeightedMean(dact.data().get(), din.data().get(), dweights.data().get(),
cols, rows, stream);
//adjust tolerance to account for round-off accumulation
params.tolerance *= params.N;
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {}
protected:
WeightedMeanInputs<T> params;
thrust::host_vector<T> hin, hweights;
thrust::device_vector<T> din, dweights, dexp, dact;
};
///// weighted column-wise mean test and support functions
template <typename T>
void naiveColWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor) {
int istr = rowMajor ? 1 : M;
int jstr = rowMajor ? N : 1;
//sum the weights
T WS = 0;
for (int j = 0; j < M; j++) WS += W[j];
for (int i = 0; i < N; i++) {
R[i] = (T)0;
for (int j = 0; j < M; j++) {
//R[i] += (W[j]*D[i*istr + j*jstr] - R[i])/(T)(j+1);
R[i] += (W[j] * D[i * istr + j * jstr]) / WS;
}
}
}
template <typename T>
class ColWeightedMeanTest
: public ::testing::TestWithParam<WeightedMeanInputs<T>> {
void SetUp() override {
params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int rows = params.M, cols = params.N, len = rows * cols;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
//device-side data
din.resize(len);
dweights.resize(rows);
dexp.resize(cols);
dact.resize(cols);
//create random matrix and weights
r.uniform(din.data().get(), len, T(-1.0), T(1.0), stream);
r.uniform(dweights.data().get(), rows, T(-1.0), T(1.0), stream);
//host-side data
thrust::host_vector<T> hin = din;
thrust::host_vector<T> hweights = dweights;
thrust::host_vector<T> hexp(cols);
//compute naive result & copy to GPU
naiveColWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols,
true);
dexp = hexp;
//compute ml-prims result
colWeightedMean(dact.data().get(), din.data().get(), dweights.data().get(),
cols, rows, stream);
//adjust tolerance to account for round-off accumulation
params.tolerance *= params.M;
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {}
protected:
WeightedMeanInputs<T> params;
thrust::host_vector<T> hin, hweights;
thrust::device_vector<T> din, dweights, dexp, dact;
};
////// Parameter sets and test instantiation
static const float tolF = 128 * std::numeric_limits<float>::epsilon();
static const double tolD = 256 * std::numeric_limits<double>::epsilon();
const std::vector<WeightedMeanInputs<float>> inputsf = {
{tolF, 4, 4, 1234}, {tolF, 1024, 32, 1234}, {tolF, 1024, 64, 1234},
{tolF, 1024, 128, 1234}, {tolF, 1024, 256, 1234}, {tolF, 1024, 32, 1234},
{tolF, 1024, 64, 1234}, {tolF, 1024, 128, 1234}, {tolF, 1024, 256, 1234}};
const std::vector<WeightedMeanInputs<double>> inputsd = {
{tolD, 4, 4, 1234}, {tolD, 1024, 32, 1234}, {tolD, 1024, 64, 1234},
{tolD, 1024, 128, 1234}, {tolD, 1024, 256, 1234}, {tolD, 1024, 32, 1234},
{tolD, 1024, 64, 1234}, {tolD, 1024, 128, 1234}, {tolD, 1024, 256, 1234}};
using RowWeightedMeanTestF = RowWeightedMeanTest<float>;
TEST_P(RowWeightedMeanTestF, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.M,
raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestF,
::testing::ValuesIn(inputsf));
using RowWeightedMeanTestD = RowWeightedMeanTest<double>;
TEST_P(RowWeightedMeanTestD, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.M,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestD,
::testing::ValuesIn(inputsd));
using ColWeightedMeanTestF = ColWeightedMeanTest<float>;
TEST_P(ColWeightedMeanTestF, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.N,
raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestF,
::testing::ValuesIn(inputsf));
using ColWeightedMeanTestD = ColWeightedMeanTest<double>;
TEST_P(ColWeightedMeanTestD, Result) {
ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), params.N,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestD,
::testing::ValuesIn(inputsd));
}; // end namespace Stats
}; // end namespace MLCommon
|
45d4d2e49cf041a25403031dff74d9b09ebd4ebf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "avro.h"
#include "avro_gpu.h"
#include "avro_reader_impl.hpp"
#include "io/comp/gpuinflate.h"
#include <hip/hip_runtime.h>
#include <inttypes.h>
namespace cudf {
namespace io {
namespace avro {
#if 0
#define LOG_PRINTF(...) std::printf(__VA_ARGS__)
#else
#define LOG_PRINTF(...) (void)0
#endif
/**
* @brief Function that translates Avro datatype to GDF dtype
**/
gdf_dtype to_dtype(const avro::schema_entry *col) {
switch (col->kind) {
case avro::type_boolean:
return GDF_BOOL8;
case avro::type_int:
return GDF_INT32;
case avro::type_long:
return GDF_INT64;
case avro::type_float:
return GDF_FLOAT32;
case avro::type_double:
return GDF_FLOAT64;
case avro::type_bytes:
case avro::type_string:
return GDF_STRING;
case avro::type_enum:
return (!col->symbols.empty()) ? GDF_STRING : GDF_INT32;
default:
return GDF_invalid;
}
}
/**
* @brief A helper wrapper for Avro file metadata. Provides some additional
* convenience methods for initializing and accessing the metadata and schema
**/
class avro_metadata : public avro::file_metadata {
public:
// Does not take ownership of `src`; the datasource must outlive this object
explicit avro_metadata(datasource *const src) : source(src) {}
/**
* @brief Initializes the parser and filters down to a subset of rows
*
* Parses the entire file's metadata from the datasource, then overwrites
* both arguments with the row selection actually resolved by the parser.
*
* @param[in,out] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
**/
void init_and_select_rows(int &row_start, int &row_count) {
const auto buffer = source->get_buffer(0, source->size());
avro::container pod(buffer->data(), buffer->size());
CUDF_EXPECTS(pod.parse(this, row_count, row_start),
"Cannot parse metadata");
print_metadata();
row_start = skip_rows;
row_count = num_rows;
}
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select; when empty, all
* columns with a supported dtype are selected
*
* @return List of (column index, column name) pairs
**/
auto select_columns(std::vector<std::string> use_names) {
std::vector<std::pair<int, std::string>> selection;
const auto num_avro_columns = static_cast<int>(columns.size());
if (!use_names.empty()) {
// Match each requested name against the dataset columns, resuming the
// scan from just past the previous match and wrapping around, so that
// requests listed out of dataset order still resolve
for (int i = 0, column_id = 0; i < (int)use_names.size(); i++) {
for (int j = 0; j < num_avro_columns; j++, column_id++) {
if (column_id >= num_avro_columns) {
column_id = 0;
}
if (columns[column_id].name == use_names[i] &&
GDF_invalid !=
to_dtype(&schema[columns[column_id].schema_data_idx])) {
selection.emplace_back(column_id, columns[column_id].name);
column_id++;
break;
}
}
}
} else {
// Iterate backwards as fastavro returns columns from last-to-first
for (int i = num_avro_columns - 1; i >= 0; --i) {
const auto dtype = to_dtype(&schema[columns[i].schema_data_idx]);
CUDF_EXPECTS(dtype != GDF_invalid, "Unsupported data type");
selection.emplace_back(i, columns[i].name);
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
return selection;
}
// Dumps the parsed metadata (codec, schema, blocks, user data) through
// LOG_PRINTF; compiled out unless the logging macro is enabled above
void print_metadata() const {
LOG_PRINTF("\n[+] Metadata:\n");
LOG_PRINTF(" size = %zd\n", metadata_size);
LOG_PRINTF(" codec = \"%s\"\n", codec.c_str());
LOG_PRINTF(" sync marker = 0x%016" PRIx64 "%016" PRIx64 "\n",
sync_marker[1], sync_marker[0]);
LOG_PRINTF(" schema (%zd entries):\n", schema.size());
for (size_t i = 0; i < schema.size(); ++i) {
LOG_PRINTF(
" [%zd] num_children=%d, parent_idx=%d, type=%d, name=\"%s\"\n", i,
schema[i].num_children, schema[i].parent_idx, schema[i].kind,
schema[i].name.c_str());
}
LOG_PRINTF(" datablocks (%zd entries):\n", block_list.size());
LOG_PRINTF(" num rows = %zd (max block size = %d, total_data_size = %zd)\n",
num_rows, max_block_size, total_data_size);
LOG_PRINTF(" num columns = %zd\n", columns.size());
LOG_PRINTF(" user data entries = %zd\n", user_data.size());
for (const auto &entry : user_data) {
LOG_PRINTF(" key: %s, value: %s\n", entry.first.c_str(),
entry.second.c_str());
}
}
private:
// Borrowed pointer to the backing datasource (not owned)
datasource *const source;
};
// Constructs the reader implementation: takes ownership of the datasource,
// records the requested column names, and eagerly parses the Avro file
// metadata (schema, blocks, codec) so later reads can use it.
reader::Impl::Impl(std::unique_ptr<datasource> source,
reader_options const &options)
: source_(std::move(source)), columns_(options.columns) {
// Open the source Avro dataset metadata
md_ = std::make_unique<avro_metadata>(source_.get());
}
// Reads the selected row/column subset of the Avro file and materializes it
// as a cudf table. num_rows == -1 selects all remaining rows; both arguments
// are refined in place by the metadata parser before use.
table reader::Impl::read(int skip_rows, int num_rows) {
// Select and read partial metadata / schema within the subset of rows
md_->init_and_select_rows(skip_rows, num_rows);
// Select only columns required by the options
selected_cols_ = md_->select_columns(columns_);
if (selected_cols_.empty()) {
return table();
}
// Initialize gdf_columns, but hold off on allocating storage space
LOG_PRINTF("[+] Selected columns: %zd\n", selected_cols_.size());
LOG_PRINTF("[+] Selected skip_rows: %d, num_rows: %d\n", skip_rows, num_rows);
std::vector<gdf_column_wrapper> columns;
for (const auto &col : selected_cols_) {
auto &col_schema = md_->schema[md_->columns[col.first].schema_data_idx];
columns.emplace_back(static_cast<gdf_size_type>(num_rows),
to_dtype(&col_schema),
gdf_dtype_extra_info{TIME_UNIT_NONE}, col.second);
LOG_PRINTF(" %2zd: name=%s size=%zd type=%d data=%lx valid=%lx\n",
columns.size() - 1, columns.back()->col_name,
(size_t)columns.back()->size, columns.back()->dtype,
(uint64_t)columns.back()->data, (uint64_t)columns.back()->valid);
}
if (md_->total_data_size > 0) {
// Stage the raw (possibly compressed) data blocks in device memory
device_buffer<uint8_t> block_data(align_size(md_->total_data_size));
const auto buffer =
source_->get_buffer(md_->block_list[0].offset, md_->total_data_size);
CUDA_TRY(hipMemcpyAsync(block_data.data(), buffer->data(), buffer->size(),
hipMemcpyHostToDevice));
if (md_->codec != "" && md_->codec != "null") {
auto decomp_block_data = decompress_data(block_data);
block_data = std::move(decomp_block_data);
} else {
// Uncompressed: rebase block offsets so they index into block_data
auto dst_ofs = md_->block_list[0].offset;
for (size_t i = 0; i < md_->block_list.size(); i++) {
md_->block_list[i].offset -= dst_ofs;
}
}
size_t total_dictionary_entries = 0;
size_t dictionary_data_size = 0;
std::vector<std::pair<uint32_t, uint32_t>> dict(columns.size());
for (size_t i = 0; i < columns.size(); ++i) {
columns[i].allocate();
// Pre-set all valid bits for the selected rows to 1 (the decoder only
// clears bits for nulls); mask off trailing bits in the last partial
// byte and zero any remaining allocation padding
size_t valid_bytes = columns[i]->size >> 3;
size_t valid_size = gdf_valid_allocation_size(columns[i]->size);
uint8_t *valid = reinterpret_cast<uint8_t *>(columns[i]->valid);
CUDA_TRY(hipMemsetAsync(valid, -1, valid_bytes));
if (columns[i]->size & 7) {
CUDA_TRY(hipMemsetAsync(valid + valid_bytes, (1 << (columns[i]->size & 7)) - 1, 1));
valid_bytes++;
}
if (valid_bytes < valid_size) {
CUDA_TRY(hipMemsetAsync(valid + valid_bytes, 0, valid_size - valid_bytes));
}
// Record each column's slice of the global enum symbol dictionary:
// (first entry index, number of symbols)
auto col_idx = selected_cols_[i].first;
auto &col_schema = md_->schema[md_->columns[col_idx].schema_data_idx];
dict[i].first = static_cast<uint32_t>(total_dictionary_entries);
dict[i].second = static_cast<uint32_t>(col_schema.symbols.size());
total_dictionary_entries += dict[i].second;
for (const auto &sym : col_schema.symbols) {
dictionary_data_size += sym.length();
}
}
// Layout: an nvstrdesc_s index table followed by the packed symbol bytes
hostdevice_vector<uint8_t> global_dictionary(total_dictionary_entries * sizeof(gpu::nvstrdesc_s) + dictionary_data_size);
if (total_dictionary_entries > 0) {
size_t dict_pos = total_dictionary_entries * sizeof(gpu::nvstrdesc_s);
for (size_t i = 0; i < columns.size(); ++i) {
auto col_idx = selected_cols_[i].first;
auto &col_schema = md_->schema[md_->columns[col_idx].schema_data_idx];
auto index = &(reinterpret_cast<gpu::nvstrdesc_s *>(global_dictionary.host_ptr()))[dict[i].first];
for (size_t j = 0; j < dict[i].second; j++) {
size_t len = col_schema.symbols[j].length();
// Descriptors must point at the *device* copy of the symbol bytes
char *ptr = reinterpret_cast<char *>(global_dictionary.device_ptr() +
dict_pos);
index[j].ptr = ptr;
index[j].count = len;
memcpy(global_dictionary.host_ptr() + dict_pos,
col_schema.symbols[j].c_str(), len);
dict_pos += len;
}
}
CUDA_TRY(hipMemcpyAsync(
global_dictionary.device_ptr(), global_dictionary.host_ptr(),
global_dictionary.memory_size(), hipMemcpyHostToDevice, 0));
}
// Write out columns
decode_data(block_data, dict, global_dictionary, total_dictionary_entries,
columns);
// Perform any final column preparation (may reference decoded data)
for (auto &column : columns) {
column.finalize();
}
} else {
// No data blocks at all: produce empty columns
for (auto &column : columns) {
column.allocate();
column.finalize();
}
}
// Transfer ownership to raw pointer output arguments
std::vector<gdf_column *> out_cols(columns.size());
for (size_t i = 0; i < columns.size(); ++i) {
out_cols[i] = columns[i].release();
}
return cudf::table(out_cols.data(), out_cols.size());
}
// Decompresses the dataset's data blocks on the GPU ("deflate" or "snappy").
// Rewrites the metadata block offsets/sizes to describe the uncompressed
// layout and returns the uncompressed device buffer. For deflate, the output
// size is a guess, so decompression may run a second pass with a larger
// buffer if the first pass reports truncation.
device_buffer<uint8_t> reader::Impl::decompress_data(
const device_buffer<uint8_t> &comp_block_data) {
size_t uncompressed_data_size = 0;
hostdevice_vector<gpu_inflate_input_s> inflate_in(md_->block_list.size());
hostdevice_vector<gpu_inflate_status_s> inflate_out(md_->block_list.size());
if (md_->codec == "deflate") {
// Guess an initial maximum uncompressed block size
uint32_t initial_blk_len = (md_->max_block_size * 2 + 0xfff) & ~0xfff;
uncompressed_data_size = initial_blk_len * md_->block_list.size();
for (size_t i = 0; i < inflate_in.size(); ++i) {
inflate_in[i].dstSize = initial_blk_len;
}
} else if (md_->codec == "snappy") {
// Extract the uncompressed length from the snappy stream
// (little-endian varint in the first 1-4 bytes of each block)
for (size_t i = 0; i < md_->block_list.size(); i++) {
const auto buffer = source_->get_buffer(md_->block_list[i].offset, 4);
const uint8_t *blk = buffer->data();
uint32_t blk_len = blk[0];
if (blk_len > 0x7f) {
blk_len = (blk_len & 0x7f) | (blk[1] << 7);
if (blk_len > 0x3fff) {
blk_len = (blk_len & 0x3fff) | (blk[2] << 14);
if (blk_len > 0x1fffff) {
blk_len = (blk_len & 0x1fffff) | (blk[3] << 21);
}
}
}
inflate_in[i].dstSize = blk_len;
uncompressed_data_size += blk_len;
}
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
device_buffer<uint8_t> decomp_block_data(uncompressed_data_size);
// Build the per-block inflate job descriptors
const auto base_offset = md_->block_list[0].offset;
for (size_t i = 0, dst_pos = 0; i < md_->block_list.size(); i++) {
const auto src_pos = md_->block_list[i].offset - base_offset;
inflate_in[i].srcDevice = comp_block_data.data() + src_pos;
inflate_in[i].srcSize = md_->block_list[i].size;
inflate_in[i].dstDevice = decomp_block_data.data() + dst_pos;
// Update blocks offsets & sizes to refer to uncompressed data
md_->block_list[i].offset = dst_pos;
md_->block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += md_->block_list[i].size;
}
// Up to two passes; the second pass only runs for deflate when at least
// one output buffer turned out to be too small
for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) {
CUDA_TRY(hipMemcpyAsync(inflate_in.device_ptr(), inflate_in.host_ptr(),
inflate_in.memory_size(), hipMemcpyHostToDevice,
0));
CUDA_TRY(hipMemsetAsync(inflate_out.device_ptr(), 0,
inflate_out.memory_size(), 0));
if (md_->codec == "deflate") {
CUDA_TRY(gpuinflate(inflate_in.device_ptr(), inflate_out.device_ptr(),
inflate_in.size(), 0, 0));
} else if (md_->codec == "snappy") {
CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(), inflate_out.device_ptr(),
inflate_in.size(), 0));
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
CUDA_TRY(hipMemcpyAsync(inflate_out.host_ptr(), inflate_out.device_ptr(),
inflate_out.memory_size(), hipMemcpyDeviceToHost,
0));
CUDA_TRY(hipStreamSynchronize(0));
// Check if larger output is required, as it's not known ahead of time
if (md_->codec == "deflate" && !loop_cnt) {
size_t actual_uncompressed_size = 0;
for (size_t i = 0; i < md_->block_list.size(); i++) {
// If error status is 1 (buffer too small), the `bytes_written` field
// actually contains the uncompressed data size
if (inflate_out[i].status == 1 &&
inflate_out[i].bytes_written > inflate_in[i].dstSize) {
inflate_in[i].dstSize = inflate_out[i].bytes_written;
}
actual_uncompressed_size += inflate_in[i].dstSize;
}
if (actual_uncompressed_size > uncompressed_data_size) {
// Grow the output buffer and recompute the destination layout
decomp_block_data.resize(actual_uncompressed_size);
for (size_t i = 0, dst_pos = 0; i < md_->block_list.size(); i++) {
inflate_in[i].dstDevice = decomp_block_data.data() + dst_pos;
md_->block_list[i].offset = dst_pos;
md_->block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += md_->block_list[i].size;
}
} else {
break;
}
} else {
break;
}
}
return decomp_block_data;
}
void reader::Impl::decode_data(
const device_buffer<uint8_t> &block_data,
const std::vector<std::pair<uint32_t, uint32_t>> &dict,
const hostdevice_vector<uint8_t> &global_dictionary,
size_t total_dictionary_entries,
const std::vector<gdf_column_wrapper> &columns) {
// Build gpu schema
hostdevice_vector<gpu::schemadesc_s> schema_desc(md_->schema.size());
uint32_t min_row_data_size = 0;
int skip_field_cnt = 0;
for (size_t i = 0; i < md_->schema.size(); i++) {
type_kind_e kind = md_->schema[i].kind;
if (skip_field_cnt != 0) {
// Exclude union members from min_row_data_size
skip_field_cnt += md_->schema[i].num_children - 1;
} else {
switch (kind) {
case type_union:
skip_field_cnt = md_->schema[i].num_children;
// fall through
case type_boolean:
case type_int:
case type_long:
case type_bytes:
case type_string:
case type_enum:
min_row_data_size += 1;
break;
case type_float:
min_row_data_size += 4;
break;
case type_double:
min_row_data_size += 8;
break;
default:
break;
}
}
if (kind == type_enum && !md_->schema[i].symbols.size()) {
kind = type_int;
}
schema_desc[i].kind = kind;
schema_desc[i].count = (kind == type_enum) ? 0 : (uint32_t)md_->schema[i].num_children;
schema_desc[i].dataptr = nullptr;
CUDF_EXPECTS(kind != type_union || md_->schema[i].num_children < 2 ||
(md_->schema[i].num_children == 2 &&
(md_->schema[i + 1].kind == type_null ||
md_->schema[i + 2].kind == type_null)),
"Union with non-null type not currently supported");
}
std::vector<void*> valid_alias(columns.size(), nullptr);
for (size_t i = 0; i < columns.size(); i++) {
auto col_idx = selected_cols_[i].first;
int schema_data_idx = md_->columns[col_idx].schema_data_idx;
int schema_null_idx = md_->columns[col_idx].schema_null_idx;
schema_desc[schema_data_idx].dataptr = columns[i]->data;
if (schema_null_idx >= 0) {
if (!schema_desc[schema_null_idx].dataptr) {
schema_desc[schema_null_idx].dataptr = columns[i]->valid;
} else {
valid_alias[i] = schema_desc[schema_null_idx].dataptr;
}
}
if (md_->schema[schema_data_idx].kind == type_enum) {
schema_desc[schema_data_idx].count = dict[i].first;
}
}
device_buffer<block_desc_s> block_list(md_->block_list.size());
CUDA_TRY(hipMemcpyAsync(schema_desc.device_ptr(), schema_desc.host_ptr(),
schema_desc.memory_size(), hipMemcpyHostToDevice, 0));
CUDA_TRY(hipMemcpyAsync(block_list.data(), md_->block_list.data(),
md_->block_list.size() * sizeof(block_desc_s),
hipMemcpyHostToDevice, 0));
CUDA_TRY(DecodeAvroColumnData(
block_list.data(), schema_desc.device_ptr(),
reinterpret_cast<gpu::nvstrdesc_s *>(global_dictionary.device_ptr()),
block_data.data(), static_cast<uint32_t>(block_list.size()),
static_cast<uint32_t>(schema_desc.size()),
static_cast<uint32_t>(total_dictionary_entries), md_->num_rows,
md_->skip_rows, min_row_data_size, 0));
// Copy valid bits that are shared between columns
for (size_t i = 0; i < columns.size(); i++) {
if (valid_alias[i] != nullptr) {
CUDA_TRY(hipMemcpyAsync(columns[i]->valid, valid_alias[i],
gdf_valid_allocation_size(columns[i]->size),
hipMemcpyHostToDevice, 0));
}
}
CUDA_TRY(hipMemcpyAsync(schema_desc.host_ptr(), schema_desc.device_ptr(),
schema_desc.memory_size(), hipMemcpyDeviceToHost,
0));
CUDA_TRY(hipStreamSynchronize(0));
for (size_t i = 0; i < columns.size(); i++) {
const auto col_idx = selected_cols_[i].first;
const auto schema_null_idx = md_->columns[col_idx].schema_null_idx;
columns[i]->null_count =
(schema_null_idx >= 0) ? schema_desc[schema_null_idx].count : 0;
}
}
// Public reader constructors: create a datasource from a file path, a host
// memory buffer, or an Arrow random-access file, and forward to Impl.
reader::reader(std::string filepath, reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(filepath), options)) {}
reader::reader(const char *buffer, size_t length, reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(buffer, length),
options)) {}
reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file,
reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(file), options)) {}
// Reads the entire dataset (num_rows == -1 selects all rows)
table reader::read_all() { return impl_->read(0, -1); }
// Reads a row range; num_rows == 0 is treated as "all remaining rows"
table reader::read_rows(size_t skip_rows, size_t num_rows) {
return impl_->read(skip_rows,
(num_rows != 0) ? static_cast<int>(num_rows) : -1);
}
reader::~reader() = default;
} // namespace avro
} // namespace io
} // namespace cudf
| 45d4d2e49cf041a25403031dff74d9b09ebd4ebf.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "avro.h"
#include "avro_gpu.h"
#include "avro_reader_impl.hpp"
#include "io/comp/gpuinflate.h"
#include <cuda_runtime.h>
#include <inttypes.h>
namespace cudf {
namespace io {
namespace avro {
#if 0
#define LOG_PRINTF(...) std::printf(__VA_ARGS__)
#else
#define LOG_PRINTF(...) (void)0
#endif
/**
* @brief Function that translates Avro datatype to GDF dtype
**/
/**
 * @brief Maps an Avro schema entry's type to the corresponding GDF dtype.
 *
 * Bytes and strings both decode to GDF_STRING. An enum with a symbol table
 * decodes to strings; a symbol-less enum falls back to its integer index.
 * Any other kind is unsupported and yields GDF_invalid.
 **/
gdf_dtype to_dtype(const avro::schema_entry *col) {
  const auto kind = col->kind;
  if (kind == avro::type_boolean) return GDF_BOOL8;
  if (kind == avro::type_int) return GDF_INT32;
  if (kind == avro::type_long) return GDF_INT64;
  if (kind == avro::type_float) return GDF_FLOAT32;
  if (kind == avro::type_double) return GDF_FLOAT64;
  if (kind == avro::type_bytes || kind == avro::type_string) {
    return GDF_STRING;
  }
  if (kind == avro::type_enum) {
    return col->symbols.empty() ? GDF_INT32 : GDF_STRING;
  }
  return GDF_invalid;
}
/**
* @brief A helper wrapper for Avro file metadata. Provides some additional
* convenience methods for initializing and accessing the metadata and schema
**/
class avro_metadata : public avro::file_metadata {
public:
// Does not take ownership of `src`; the datasource must outlive this object
explicit avro_metadata(datasource *const src) : source(src) {}
/**
* @brief Initializes the parser and filters down to a subset of rows
*
* Parses the entire file's metadata from the datasource, then overwrites
* both arguments with the row selection actually resolved by the parser.
*
* @param[in,out] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
**/
void init_and_select_rows(int &row_start, int &row_count) {
const auto buffer = source->get_buffer(0, source->size());
avro::container pod(buffer->data(), buffer->size());
CUDF_EXPECTS(pod.parse(this, row_count, row_start),
"Cannot parse metadata");
print_metadata();
row_start = skip_rows;
row_count = num_rows;
}
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select; when empty, all
* columns with a supported dtype are selected
*
* @return List of (column index, column name) pairs
**/
auto select_columns(std::vector<std::string> use_names) {
std::vector<std::pair<int, std::string>> selection;
const auto num_avro_columns = static_cast<int>(columns.size());
if (!use_names.empty()) {
// Match each requested name against the dataset columns, resuming the
// scan from just past the previous match and wrapping around, so that
// requests listed out of dataset order still resolve
for (int i = 0, column_id = 0; i < (int)use_names.size(); i++) {
for (int j = 0; j < num_avro_columns; j++, column_id++) {
if (column_id >= num_avro_columns) {
column_id = 0;
}
if (columns[column_id].name == use_names[i] &&
GDF_invalid !=
to_dtype(&schema[columns[column_id].schema_data_idx])) {
selection.emplace_back(column_id, columns[column_id].name);
column_id++;
break;
}
}
}
} else {
// Iterate backwards as fastavro returns columns from last-to-first
for (int i = num_avro_columns - 1; i >= 0; --i) {
const auto dtype = to_dtype(&schema[columns[i].schema_data_idx]);
CUDF_EXPECTS(dtype != GDF_invalid, "Unsupported data type");
selection.emplace_back(i, columns[i].name);
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
return selection;
}
// Dumps the parsed metadata (codec, schema, blocks, user data) through
// LOG_PRINTF; compiled out unless the logging macro is enabled above
void print_metadata() const {
LOG_PRINTF("\n[+] Metadata:\n");
LOG_PRINTF(" size = %zd\n", metadata_size);
LOG_PRINTF(" codec = \"%s\"\n", codec.c_str());
LOG_PRINTF(" sync marker = 0x%016" PRIx64 "%016" PRIx64 "\n",
sync_marker[1], sync_marker[0]);
LOG_PRINTF(" schema (%zd entries):\n", schema.size());
for (size_t i = 0; i < schema.size(); ++i) {
LOG_PRINTF(
" [%zd] num_children=%d, parent_idx=%d, type=%d, name=\"%s\"\n", i,
schema[i].num_children, schema[i].parent_idx, schema[i].kind,
schema[i].name.c_str());
}
LOG_PRINTF(" datablocks (%zd entries):\n", block_list.size());
LOG_PRINTF(" num rows = %zd (max block size = %d, total_data_size = %zd)\n",
num_rows, max_block_size, total_data_size);
LOG_PRINTF(" num columns = %zd\n", columns.size());
LOG_PRINTF(" user data entries = %zd\n", user_data.size());
for (const auto &entry : user_data) {
LOG_PRINTF(" key: %s, value: %s\n", entry.first.c_str(),
entry.second.c_str());
}
}
private:
// Borrowed pointer to the backing datasource (not owned)
datasource *const source;
};
// Constructs the reader implementation: takes ownership of the datasource,
// records the requested column names, and eagerly parses the Avro file
// metadata (schema, blocks, codec) so later reads can use it.
reader::Impl::Impl(std::unique_ptr<datasource> source,
reader_options const &options)
: source_(std::move(source)), columns_(options.columns) {
// Open the source Avro dataset metadata
md_ = std::make_unique<avro_metadata>(source_.get());
}
// Reads the selected row/column subset of the Avro file and materializes it
// as a cudf table. num_rows == -1 selects all remaining rows; both arguments
// are refined in place by the metadata parser before use.
table reader::Impl::read(int skip_rows, int num_rows) {
// Select and read partial metadata / schema within the subset of rows
md_->init_and_select_rows(skip_rows, num_rows);
// Select only columns required by the options
selected_cols_ = md_->select_columns(columns_);
if (selected_cols_.empty()) {
return table();
}
// Initialize gdf_columns, but hold off on allocating storage space
LOG_PRINTF("[+] Selected columns: %zd\n", selected_cols_.size());
LOG_PRINTF("[+] Selected skip_rows: %d, num_rows: %d\n", skip_rows, num_rows);
std::vector<gdf_column_wrapper> columns;
for (const auto &col : selected_cols_) {
auto &col_schema = md_->schema[md_->columns[col.first].schema_data_idx];
columns.emplace_back(static_cast<gdf_size_type>(num_rows),
to_dtype(&col_schema),
gdf_dtype_extra_info{TIME_UNIT_NONE}, col.second);
LOG_PRINTF(" %2zd: name=%s size=%zd type=%d data=%lx valid=%lx\n",
columns.size() - 1, columns.back()->col_name,
(size_t)columns.back()->size, columns.back()->dtype,
(uint64_t)columns.back()->data, (uint64_t)columns.back()->valid);
}
if (md_->total_data_size > 0) {
// Stage the raw (possibly compressed) data blocks in device memory
device_buffer<uint8_t> block_data(align_size(md_->total_data_size));
const auto buffer =
source_->get_buffer(md_->block_list[0].offset, md_->total_data_size);
CUDA_TRY(cudaMemcpyAsync(block_data.data(), buffer->data(), buffer->size(),
cudaMemcpyHostToDevice));
if (md_->codec != "" && md_->codec != "null") {
auto decomp_block_data = decompress_data(block_data);
block_data = std::move(decomp_block_data);
} else {
// Uncompressed: rebase block offsets so they index into block_data
auto dst_ofs = md_->block_list[0].offset;
for (size_t i = 0; i < md_->block_list.size(); i++) {
md_->block_list[i].offset -= dst_ofs;
}
}
size_t total_dictionary_entries = 0;
size_t dictionary_data_size = 0;
std::vector<std::pair<uint32_t, uint32_t>> dict(columns.size());
for (size_t i = 0; i < columns.size(); ++i) {
columns[i].allocate();
// Pre-set all valid bits for the selected rows to 1 (the decoder only
// clears bits for nulls); mask off trailing bits in the last partial
// byte and zero any remaining allocation padding
size_t valid_bytes = columns[i]->size >> 3;
size_t valid_size = gdf_valid_allocation_size(columns[i]->size);
uint8_t *valid = reinterpret_cast<uint8_t *>(columns[i]->valid);
CUDA_TRY(cudaMemsetAsync(valid, -1, valid_bytes));
if (columns[i]->size & 7) {
CUDA_TRY(cudaMemsetAsync(valid + valid_bytes, (1 << (columns[i]->size & 7)) - 1, 1));
valid_bytes++;
}
if (valid_bytes < valid_size) {
CUDA_TRY(cudaMemsetAsync(valid + valid_bytes, 0, valid_size - valid_bytes));
}
// Record each column's slice of the global enum symbol dictionary:
// (first entry index, number of symbols)
auto col_idx = selected_cols_[i].first;
auto &col_schema = md_->schema[md_->columns[col_idx].schema_data_idx];
dict[i].first = static_cast<uint32_t>(total_dictionary_entries);
dict[i].second = static_cast<uint32_t>(col_schema.symbols.size());
total_dictionary_entries += dict[i].second;
for (const auto &sym : col_schema.symbols) {
dictionary_data_size += sym.length();
}
}
// Layout: an nvstrdesc_s index table followed by the packed symbol bytes
hostdevice_vector<uint8_t> global_dictionary(total_dictionary_entries * sizeof(gpu::nvstrdesc_s) + dictionary_data_size);
if (total_dictionary_entries > 0) {
size_t dict_pos = total_dictionary_entries * sizeof(gpu::nvstrdesc_s);
for (size_t i = 0; i < columns.size(); ++i) {
auto col_idx = selected_cols_[i].first;
auto &col_schema = md_->schema[md_->columns[col_idx].schema_data_idx];
auto index = &(reinterpret_cast<gpu::nvstrdesc_s *>(global_dictionary.host_ptr()))[dict[i].first];
for (size_t j = 0; j < dict[i].second; j++) {
size_t len = col_schema.symbols[j].length();
// Descriptors must point at the *device* copy of the symbol bytes
char *ptr = reinterpret_cast<char *>(global_dictionary.device_ptr() +
dict_pos);
index[j].ptr = ptr;
index[j].count = len;
memcpy(global_dictionary.host_ptr() + dict_pos,
col_schema.symbols[j].c_str(), len);
dict_pos += len;
}
}
CUDA_TRY(cudaMemcpyAsync(
global_dictionary.device_ptr(), global_dictionary.host_ptr(),
global_dictionary.memory_size(), cudaMemcpyHostToDevice, 0));
}
// Write out columns
decode_data(block_data, dict, global_dictionary, total_dictionary_entries,
columns);
// Perform any final column preparation (may reference decoded data)
for (auto &column : columns) {
column.finalize();
}
} else {
// No data blocks at all: produce empty columns
for (auto &column : columns) {
column.allocate();
column.finalize();
}
}
// Transfer ownership to raw pointer output arguments
std::vector<gdf_column *> out_cols(columns.size());
for (size_t i = 0; i < columns.size(); ++i) {
out_cols[i] = columns[i].release();
}
return cudf::table(out_cols.data(), out_cols.size());
}
// Decompresses the dataset's data blocks on the GPU ("deflate" or "snappy").
// Rewrites the metadata block offsets/sizes to describe the uncompressed
// layout and returns the uncompressed device buffer. For deflate, the output
// size is a guess, so decompression may run a second pass with a larger
// buffer if the first pass reports truncation.
device_buffer<uint8_t> reader::Impl::decompress_data(
const device_buffer<uint8_t> &comp_block_data) {
size_t uncompressed_data_size = 0;
hostdevice_vector<gpu_inflate_input_s> inflate_in(md_->block_list.size());
hostdevice_vector<gpu_inflate_status_s> inflate_out(md_->block_list.size());
if (md_->codec == "deflate") {
// Guess an initial maximum uncompressed block size
uint32_t initial_blk_len = (md_->max_block_size * 2 + 0xfff) & ~0xfff;
uncompressed_data_size = initial_blk_len * md_->block_list.size();
for (size_t i = 0; i < inflate_in.size(); ++i) {
inflate_in[i].dstSize = initial_blk_len;
}
} else if (md_->codec == "snappy") {
// Extract the uncompressed length from the snappy stream
// (little-endian varint in the first 1-4 bytes of each block)
for (size_t i = 0; i < md_->block_list.size(); i++) {
const auto buffer = source_->get_buffer(md_->block_list[i].offset, 4);
const uint8_t *blk = buffer->data();
uint32_t blk_len = blk[0];
if (blk_len > 0x7f) {
blk_len = (blk_len & 0x7f) | (blk[1] << 7);
if (blk_len > 0x3fff) {
blk_len = (blk_len & 0x3fff) | (blk[2] << 14);
if (blk_len > 0x1fffff) {
blk_len = (blk_len & 0x1fffff) | (blk[3] << 21);
}
}
}
inflate_in[i].dstSize = blk_len;
uncompressed_data_size += blk_len;
}
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
device_buffer<uint8_t> decomp_block_data(uncompressed_data_size);
// Build the per-block inflate job descriptors
const auto base_offset = md_->block_list[0].offset;
for (size_t i = 0, dst_pos = 0; i < md_->block_list.size(); i++) {
const auto src_pos = md_->block_list[i].offset - base_offset;
inflate_in[i].srcDevice = comp_block_data.data() + src_pos;
inflate_in[i].srcSize = md_->block_list[i].size;
inflate_in[i].dstDevice = decomp_block_data.data() + dst_pos;
// Update blocks offsets & sizes to refer to uncompressed data
md_->block_list[i].offset = dst_pos;
md_->block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += md_->block_list[i].size;
}
// Up to two passes; the second pass only runs for deflate when at least
// one output buffer turned out to be too small
for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) {
CUDA_TRY(cudaMemcpyAsync(inflate_in.device_ptr(), inflate_in.host_ptr(),
inflate_in.memory_size(), cudaMemcpyHostToDevice,
0));
CUDA_TRY(cudaMemsetAsync(inflate_out.device_ptr(), 0,
inflate_out.memory_size(), 0));
if (md_->codec == "deflate") {
CUDA_TRY(gpuinflate(inflate_in.device_ptr(), inflate_out.device_ptr(),
inflate_in.size(), 0, 0));
} else if (md_->codec == "snappy") {
CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(), inflate_out.device_ptr(),
inflate_in.size(), 0));
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
CUDA_TRY(cudaMemcpyAsync(inflate_out.host_ptr(), inflate_out.device_ptr(),
inflate_out.memory_size(), cudaMemcpyDeviceToHost,
0));
CUDA_TRY(cudaStreamSynchronize(0));
// Check if larger output is required, as it's not known ahead of time
if (md_->codec == "deflate" && !loop_cnt) {
size_t actual_uncompressed_size = 0;
for (size_t i = 0; i < md_->block_list.size(); i++) {
// If error status is 1 (buffer too small), the `bytes_written` field
// actually contains the uncompressed data size
if (inflate_out[i].status == 1 &&
inflate_out[i].bytes_written > inflate_in[i].dstSize) {
inflate_in[i].dstSize = inflate_out[i].bytes_written;
}
actual_uncompressed_size += inflate_in[i].dstSize;
}
if (actual_uncompressed_size > uncompressed_data_size) {
// Grow the output buffer and recompute the destination layout
decomp_block_data.resize(actual_uncompressed_size);
for (size_t i = 0, dst_pos = 0; i < md_->block_list.size(); i++) {
inflate_in[i].dstDevice = decomp_block_data.data() + dst_pos;
md_->block_list[i].offset = dst_pos;
md_->block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += md_->block_list[i].size;
}
} else {
break;
}
} else {
break;
}
}
return decomp_block_data;
}
void reader::Impl::decode_data(
const device_buffer<uint8_t> &block_data,
const std::vector<std::pair<uint32_t, uint32_t>> &dict,
const hostdevice_vector<uint8_t> &global_dictionary,
size_t total_dictionary_entries,
const std::vector<gdf_column_wrapper> &columns) {
// Build gpu schema
hostdevice_vector<gpu::schemadesc_s> schema_desc(md_->schema.size());
uint32_t min_row_data_size = 0;
int skip_field_cnt = 0;
for (size_t i = 0; i < md_->schema.size(); i++) {
type_kind_e kind = md_->schema[i].kind;
if (skip_field_cnt != 0) {
// Exclude union members from min_row_data_size
skip_field_cnt += md_->schema[i].num_children - 1;
} else {
switch (kind) {
case type_union:
skip_field_cnt = md_->schema[i].num_children;
// fall through
case type_boolean:
case type_int:
case type_long:
case type_bytes:
case type_string:
case type_enum:
min_row_data_size += 1;
break;
case type_float:
min_row_data_size += 4;
break;
case type_double:
min_row_data_size += 8;
break;
default:
break;
}
}
if (kind == type_enum && !md_->schema[i].symbols.size()) {
kind = type_int;
}
schema_desc[i].kind = kind;
schema_desc[i].count = (kind == type_enum) ? 0 : (uint32_t)md_->schema[i].num_children;
schema_desc[i].dataptr = nullptr;
CUDF_EXPECTS(kind != type_union || md_->schema[i].num_children < 2 ||
(md_->schema[i].num_children == 2 &&
(md_->schema[i + 1].kind == type_null ||
md_->schema[i + 2].kind == type_null)),
"Union with non-null type not currently supported");
}
std::vector<void*> valid_alias(columns.size(), nullptr);
for (size_t i = 0; i < columns.size(); i++) {
auto col_idx = selected_cols_[i].first;
int schema_data_idx = md_->columns[col_idx].schema_data_idx;
int schema_null_idx = md_->columns[col_idx].schema_null_idx;
schema_desc[schema_data_idx].dataptr = columns[i]->data;
if (schema_null_idx >= 0) {
if (!schema_desc[schema_null_idx].dataptr) {
schema_desc[schema_null_idx].dataptr = columns[i]->valid;
} else {
valid_alias[i] = schema_desc[schema_null_idx].dataptr;
}
}
if (md_->schema[schema_data_idx].kind == type_enum) {
schema_desc[schema_data_idx].count = dict[i].first;
}
}
device_buffer<block_desc_s> block_list(md_->block_list.size());
CUDA_TRY(cudaMemcpyAsync(schema_desc.device_ptr(), schema_desc.host_ptr(),
schema_desc.memory_size(), cudaMemcpyHostToDevice, 0));
CUDA_TRY(cudaMemcpyAsync(block_list.data(), md_->block_list.data(),
md_->block_list.size() * sizeof(block_desc_s),
cudaMemcpyHostToDevice, 0));
CUDA_TRY(DecodeAvroColumnData(
block_list.data(), schema_desc.device_ptr(),
reinterpret_cast<gpu::nvstrdesc_s *>(global_dictionary.device_ptr()),
block_data.data(), static_cast<uint32_t>(block_list.size()),
static_cast<uint32_t>(schema_desc.size()),
static_cast<uint32_t>(total_dictionary_entries), md_->num_rows,
md_->skip_rows, min_row_data_size, 0));
// Copy valid bits that are shared between columns
for (size_t i = 0; i < columns.size(); i++) {
if (valid_alias[i] != nullptr) {
CUDA_TRY(cudaMemcpyAsync(columns[i]->valid, valid_alias[i],
gdf_valid_allocation_size(columns[i]->size),
cudaMemcpyHostToDevice, 0));
}
}
CUDA_TRY(cudaMemcpyAsync(schema_desc.host_ptr(), schema_desc.device_ptr(),
schema_desc.memory_size(), cudaMemcpyDeviceToHost,
0));
CUDA_TRY(cudaStreamSynchronize(0));
for (size_t i = 0; i < columns.size(); i++) {
const auto col_idx = selected_cols_[i].first;
const auto schema_null_idx = md_->columns[col_idx].schema_null_idx;
columns[i]->null_count =
(schema_null_idx >= 0) ? schema_desc[schema_null_idx].count : 0;
}
}
// Public reader constructors: create a datasource from a file path, a host
// memory buffer, or an Arrow random-access file, and forward to Impl.
reader::reader(std::string filepath, reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(filepath), options)) {}
reader::reader(const char *buffer, size_t length, reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(buffer, length),
options)) {}
reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file,
reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(file), options)) {}
// Reads the entire dataset (num_rows == -1 selects all rows)
table reader::read_all() { return impl_->read(0, -1); }
// Reads a row range; num_rows == 0 is treated as "all remaining rows"
table reader::read_rows(size_t skip_rows, size_t num_rows) {
return impl_->read(skip_rows,
(num_rows != 0) ? static_cast<int>(num_rows) : -1);
}
reader::~reader() = default;
} // namespace avro
} // namespace io
} // namespace cudf
|
b25171884cdc0dcf019e820a674d7c60548541d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
// Convert one RGBA pixel to greyscale.
// Expects a 2D launch covering at least numCols x numRows threads
// (grid may be rounded up to whole blocks; excess threads exit early).
// uchar4 channel mapping: .x -> R ; .y -> G ; .z -> B ; .w -> A
// (alpha is ignored). Uses the NTSC luma weighting:
//   I = .299f * R + .587f * G + .114f * B
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  // Map the 2D block/grid coordinates to an absolute pixel position.
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Bounds guard: the grid is rounded up to a multiple of the block
  // size, so edge blocks contain threads outside the image. Without
  // this check those threads read and write out of bounds.
  if (col >= numCols || row >= numRows) return;
  // Flatten to a 1D offset in the row-major pixel array.
  int offset = row * numCols + col;
  uchar4 rgba = rgbaImage[offset];
  float grey = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
  greyImage[offset] = static_cast<unsigned char>(grey);
}
// Launch rgba_to_greyscale over the whole numRows x numCols image.
// h_rgbaImage is unused here (data is already on the device).
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  // 32x16 = 512 threads per block (a multiple of the 32-thread warp).
  const dim3 blockSize(32, 16, 1);
  // Ceiling division: enough blocks to cover the image without
  // unconditionally launching a full extra block when the dimension
  // divides evenly (the original `1 + n/b` always over-allocated).
  const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
                      (numRows + blockSize.y - 1) / blockSize.y,
                      1);
  hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
  // Block until the kernel finishes, then surface any launch/exec error.
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| b25171884cdc0dcf019e820a674d7c60548541d5.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
// Convert one RGBA pixel to greyscale.
// Expects a 2D launch covering at least numCols x numRows threads
// (grid may be rounded up to whole blocks; excess threads exit early).
// uchar4 channel mapping: .x -> R ; .y -> G ; .z -> B ; .w -> A
// (alpha is ignored). Uses the NTSC luma weighting:
//   I = .299f * R + .587f * G + .114f * B
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  // Map the 2D block/grid coordinates to an absolute pixel position.
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Bounds guard: the grid is rounded up to a multiple of the block
  // size, so edge blocks contain threads outside the image. Without
  // this check those threads read and write out of bounds.
  if (col >= numCols || row >= numRows) return;
  // Flatten to a 1D offset in the row-major pixel array.
  int offset = row * numCols + col;
  uchar4 rgba = rgbaImage[offset];
  float grey = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
  greyImage[offset] = static_cast<unsigned char>(grey);
}
// Launch rgba_to_greyscale over the whole numRows x numCols image.
// h_rgbaImage is unused here (data is already on the device).
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  // 32x16 = 512 threads per block (a multiple of the 32-thread warp).
  const dim3 blockSize(32, 16, 1);
  // Ceiling division: enough blocks to cover the image without
  // unconditionally launching a full extra block when the dimension
  // divides evenly (the original `1 + n/b` always over-allocated).
  const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
                      (numRows + blockSize.y - 1) / blockSize.y,
                      1);
  rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
  // Block until the kernel finishes, then surface any launch/exec error.
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
90828e0a63b7d7319d1629fac4a51019dfc90b33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/util/cuda_blas_interface.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/register/blob.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
namespace oneflow {
namespace {
// Map a CBLAS transpose flag onto the corresponding hipBLAS enum.
// Defaults to no-transpose: the original "do nothing" fall-through
// returned an uninitialized value (undefined behavior) for any
// unexpected enum value.
hipblasOperation_t CblasTrans2CublasTrans(CBLAS_TRANSPOSE trans) {
  hipblasOperation_t cublas_trans = hipblasOperation_t::HIPBLAS_OP_N;
  if (trans == CBLAS_TRANSPOSE::CblasTrans) {
    cublas_trans = hipblasOperation_t::HIPBLAS_OP_T;
  } else if (trans == CBLAS_TRANSPOSE::CblasConjTrans) {
    cublas_trans = hipblasOperation_t::HIPBLAS_OP_C;
  }
  return cublas_trans;
}
// Derive leading dimensions and hipBLAS transpose flags for a row-major
// (m x k) * (k x n) GEMM. The result feeds a column-major BLAS call
// made with the operands swapped (see Gemm below).
std::tuple<int, int, int, hipblasOperation_t, hipblasOperation_t> PrepareToCallCublasGemm(
enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k) {
// Leading dimension of each matrix as stored row-major.
int lda = (trans_a == CblasNoTrans) ? k : m;
int ldb = (trans_b == CblasNoTrans) ? n : k;
int ldc = n;
hipblasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a);
hipblasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b);
return std::make_tuple(lda, ldb, ldc, cublas_trans_a, cublas_trans_b);
}
// Generic GEMM: c = alpha * op(a) * op(b) + beta * c in the row-major
// view. `order` is unused; callers in this file always pass
// CblasRowMajor. Half precision goes through the tensor-op math
// handle, everything else through the default handle. Note b/a and n/m
// are swapped in the call: this expresses the row-major product
// through the column-major BLAS.
template<typename T>
void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const T* alpha,
const T* a, const T* b, const T* beta, T* c) {
int lda, ldb, ldc;
hipblasOperation_t cublas_trans_a, cublas_trans_b;
std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) =
PrepareToCallCublasGemm(trans_a, trans_b, m, n, k);
hipblasHandle_t handle;
if (std::is_same<T, half>::value) {
handle = ctx->cublas_tensor_op_math_handle();
} else {
handle = ctx->cublas_pmh_handle();
}
cublas_gemm<T>(handle, cublas_trans_b, cublas_trans_a, n, m, k, alpha, b, ldb, a, lda, beta, c,
ldc);
}
// Half-precision GEMM with float alpha/beta scalars, dispatched through
// cublasSgemmEx on the tensor-op math handle. Operand order is swapped
// for the column-major BLAS, as in Gemm above.
void HGemmWithFloat(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k,
const float* alpha, const half* a, const half* b, const float* beta, half* c) {
int lda, ldb, ldc;
hipblasOperation_t cublas_trans_a, cublas_trans_b;
std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) =
PrepareToCallCublasGemm(trans_a, trans_b, m, n, k);
hipDataType data_type = GetCudaDataType(DataType::kFloat16);
OF_CUBLAS_CHECK(cublasSgemmEx(ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a,
n, m, k, alpha, b, data_type, ldb, a, data_type, lda, beta, c,
data_type, ldc));
}
// Derive (m, n, k) for a blob-level GEMM: m = first dim of the output,
// n = remaining elements per output row, k = the contracted axis of
// `a` (which axis depends on trans_a).
std::tuple<int, int, int> CalcMNKForGemm(enum CBLAS_TRANSPOSE trans_a, const Blob* a,
const Blob* c) {
const auto& a_shape = a->shape_view();
const auto& c_shape = c->shape_view();
int m = c_shape.At(0);
int n = c_shape.Count(1);
int k = (trans_a == CblasNoTrans) ? a_shape.Count(1) : a_shape.At(0);
return std::make_tuple(m, n, k);
}
// Blob-level GEMM wrapper: computes the sizes from the blob shapes and
// forwards to the raw-pointer OFGemm overload for T.
template<typename T>
void BlobGemmImpl(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b,
T alpha, T beta, const Blob* a, const Blob* b, Blob* c) {
int m, n, k;
std::tie(m, n, k) = CalcMNKForGemm(trans_a, a, c);
BlasIf<DeviceType::kGPU>::OFGemm(ctx, trans_a, trans_b, m, n, k, alpha, a->dptr<T>(),
b->dptr<T>(), beta, c->mut_dptr<T>());
}
// Kernel: dev_ptrs[i] = start_ptr + i * stride_len for i in
// [0, stride_num) — builds the device-side pointer array consumed by
// the batched GEMM APIs.
template<typename T>
__global__ void AssignStridedAddrGpu(T** dev_ptrs, T* start_ptr, int32_t stride_len,
int32_t stride_num) {
CUDA_1D_KERNEL_LOOP(i, stride_num) { dev_ptrs[i] = start_ptr + i * stride_len; }
}
// Host launcher for AssignStridedAddrGpu on the context's stream
// (asynchronous with respect to the host).
template<typename T>
void AssignStridedAddr(DeviceCtx* ctx, T** dev_ptrs, T* start_ptr, int stride_len, int stride_num) {
hipLaunchKernelGGL(( AssignStridedAddrGpu<T>)
, dim3(BlocksNum4ThreadsNum(stride_num)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
dev_ptrs, start_ptr, stride_len, stride_num);
}
// Compute per-batch strides, leading dimensions, and transpose flags
// for a strided batched GEMM, and carve the caller-provided `buf`
// (which must hold 3 * batch_size pointers in device memory) into the
// a/b/c pointer arrays, filled with per-batch base addresses.
template<typename T>
std::tuple<int, int, int, int, int, int, hipblasOperation_t, hipblasOperation_t, T**, T**, T**>
PrepareToCallBatchedGemm(DeviceCtx* ctx, const enum CBLAS_TRANSPOSE trans_a,
const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n, int k,
const T* a, const T* b, T* c, T** buf) {
const int a_stride = m * k;
const int b_stride = k * n;
const int c_stride = m * n;
const int lda = (trans_a == CblasNoTrans) ? k : m;
const int ldb = (trans_b == CblasNoTrans) ? n : k;
const int ldc = n;
hipblasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a);
hipblasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b);
T** dev_a_ptrs = buf;
T** dev_b_ptrs = buf + batch_size;
T** dev_c_ptrs = buf + 2 * batch_size;
AssignStridedAddr<T>(ctx, dev_a_ptrs, const_cast<T*>(a), a_stride, batch_size);
AssignStridedAddr<T>(ctx, dev_b_ptrs, const_cast<T*>(b), b_stride, batch_size);
AssignStridedAddr<T>(ctx, dev_c_ptrs, c, c_stride, batch_size);
return std::make_tuple(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a,
cublas_trans_b, dev_a_ptrs, dev_b_ptrs, dev_c_ptrs);
}
// Map T to the runtime data-type enum used by the GemmBatchedEx API;
// specialized below for half, which has no CudaDataType trait here.
template<typename T>
hipDataType GetCudaDataType4BatchedGemm() {
return CudaDataType<T>::value;
}
template<>
hipDataType GetCudaDataType4BatchedGemm<half>() {
return HIP_R_16F;
}
// Batched GEMM: for each batch index i computes
// c_i = alpha * op(a_i) * op(b_i) + beta * c_i (row-major view; the
// b/a swap expresses it through the column-major BLAS). `buf` must
// hold 3 * batch_size device pointers and is filled with per-batch
// base addresses before the BLAS call.
template<typename T>
void BatchedGemmImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order,
                     const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b,
                     int batch_size, int m, int n, int k, const T* alpha, const T* a, const T* b,
                     const T* beta, T* c, T** buf) {
  int a_stride, b_stride, c_stride;
  int lda, ldb, ldc;
  hipblasOperation_t cublas_trans_a, cublas_trans_b;
  T** dev_a_ptrs;
  T** dev_b_ptrs;
  T** dev_c_ptrs;
  std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b, dev_a_ptrs,
           dev_b_ptrs, dev_c_ptrs) =
      PrepareToCallBatchedGemm<T>(ctx, trans_a, trans_b, batch_size, m, n, k, a, b, c, buf);
#if TORCH_HIP_VERSION >= 9010
  hipDataType data_type = GetCudaDataType4BatchedGemm<T>();
  // Check the BLAS status as the other calls in this file do; the
  // return value was previously ignored, silently hiding failures.
  OF_CUBLAS_CHECK(hipblasGemmBatchedEx(
      ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k,
      reinterpret_cast<const void*>(alpha),
      reinterpret_cast<const void**>(const_cast<const T**>(dev_b_ptrs)), data_type, ldb,
      reinterpret_cast<const void**>(const_cast<const T**>(dev_a_ptrs)), data_type, lda,
      reinterpret_cast<const void*>(beta), reinterpret_cast<void**>(dev_c_ptrs), data_type, ldc,
      batch_size, data_type, HIPBLAS_GEMM_DEFAULT));
#else
  cublas_gemmBatched<T>(ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k, alpha,
                        const_cast<const T**>(dev_b_ptrs), ldb, const_cast<const T**>(dev_a_ptrs),
                        lda, beta, dev_c_ptrs, ldc, batch_size);
#endif
}
// Batched half-precision GEMM with float alpha/beta scalars: a single
// GemmBatchedEx call with HIP_R_16F operands and HIP_R_32F compute
// type. `buf` must hold 3 * batch_size device pointers. Requires the
// Ex batched API (version-guarded below).
void BatchedHGemmWithFloatImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order,
                               const enum CBLAS_TRANSPOSE trans_a,
                               const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n,
                               int k, const float* alpha, const half* a, const half* b,
                               const float* beta, half* c, half** buf) {
  int a_stride, b_stride, c_stride;
  int lda, ldb, ldc;
  hipblasOperation_t cublas_trans_a, cublas_trans_b;
  half** dev_a_ptrs;
  half** dev_b_ptrs;
  half** dev_c_ptrs;
  std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b, dev_a_ptrs,
           dev_b_ptrs, dev_c_ptrs) =
      PrepareToCallBatchedGemm<half>(ctx, trans_a, trans_b, batch_size, m, n, k, a, b, c, buf);
#if TORCH_HIP_VERSION >= 9010
  // Check the BLAS status as the other calls in this file do; the
  // return value was previously ignored, silently hiding failures.
  OF_CUBLAS_CHECK(hipblasGemmBatchedEx(
      ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k,
      reinterpret_cast<const void*>(alpha),
      reinterpret_cast<const void**>(const_cast<const half**>(dev_b_ptrs)), HIP_R_16F, ldb,
      reinterpret_cast<const void**>(const_cast<const half**>(dev_a_ptrs)), HIP_R_16F, lda,
      reinterpret_cast<const void*>(beta), reinterpret_cast<void**>(dev_c_ptrs), HIP_R_16F, ldc,
      batch_size, HIP_R_32F, HIPBLAS_GEMM_DEFAULT));
#else
  LOG(FATAL) << "BatchedHGemmWithFloatImpl() does not support TORCH_HIP_VERSION below 9010";
#endif
}
// Kernel: y[i*incy] += alpha * x[i*incx] for i in [0, n), in half
// precision via fused multiply-add. __hfma requires compute capability
// 5.3+; older architectures fail at compile time via HALF_CHECK_FAILED.
__global__ void AxpyHalfGpu(const int n, const half alpha, const half* x, const int incx, half* y,
const int incy) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) { y[i * incy] = __hfma(alpha, x[i * incx], y[i * incy]); }
#else
HALF_CHECK_FAILED;
#endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
}
} // namespace
// ---- Blob-level GEMM entry points (float / double / float16). ----
// Sizes (m, n, k) are derived from the blob shapes by BlobGemmImpl.
void BlasIf<DeviceType::kGPU>::BlobGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, float alpha, float beta,
const Blob* a, const Blob* b, Blob* c) {
BlobGemmImpl<float>(ctx, trans_a, trans_b, alpha, beta, a, b, c);
}
void BlasIf<DeviceType::kGPU>::BlobGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, double alpha, double beta,
const Blob* a, const Blob* b, Blob* c) {
BlobGemmImpl<double>(ctx, trans_a, trans_b, alpha, beta, a, b, c);
}
void BlasIf<DeviceType::kGPU>::BlobGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, float16 alpha, float16 beta,
const Blob* a, const Blob* b, Blob* c) {
BlobGemmImpl<float16>(ctx, trans_a, trans_b, alpha, beta, a, b, c);
}
// Blob-level half GEMM with float scalars.
void BlasIf<DeviceType::kGPU>::BlobHGemmWithFloat(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, float alpha,
float beta, const Blob* a, const Blob* b,
Blob* c) {
int m, n, k;
std::tie(m, n, k) = CalcMNKForGemm(trans_a, a, c)
BlasIf<DeviceType::kGPU>::OFHGemmWithFloat(ctx, trans_a, trans_b, m, n, k, alpha,
a->dptr<float16>(), b->dptr<float16>(), beta,
c->mut_dptr<float16>());
}
// ---- Raw-pointer GEMM entry points (row-major). ----
void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k, const float alpha, const float* a,
const float* b, const float beta, float* c) {
Gemm<float>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, &alpha, a, b, &beta, c);
}
void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k, const double alpha, const double* a,
const double* b, const double beta, double* c) {
Gemm<double>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, &alpha, a, b, &beta, c);
}
void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k, const float16 alpha, const float16* a,
const float16* b, const float16 beta, float16* c) {
Gemm<half>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, reinterpret_cast<const half*>(&alpha),
reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b),
reinterpret_cast<const half*>(&beta), reinterpret_cast<half*>(c));
}
void BlasIf<DeviceType::kGPU>::OFHGemmWithFloat(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m,
const int n, const int k, const float alpha,
const float16* a, const float16* b,
const float beta, float16* c) {
HGemmWithFloat(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, &alpha,
reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b), &beta,
reinterpret_cast<half*>(c));
}
// ---- Strided batched GEMM entry points. ----
// `buf` supplies device space for 3 * batch_size pointers.
void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int batch_size,
const int m, const int n, const int k,
const float alpha, const float* a, const float* b,
const float beta, float* c, float** buf) {
BatchedGemmImpl<float>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, &alpha, a, b,
&beta, c, buf);
}
void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int batch_size,
const int m, const int n, const int k,
const double alpha, const double* a, const double* b,
const double beta, double* c, double** buf) {
BatchedGemmImpl<double>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, &alpha, a, b,
&beta, c, buf);
}
void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int batch_size,
const int m, const int n, const int k,
const float16 alpha, const float16* a,
const float16* b, const float16 beta, float16* c,
float16** buf) {
BatchedGemmImpl<half>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k,
reinterpret_cast<const half*>(&alpha), reinterpret_cast<const half*>(a),
reinterpret_cast<const half*>(b), reinterpret_cast<const half*>(&beta),
reinterpret_cast<half*>(c), reinterpret_cast<half**>(buf));
}
void BlasIf<DeviceType::kGPU>::OFBatchedHGemmWithFloat(
DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b,
const int batch_size, const int m, const int n, const int k, const float alpha,
const float16* a, const float16* b, const float beta, float16* c, float16** buf) {
BatchedHGemmWithFloatImpl(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, &alpha,
reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b),
&beta, reinterpret_cast<half*>(c), reinterpret_cast<half**>(buf));
}
// ---- AXPY entry points: y = alpha * x + y with strides incx/incy. ----
void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const float alpha, const float* x,
const int incx, float* y, const int incy) {
cublas_axpy<float>(ctx->cublas_pmh_handle(), n, &alpha, x, incx, y, incy);
}
void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const double alpha,
const double* x, const int incx, double* y, const int incy) {
cublas_axpy<double>(ctx->cublas_pmh_handle(), n, &alpha, x, incx, y, incy);
}
// float16 has no BLAS axpy here, so a custom kernel is launched.
void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const float16 alpha,
const float16* x, const int incx, float16* y, const int incy) {
hipLaunchKernelGGL(( AxpyHalfGpu), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, float16_2half(alpha), reinterpret_cast<const half*>(x), incx, reinterpret_cast<half*>(y),
incy);
}
} // namespace oneflow
| 90828e0a63b7d7319d1629fac4a51019dfc90b33.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/util/cuda_blas_interface.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/register/blob.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
namespace oneflow {
namespace {
// Map a CBLAS transpose flag onto the corresponding cuBLAS enum.
// Defaults to no-transpose: the original "do nothing" fall-through
// returned an uninitialized value (undefined behavior) for any
// unexpected enum value.
cublasOperation_t CblasTrans2CublasTrans(CBLAS_TRANSPOSE trans) {
  cublasOperation_t cublas_trans = cublasOperation_t::CUBLAS_OP_N;
  if (trans == CBLAS_TRANSPOSE::CblasTrans) {
    cublas_trans = cublasOperation_t::CUBLAS_OP_T;
  } else if (trans == CBLAS_TRANSPOSE::CblasConjTrans) {
    cublas_trans = cublasOperation_t::CUBLAS_OP_C;
  }
  return cublas_trans;
}
// Derive leading dimensions and cuBLAS transpose flags for a row-major
// (m x k) * (k x n) GEMM. The result feeds a column-major BLAS call
// made with the operands swapped (see Gemm below).
std::tuple<int, int, int, cublasOperation_t, cublasOperation_t> PrepareToCallCublasGemm(
enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k) {
// Leading dimension of each matrix as stored row-major.
int lda = (trans_a == CblasNoTrans) ? k : m;
int ldb = (trans_b == CblasNoTrans) ? n : k;
int ldc = n;
cublasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a);
cublasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b);
return std::make_tuple(lda, ldb, ldc, cublas_trans_a, cublas_trans_b);
}
// Generic GEMM: c = alpha * op(a) * op(b) + beta * c in the row-major
// view. `order` is unused; callers in this file always pass
// CblasRowMajor. Half precision goes through the tensor-op math
// handle, everything else through the default handle. Note b/a and n/m
// are swapped in the call: this expresses the row-major product
// through column-major cuBLAS.
template<typename T>
void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const T* alpha,
const T* a, const T* b, const T* beta, T* c) {
int lda, ldb, ldc;
cublasOperation_t cublas_trans_a, cublas_trans_b;
std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) =
PrepareToCallCublasGemm(trans_a, trans_b, m, n, k);
cublasHandle_t handle;
if (std::is_same<T, half>::value) {
handle = ctx->cublas_tensor_op_math_handle();
} else {
handle = ctx->cublas_pmh_handle();
}
cublas_gemm<T>(handle, cublas_trans_b, cublas_trans_a, n, m, k, alpha, b, ldb, a, lda, beta, c,
ldc);
}
// Half-precision GEMM with float alpha/beta scalars, dispatched through
// cublasSgemmEx on the tensor-op math handle. Operand order is swapped
// for column-major cuBLAS, as in Gemm above.
void HGemmWithFloat(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k,
const float* alpha, const half* a, const half* b, const float* beta, half* c) {
int lda, ldb, ldc;
cublasOperation_t cublas_trans_a, cublas_trans_b;
std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) =
PrepareToCallCublasGemm(trans_a, trans_b, m, n, k);
cudaDataType_t data_type = GetCudaDataType(DataType::kFloat16);
OF_CUBLAS_CHECK(cublasSgemmEx(ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a,
n, m, k, alpha, b, data_type, ldb, a, data_type, lda, beta, c,
data_type, ldc));
}
// Derive (m, n, k) for a blob-level GEMM: m = first dim of the output,
// n = remaining elements per output row, k = the contracted axis of
// `a` (which axis depends on trans_a).
std::tuple<int, int, int> CalcMNKForGemm(enum CBLAS_TRANSPOSE trans_a, const Blob* a,
const Blob* c) {
const auto& a_shape = a->shape_view();
const auto& c_shape = c->shape_view();
int m = c_shape.At(0);
int n = c_shape.Count(1);
int k = (trans_a == CblasNoTrans) ? a_shape.Count(1) : a_shape.At(0);
return std::make_tuple(m, n, k);
}
// Blob-level GEMM wrapper: computes the sizes from the blob shapes and
// forwards to the raw-pointer OFGemm overload for T.
template<typename T>
void BlobGemmImpl(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b,
T alpha, T beta, const Blob* a, const Blob* b, Blob* c) {
int m, n, k;
std::tie(m, n, k) = CalcMNKForGemm(trans_a, a, c);
BlasIf<DeviceType::kGPU>::OFGemm(ctx, trans_a, trans_b, m, n, k, alpha, a->dptr<T>(),
b->dptr<T>(), beta, c->mut_dptr<T>());
}
// Kernel: dev_ptrs[i] = start_ptr + i * stride_len for i in
// [0, stride_num) — builds the device-side pointer array consumed by
// the batched GEMM APIs.
template<typename T>
__global__ void AssignStridedAddrGpu(T** dev_ptrs, T* start_ptr, int32_t stride_len,
int32_t stride_num) {
CUDA_1D_KERNEL_LOOP(i, stride_num) { dev_ptrs[i] = start_ptr + i * stride_len; }
}
// Host launcher for AssignStridedAddrGpu on the context's stream
// (asynchronous with respect to the host).
template<typename T>
void AssignStridedAddr(DeviceCtx* ctx, T** dev_ptrs, T* start_ptr, int stride_len, int stride_num) {
AssignStridedAddrGpu<T>
<<<BlocksNum4ThreadsNum(stride_num), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
dev_ptrs, start_ptr, stride_len, stride_num);
}
// Compute per-batch strides, leading dimensions, and transpose flags
// for a strided batched GEMM, and carve the caller-provided `buf`
// (which must hold 3 * batch_size pointers in device memory) into the
// a/b/c pointer arrays, filled with per-batch base addresses.
template<typename T>
std::tuple<int, int, int, int, int, int, cublasOperation_t, cublasOperation_t, T**, T**, T**>
PrepareToCallBatchedGemm(DeviceCtx* ctx, const enum CBLAS_TRANSPOSE trans_a,
const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n, int k,
const T* a, const T* b, T* c, T** buf) {
const int a_stride = m * k;
const int b_stride = k * n;
const int c_stride = m * n;
const int lda = (trans_a == CblasNoTrans) ? k : m;
const int ldb = (trans_b == CblasNoTrans) ? n : k;
const int ldc = n;
cublasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a);
cublasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b);
T** dev_a_ptrs = buf;
T** dev_b_ptrs = buf + batch_size;
T** dev_c_ptrs = buf + 2 * batch_size;
AssignStridedAddr<T>(ctx, dev_a_ptrs, const_cast<T*>(a), a_stride, batch_size);
AssignStridedAddr<T>(ctx, dev_b_ptrs, const_cast<T*>(b), b_stride, batch_size);
AssignStridedAddr<T>(ctx, dev_c_ptrs, c, c_stride, batch_size);
return std::make_tuple(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a,
cublas_trans_b, dev_a_ptrs, dev_b_ptrs, dev_c_ptrs);
}
// Map T to the runtime data-type enum used by cublasGemmBatchedEx;
// specialized below for half, which has no CudaDataType trait here.
template<typename T>
cudaDataType_t GetCudaDataType4BatchedGemm() {
return CudaDataType<T>::value;
}
template<>
cudaDataType_t GetCudaDataType4BatchedGemm<half>() {
return CUDA_R_16F;
}
// Batched GEMM: for each batch index i computes
// c_i = alpha * op(a_i) * op(b_i) + beta * c_i (row-major view; the
// b/a swap expresses it through column-major cuBLAS). `buf` must hold
// 3 * batch_size device pointers and is filled with per-batch base
// addresses before the BLAS call.
template<typename T>
void BatchedGemmImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order,
                     const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b,
                     int batch_size, int m, int n, int k, const T* alpha, const T* a, const T* b,
                     const T* beta, T* c, T** buf) {
  int a_stride, b_stride, c_stride;
  int lda, ldb, ldc;
  cublasOperation_t cublas_trans_a, cublas_trans_b;
  T** dev_a_ptrs;
  T** dev_b_ptrs;
  T** dev_c_ptrs;
  std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b, dev_a_ptrs,
           dev_b_ptrs, dev_c_ptrs) =
      PrepareToCallBatchedGemm<T>(ctx, trans_a, trans_b, batch_size, m, n, k, a, b, c, buf);
#if CUDA_VERSION >= 9010
  cudaDataType_t data_type = GetCudaDataType4BatchedGemm<T>();
  // Check the cuBLAS status as the other calls in this file do; the
  // return value was previously ignored, silently hiding failures.
  OF_CUBLAS_CHECK(cublasGemmBatchedEx(
      ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k,
      reinterpret_cast<const void*>(alpha),
      reinterpret_cast<const void**>(const_cast<const T**>(dev_b_ptrs)), data_type, ldb,
      reinterpret_cast<const void**>(const_cast<const T**>(dev_a_ptrs)), data_type, lda,
      reinterpret_cast<const void*>(beta), reinterpret_cast<void**>(dev_c_ptrs), data_type, ldc,
      batch_size, data_type, CUBLAS_GEMM_DEFAULT));
#else
  cublas_gemmBatched<T>(ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k, alpha,
                        const_cast<const T**>(dev_b_ptrs), ldb, const_cast<const T**>(dev_a_ptrs),
                        lda, beta, dev_c_ptrs, ldc, batch_size);
#endif
}
// Batched half-precision GEMM with float alpha/beta scalars: a single
// cublasGemmBatchedEx call with CUDA_R_16F operands and CUDA_R_32F
// compute type. `buf` must hold 3 * batch_size device pointers.
// Requires CUDA >= 9.1 (guarded below).
void BatchedHGemmWithFloatImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order,
                               const enum CBLAS_TRANSPOSE trans_a,
                               const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n,
                               int k, const float* alpha, const half* a, const half* b,
                               const float* beta, half* c, half** buf) {
  int a_stride, b_stride, c_stride;
  int lda, ldb, ldc;
  cublasOperation_t cublas_trans_a, cublas_trans_b;
  half** dev_a_ptrs;
  half** dev_b_ptrs;
  half** dev_c_ptrs;
  std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b, dev_a_ptrs,
           dev_b_ptrs, dev_c_ptrs) =
      PrepareToCallBatchedGemm<half>(ctx, trans_a, trans_b, batch_size, m, n, k, a, b, c, buf);
#if CUDA_VERSION >= 9010
  // Check the cuBLAS status as the other calls in this file do; the
  // return value was previously ignored, silently hiding failures.
  OF_CUBLAS_CHECK(cublasGemmBatchedEx(
      ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k,
      reinterpret_cast<const void*>(alpha),
      reinterpret_cast<const void**>(const_cast<const half**>(dev_b_ptrs)), CUDA_R_16F, ldb,
      reinterpret_cast<const void**>(const_cast<const half**>(dev_a_ptrs)), CUDA_R_16F, lda,
      reinterpret_cast<const void*>(beta), reinterpret_cast<void**>(dev_c_ptrs), CUDA_R_16F, ldc,
      batch_size, CUDA_R_32F, CUBLAS_GEMM_DEFAULT));
#else
  LOG(FATAL) << "BatchedHGemmWithFloatImpl() does not support CUDA_VERSION below 9010";
#endif
}
// Kernel: y[i*incy] += alpha * x[i*incx] for i in [0, n), in half
// precision via fused multiply-add. __hfma requires compute capability
// 5.3+; older architectures fail at compile time via HALF_CHECK_FAILED.
__global__ void AxpyHalfGpu(const int n, const half alpha, const half* x, const int incx, half* y,
const int incy) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
CUDA_1D_KERNEL_LOOP(i, n) { y[i * incy] = __hfma(alpha, x[i * incx], y[i * incy]); }
#else
HALF_CHECK_FAILED;
#endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
}
} // namespace
void BlasIf<DeviceType::kGPU>::BlobGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, float alpha, float beta,
const Blob* a, const Blob* b, Blob* c) {
BlobGemmImpl<float>(ctx, trans_a, trans_b, alpha, beta, a, b, c);
}
void BlasIf<DeviceType::kGPU>::BlobGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, double alpha, double beta,
const Blob* a, const Blob* b, Blob* c) {
BlobGemmImpl<double>(ctx, trans_a, trans_b, alpha, beta, a, b, c);
}
void BlasIf<DeviceType::kGPU>::BlobGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, float16 alpha, float16 beta,
const Blob* a, const Blob* b, Blob* c) {
BlobGemmImpl<float16>(ctx, trans_a, trans_b, alpha, beta, a, b, c);
}
void BlasIf<DeviceType::kGPU>::BlobHGemmWithFloat(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, float alpha,
float beta, const Blob* a, const Blob* b,
Blob* c) {
int m, n, k;
std::tie(m, n, k) = CalcMNKForGemm(trans_a, a, c);
BlasIf<DeviceType::kGPU>::OFHGemmWithFloat(ctx, trans_a, trans_b, m, n, k, alpha,
a->dptr<float16>(), b->dptr<float16>(), beta,
c->mut_dptr<float16>());
}
void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k, const float alpha, const float* a,
const float* b, const float beta, float* c) {
Gemm<float>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, &alpha, a, b, &beta, c);
}
void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k, const double alpha, const double* a,
const double* b, const double beta, double* c) {
Gemm<double>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, &alpha, a, b, &beta, c);
}
void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m, const int n,
const int k, const float16 alpha, const float16* a,
const float16* b, const float16 beta, float16* c) {
Gemm<half>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, reinterpret_cast<const half*>(&alpha),
reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b),
reinterpret_cast<const half*>(&beta), reinterpret_cast<half*>(c));
}
void BlasIf<DeviceType::kGPU>::OFHGemmWithFloat(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int m,
const int n, const int k, const float alpha,
const float16* a, const float16* b,
const float beta, float16* c) {
HGemmWithFloat(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, &alpha,
reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b), &beta,
reinterpret_cast<half*>(c));
}
void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int batch_size,
const int m, const int n, const int k,
const float alpha, const float* a, const float* b,
const float beta, float* c, float** buf) {
BatchedGemmImpl<float>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, &alpha, a, b,
&beta, c, buf);
}
void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int batch_size,
const int m, const int n, const int k,
const double alpha, const double* a, const double* b,
const double beta, double* c, double** buf) {
BatchedGemmImpl<double>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, &alpha, a, b,
&beta, c, buf);
}
void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a,
enum CBLAS_TRANSPOSE trans_b, const int batch_size,
const int m, const int n, const int k,
const float16 alpha, const float16* a,
const float16* b, const float16 beta, float16* c,
float16** buf) {
BatchedGemmImpl<half>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k,
reinterpret_cast<const half*>(&alpha), reinterpret_cast<const half*>(a),
reinterpret_cast<const half*>(b), reinterpret_cast<const half*>(&beta),
reinterpret_cast<half*>(c), reinterpret_cast<half**>(buf));
}
void BlasIf<DeviceType::kGPU>::OFBatchedHGemmWithFloat(
DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b,
const int batch_size, const int m, const int n, const int k, const float alpha,
const float16* a, const float16* b, const float beta, float16* c, float16** buf) {
BatchedHGemmWithFloatImpl(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, &alpha,
reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b),
&beta, reinterpret_cast<half*>(c), reinterpret_cast<half**>(buf));
}
// Single-precision axpy: delegates to cuBLAS via the context's
// cublas_pmh_handle().
void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const float alpha, const float* x,
const int incx, float* y, const int incy) {
cublas_axpy<float>(ctx->cublas_pmh_handle(), n, &alpha, x, incx, y, incy);
}
// Double-precision axpy: delegates to cuBLAS via the context's
// cublas_pmh_handle().
void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const double alpha,
const double* x, const int incx, double* y, const int incy) {
cublas_axpy<double>(ctx->cublas_pmh_handle(), n, &alpha, x, incx, y, incy);
}
// Half-precision axpy: unlike the float/double overloads this launches a
// custom AxpyHalfGpu kernel on the context's CUDA stream rather than calling
// cuBLAS (kernel body not visible here; presumably y = alpha*x + y — the
// standard axpy contract — TODO confirm against AxpyHalfGpu's definition).
void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const float16 alpha,
const float16* x, const int incx, float16* y, const int incy) {
AxpyHalfGpu<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, float16_2half(alpha), reinterpret_cast<const half*>(x), incx, reinterpret_cast<half*>(y),
incy);
}
} // namespace oneflow
|
5ff2ecfb8383fd90a731386e1832f70ac7254069.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#define SRC_SIZE 65536
#define DST_SIZE 65536
#define CPY_SIZE 8192
// Round-trips a CPY_SIZE-int prefix of a zeroed host buffer through device
// memory and back. Fix over the original: every allocation and HIP call is
// now checked (hipSuccess == 0, so any nonzero return signals failure), and
// resources are released on each error path.
int main() {
    int *h_mem = (int*)malloc(SRC_SIZE*sizeof(int));
    if (h_mem == NULL) {
        fprintf(stderr, "host malloc of %zu bytes failed\n", SRC_SIZE*sizeof(int));
        return 1;
    }
    memset(h_mem, 0, SRC_SIZE*sizeof(int));
    int *d_mem = NULL;
    if (hipMalloc((void**)&d_mem, DST_SIZE*sizeof(int)) != 0) {
        fprintf(stderr, "hipMalloc of %zu bytes failed\n", DST_SIZE*sizeof(int));
        free(h_mem);
        return 1;
    }
    // Only the first CPY_SIZE ints (a prefix of both buffers) are copied.
    if (hipMemset(d_mem, 0, DST_SIZE*sizeof(int)) != 0 ||
        hipMemcpy(d_mem, h_mem, CPY_SIZE*sizeof(int), hipMemcpyHostToDevice) != 0 ||
        hipMemcpy(h_mem, d_mem, CPY_SIZE*sizeof(int), hipMemcpyDeviceToHost) != 0) {
        fprintf(stderr, "hipMemset/hipMemcpy failed\n");
        hipFree(d_mem);
        free(h_mem);
        return 1;
    }
    hipFree(d_mem);
    free(h_mem);
    hipDeviceReset();
    return 0;
}
| 5ff2ecfb8383fd90a731386e1832f70ac7254069.cu | #include <stdio.h>
#define SRC_SIZE 65536
#define DST_SIZE 65536
#define CPY_SIZE 8192
// Round-trips a CPY_SIZE-int prefix of a zeroed host buffer through device
// memory and back. Fix over the original: every allocation and CUDA call is
// now checked (cudaSuccess == 0, so any nonzero return signals failure), and
// resources are released on each error path.
int main() {
    int *h_mem = (int*)malloc(SRC_SIZE*sizeof(int));
    if (h_mem == NULL) {
        fprintf(stderr, "host malloc of %zu bytes failed\n", SRC_SIZE*sizeof(int));
        return 1;
    }
    memset(h_mem, 0, SRC_SIZE*sizeof(int));
    int *d_mem = NULL;
    if (cudaMalloc((void**)&d_mem, DST_SIZE*sizeof(int)) != 0) {
        fprintf(stderr, "cudaMalloc of %zu bytes failed\n", DST_SIZE*sizeof(int));
        free(h_mem);
        return 1;
    }
    // Only the first CPY_SIZE ints (a prefix of both buffers) are copied.
    if (cudaMemset(d_mem, 0, DST_SIZE*sizeof(int)) != 0 ||
        cudaMemcpy(d_mem, h_mem, CPY_SIZE*sizeof(int), cudaMemcpyHostToDevice) != 0 ||
        cudaMemcpy(h_mem, d_mem, CPY_SIZE*sizeof(int), cudaMemcpyDeviceToHost) != 0) {
        fprintf(stderr, "cudaMemset/cudaMemcpy failed\n");
        cudaFree(d_mem);
        free(h_mem);
        return 1;
    }
    cudaFree(d_mem);
    free(h_mem);
    cudaDeviceReset();
    return 0;
}
|
7baa197bd3104b92229046fc4b0ec2b6192ad94d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2018 tsooBGX contributors
*/
#include <tsoobgx/tree_updater.h>
#include <utility>
#include <vector>
#include <limits>
#include <string>
#include "../common/common.h"
#include "param.h"
#include "updater_gpu_common.cuh"
namespace tsoobgx {
namespace tree {
DMLC_REGISTRY_FILE_TAG(updater_gpu);
// Evaluates the loss change for a split when instances with missing values
// are sent left versus right, returns the better of the two losses, and
// reports the chosen direction through `missing_left_out`.
// Ties favour sending the missing values left.
template <typename GradientPairT>
TSOOBGX_DEVICE float inline LossChangeMissing(const GradientPairT& scan,
                                              const GradientPairT& missing,
                                              const GradientPairT& parent_sum,
                                              const float& parent_gain,
                                              const GPUTrainingParam& param,
                                              bool& missing_left_out) {  // NOLINT
  const float loss_left =
      DeviceCalcLossChange(param, scan + missing, parent_sum, parent_gain);
  const float loss_right =
      DeviceCalcLossChange(param, scan, parent_sum, parent_gain);
  missing_left_out = (loss_left >= loss_right);
  return missing_left_out ? loss_left : loss_right;
}
/**
* @brief Absolute BFS order IDs to col-wise unique IDs based on user input
* @param tid the index of the element that this thread should access
* @param abs the array of absolute IDs
* @param colIds the array of column IDs for each element
* @param nodeStart the start of the node ID at this level
* @param nKeys number of nodes at this level.
* @return the uniq key
*/
/**
 * @brief Maps the absolute BFS node id of element `tid` to a key unique per
 * (column, node) pair for the current tree level. The kUnusedNode sentinel
 * passes through unchanged.
 */
static HOST_DEV_INLINE NodeIdT Abs2UniqueKey(int tid,
                                             common::Span<const NodeIdT> abs,
                                             common::Span<const int> colIds,
                                             NodeIdT nodeStart, int nKeys) {
  const int node = abs[tid];
  if (node == kUnusedNode) {
    return node;
  }
  return (node - nodeStart) + colIds[tid] * nKeys;
}
/**
* @struct Pair
* @brief Pair used for key basd scan operations on GradientPair
*/
struct Pair {
// Segment key this scan element belongs to (kNoneKey for padding lanes).
int key;
// Partial gradient sum accumulated within the key's segment.
GradientPair value;
};
/** define a key that's not used at all in the entire boosting process */
static const int kNoneKey = -100;
/**
* @brief Allocate temporary buffers needed for scan operations
* @param tmpScans gradient buffer
* @param tmpKeys keys buffer
* @param size number of elements that will be scanned
*/
// Number of temporary scan/key slots needed by the block-pyramid scan:
// one per launched block of BLKDIM_L1L3 threads.
template <int BLKDIM_L1L3 = 256>
int ScanTempBufferSize(int size) {
  return dh::DivRoundUp(size, BLKDIM_L1L3);
}
// Combiner for key-segmented scans: values sharing a key are summed; a key
// change restarts the running total at the right-hand operand.
struct AddByKey {
  template <typename T>
  HOST_DEV_INLINE T operator()(const T& first, const T& second) const {
    T combined = second;
    if (first.key == second.key) {
      combined.value = first.value + second.value;
    }
    return combined;
  }
};
/**
* @brief Gradient value getter function
* @param id the index into the vals or instIds array to which to fetch
* @param vals the gradient value buffer
* @param instIds instance index buffer
* @return the expected gradient value
*/
/**
 * @brief Fetches the gradient for scan position `id` by first resolving its
 * instance index through `instIds`.
 */
HOST_DEV_INLINE GradientPair Get(int id,
                                 common::Span<const GradientPair> vals,
                                 common::Span<const int> instIds) {
  return vals[instIds[id]];
}
/**
 * @brief Level 1 of the block-pyramid key-segmented scan. Each block performs
 * an exclusive, key-segmented scan over its BLKDIM_L1L3 elements and emits
 * one per-block partial (the block's last key plus its trailing partial sum)
 * into mKeys/mScans for level 2 to chain across blocks.
 */
template <int BLKDIM_L1L3>
__global__ void CubScanByKeyL1(
common::Span<GradientPair> scans,
common::Span<const GradientPair> vals,
common::Span<const int> instIds,
common::Span<GradientPair> mScans,
common::Span<int> mKeys,
common::Span<const NodeIdT> keys,
int nUniqKeys,
common::Span<const int> colIds, NodeIdT nodeStart,
const int size) {
Pair rootPair = {kNoneKey, GradientPair(0.f, 0.f)};
int myKey;
GradientPair myValue;
using BlockScan = hipcub::BlockScan<Pair, BLKDIM_L1L3>;
__shared__ typename BlockScan::TempStorage temp_storage;
Pair threadData;
int tid = blockIdx.x * BLKDIM_L1L3 + threadIdx.x;
if (tid < size) {
myKey = Abs2UniqueKey(tid, keys, colIds, nodeStart, nUniqKeys);
myValue = Get(tid, vals, instIds);
} else {
// Out-of-range lanes feed a neutral (kNoneKey, zero) pair so the
// collective block scan below stays well-formed for a partial tail block.
myKey = kNoneKey;
myValue = {};
}
threadData.key = myKey;
threadData.value = myValue;
// get previous key, especially needed for the last thread in this block
// in order to pass on the partial scan values.
// this statement MUST appear before the checks below!
// else, the result of this shuffle operation will be undefined
#if (__CUDACC_VER_MAJOR__ >= 9)
int previousKey = __shfl_up_sync(0xFFFFFFFF, myKey, 1);
#else
int previousKey = __shfl_up(myKey, 1);
#endif
// Collectively compute the block-wide exclusive prefix sum
BlockScan(temp_storage)
.ExclusiveScan(threadData, threadData, rootPair, AddByKey());
if (tid < size) {
scans[tid] = threadData.value;
} else {
return;
}
// The last lane publishes this block's carry: its inclusive total
// (exclusive value + own value), zeroed first if its key differs from the
// previous lane's key (a segment boundary inside the block).
if (threadIdx.x == BLKDIM_L1L3 - 1) {
threadData.value =
(myKey == previousKey) ? threadData.value : GradientPair(0.0f, 0.0f);
mKeys[blockIdx.x] = myKey;
mScans[blockIdx.x] = threadData.value + myValue;
}
}
/**
 * @brief Level 2 of the block-pyramid scan: a single block turns the
 * per-block partials produced by level 1 into running totals (inclusive
 * key-segmented scan over mScans/mKeys, in place).
 *
 * Fix: the scan-algorithm tag now uses hipcub::BLOCK_SCAN_WARP_SCANS. The
 * original kept the bare cub:: qualifier (a hipify leftover in this HIP
 * file), which only resolves when hipcub aliases CUB, i.e. on NVIDIA builds.
 */
template <int BLKSIZE>
__global__ void CubScanByKeyL2(common::Span<GradientPair> mScans,
                               common::Span<int> mKeys, int mLength) {
  using BlockScan =
      hipcub::BlockScan<Pair, BLKSIZE, hipcub::BLOCK_SCAN_WARP_SCANS>;
  Pair threadData;
  __shared__ typename BlockScan::TempStorage temp_storage;
  // The stride is BLKSIZE - 1 (not BLKSIZE) on purpose: consecutive chunks
  // overlap by one element, so the inclusive result written by lane
  // BLKSIZE-1 is re-read by lane 0 of the next chunk, chaining the running
  // totals across chunks.
  for (int i = threadIdx.x; i < mLength; i += BLKSIZE - 1) {
    threadData.key = mKeys[i];
    threadData.value = mScans[i];
    BlockScan(temp_storage).InclusiveScan(threadData, threadData, AddByKey());
    mScans[i] = threadData.value;
    __syncthreads();
  }
}
/**
 * @brief Level 3 of the block-pyramid scan: folds the level-2 carries back
 * into each block's local exclusive scans (in place in `scans`) and writes
 * the final per-key totals into `sums` at segment boundaries.
 *
 * NOTE(review): threads with tid >= size return before the __syncthreads()
 * below, so the barrier sits in divergent control flow for a partial tail
 * block. This works when those threads occupy whole trailing warps but is
 * not guaranteed by the programming model — verify on target hardware.
 */
template <int BLKDIM_L1L3>
__global__ void CubScanByKeyL3(common::Span<GradientPair> sums,
common::Span<GradientPair> scans,
common::Span<const GradientPair> vals,
common::Span<const int> instIds,
common::Span<const GradientPair> mScans,
common::Span<const int> mKeys,
common::Span<const NodeIdT> keys,
int nUniqKeys,
common::Span<const int> colIds, NodeIdT nodeStart,
const int size) {
int relId = threadIdx.x;
int tid = (blockIdx.x * BLKDIM_L1L3) + relId;
// to avoid the following warning from nvcc:
// __shared__ memory variable with non-empty constructor or destructor
// (potential race between threads)
__shared__ char gradBuff[sizeof(GradientPair)];
__shared__ int s_mKeys;
GradientPair* s_mScans = reinterpret_cast<GradientPair*>(gradBuff);
if (tid >= size) return;
// cache block-wide partial scan info
if (relId == 0) {
s_mKeys = (blockIdx.x > 0) ? mKeys[blockIdx.x - 1] : kNoneKey;
s_mScans[0] = (blockIdx.x > 0) ? mScans[blockIdx.x - 1] : GradientPair();
}
int myKey = Abs2UniqueKey(tid, keys, colIds, nodeStart, nUniqKeys);
int previousKey =
tid == 0 ? kNoneKey
: Abs2UniqueKey(tid - 1, keys, colIds, nodeStart, nUniqKeys);
GradientPair my_value = scans[tid];
__syncthreads();
// Apply the previous block's carry when its last key continues into ours.
if (blockIdx.x > 0 && s_mKeys == previousKey) {
my_value += s_mScans[0];
}
// The very last element closes its segment with an inclusive total.
if (tid == size - 1) {
sums[previousKey] = my_value + Get(tid, vals, instIds);
}
// At every key boundary, flush the finished segment's total and restart.
if ((previousKey != myKey) && (previousKey >= 0)) {
sums[previousKey] = my_value;
my_value = GradientPair(0.0f, 0.0f);
}
scans[tid] = my_value;
}
/**
* @brief Performs fused reduce and scan by key functionality. It is assumed
* that
* the keys occur contiguously!
* @param sums the output gradient reductions for each element performed
* key-wise
* @param scans the output gradient scans for each element performed key-wise
* @param vals the gradients evaluated for each observation.
* @param instIds instance ids for each element
* @param keys keys to be used to segment the reductions. They need not occur
* contiguously in contrast to scan_by_key. Currently, we need one key per
* value in the 'vals' array.
* @param size number of elements in the 'vals' array
* @param nUniqKeys max number of uniq keys found per column
* @param nCols number of columns
* @param tmpScans temporary scan buffer needed for cub-pyramid algo
* @param tmpKeys temporary key buffer needed for cub-pyramid algo
* @param colIds column indices for each element in the array
* @param nodeStart index of the leftmost node in the current level
*/
// Host driver for the fused key-wise reduce + scan (see the doxygen block
// above): zeroes the per-(node, column) sums, then runs the three pyramid
// levels. All work is issued on the default stream; hipMemset here is
// synchronous with respect to it, and byte-wise zeroing is valid because the
// target value is all-zero GradientPairs.
template <int BLKDIM_L1L3 = 256, int BLKDIM_L2 = 512>
void ReduceScanByKey(common::Span<GradientPair> sums,
common::Span<GradientPair> scans,
common::Span<GradientPair> vals,
common::Span<const int> instIds,
common::Span<const NodeIdT> keys,
int size, int nUniqKeys, int nCols,
common::Span<GradientPair> tmpScans,
common::Span<int> tmpKeys,
common::Span<const int> colIds, NodeIdT nodeStart) {
int nBlks = dh::DivRoundUp(size, BLKDIM_L1L3);
hipMemset(sums.data(), 0, nUniqKeys * nCols * sizeof(GradientPair));
hipLaunchKernelGGL(( CubScanByKeyL1<BLKDIM_L1L3>)
, dim3(nBlks), dim3(BLKDIM_L1L3), 0, 0, scans, vals, instIds, tmpScans, tmpKeys, keys,
nUniqKeys, colIds, nodeStart, size);
// Level 2 runs as a single block so the carries can be chained serially.
hipLaunchKernelGGL(( CubScanByKeyL2<BLKDIM_L2>), dim3(1), dim3(BLKDIM_L2), 0, 0, tmpScans, tmpKeys, nBlks);
hipLaunchKernelGGL(( CubScanByKeyL3<BLKDIM_L1L3>)
, dim3(nBlks), dim3(BLKDIM_L1L3), 0, 0, sums, scans, vals, instIds, tmpScans, tmpKeys,
keys, nUniqKeys, colIds, nodeStart, size);
}
/**
* @struct ExactSplitCandidate
* @brief Abstraction of a possible split in the decision tree
*/
// Kept to 8 bytes (one float + one int) so AtomicArgMax can update a
// candidate with a single 64-bit atomicCAS.
struct ExactSplitCandidate {
/** the optimal gain score for this node */
float score;
/** index where to split in the DMatrix */
int index;
// Default: worst possible score and an INT_MAX index sentinel meaning
// "no valid split recorded yet".
HOST_DEV_INLINE ExactSplitCandidate() : score{-FLT_MAX}, index{INT_MAX} {}
/**
* @brief Whether the split info is valid to be used to create a new child
* @param minSplitLoss minimum score above which decision to split is made
* @return true if splittable, else false
*/
HOST_DEV_INLINE bool IsSplittable(float minSplitLoss) const {
return ((score >= minSplitLoss) && (index != INT_MAX));
}
};
/**
* @enum ArgMaxByKeyAlgo best_split_evaluation.cuh
* @brief Help decide which algorithm to use for multi-argmax operation
*/
enum ArgMaxByKeyAlgo {
/** simplest variant: global-memory atomics for all updates */
kAbkGmem = 0,
/** shared-memory atomics for updates (when the number of keys is small) */
kAbkSmem
};
/** max depth until which to use shared mem based atomics for argmax */
static const int kMaxAbkLevels = 3;
// Returns the candidate with the larger score; ties break toward the
// smaller index so the argmax is deterministic regardless of the order in
// which threads race.
HOST_DEV_INLINE ExactSplitCandidate MaxSplit(ExactSplitCandidate a,
                                             ExactSplitCandidate b) {
  if (a.score == b.score) {
    ExactSplitCandidate out;
    out.score = a.score;
    out.index = (a.index < b.index) ? a.index : b.index;
    return out;
  }
  return (a.score < b.score) ? b : a;
}
// Atomically replaces *address with MaxSplit(*address, val) via a classic
// CAS retry loop, type-punning the 8-byte candidate to the 64-bit integer
// type accepted by atomicCAS.
//
// Fix: the original mixed `unsigned long long` and `uint64_t` in the two
// reinterpret_casts; these are distinct types on some ABIs, so the same type
// is now used throughout, with the size assumption made explicit.
DEV_INLINE void AtomicArgMax(ExactSplitCandidate* address,
                             ExactSplitCandidate val) {
  static_assert(sizeof(ExactSplitCandidate) == sizeof(unsigned long long),  // NOLINT
                "ExactSplitCandidate must be 8 bytes for 64-bit atomicCAS");
  unsigned long long* intAddress = reinterpret_cast<unsigned long long*>(address);  // NOLINT
  unsigned long long old = *intAddress;  // NOLINT
  unsigned long long assumed;  // NOLINT
  do {
    assumed = old;
    ExactSplitCandidate res =
        MaxSplit(val, *reinterpret_cast<ExactSplitCandidate*>(&assumed));
    old = atomicCAS(intAddress, assumed,
                    *reinterpret_cast<unsigned long long*>(&res));  // NOLINT
  } while (assumed != old);
}
// Evaluates the split candidate for one (sorted) feature-value element and
// atomically merges it into its node's best-split slot. Elements whose
// (node, column, value) triple repeats the previous element are skipped so
// only the first of a run of equal values proposes a split.
DEV_INLINE void ArgMaxWithAtomics(
int id,
common::Span<ExactSplitCandidate> nodeSplits,
common::Span<const GradientPair> gradScans,
common::Span<const GradientPair> gradSums,
common::Span<const float> vals,
common::Span<const int> colIds,
common::Span<const NodeIdT> nodeAssigns,
common::Span<const DeviceNodeStats> nodes, int nUniqKeys,
NodeIdT nodeStart, int len,
const GPUTrainingParam& param) {
int nodeId = nodeAssigns[id];
// @todo: this is really a bad check! but will be fixed when we move
// to key-based reduction
if ((id == 0) ||
!((nodeId == nodeAssigns[id - 1]) && (colIds[id] == colIds[id - 1]) &&
(vals[id] == vals[id - 1]))) {
if (nodeId != kUnusedNode) {
int sumId = Abs2UniqueKey(id, nodeAssigns, colIds, nodeStart, nUniqKeys);
GradientPair colSum = gradSums[sumId];
int uid = nodeId - nodeStart;
DeviceNodeStats node_stat = nodes[nodeId];
GradientPair parentSum = node_stat.sum_gradients;
float parentGain = node_stat.root_gain;
bool tmp;
ExactSplitCandidate s;
// Gradient mass missing from this column belongs to rows with no entry.
GradientPair missing = parentSum - colSum;
s.score = LossChangeMissing(gradScans[id], missing, parentSum, parentGain,
param, tmp);
s.index = id;
AtomicArgMax(&nodeSplits[uid], s);
} // end if nodeId != UNUSED_NODE
} // end if id == 0 ...
}
// Global-memory variant of the per-node split argmax: every element proposes
// its candidate straight into `nodeSplits` through AtomicArgMax. Selected by
// ArgMaxByKey for deeper levels, where the per-node slots no longer fit the
// shared-memory variant comfortably.
__global__ void AtomicArgMaxByKeyGmem(
common::Span<ExactSplitCandidate> nodeSplits,
common::Span<const GradientPair> gradScans,
common::Span<const GradientPair> gradSums,
common::Span<const float> vals,
common::Span<const int> colIds,
common::Span<const NodeIdT> nodeAssigns,
common::Span<const DeviceNodeStats> nodes,
int nUniqKeys,
NodeIdT nodeStart,
int len,
const TrainParam param) {
// Grid-stride loop over all feature-value elements.
int id = threadIdx.x + (blockIdx.x * blockDim.x);
const int stride = blockDim.x * gridDim.x;
for (; id < len; id += stride) {
ArgMaxWithAtomics(id, nodeSplits, gradScans, gradSums, vals, colIds,
nodeAssigns, nodes, nUniqKeys, nodeStart, len,
GPUTrainingParam(param));
}
}
// Shared-memory variant of the per-node split argmax: each block accumulates
// candidates into per-node shared-memory slots, then merges its winners into
// the global results — one global atomic per (block, node) instead of one
// per element. The launch supplies nUniqKeys * sizeof(ExactSplitCandidate)
// bytes of dynamic shared memory.
//
// Fix: Span's second constructor argument is an element count, not a byte
// count; the original passed nUniqKeys * sizeof(ExactSplitCandidate),
// overstating the extent and defeating Span bounds checking.
__global__ void AtomicArgMaxByKeySmem(
    common::Span<ExactSplitCandidate> nodeSplits,
    common::Span<const GradientPair> gradScans,
    common::Span<const GradientPair> gradSums,
    common::Span<const float> vals,
    common::Span<const int> colIds,
    common::Span<const NodeIdT> nodeAssigns,
    common::Span<const DeviceNodeStats> nodes,
    int nUniqKeys, NodeIdT nodeStart, int len, const GPUTrainingParam param) {
  extern __shared__ char sArr[];
  common::Span<ExactSplitCandidate> sNodeSplits =
      common::Span<ExactSplitCandidate>(
          reinterpret_cast<ExactSplitCandidate*>(sArr),
          static_cast<typename common::Span<ExactSplitCandidate>::index_type>(
              nUniqKeys));
  int tid = threadIdx.x;
  // Reset the shared slots before accumulating into them.
  ExactSplitCandidate defVal;
  for (int i = tid; i < nUniqKeys; i += blockDim.x) {
    sNodeSplits[i] = defVal;
  }
  __syncthreads();
  // Grid-stride loop: each element proposes its split into the shared slots.
  int id = tid + (blockIdx.x * blockDim.x);
  const int stride = blockDim.x * gridDim.x;
  for (; id < len; id += stride) {
    ArgMaxWithAtomics(id, sNodeSplits, gradScans, gradSums, vals, colIds,
                      nodeAssigns, nodes, nUniqKeys, nodeStart, len, param);
  }
  __syncthreads();
  // Merge this block's winners into the global per-node results.
  for (int i = tid; i < nUniqKeys; i += blockDim.x) {
    ExactSplitCandidate s = sNodeSplits[i];
    AtomicArgMax(&nodeSplits[i], s);
  }
}
/**
* @brief Performs argmax_by_key functionality but for cases when keys need not
* occur contiguously
* @param nodeSplits will contain information on best split for each node
* @param gradScans exclusive sum on sorted segments for each col
* @param gradSums gradient sum for each column in DMatrix based on to node-ids
* @param vals feature values
* @param colIds column index for each element in the feature values array
* @param nodeAssigns node-id assignments to each element in DMatrix
* @param nodes pointer to all nodes for this tree in BFS order
* @param nUniqKeys number of unique node-ids in this level
* @param nodeStart start index of the node-ids in this level
* @param len number of elements
* @param param training parameters
* @param algo which algorithm to use for argmax_by_key
*/
// Host driver for the non-contiguous argmax-by-key (see the doxygen block
// above): resets all per-node best-split slots, then dispatches either the
// global-memory or shared-memory kernel.
template <int BLKDIM = 256, int ITEMS_PER_THREAD = 4>
void ArgMaxByKey(common::Span<ExactSplitCandidate> nodeSplits,
common::Span<const GradientPair> gradScans,
common::Span<const GradientPair> gradSums,
common::Span<const float> vals,
common::Span<const int> colIds,
common::Span<const NodeIdT> nodeAssigns,
common::Span<const DeviceNodeStats> nodes,
int nUniqKeys,
NodeIdT nodeStart, int len, const TrainParam param,
ArgMaxByKeyAlgo algo) {
dh::FillConst<ExactSplitCandidate, BLKDIM, ITEMS_PER_THREAD>(
param.gpu_id, nodeSplits.data(), nUniqKeys,
ExactSplitCandidate());
int nBlks = dh::DivRoundUp(len, ITEMS_PER_THREAD * BLKDIM);
switch (algo) {
case kAbkGmem:
hipLaunchKernelGGL(( AtomicArgMaxByKeyGmem), dim3(nBlks), dim3(BLKDIM), 0, 0,
nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes,
nUniqKeys, nodeStart, len, param);
break;
case kAbkSmem:
// Dynamic shared memory: one candidate slot per unique node id.
hipLaunchKernelGGL(( AtomicArgMaxByKeySmem), dim3(nBlks), dim3(BLKDIM),
sizeof(ExactSplitCandidate) * nUniqKeys, 0,
nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes,
nUniqKeys, nodeStart, len, GPUTrainingParam(param));
break;
default:
throw std::runtime_error("argMaxByKey: Bad algo passed!");
}
}
// One block per column: cooperatively stamps that column's index onto every
// element in its [colOffsets[col], colOffsets[col+1]) range.
__global__ void AssignColIds(int* colIds, const int* colOffsets) {
  const int col = blockIdx.x;
  const int begin = colOffsets[col];
  const int end = colOffsets[col + 1];
  for (int i = begin + threadIdx.x; i < end; i += blockDim.x) {
    colIds[i] = col;
  }
}
// One thread per row: advances each instance's node id to the child on the
// node's default (missing-value) direction. Instances sitting on leaves or
// unused nodes are parked on the kUnusedNode sentinel. AssignNodeIds then
// overrides this default for rows with an observed feature value.
__global__ void FillDefaultNodeIds(NodeIdT* nodeIdsPerInst,
const DeviceNodeStats* nodes, int n_rows) {
int id = threadIdx.x + (blockIdx.x * blockDim.x);
if (id >= n_rows) {
return;
}
// if this element belongs to none of the currently active node-id's
NodeIdT nId = nodeIdsPerInst[id];
if (nId == kUnusedNode) {
return;
}
const DeviceNodeStats n = nodes[nId];
NodeIdT result;
if (n.IsLeaf() || n.IsUnused()) {
result = kUnusedNode;
} else if (n.dir == kLeftDir) {
// Dense BFS layout: children of node i live at 2i+1 (left) and 2i+2.
result = (2 * n.idx) + 1;
} else {
result = (2 * n.idx) + 2;
}
nodeIdsPerInst[id] = result;
}
// Grid-stride loop over all feature-value elements: for each element whose
// column is the split feature of its instance's current node, routes the
// instance left/right by comparing the value against the node's threshold.
// Also (fused) initializes nodeLocations with the identity permutation used
// by the subsequent segmented sort.
__global__ void AssignNodeIds(NodeIdT* nodeIdsPerInst, int* nodeLocations,
const NodeIdT* nodeIds, const int* instId,
const DeviceNodeStats* nodes,
const int* colOffsets, const float* vals,
int nVals, int nCols) {
int id = threadIdx.x + (blockIdx.x * blockDim.x);
const int stride = blockDim.x * gridDim.x;
for (; id < nVals; id += stride) {
// fusing generation of indices for node locations
nodeLocations[id] = id;
// using nodeIds here since the previous kernel would have updated
// the nodeIdsPerInst with all default assignments
int nId = nodeIds[id];
// if this element belongs to none of the currently active node-id's
if (nId != kUnusedNode) {
const DeviceNodeStats n = nodes[nId];
int colId = n.fidx;
// printf("nid=%d colId=%d id=%d\n", nId, colId, id);
int start = colOffsets[colId];
int end = colOffsets[colId + 1];
// @todo: too much wasteful threads!!
if ((id >= start) && (id < end) && !(n.IsLeaf() || n.IsUnused())) {
// vals[id] >= fvalue goes right (2i+2), otherwise left (2i+1).
NodeIdT result = (2 * n.idx) + 1 + (vals[id] >= n.fvalue);
nodeIdsPerInst[instId[id]] = result;
}
}
}
}
// Marks a used node as a leaf (root_gain = -FLT_MAX, the same sentinel
// Split2Node uses) when it sits on the bottom level of the dense tree or
// when both of its children went unused.
__global__ void MarkLeavesKernel(DeviceNodeStats* nodes, int len) {
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if ((id < len) && !nodes[id].IsUnused()) {
int lid = (id << 1) + 1;
int rid = (id << 1) + 2;
if ((lid >= len) || (rid >= len)) {
nodes[id].root_gain = -FLT_MAX; // bottom-most nodes
} else if (nodes[lid].IsUnused() && nodes[rid].IsUnused()) {
nodes[id].root_gain = -FLT_MAX; // unused child nodes
}
}
}
/**
 * @brief Exact (non-histogram) GPU tree builder. Keeps the feature matrix in
 * a sorted column-compressed layout on a single device, grows the tree one
 * BFS level at a time (segmented reduce/scan -> per-node argmax -> split),
 * then converts the dense node array into the sparse RegTree.
 */
class GPUMaker : public TreeUpdater {
protected:
TrainParam param_;
/** whether we have initialized memory already (so as not to repeat!) */
bool allocated_;
/** feature values stored in column-major compressed format */
dh::DoubleBuffer<float> vals_;
common::Span<float> vals_cached_;
/** corresponding instance id's of these feature values */
dh::DoubleBuffer<int> instIds_;
common::Span<int> inst_ids_cached_;
/** column offsets for these feature values */
common::Span<int> colOffsets_;
// Per-row gradient pairs for the current boosting iteration.
common::Span<GradientPair> gradsInst_;
// Per-element node assignments and the permutation used to re-sort them.
dh::DoubleBuffer<NodeIdT> nodeAssigns_;
dh::DoubleBuffer<int> nodeLocations_;
// Dense BFS-ordered node array (maxNodes_ entries).
common::Span<DeviceNodeStats> nodes_;
common::Span<NodeIdT> node_assigns_per_inst_;
// Per-(node, column) gradient sums and per-element exclusive scans.
common::Span<GradientPair> gradsums_;
common::Span<GradientPair> gradscans_;
common::Span<ExactSplitCandidate> nodeSplits_;
int n_vals_;
int n_rows_;
int n_cols_;
int maxNodes_;
int maxLeaves_;
// devices are only used for sharding the HostDeviceVector passed as a parameter;
// the algorithm works with a single GPU only
GPUSet devices_;
dh::CubMemory tmp_mem_;
common::Span<GradientPair> tmpScanGradBuff_;
common::Span<int> tmp_scan_key_buff_;
common::Span<int> colIds_;
dh::BulkAllocator ba_;
public:
GPUMaker() : allocated_{false} {}
~GPUMaker() override = default;
// Parses training parameters and derives the dense-tree capacity bounds
// for the configured max_depth.
void Init(const std::vector<std::pair<std::string, std::string>> &args) override {
param_.InitAllowUnknown(args);
maxNodes_ = (1 << (param_.max_depth + 1)) - 1;
maxLeaves_ = 1 << param_.max_depth;
devices_ = GPUSet::All(param_.gpu_id, param_.n_gpus);
}
// TreeUpdater entry point: builds each requested tree, scaling the
// learning rate down by the number of trees (restored afterwards).
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
gpair->Shard(devices_);
try {
// build tree
for (auto tree : trees) {
UpdateTree(gpair, dmat, tree);
}
} catch (const std::exception& e) {
LOG(FATAL) << "grow_gpu exception: " << e.what() << std::endl;
}
param_.learning_rate = lr;
}
/// @note: Update should be only after Init!!
// Grows one tree level by level, then converts the dense node array into
// the sparse output tree.
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
RegTree* hTree) {
if (!allocated_) {
SetupOneTimeData(dmat);
}
for (int i = 0; i < param_.max_depth; ++i) {
if (i == 0) {
// make sure to start on a fresh tree with sorted values!
dh::CopyDeviceSpan(vals_.CurrentSpan(), vals_cached_);
dh::CopyDeviceSpan(instIds_.CurrentSpan(), inst_ids_cached_);
TransferGrads(gpair);
}
// Level i holds 2^i nodes starting at BFS index 2^i - 1.
int nNodes = 1 << i;
NodeIdT nodeStart = nNodes - 1;
InitNodeData(i, nodeStart, nNodes);
FindSplit(i, nodeStart, nNodes);
}
// mark all the used nodes with unused children as leaf nodes
MarkLeaves();
Dense2SparseTree(hTree, nodes_, param_);
}
// Applies the per-node best split candidates: creates child node stats and
// records the split (or marks the node a leaf when no candidate passes
// min_split_loss). Runs as a device lambda, one thread per level node.
void Split2Node(int nNodes, NodeIdT nodeStart) {
auto d_nodes = nodes_;
auto d_gradScans = gradscans_;
auto d_gradsums = gradsums_;
auto d_nodeAssigns = nodeAssigns_.CurrentSpan();
auto d_colIds = colIds_;
auto d_vals = vals_.Current();
auto d_nodeSplits = nodeSplits_.data();
int nUniqKeys = nNodes;
float min_split_loss = param_.min_split_loss;
auto gpu_param = GPUTrainingParam(param_);
dh::LaunchN(param_.gpu_id, nNodes, [=] __device__(int uid) {
int absNodeId = uid + nodeStart;
ExactSplitCandidate s = d_nodeSplits[uid];
if (s.IsSplittable(min_split_loss)) {
int idx = s.index;
int nodeInstId =
Abs2UniqueKey(idx, d_nodeAssigns, d_colIds, nodeStart, nUniqKeys);
bool missingLeft = true;
const DeviceNodeStats& n = d_nodes[absNodeId];
GradientPair gradScan = d_gradScans[idx];
GradientPair gradSum = d_gradsums[nodeInstId];
float thresh = d_vals[idx];
int colId = d_colIds[idx];
// get the default direction for the current node
GradientPair missing = n.sum_gradients - gradSum;
LossChangeMissing(gradScan, missing, n.sum_gradients, n.root_gain,
gpu_param, missingLeft);
// get the score/weight/id/gradSum for left and right child nodes
GradientPair lGradSum = missingLeft ? gradScan + missing : gradScan;
GradientPair rGradSum = n.sum_gradients - lGradSum;
// Create children
d_nodes[LeftChildNodeIdx(absNodeId)] =
DeviceNodeStats(lGradSum, LeftChildNodeIdx(absNodeId), gpu_param);
d_nodes[RightChildNodeIdx(absNodeId)] =
DeviceNodeStats(rGradSum, RightChildNodeIdx(absNodeId), gpu_param);
// Set split for parent
d_nodes[absNodeId].SetSplit(thresh, colId,
missingLeft ? kLeftDir : kRightDir, lGradSum,
rGradSum);
} else {
// cannot be split further, so this node is a leaf!
d_nodes[absNodeId].root_gain = -FLT_MAX;
}
});
}
// One level of split search: key-wise reduce/scan of gradients, per-node
// argmax over split candidates (shared-memory variant for shallow levels),
// then node creation.
void FindSplit(int level, NodeIdT nodeStart, int nNodes) {
ReduceScanByKey(gradsums_, gradscans_, gradsInst_,
instIds_.CurrentSpan(), nodeAssigns_.CurrentSpan(), n_vals_, nNodes,
n_cols_, tmpScanGradBuff_, tmp_scan_key_buff_,
colIds_, nodeStart);
ArgMaxByKey(nodeSplits_, gradscans_, gradsums_,
vals_.CurrentSpan(), colIds_, nodeAssigns_.CurrentSpan(),
nodes_, nNodes, nodeStart, n_vals_, param_,
level <= kMaxAbkLevels ? kAbkSmem : kAbkGmem);
Split2Node(nNodes, nodeStart);
}
// Bulk-allocates every device buffer in a single allocation.
void AllocateAllData(int offsetSize) {
int tmpBuffSize = ScanTempBufferSize(n_vals_);
ba_.Allocate(param_.gpu_id, &vals_, n_vals_,
&vals_cached_, n_vals_, &instIds_, n_vals_, &inst_ids_cached_, n_vals_,
&colOffsets_, offsetSize, &gradsInst_, n_rows_, &nodeAssigns_, n_vals_,
&nodeLocations_, n_vals_, &nodes_, maxNodes_, &node_assigns_per_inst_,
n_rows_, &gradsums_, maxLeaves_ * n_cols_, &gradscans_, n_vals_,
&nodeSplits_, maxLeaves_, &tmpScanGradBuff_, tmpBuffSize,
&tmp_scan_key_buff_, tmpBuffSize, &colIds_, n_vals_);
}
// First-call setup: converts the DMatrix to CSC, allocates device memory,
// and uploads + sorts the data. Guarded by allocated_.
void SetupOneTimeData(DMatrix* dmat) {
if (!dmat->SingleColBlock()) {
LOG(FATAL) << "exact::GPUBuilder - must have 1 column block";
}
std::vector<float> fval;
std::vector<int> fId;
std::vector<int> offset;
ConvertToCsc(dmat, &fval, &fId, &offset);
AllocateAllData(static_cast<int>(offset.size()));
TransferAndSortData(fval, fId, offset);
allocated_ = true;
}
// Flattens the DMatrix into host-side CSC arrays (values, instance ids,
// per-column offsets) and records n_rows_/n_cols_/n_vals_.
void ConvertToCsc(DMatrix* dmat, std::vector<float>* fval,
std::vector<int>* fId, std::vector<int>* offset) {
const MetaInfo& info = dmat->Info();
CHECK(info.num_col_ < std::numeric_limits<int>::max());
CHECK(info.num_row_ < std::numeric_limits<int>::max());
n_rows_ = static_cast<int>(info.num_row_);
n_cols_ = static_cast<int>(info.num_col_);
offset->reserve(n_cols_ + 1);
offset->push_back(0);
fval->reserve(n_cols_ * n_rows_);
fId->reserve(n_cols_ * n_rows_);
// in case you end up with a DMatrix having no column access
// then make sure to enable that before copying the data!
for (const auto& batch : dmat->GetSortedColumnBatches()) {
for (int i = 0; i < batch.Size(); i++) {
auto col = batch[i];
for (const Entry& e : col) {
int inst_id = static_cast<int>(e.index);
fval->push_back(e.fvalue);
fId->push_back(inst_id);
}
offset->push_back(static_cast<int>(fval->size()));
}
}
CHECK(fval->size() < std::numeric_limits<int>::max());
n_vals_ = static_cast<int>(fval->size());
}
// Uploads the CSC arrays, sorts values (with their instance ids) within
// each column segment, caches the sorted state for reuse across trees, and
// stamps per-element column ids.
void TransferAndSortData(const std::vector<float>& fval,
const std::vector<int>& fId,
const std::vector<int>& offset) {
dh::CopyVectorToDeviceSpan(vals_.CurrentSpan(), fval);
dh::CopyVectorToDeviceSpan(instIds_.CurrentSpan(), fId);
dh::CopyVectorToDeviceSpan(colOffsets_, offset);
dh::SegmentedSort<float, int>(&tmp_mem_, &vals_, &instIds_, n_vals_, n_cols_,
colOffsets_);
dh::CopyDeviceSpan(vals_cached_, vals_.CurrentSpan());
dh::CopyDeviceSpan(inst_ids_cached_, instIds_.CurrentSpan());
hipLaunchKernelGGL(( AssignColIds), dim3(n_cols_), dim3(512), 0, 0, colIds_.data(), colOffsets_.data());
}
// Copies this iteration's gradients to the device and pre-computes the
// root node's total gradient sum.
void TransferGrads(HostDeviceVector<GradientPair>* gpair) {
gpair->GatherTo(
thrust::device_pointer_cast(gradsInst_.data()),
thrust::device_pointer_cast(gradsInst_.data() + gradsInst_.size()));
// evaluate the full-grad reduction for the root node
dh::SumReduction<GradientPair>(tmp_mem_, gradsInst_, gradsums_, n_rows_);
}
// Prepares node assignments for a level: at level 0 everything is reset to
// the root; otherwise rows are routed to children and the per-element
// assignments are re-sorted.
void InitNodeData(int level, NodeIdT nodeStart, int nNodes) {
// all instances belong to root node at the beginning!
if (level == 0) {
thrust::fill(thrust::device_pointer_cast(nodes_.data()),
thrust::device_pointer_cast(nodes_.data() + nodes_.size()),
DeviceNodeStats());
thrust::fill(thrust::device_pointer_cast(nodeAssigns_.Current()),
thrust::device_pointer_cast(nodeAssigns_.Current() +
nodeAssigns_.Size()),
0);
thrust::fill(thrust::device_pointer_cast(node_assigns_per_inst_.data()),
thrust::device_pointer_cast(node_assigns_per_inst_.data() +
node_assigns_per_inst_.size()),
0);
// for root node, just update the gradient/score/weight/id info
// before splitting it! Currently all data is on GPU, hence this
// stupid little kernel
auto d_nodes = nodes_;
auto d_sums = gradsums_;
auto gpu_params = GPUTrainingParam(param_);
dh::LaunchN(param_.gpu_id, 1, [=] __device__(int idx) {
d_nodes[0] = DeviceNodeStats(d_sums[0], 0, gpu_params);
});
} else {
const int BlkDim = 256;
const int ItemsPerThread = 4;
// assign default node ids first
int nBlks = dh::DivRoundUp(n_rows_, BlkDim);
hipLaunchKernelGGL(( FillDefaultNodeIds), dim3(nBlks), dim3(BlkDim), 0, 0, node_assigns_per_inst_.data(),
nodes_.data(), n_rows_);
// evaluate the correct child indices of non-missing values next
nBlks = dh::DivRoundUp(n_vals_, BlkDim * ItemsPerThread);
hipLaunchKernelGGL(( AssignNodeIds), dim3(nBlks), dim3(BlkDim), 0, 0,
node_assigns_per_inst_.data(), nodeLocations_.Current(),
nodeAssigns_.Current(), instIds_.Current(), nodes_.data(),
colOffsets_.data(), vals_.Current(), n_vals_, n_cols_);
// gather the node assignments across all other columns too
dh::Gather(param_.gpu_id, nodeAssigns_.Current(),
node_assigns_per_inst_.data(), instIds_.Current(), n_vals_);
SortKeys(level);
}
}
// Re-sorts each column segment by node id (radix bits limited to level+1)
// and permutes values/instance ids to match, flipping the double buffers.
void SortKeys(int level) {
// segmented-sort the arrays based on node-id's
// but we don't need more than level+1 bits for sorting!
SegmentedSort(&tmp_mem_, &nodeAssigns_, &nodeLocations_, n_vals_, n_cols_,
colOffsets_, 0, level + 1);
dh::Gather<float, int>(param_.gpu_id, vals_.other(),
vals_.Current(), instIds_.other(), instIds_.Current(),
nodeLocations_.Current(), n_vals_);
vals_.buff.selector ^= 1;
instIds_.buff.selector ^= 1;
}
// Finalizes the dense tree: nodes with no usable children become leaves.
void MarkLeaves() {
const int BlkDim = 128;
int nBlks = dh::DivRoundUp(maxNodes_, BlkDim);
hipLaunchKernelGGL(( MarkLeavesKernel), dim3(nBlks), dim3(BlkDim), 0, 0, nodes_.data(), maxNodes_);
}
};
// Registers this updater in the tree-updater registry under the name
// "grow_gpu" so it can be selected via the `updater` training parameter.
TSOOBGX_REGISTER_TREE_UPDATER(GPUMaker, "grow_gpu")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUMaker(); });
} // namespace tree
} // namespace tsoobgx
| 7baa197bd3104b92229046fc4b0ec2b6192ad94d.cu | /*!
* Copyright 2017-2018 tsooBGX contributors
*/
#include <tsoobgx/tree_updater.h>
#include <utility>
#include <vector>
#include <limits>
#include <string>
#include "../common/common.h"
#include "param.h"
#include "updater_gpu_common.cuh"
namespace tsoobgx {
namespace tree {
DMLC_REGISTRY_FILE_TAG(updater_gpu);
// Evaluates the loss change for a split when instances with missing values
// are sent left versus right, returns the better of the two losses, and
// reports the chosen direction through `missing_left_out`.
// Ties favour sending the missing values left.
template <typename GradientPairT>
TSOOBGX_DEVICE float inline LossChangeMissing(const GradientPairT& scan,
                                              const GradientPairT& missing,
                                              const GradientPairT& parent_sum,
                                              const float& parent_gain,
                                              const GPUTrainingParam& param,
                                              bool& missing_left_out) {  // NOLINT
  const float loss_left =
      DeviceCalcLossChange(param, scan + missing, parent_sum, parent_gain);
  const float loss_right =
      DeviceCalcLossChange(param, scan, parent_sum, parent_gain);
  missing_left_out = (loss_left >= loss_right);
  return missing_left_out ? loss_left : loss_right;
}
/**
* @brief Absolute BFS order IDs to col-wise unique IDs based on user input
* @param tid the index of the element that this thread should access
* @param abs the array of absolute IDs
* @param colIds the array of column IDs for each element
* @param nodeStart the start of the node ID at this level
* @param nKeys number of nodes at this level.
* @return the uniq key
*/
/**
 * @brief Maps the absolute BFS node id of element `tid` to a key unique per
 * (column, node) pair for the current tree level. The kUnusedNode sentinel
 * passes through unchanged.
 */
static HOST_DEV_INLINE NodeIdT Abs2UniqueKey(int tid,
                                             common::Span<const NodeIdT> abs,
                                             common::Span<const int> colIds,
                                             NodeIdT nodeStart, int nKeys) {
  const int node = abs[tid];
  if (node == kUnusedNode) {
    return node;
  }
  return (node - nodeStart) + colIds[tid] * nKeys;
}
/**
* @struct Pair
* @brief Pair used for key basd scan operations on GradientPair
*/
struct Pair {
// Segment key this scan element belongs to (kNoneKey for padding lanes).
int key;
// Partial gradient sum accumulated within the key's segment.
GradientPair value;
};
/** define a key that's not used at all in the entire boosting process */
static const int kNoneKey = -100;
/**
 * @brief Computes the number of temporary per-block slots needed by the
 *        multi-level scan kernels
 * @param size number of elements that will be scanned
 * @return one partial-result slot per L1 thread block
 */
template <int BLKDIM_L1L3 = 256>
int ScanTempBufferSize(int size) {
  return dh::DivRoundUp(size, BLKDIM_L1L3);
}
struct AddByKey {
  /**
   * Segmented-scan combiner: accumulates values that share a key and
   * restarts the running sum whenever a new key (segment) begins.
   * The result always carries the right-hand operand's key.
   */
  template <typename T>
  HOST_DEV_INLINE T operator()(const T& first, const T& second) const {
    T combined;
    combined.key = second.key;
    combined.value = (first.key == second.key)
                         ? first.value + second.value
                         : second.value;
    return combined;
  }
};
/**
 * @brief Gradient value getter function
 * @param id the index into the instIds array whose gradient to fetch
 * @param vals the gradient value buffer, one entry per instance
 * @param instIds instance index buffer
 * @return the gradient of the instance owning element `id`
 */
HOST_DEV_INLINE GradientPair Get(int id,
                                 common::Span<const GradientPair> vals,
                                 common::Span<const int> instIds) {
  // Elements are stored column-major, so resolve the element index to its
  // owning instance first, then fetch that instance's gradient.
  return vals[instIds[id]];
}
// Level-1 of the pyramid scan-by-key: each block performs a key-segmented
// exclusive scan over its chunk and records one partial result per block
// (in mScans/mKeys) for the L2 pass to combine across blocks.
template <int BLKDIM_L1L3>
__global__ void CubScanByKeyL1(
    common::Span<GradientPair> scans,
    common::Span<const GradientPair> vals,
    common::Span<const int> instIds,
    common::Span<GradientPair> mScans,
    common::Span<int> mKeys,
    common::Span<const NodeIdT> keys,
    int nUniqKeys,
    common::Span<const int> colIds, NodeIdT nodeStart,
    const int size) {
  // Identity element for the exclusive scan: a key no real segment uses.
  Pair rootPair = {kNoneKey, GradientPair(0.f, 0.f)};
  int myKey;
  GradientPair myValue;
  using BlockScan = cub::BlockScan<Pair, BLKDIM_L1L3>;
  __shared__ typename BlockScan::TempStorage temp_storage;
  Pair threadData;
  int tid = blockIdx.x * BLKDIM_L1L3 + threadIdx.x;
  if (tid < size) {
    myKey = Abs2UniqueKey(tid, keys, colIds, nodeStart, nUniqKeys);
    myValue = Get(tid, vals, instIds);
  } else {
    // Out-of-range threads participate with the neutral key/value so the
    // collective BlockScan below is still entered by the whole block.
    myKey = kNoneKey;
    myValue = {};
  }
  threadData.key = myKey;
  threadData.value = myValue;
  // get previous key, especially needed for the last thread in this block
  // in order to pass on the partial scan values.
  // this statement MUST appear before the checks below!
  // else, the result of this shuffle operation will be undefined
#if (__CUDACC_VER_MAJOR__ >= 9)
  int previousKey = __shfl_up_sync(0xFFFFFFFF, myKey, 1);
#else
  int previousKey = __shfl_up(myKey, 1);
#endif
  // Collectively compute the block-wide exclusive prefix sum
  BlockScan(temp_storage)
      .ExclusiveScan(threadData, threadData, rootPair, AddByKey());
  if (tid < size) {
    scans[tid] = threadData.value;
  } else {
    return;
  }
  // The last thread publishes this block's carry: its exclusive prefix
  // (reset to zero if a new segment started at this lane) plus its own value.
  if (threadIdx.x == BLKDIM_L1L3 - 1) {
    threadData.value =
        (myKey == previousKey) ? threadData.value : GradientPair(0.0f, 0.0f);
    mKeys[blockIdx.x] = myKey;
    mScans[blockIdx.x] = threadData.value + myValue;
  }
}
// Level-2 of the pyramid scan-by-key: a single block inclusively scans the
// per-block partials produced by L1 so each entry of mScans becomes the
// carry to add to the corresponding L1 block (consumed by L3).
template <int BLKSIZE>
__global__ void CubScanByKeyL2(common::Span<GradientPair> mScans,
                               common::Span<int> mKeys, int mLength) {
  using BlockScan = cub::BlockScan<Pair, BLKSIZE, cub::BLOCK_SCAN_WARP_SCANS>;
  Pair threadData;
  __shared__ typename BlockScan::TempStorage temp_storage;
  // Deliberate stride of (BLKSIZE - 1): the last element scanned in one
  // round is re-read by thread 0 in the next round, which carries the
  // running prefix forward across rounds when mLength > BLKSIZE.
  for (int i = threadIdx.x; i < mLength; i += BLKSIZE - 1) {
    threadData.key = mKeys[i];
    threadData.value = mScans[i];
    BlockScan(temp_storage).InclusiveScan(threadData, threadData, AddByKey());
    mScans[i] = threadData.value;
    // Barrier so the overlap element written above is visible to the next
    // round's readers (and TempStorage can be reused).
    __syncthreads();
  }
}
// Level-3 of the pyramid scan-by-key: adds the inter-block carries from L2
// onto the per-block scans from L1, and emits the final per-key sums.
template <int BLKDIM_L1L3>
__global__ void CubScanByKeyL3(common::Span<GradientPair> sums,
                               common::Span<GradientPair> scans,
                               common::Span<const GradientPair> vals,
                               common::Span<const int> instIds,
                               common::Span<const GradientPair> mScans,
                               common::Span<const int> mKeys,
                               common::Span<const NodeIdT> keys,
                               int nUniqKeys,
                               common::Span<const int> colIds, NodeIdT nodeStart,
                               const int size) {
  int relId = threadIdx.x;
  int tid = (blockIdx.x * BLKDIM_L1L3) + relId;
  // to avoid the following warning from nvcc:
  // __shared__ memory variable with non-empty constructor or destructor
  // (potential race between threads)
  __shared__ char gradBuff[sizeof(GradientPair)];
  __shared__ int s_mKeys;
  GradientPair* s_mScans = reinterpret_cast<GradientPair*>(gradBuff);
  if (tid >= size) return;
  // cache block-wide partial scan info
  if (relId == 0) {
    // Carry comes from the *previous* block's published key/prefix.
    s_mKeys = (blockIdx.x > 0) ? mKeys[blockIdx.x - 1] : kNoneKey;
    s_mScans[0] = (blockIdx.x > 0) ? mScans[blockIdx.x - 1] : GradientPair();
  }
  int myKey = Abs2UniqueKey(tid, keys, colIds, nodeStart, nUniqKeys);
  int previousKey =
      tid == 0 ? kNoneKey
               : Abs2UniqueKey(tid - 1, keys, colIds, nodeStart, nUniqKeys);
  GradientPair my_value = scans[tid];
  __syncthreads();
  // Only add the inter-block carry if the previous element continues the
  // same segment (same key) as the carry represents.
  if (blockIdx.x > 0 && s_mKeys == previousKey) {
    my_value += s_mScans[0];
  }
  // The very last element finalizes its segment's total (exclusive prefix
  // plus its own gradient value).
  if (tid == size - 1) {
    sums[previousKey] = my_value + Get(tid, vals, instIds);
  }
  // At each segment boundary, flush the finished segment's total and reset
  // the running prefix for the new segment.
  if ((previousKey != myKey) && (previousKey >= 0)) {
    sums[previousKey] = my_value;
    my_value = GradientPair(0.0f, 0.0f);
  }
  scans[tid] = my_value;
}
/**
 * @brief Performs fused reduce and scan by key functionality. It is assumed
 * that
 *  the keys occur contiguously!
 * @param sums the output gradient reductions for each element performed
 * key-wise
 * @param scans the output gradient scans for each element performed key-wise
 * @param vals the gradients evaluated for each observation.
 * @param instIds instance ids for each element
 * @param keys keys to be used to segment the reductions. They need not occur
 *  contiguously in contrast to scan_by_key. Currently, we need one key per
 *  value in the 'vals' array.
 * @param size number of elements in the 'vals' array
 * @param nUniqKeys max number of uniq keys found per column
 * @param nCols number of columns
 * @param tmpScans temporary scan buffer needed for cub-pyramid algo
 * @param tmpKeys temporary key buffer needed for cub-pyramid algo
 * @param colIds column indices for each element in the array
 * @param nodeStart index of the leftmost node in the current level
 */
template <int BLKDIM_L1L3 = 256, int BLKDIM_L2 = 512>
void ReduceScanByKey(common::Span<GradientPair> sums,
                     common::Span<GradientPair> scans,
                     common::Span<GradientPair> vals,
                     common::Span<const int> instIds,
                     common::Span<const NodeIdT> keys,
                     int size, int nUniqKeys, int nCols,
                     common::Span<GradientPair> tmpScans,
                     common::Span<int> tmpKeys,
                     common::Span<const int> colIds, NodeIdT nodeStart) {
  int nBlks = dh::DivRoundUp(size, BLKDIM_L1L3);
  // Zero the per-(node, column) sums; L3 only writes keys that occur.
  // NOTE(review): this cudaMemset and the three launches below are not
  // error-checked - consider wrapping them in the project's CUDA-check
  // helper.
  cudaMemset(sums.data(), 0, nUniqKeys * nCols * sizeof(GradientPair));
  // Three-level "pyramid": per-block scan, scan of block carries, then
  // carry propagation + final per-key sums.
  CubScanByKeyL1<BLKDIM_L1L3>
      <<<nBlks, BLKDIM_L1L3>>>(scans, vals, instIds, tmpScans, tmpKeys, keys,
                               nUniqKeys, colIds, nodeStart, size);
  CubScanByKeyL2<BLKDIM_L2><<<1, BLKDIM_L2>>>(tmpScans, tmpKeys, nBlks);
  CubScanByKeyL3<BLKDIM_L1L3>
      <<<nBlks, BLKDIM_L1L3>>>(sums, scans, vals, instIds, tmpScans, tmpKeys,
                               keys, nUniqKeys, colIds, nodeStart, size);
}
/**
 * @struct ExactSplitCandidate
 * @brief Abstraction of a possible split in the decision tree
 */
struct ExactSplitCandidate {
  /** the optimal gain score for this node */
  float score;
  /** index where to split in the DMatrix */
  int index;
  // Default state is "no candidate found yet": worst possible score and a
  // sentinel index, so any real candidate wins an argmax against it.
  HOST_DEV_INLINE ExactSplitCandidate() : score{-FLT_MAX}, index{INT_MAX} {}
  /**
   * @brief Whether the split info is valid to be used to create a new child
   * @param minSplitLoss minimum score above which decision to split is made
   * @return true if splittable, else false
   */
  HOST_DEV_INLINE bool IsSplittable(float minSplitLoss) const {
    return ((score >= minSplitLoss) && (index != INT_MAX));
  }
};
/**
 * @enum ArgMaxByKeyAlgo best_split_evaluation.cuh
 * @brief Help decide which algorithm to use for multi-argmax operation
 */
enum ArgMaxByKeyAlgo {
  /** simplest, use gmem-atomics for all updates */
  kAbkGmem = 0,
  /** use smem-atomics for updates (when number of keys are less) */
  kAbkSmem
};
/** max depth until which to use shared mem based atomics for argmax */
static const int kMaxAbkLevels = 3;
// Returns the better of two split candidates: higher gain wins; on a tie
// the smaller index wins, keeping the result deterministic regardless of
// the order in which candidates are merged.
HOST_DEV_INLINE ExactSplitCandidate MaxSplit(ExactSplitCandidate a,
                                             ExactSplitCandidate b) {
  ExactSplitCandidate winner;
  if (b.score > a.score) {
    winner.score = b.score;
    winner.index = b.index;
  } else if (b.score == a.score) {
    winner.score = a.score;
    winner.index = (a.index < b.index) ? a.index : b.index;
  } else {
    winner.score = a.score;
    winner.index = a.index;
  }
  return winner;
}
// Lock-free argmax update of a split candidate via a 64-bit CAS loop: the
// 8-byte candidate is type-punned to the integer type atomicCAS operates on.
// Retries until the word at `address` did not change between read and CAS.
DEV_INLINE void AtomicArgMax(ExactSplitCandidate* address,
                             ExactSplitCandidate val) {
  // The punning below is only valid if the struct occupies exactly one
  // CAS-able 64-bit word.
  static_assert(sizeof(ExactSplitCandidate) == sizeof(unsigned long long),  // NOLINT
                "ExactSplitCandidate must be 64 bits for AtomicArgMax");
  unsigned long long* intAddress = reinterpret_cast<unsigned long long*>(address); // NOLINT
  unsigned long long old = *intAddress;  // NOLINT
  unsigned long long assumed;  // NOLINT
  do {
    assumed = old;
    ExactSplitCandidate res =
        MaxSplit(val, *reinterpret_cast<ExactSplitCandidate*>(&assumed));
    // Use unsigned long long consistently (atomicCAS's operand type) rather
    // than mixing in uint64_t, which is a distinct type on some platforms.
    old = atomicCAS(intAddress, assumed,
                    *reinterpret_cast<unsigned long long*>(&res));  // NOLINT
  } while (assumed != old);
}
// Evaluates the split candidate at element `id` and atomically merges it
// into the best-split slot of the node owning that element. Only the first
// element of each run of identical (node, column, value) triples evaluates,
// so a split never lands between equal feature values.
DEV_INLINE void ArgMaxWithAtomics(
    int id,
    common::Span<ExactSplitCandidate> nodeSplits,
    common::Span<const GradientPair> gradScans,
    common::Span<const GradientPair> gradSums,
    common::Span<const float> vals,
    common::Span<const int> colIds,
    common::Span<const NodeIdT> nodeAssigns,
    common::Span<const DeviceNodeStats> nodes, int nUniqKeys,
    NodeIdT nodeStart, int len,
    const GPUTrainingParam& param) {
  int nodeId = nodeAssigns[id];
  // @todo: this is really a bad check! but will be fixed when we move
  // to key-based reduction
  if ((id == 0) ||
      !((nodeId == nodeAssigns[id - 1]) && (colIds[id] == colIds[id - 1]) &&
        (vals[id] == vals[id - 1]))) {
    if (nodeId != kUnusedNode) {
      int sumId = Abs2UniqueKey(id, nodeAssigns, colIds, nodeStart, nUniqKeys);
      GradientPair colSum = gradSums[sumId];
      // uid is the node's slot within this level's nodeSplits array.
      int uid = nodeId - nodeStart;
      DeviceNodeStats node_stat = nodes[nodeId];
      GradientPair parentSum = node_stat.sum_gradients;
      float parentGain = node_stat.root_gain;
      bool tmp;
      ExactSplitCandidate s;
      // Gradients of instances with a missing value in this column.
      GradientPair missing = parentSum - colSum;
      s.score = LossChangeMissing(gradScans[id], missing, parentSum, parentGain,
                                  param, tmp);
      s.index = id;
      AtomicArgMax(&nodeSplits[uid], s);
    }  // end if nodeId != UNUSED_NODE
  }  // end if id == 0 ...
}
// Argmax-by-key using global-memory atomics only: every element contends
// directly on the per-node slot in nodeSplits. Used at deeper levels where
// the number of nodes exceeds what fits in shared memory.
__global__ void AtomicArgMaxByKeyGmem(
    common::Span<ExactSplitCandidate> nodeSplits,
    common::Span<const GradientPair> gradScans,
    common::Span<const GradientPair> gradSums,
    common::Span<const float> vals,
    common::Span<const int> colIds,
    common::Span<const NodeIdT> nodeAssigns,
    common::Span<const DeviceNodeStats> nodes,
    int nUniqKeys,
    NodeIdT nodeStart,
    int len,
    const TrainParam param) {
  int id = threadIdx.x + (blockIdx.x * blockDim.x);
  const int stride = blockDim.x * gridDim.x;
  // Grid-stride loop over all elements.
  for (; id < len; id += stride) {
    ArgMaxWithAtomics(id, nodeSplits, gradScans, gradSums, vals, colIds,
                      nodeAssigns, nodes, nUniqKeys, nodeStart, len,
                      GPUTrainingParam(param));
  }
}
// Argmax-by-key using shared-memory atomics: each block accumulates its
// winners into a per-node shared-memory array, then merges once per node
// into global memory. Requires nUniqKeys * sizeof(ExactSplitCandidate)
// bytes of dynamic shared memory at launch.
__global__ void AtomicArgMaxByKeySmem(
    common::Span<ExactSplitCandidate> nodeSplits,
    common::Span<const GradientPair> gradScans,
    common::Span<const GradientPair> gradSums,
    common::Span<const float> vals,
    common::Span<const int> colIds,
    common::Span<const NodeIdT> nodeAssigns,
    common::Span<const DeviceNodeStats> nodes,
    int nUniqKeys, NodeIdT nodeStart, int len, const GPUTrainingParam param) {
  extern __shared__ char sArr[];
  // A Span's extent is an *element* count, not a byte count: there is one
  // candidate slot per unique key in the dynamic shared memory.
  common::Span<ExactSplitCandidate> sNodeSplits =
      common::Span<ExactSplitCandidate>(
          reinterpret_cast<ExactSplitCandidate*>(sArr),
          static_cast<typename common::Span<ExactSplitCandidate>::index_type>(
              nUniqKeys));
  int tid = threadIdx.x;
  // Cooperatively reset the per-node best-split slots.
  ExactSplitCandidate defVal;
  for (int i = tid; i < nUniqKeys; i += blockDim.x) {
    sNodeSplits[i] = defVal;
  }
  __syncthreads();
  int id = tid + (blockIdx.x * blockDim.x);
  const int stride = blockDim.x * gridDim.x;
  // Grid-stride loop: argmax each element into the shared-memory slots.
  for (; id < len; id += stride) {
    ArgMaxWithAtomics(id, sNodeSplits, gradScans, gradSums, vals, colIds,
                      nodeAssigns, nodes, nUniqKeys, nodeStart, len, param);
  }
  __syncthreads();
  // Merge this block's winners into the global per-node results.
  for (int i = tid; i < nUniqKeys; i += blockDim.x) {
    ExactSplitCandidate s = sNodeSplits[i];
    AtomicArgMax(&nodeSplits[i], s);
  }
}
/**
 * @brief Performs argmax_by_key functionality but for cases when keys need not
 *  occur contiguously
 * @param nodeSplits will contain information on best split for each node
 * @param gradScans exclusive sum on sorted segments for each col
 * @param gradSums gradient sum for each column in DMatrix based on to node-ids
 * @param vals feature values
 * @param colIds column index for each element in the feature values array
 * @param nodeAssigns node-id assignments to each element in DMatrix
 * @param nodes pointer to all nodes for this tree in BFS order
 * @param nUniqKeys number of unique node-ids in this level
 * @param nodeStart start index of the node-ids in this level
 * @param len number of elements
 * @param param training parameters
 * @param algo which algorithm to use for argmax_by_key
 */
template <int BLKDIM = 256, int ITEMS_PER_THREAD = 4>
void ArgMaxByKey(common::Span<ExactSplitCandidate> nodeSplits,
                 common::Span<const GradientPair> gradScans,
                 common::Span<const GradientPair> gradSums,
                 common::Span<const float> vals,
                 common::Span<const int> colIds,
                 common::Span<const NodeIdT> nodeAssigns,
                 common::Span<const DeviceNodeStats> nodes,
                 int nUniqKeys,
                 NodeIdT nodeStart, int len, const TrainParam param,
                 ArgMaxByKeyAlgo algo) {
  // Reset the global per-node results before the kernels argmax into them.
  dh::FillConst<ExactSplitCandidate, BLKDIM, ITEMS_PER_THREAD>(
      param.gpu_id, nodeSplits.data(), nUniqKeys,
      ExactSplitCandidate());
  int nBlks = dh::DivRoundUp(len, ITEMS_PER_THREAD * BLKDIM);
  // NOTE(review): neither launch below is followed by cudaGetLastError();
  // launch-configuration failures would go unnoticed here.
  switch (algo) {
    case kAbkGmem:
      AtomicArgMaxByKeyGmem<<<nBlks, BLKDIM>>>(
          nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes,
          nUniqKeys, nodeStart, len, param);
      break;
    case kAbkSmem:
      // Dynamic shared memory holds one candidate slot per unique node.
      AtomicArgMaxByKeySmem<<<nBlks, BLKDIM,
                              sizeof(ExactSplitCandidate) * nUniqKeys>>>(
          nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes,
          nUniqKeys, nodeStart, len, GPUTrainingParam(param));
      break;
    default:
      throw std::runtime_error("argMaxByKey: Bad algo passed!");
  }
}
// Writes the owning column index into colIds for every element, one thread
// block per column; colOffsets is the CSC-style column boundary array.
__global__ void AssignColIds(int* colIds, const int* colOffsets) {
  const int col = blockIdx.x;
  const int begin = colOffsets[col];
  const int finish = colOffsets[col + 1];
  for (int i = begin + threadIdx.x; i < finish; i += blockDim.x) {
    colIds[i] = col;
  }
}
// Advances every instance's node assignment one level down the tree along
// its node's default (missing-value) direction; instances landing in leaf
// or unused nodes are marked kUnusedNode.
__global__ void FillDefaultNodeIds(NodeIdT* nodeIdsPerInst,
                                   const DeviceNodeStats* nodes, int n_rows) {
  const int row = threadIdx.x + (blockIdx.x * blockDim.x);
  if (row >= n_rows) {
    return;
  }
  const NodeIdT nid = nodeIdsPerInst[row];
  // Instances already outside the active tree stay untouched.
  if (nid == kUnusedNode) {
    return;
  }
  const DeviceNodeStats stat = nodes[nid];
  if (stat.IsLeaf() || stat.IsUnused()) {
    nodeIdsPerInst[row] = kUnusedNode;
  } else {
    // Left child is 2*idx+1, right child 2*idx+2 in BFS order.
    nodeIdsPerInst[row] = (2 * stat.idx) + (stat.dir == kLeftDir ? 1 : 2);
  }
}
// For each non-missing feature value in the split column of its node,
// overwrites the default assignment with the correct child (left if the
// value is below the split threshold, right otherwise). Also initializes
// nodeLocations with the identity permutation for the subsequent sort.
__global__ void AssignNodeIds(NodeIdT* nodeIdsPerInst, int* nodeLocations,
                              const NodeIdT* nodeIds, const int* instId,
                              const DeviceNodeStats* nodes,
                              const int* colOffsets, const float* vals,
                              int nVals, int nCols) {
  int id = threadIdx.x + (blockIdx.x * blockDim.x);
  const int stride = blockDim.x * gridDim.x;
  for (; id < nVals; id += stride) {
    // fusing generation of indices for node locations
    nodeLocations[id] = id;
    // using nodeIds here since the previous kernel would have updated
    // the nodeIdsPerInst with all default assignments
    int nId = nodeIds[id];
    // if this element belongs to none of the currently active node-id's
    if (nId != kUnusedNode) {
      const DeviceNodeStats n = nodes[nId];
      int colId = n.fidx;
      // printf("nid=%d colId=%d id=%d\n", nId, colId, id);
      int start = colOffsets[colId];
      int end = colOffsets[colId + 1];
      // @todo: too much wasteful threads!!
      // Only elements lying in the node's own split column contribute.
      if ((id >= start) && (id < end) && !(n.IsLeaf() || n.IsUnused())) {
        // vals[id] >= fvalue ? right child (+2) : left child (+1).
        NodeIdT result = (2 * n.idx) + 1 + (vals[id] >= n.fvalue);
        nodeIdsPerInst[instId[id]] = result;
      }
    }
  }
}
// Marks as leaves (root_gain = -FLT_MAX) every used node whose children are
// either outside the dense node array or both unused.
__global__ void MarkLeavesKernel(DeviceNodeStats* nodes, int len) {
  const int nid = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (nid >= len || nodes[nid].IsUnused()) return;
  const int left = (nid << 1) + 1;
  const int right = (nid << 1) + 2;
  // Short-circuit keeps the child reads safely in-bounds.
  const bool childrenOutOfRange = (left >= len) || (right >= len);
  if (childrenOutOfRange ||
      (nodes[left].IsUnused() && nodes[right].IsUnused())) {
    nodes[nid].root_gain = -FLT_MAX;
  }
}
/**
 * @class GPUMaker
 * @brief Level-wise exact tree construction on a single GPU; registered
 * below as the "grow_gpu" updater. Feature values are kept sorted per
 * column on the device and instances are re-assigned to child nodes after
 * each level's best splits are found.
 */
class GPUMaker : public TreeUpdater {
 protected:
  TrainParam param_;
  /** whether we have initialized memory already (so as not to repeat!) */
  bool allocated_;
  /** feature values stored in column-major compressed format */
  dh::DoubleBuffer<float> vals_;
  common::Span<float> vals_cached_;
  /** corresponding instance id's of these feature values */
  dh::DoubleBuffer<int> instIds_;
  common::Span<int> inst_ids_cached_;
  /** column offsets for these feature values */
  common::Span<int> colOffsets_;
  common::Span<GradientPair> gradsInst_;
  dh::DoubleBuffer<NodeIdT> nodeAssigns_;
  dh::DoubleBuffer<int> nodeLocations_;
  // Dense BFS-ordered node array of size maxNodes_.
  common::Span<DeviceNodeStats> nodes_;
  common::Span<NodeIdT> node_assigns_per_inst_;
  common::Span<GradientPair> gradsums_;
  common::Span<GradientPair> gradscans_;
  common::Span<ExactSplitCandidate> nodeSplits_;
  int n_vals_;
  int n_rows_;
  int n_cols_;
  int maxNodes_;
  int maxLeaves_;
  // devices are only used for sharding the HostDeviceVector passed as a parameter;
  // the algorithm works with a single GPU only
  GPUSet devices_;
  dh::CubMemory tmp_mem_;
  common::Span<GradientPair> tmpScanGradBuff_;
  common::Span<int> tmp_scan_key_buff_;
  common::Span<int> colIds_;
  dh::BulkAllocator ba_;
 public:
  GPUMaker() : allocated_{false} {}
  ~GPUMaker() override = default;
  // Reads training parameters and derives dense-tree capacity from max_depth.
  void Init(const std::vector<std::pair<std::string, std::string>> &args) override {
    param_.InitAllowUnknown(args);
    maxNodes_ = (1 << (param_.max_depth + 1)) - 1;
    maxLeaves_ = 1 << param_.max_depth;
    devices_ = GPUSet::All(param_.gpu_id, param_.n_gpus);
  }
  // Builds every requested tree from the given gradients; the learning rate
  // is temporarily divided by the number of trees and restored afterwards.
  void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
              const std::vector<RegTree*>& trees) override {
    // rescale learning rate according to size of trees
    float lr = param_.learning_rate;
    param_.learning_rate = lr / trees.size();
    gpair->Shard(devices_);
    try {
      // build tree
      for (auto tree : trees) {
        UpdateTree(gpair, dmat, tree);
      }
    } catch (const std::exception& e) {
      LOG(FATAL) << "grow_gpu exception: " << e.what() << std::endl;
    }
    param_.learning_rate = lr;
  }
  /// @note: Update should be only after Init!!
  // Grows one tree level by level, then converts the dense device-side node
  // array into the sparse host-side RegTree.
  void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
                  RegTree* hTree) {
    if (!allocated_) {
      SetupOneTimeData(dmat);
    }
    for (int i = 0; i < param_.max_depth; ++i) {
      if (i == 0) {
        // make sure to start on a fresh tree with sorted values!
        dh::CopyDeviceSpan(vals_.CurrentSpan(), vals_cached_);
        dh::CopyDeviceSpan(instIds_.CurrentSpan(), inst_ids_cached_);
        TransferGrads(gpair);
      }
      // Level i holds 2^i nodes starting at BFS index 2^i - 1.
      int nNodes = 1 << i;
      NodeIdT nodeStart = nNodes - 1;
      InitNodeData(i, nodeStart, nNodes);
      FindSplit(i, nodeStart, nNodes);
    }
    // mark all the used nodes with unused children as leaf nodes
    MarkLeaves();
    Dense2SparseTree(hTree, nodes_, param_);
  }
  // Applies the best split of every node at this level: creates both child
  // nodes and records the split (threshold, column, default direction) on
  // the parent, or marks the node as a leaf if no valid split exists.
  void Split2Node(int nNodes, NodeIdT nodeStart) {
    auto d_nodes = nodes_;
    auto d_gradScans = gradscans_;
    auto d_gradsums = gradsums_;
    auto d_nodeAssigns = nodeAssigns_.CurrentSpan();
    auto d_colIds = colIds_;
    auto d_vals = vals_.Current();
    auto d_nodeSplits = nodeSplits_.data();
    int nUniqKeys = nNodes;
    float min_split_loss = param_.min_split_loss;
    auto gpu_param = GPUTrainingParam(param_);
    dh::LaunchN(param_.gpu_id, nNodes, [=] __device__(int uid) {
      int absNodeId = uid + nodeStart;
      ExactSplitCandidate s = d_nodeSplits[uid];
      if (s.IsSplittable(min_split_loss)) {
        int idx = s.index;
        int nodeInstId =
            Abs2UniqueKey(idx, d_nodeAssigns, d_colIds, nodeStart, nUniqKeys);
        bool missingLeft = true;
        const DeviceNodeStats& n = d_nodes[absNodeId];
        GradientPair gradScan = d_gradScans[idx];
        GradientPair gradSum = d_gradsums[nodeInstId];
        float thresh = d_vals[idx];
        int colId = d_colIds[idx];
        // get the default direction for the current node
        GradientPair missing = n.sum_gradients - gradSum;
        LossChangeMissing(gradScan, missing, n.sum_gradients, n.root_gain,
                          gpu_param, missingLeft);
        // get the score/weight/id/gradSum for left and right child nodes
        GradientPair lGradSum = missingLeft ? gradScan + missing : gradScan;
        GradientPair rGradSum = n.sum_gradients - lGradSum;
        // Create children
        d_nodes[LeftChildNodeIdx(absNodeId)] =
            DeviceNodeStats(lGradSum, LeftChildNodeIdx(absNodeId), gpu_param);
        d_nodes[RightChildNodeIdx(absNodeId)] =
            DeviceNodeStats(rGradSum, RightChildNodeIdx(absNodeId), gpu_param);
        // Set split for parent
        d_nodes[absNodeId].SetSplit(thresh, colId,
                                    missingLeft ? kLeftDir : kRightDir, lGradSum,
                                    rGradSum);
      } else {
        // cannot be split further, so this node is a leaf!
        d_nodes[absNodeId].root_gain = -FLT_MAX;
      }
    });
  }
  // One level of split finding: segmented scan/reduce of gradients, argmax
  // over candidate positions, then child creation. Shared-memory argmax is
  // used near the root where few nodes exist.
  void FindSplit(int level, NodeIdT nodeStart, int nNodes) {
    ReduceScanByKey(gradsums_, gradscans_, gradsInst_,
                    instIds_.CurrentSpan(), nodeAssigns_.CurrentSpan(), n_vals_, nNodes,
                    n_cols_, tmpScanGradBuff_, tmp_scan_key_buff_,
                    colIds_, nodeStart);
    ArgMaxByKey(nodeSplits_, gradscans_, gradsums_,
                vals_.CurrentSpan(), colIds_, nodeAssigns_.CurrentSpan(),
                nodes_, nNodes, nodeStart, n_vals_, param_,
                level <= kMaxAbkLevels ? kAbkSmem : kAbkGmem);
    Split2Node(nNodes, nodeStart);
  }
  // Performs a single bulk allocation of every device buffer the updater
  // needs for the lifetime of this DMatrix.
  void AllocateAllData(int offsetSize) {
    int tmpBuffSize = ScanTempBufferSize(n_vals_);
    ba_.Allocate(param_.gpu_id, &vals_, n_vals_,
                 &vals_cached_, n_vals_, &instIds_, n_vals_, &inst_ids_cached_, n_vals_,
                 &colOffsets_, offsetSize, &gradsInst_, n_rows_, &nodeAssigns_, n_vals_,
                 &nodeLocations_, n_vals_, &nodes_, maxNodes_, &node_assigns_per_inst_,
                 n_rows_, &gradsums_, maxLeaves_ * n_cols_, &gradscans_, n_vals_,
                 &nodeSplits_, maxLeaves_, &tmpScanGradBuff_, tmpBuffSize,
                 &tmp_scan_key_buff_, tmpBuffSize, &colIds_, n_vals_);
  }
  // One-time conversion of the DMatrix to device-resident, per-column
  // sorted CSC-style arrays; only run on the first UpdateTree call.
  void SetupOneTimeData(DMatrix* dmat) {
    if (!dmat->SingleColBlock()) {
      LOG(FATAL) << "exact::GPUBuilder - must have 1 column block";
    }
    std::vector<float> fval;
    std::vector<int> fId;
    std::vector<int> offset;
    ConvertToCsc(dmat, &fval, &fId, &offset);
    AllocateAllData(static_cast<int>(offset.size()));
    TransferAndSortData(fval, fId, offset);
    allocated_ = true;
  }
  // Flattens the DMatrix into column-major (value, instance-id) arrays plus
  // per-column offsets; also records n_rows_/n_cols_/n_vals_.
  void ConvertToCsc(DMatrix* dmat, std::vector<float>* fval,
                    std::vector<int>* fId, std::vector<int>* offset) {
    const MetaInfo& info = dmat->Info();
    CHECK(info.num_col_ < std::numeric_limits<int>::max());
    CHECK(info.num_row_ < std::numeric_limits<int>::max());
    n_rows_ = static_cast<int>(info.num_row_);
    n_cols_ = static_cast<int>(info.num_col_);
    offset->reserve(n_cols_ + 1);
    offset->push_back(0);
    fval->reserve(n_cols_ * n_rows_);
    fId->reserve(n_cols_ * n_rows_);
    // in case you end up with a DMatrix having no column access
    // then make sure to enable that before copying the data!
    for (const auto& batch : dmat->GetSortedColumnBatches()) {
      for (int i = 0; i < batch.Size(); i++) {
        auto col = batch[i];
        for (const Entry& e : col) {
          int inst_id = static_cast<int>(e.index);
          fval->push_back(e.fvalue);
          fId->push_back(inst_id);
        }
        offset->push_back(static_cast<int>(fval->size()));
      }
    }
    CHECK(fval->size() < std::numeric_limits<int>::max());
    n_vals_ = static_cast<int>(fval->size());
  }
  // Copies the CSC arrays to the device, sorts each column's values (with
  // instance ids as payload), and caches the sorted state for reuse by
  // every subsequent tree.
  void TransferAndSortData(const std::vector<float>& fval,
                           const std::vector<int>& fId,
                           const std::vector<int>& offset) {
    dh::CopyVectorToDeviceSpan(vals_.CurrentSpan(), fval);
    dh::CopyVectorToDeviceSpan(instIds_.CurrentSpan(), fId);
    dh::CopyVectorToDeviceSpan(colOffsets_, offset);
    dh::SegmentedSort<float, int>(&tmp_mem_, &vals_, &instIds_, n_vals_, n_cols_,
                                  colOffsets_);
    dh::CopyDeviceSpan(vals_cached_, vals_.CurrentSpan());
    dh::CopyDeviceSpan(inst_ids_cached_, instIds_.CurrentSpan());
    AssignColIds<<<n_cols_, 512>>>(colIds_.data(), colOffsets_.data());
  }
  // Gathers this round's gradients to the device and reduces them to obtain
  // the root node's gradient sum (stored in gradsums_[0]).
  void TransferGrads(HostDeviceVector<GradientPair>* gpair) {
    gpair->GatherTo(
        thrust::device_pointer_cast(gradsInst_.data()),
        thrust::device_pointer_cast(gradsInst_.data() + gradsInst_.size()));
    // evaluate the full-grad reduction for the root node
    dh::SumReduction<GradientPair>(tmp_mem_, gradsInst_, gradsums_, n_rows_);
  }
  // Prepares node assignments for the given level: level 0 resets all state
  // and initializes the root; deeper levels push every instance one level
  // down and re-sort the elements by their new node ids.
  void InitNodeData(int level, NodeIdT nodeStart, int nNodes) {
    // all instances belong to root node at the beginning!
    if (level == 0) {
      thrust::fill(thrust::device_pointer_cast(nodes_.data()),
                   thrust::device_pointer_cast(nodes_.data() + nodes_.size()),
                   DeviceNodeStats());
      thrust::fill(thrust::device_pointer_cast(nodeAssigns_.Current()),
                   thrust::device_pointer_cast(nodeAssigns_.Current() +
                                               nodeAssigns_.Size()),
                   0);
      thrust::fill(thrust::device_pointer_cast(node_assigns_per_inst_.data()),
                   thrust::device_pointer_cast(node_assigns_per_inst_.data() +
                                               node_assigns_per_inst_.size()),
                   0);
      // for root node, just update the gradient/score/weight/id info
      // before splitting it! Currently all data is on GPU, hence this
      // stupid little kernel
      auto d_nodes = nodes_;
      auto d_sums = gradsums_;
      auto gpu_params = GPUTrainingParam(param_);
      dh::LaunchN(param_.gpu_id, 1, [=] __device__(int idx) {
        d_nodes[0] = DeviceNodeStats(d_sums[0], 0, gpu_params);
      });
    } else {
      const int BlkDim = 256;
      const int ItemsPerThread = 4;
      // assign default node ids first
      int nBlks = dh::DivRoundUp(n_rows_, BlkDim);
      FillDefaultNodeIds<<<nBlks, BlkDim>>>(node_assigns_per_inst_.data(),
                                            nodes_.data(), n_rows_);
      // evaluate the correct child indices of non-missing values next
      nBlks = dh::DivRoundUp(n_vals_, BlkDim * ItemsPerThread);
      AssignNodeIds<<<nBlks, BlkDim>>>(
          node_assigns_per_inst_.data(), nodeLocations_.Current(),
          nodeAssigns_.Current(), instIds_.Current(), nodes_.data(),
          colOffsets_.data(), vals_.Current(), n_vals_, n_cols_);
      // gather the node assignments across all other columns too
      dh::Gather(param_.gpu_id, nodeAssigns_.Current(),
                 node_assigns_per_inst_.data(), instIds_.Current(), n_vals_);
      SortKeys(level);
    }
  }
  // Re-sorts each column's elements by node id (stable within a column) and
  // permutes the value/instance-id buffers to match; double buffers are
  // flipped so Current() points at the permuted data.
  void SortKeys(int level) {
    // segmented-sort the arrays based on node-id's
    // but we don't need more than level+1 bits for sorting!
    SegmentedSort(&tmp_mem_, &nodeAssigns_, &nodeLocations_, n_vals_, n_cols_,
                  colOffsets_, 0, level + 1);
    dh::Gather<float, int>(param_.gpu_id, vals_.other(),
                           vals_.Current(), instIds_.other(), instIds_.Current(),
                           nodeLocations_.Current(), n_vals_);
    vals_.buff.selector ^= 1;
    instIds_.buff.selector ^= 1;
  }
  // Converts used nodes whose children are all unused into leaves.
  void MarkLeaves() {
    const int BlkDim = 128;
    int nBlks = dh::DivRoundUp(maxNodes_, BlkDim);
    MarkLeavesKernel<<<nBlks, BlkDim>>>(nodes_.data(), maxNodes_);
  }
};
// Registers this updater under the "grow_gpu" name used by the 'updater'
// training parameter.
TSOOBGX_REGISTER_TREE_UPDATER(GPUMaker, "grow_gpu")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUMaker(); });
} // namespace tree
} // namespace tsoobgx
|
8880669f5c5746f110ad2581435a3025a1ce566c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hipcub/hipcub.hpp>
// Stress test for the hipCUB caching device allocator: repeated alloc/free
// of a constant size should mostly be served from the allocator's cache.
int main() {
  hipcub::CachingDeviceAllocator allocator;
  void *d;
  for (int i = 0; i < 100000; i++) {
    // Stop at the first failure instead of silently looping on an error
    // (the return codes were previously discarded).
    if (allocator.DeviceAllocate(&d, 1024) != hipSuccess) return 1;
    if (allocator.DeviceFree(d) != hipSuccess) return 1;
  }
}
| 8880669f5c5746f110ad2581435a3025a1ce566c.cu | #include <cub/cub.cuh>
// Stress test for the CUB caching device allocator: repeated alloc/free of
// a constant size should mostly be served from the allocator's cache.
int main() {
  cub::CachingDeviceAllocator allocator;
  void *d;
  for (int i = 0; i < 100000; i++) {
    // Stop at the first failure instead of silently looping on an error
    // (the return codes were previously discarded).
    if (allocator.DeviceAllocate(&d, 1024) != cudaSuccess) return 1;
    if (allocator.DeviceFree(d) != cudaSuccess) return 1;
  }
}
|
a5caddd98970bdd6ac571c33c60f25b0265229b2.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 80
#define F 8
#define ITERATIONS (unsigned)( 10000 )
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
// Checks an explicit runtime-API return code; on failure prints the call
// site (file/line) plus the error string and terminates the process.
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
	if(hipSuccess != err){
		fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
		exit(-1);
	}
}
// This will output the proper error string when calling hipGetLastError
// Checks the sticky last-error state (needed after kernel launches, which
// return no status directly); on failure prints the message plus call site
// and terminates the process.
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
	hipError_t err = hipGetLastError();
	if (hipSuccess != err){
		fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
		exit(-1);
	}
}
// end of CUDA Helper Functions
// Device code
// Memory-read microbenchmark kernel: each thread streams through A with a
// large stride for ITERATIONS loads per outer round, then writes a result
// to C so the loads are observable.
// NOTE(review): m_sum keeps only the last load of each inner loop; this
// appears intentional for a power/bandwidth benchmark where the values do
// not matter, but an aggressive compiler could elide earlier loads.
__global__ void PowerKernal(int* A, int* C, int iterations){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	//Do Some Computation
	int m_sum=0;
	for (unsigned j=0; j<iterations; j++){
		for(unsigned k=0; k<ITERATIONS; ++k){
			m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))];
		}
		m_sum+=j;
	}
	C[tid]=m_sum;
	__syncthreads();
}
// Host code
// Driver: parses the iteration count, builds the input array, runs the
// kernel under event-based timing, and reports GPU execution time.
int main(int argc, char** argv)
{
	int iterations;
	// The single command-line argument is the outer iteration count.
	if (argc != 2){
		fprintf(stderr,"usage: %s #iterations\n",argv[0]);
		exit(1);
	}
	else{
		iterations = atoi(argv[1]);
	}
	printf("Power Microbenchmark with %d iterations\n",iterations);
	// N covers the largest index PowerKernal can touch:
	// tid*F + (ITERATIONS-1)*max_tid*F < max_tid*F + ITERATIONS*max_tid*F.
	unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
	size_t size = N * sizeof(int);
	// Allocate input vectors h_A and h_B in host memory
	h_A = (int*)malloc(size);
	if (h_A == 0) CleanupResources();
	//h_B = (float*)malloc(size);
	//if (h_B == 0) CleanupResources();
	h_C = (int*)malloc(size);
	if (h_C == 0) CleanupResources();
	// Initialize input vectors
	RandomInit(h_A, N);
	//RandomInit(h_B, N);
	// Allocate vectors in device memory
	checkCudaErrors( hipMalloc((void**)&d_A, size) );
	//checkCudaErrors( hipMalloc((void**)&d_B, size) );
	checkCudaErrors( hipMalloc((void**)&d_C, size) );
	// Copy vectors from host memory to device memory
	checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
	// Events bracket the launch to measure device-side execution time.
	hipEvent_t start, stop;
	float elapsedTime = 0;
	checkCudaErrors(hipEventCreate(&start));
	checkCudaErrors(hipEventCreate(&stop));
	dim3 dimGrid(NUM_OF_BLOCKS,1);
	dim3 dimBlock(THREADS_PER_BLOCK,1);
	checkCudaErrors(hipEventRecord(start));
	hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, iterations);
	checkCudaErrors(hipEventRecord(stop));
	checkCudaErrors(hipEventSynchronize(stop));
	checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
	printf("gpu execution time = %.2f s\n", elapsedTime/1000);
	getLastCudaError("kernel launch failure");
	hipDeviceSynchronize();
	// Copy result from device memory to host memory
	// h_C contains the result in host memory
	checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
	checkCudaErrors(hipEventDestroy(start));
	checkCudaErrors(hipEventDestroy(stop));
	CleanupResources();
	return 0;
}
// Releases every device and host buffer that was successfully allocated;
// safe to call from any point in main, including partial-allocation paths.
void CleanupResources(void){
	if (d_A) hipFree(d_A);
	if (d_C) hipFree(d_C);
	if (h_A) free(h_A);
	if (h_C) free(h_C);
}
// Allocates an array with random float entries.
// NOTE(review): rand() / RAND_MAX is *integer* division, so every entry is
// 0 (or 1 only if rand() returns exactly RAND_MAX). The data only feeds a
// memory-read benchmark where values are irrelevant, but confirm whether a
// floating-point division was intended before relying on the contents.
void RandomInit(int* data, int n){
	for (int i = 0; i < n; ++i)
		data[i] = (int)(rand() / RAND_MAX);
}
| a5caddd98970bdd6ac571c33c60f25b0265229b2.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 80
#define F 8
#define ITERATIONS (unsigned)( 10000 )
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
// Checks an explicit runtime-API return code; on failure prints the call
// site (file/line) plus the error string and terminates the process.
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
	if(cudaSuccess != err){
		fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
		exit(-1);
	}
}
// This will output the proper error string when calling cudaGetLastError
// Checks the sticky last-error state (needed after kernel launches, which
// return no status directly); on failure prints the message plus call site
// and terminates the process.
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
	cudaError_t err = cudaGetLastError();
	if (cudaSuccess != err){
		fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
		exit(-1);
	}
}
// end of CUDA Helper Functions
// Device code
// Memory-read microbenchmark kernel: each thread streams through A with a
// large stride for ITERATIONS loads per outer round, then writes a result
// to C so the loads are observable.
// NOTE(review): m_sum keeps only the last load of each inner loop; this
// appears intentional for a power/bandwidth benchmark where the values do
// not matter, but an aggressive compiler could elide earlier loads.
__global__ void PowerKernal(int* A, int* C, int iterations){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	//Do Some Computation
	int m_sum=0;
	for (unsigned j=0; j<iterations; j++){
		for(unsigned k=0; k<ITERATIONS; ++k){
			m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))];
		}
		m_sum+=j;
	}
	C[tid]=m_sum;
	__syncthreads();
}
// Host code
// Driver: parses the iteration count, builds the input array, runs the
// kernel under event-based timing, and reports GPU execution time.
int main(int argc, char** argv)
{
	int iterations;
	// The single command-line argument is the outer iteration count.
	if (argc != 2){
		fprintf(stderr,"usage: %s #iterations\n",argv[0]);
		exit(1);
	}
	else{
		iterations = atoi(argv[1]);
	}
	printf("Power Microbenchmark with %d iterations\n",iterations);
	// N covers the largest index PowerKernal can touch:
	// tid*F + (ITERATIONS-1)*max_tid*F < max_tid*F + ITERATIONS*max_tid*F.
	unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
	size_t size = N * sizeof(int);
	// Allocate input vectors h_A and h_B in host memory
	h_A = (int*)malloc(size);
	if (h_A == 0) CleanupResources();
	//h_B = (float*)malloc(size);
	//if (h_B == 0) CleanupResources();
	h_C = (int*)malloc(size);
	if (h_C == 0) CleanupResources();
	// Initialize input vectors
	RandomInit(h_A, N);
	//RandomInit(h_B, N);
	// Allocate vectors in device memory
	checkCudaErrors( cudaMalloc((void**)&d_A, size) );
	//checkCudaErrors( cudaMalloc((void**)&d_B, size) );
	checkCudaErrors( cudaMalloc((void**)&d_C, size) );
	// Copy vectors from host memory to device memory
	checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
	// Events bracket the launch to measure device-side execution time.
	cudaEvent_t start, stop;
	float elapsedTime = 0;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));
	dim3 dimGrid(NUM_OF_BLOCKS,1);
	dim3 dimBlock(THREADS_PER_BLOCK,1);
	checkCudaErrors(cudaEventRecord(start));
	PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, iterations);
	checkCudaErrors(cudaEventRecord(stop));
	checkCudaErrors(cudaEventSynchronize(stop));
	checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
	printf("gpu execution time = %.2f s\n", elapsedTime/1000);
	getLastCudaError("kernel launch failure");
	// cudaThreadSynchronize() is deprecated (and removed in CUDA 12);
	// cudaDeviceSynchronize() is the supported equivalent.
	cudaDeviceSynchronize();
	// Copy result from device memory to host memory
	// h_C contains the result in host memory
	checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
	checkCudaErrors(cudaEventDestroy(start));
	checkCudaErrors(cudaEventDestroy(stop));
	CleanupResources();
	return 0;
}
// Release every buffer this program may have allocated; null checks make it
// safe to call at any point, including after a partial setup failure.
void CleanupResources(void){
    // Device-side buffers.
    if (d_A) cudaFree(d_A);
    if (d_C) cudaFree(d_C);
    // Host-side buffers.
    if (h_A) free(h_A);
    if (h_C) free(h_C);
}
// Fills an array with small random integer entries (the old comment said
// "float" — a leftover from the SDK vecAdd sample; this array is int).
void RandomInit(int* data, int n){
    // BUG FIX: the original wrote (int)(rand() / RAND_MAX), an *integer*
    // division that evaluates to 0 for every draw except the one-in-RAND_MAX
    // case rand() == RAND_MAX, so the "random" array was effectively all
    // zeros. Use a modulus so the entries are genuinely varied.
    for (int i = 0; i < n; ++i)
        data[i] = rand() % 100;
}
|
770d9b3cfc70f59594b077ec59286f980acf3752.hip | // !!! This is a file automatically generated by hipify!!!
#include "DT.cuh"
// Constructor: record the per-chunk size bounds and allocate pinned host
// arrays for the per-chunk DT/document length tables. The large per-element
// host buffers (NZDTCount / DTIndex / DTValue) are deliberately *not*
// allocated here — see the commented-out allocations below.
DTChunk::DTChunk(int argmaxDTLength, int argMaxDocLength, int argNumChunks) {
    maxDTLength = argmaxDTLength;    // upper bound on any chunk's DT length
    maxDocLength = argMaxDocLength;  // upper bound on any chunk's doc count
    numChunks = argNumChunks;
    //NZDTCount = new int[maxDocLength];
    //
    //DTIndex = new unsigned short int[maxDTLength];
    //DTValue = new int[maxDTLength];
    ////DTCount = new int[maxDocLength];
    ////DTOffset = new int[maxDocLength];
    //DTLengthVec = new int[numChunks];
    //docLengthVec = new int[numChunks];
    // hipHostMalloc((void**)&NZDTCount, maxDocLength * sizeof(int));
    // hipHostMalloc((void**)&DTIndex, maxDTLength * sizeof(unsigned short int));
    // hipHostMalloc((void**)&DTValue, maxDTLength * sizeof(int));
    // Pinned (page-locked) host memory so these tables can feed async copies.
    hipHostMalloc((void**)&DTLengthVec, numChunks * sizeof(int));
    hipHostMalloc((void**)&docLengthVec, numChunks * sizeof(int));
}
// Reads the per-chunk DT length and document count tables from
// <prefix>/DTLength.txt and <prefix>/docLength.txt into the pinned arrays
// allocated by the constructor.
// NOTE(review): the streams are opened with ios::binary yet parsed with
// operator>>, and there is no is_open()/fail() check — a missing file
// silently leaves the tables unfilled; confirm the inputs always exist.
void DTChunk::loadDocDTLength(string argFilePrefix) {
    ifstream DTLength((argFilePrefix + string("/DTLength.txt")).c_str(), ios::binary);//store max Doc and DT length
    ifstream docLength((argFilePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length
    for (int chunkId = 0; chunkId < numChunks; chunkId++) {
        DTLength >> DTLengthVec[chunkId];
        docLength >> docLengthVec[chunkId];
    }
    DTLength.close();
    docLength.close();
    // memset(NZDTCount, 0, maxDocLength * sizeof(int));
    // memset(DTIndex, 0, maxDTLength * sizeof(unsigned short int));
    // memset(DTValue, 0, maxDTLength * sizeof(int));
}
// Intentionally a no-op: all host-side zeroing has been disabled, matching
// the constructor where the corresponding host buffers are no longer
// allocated. Kept so existing call sites still compile.
void DTChunk::CPUMemSet() {
    // memset(NZDTCount, 0, maxDocLength * sizeof(int));
    // memset(DTIndex, 0, maxDTLength * sizeof(unsigned short int));
    // memset(DTValue, 0, maxDTLength * sizeof(int));
    //memset(DTCount, 0, maxDocLength * sizeof(int));
    //memset(DTOffset, 0, maxDocLength * sizeof(int));
}
// Allocates one set of device-side DT scratch buffers per stream (so
// concurrent streams never share scratch space) and prints the projected
// device-memory footprint.
void DTChunk::GPUMemAllocate() {
    for (int i = 0; i < numStreams; i++) {
        hipMalloc((void**)&deviceNZDTCount[i], (maxDocLength) * sizeof(int));
        /*hipMalloc((void**)&deviceDTIndex[i], (maxDTLength) * sizeof(unsigned short int));
        hipMalloc((void**)&deviceDTValue[i], (maxDTLength) * sizeof(int));*/
        hipMalloc((void**)&deviceDTIndexValue[i], (maxDTLength) * sizeof(int));
        hipMalloc((void**)&deviceDTCount[i], (maxDocLength) * sizeof(int));
        hipMalloc((void**)&deviceDTOffset[i], (maxDocLength) * sizeof(int));
    }
    // NOTE(review): the leading factor 2 hard-codes numStreams == 2; the
    // loop above actually allocates numStreams * (3*maxDocLength +
    // maxDTLength) ints — keep in sync if numStreams ever changes.
    DTMemory = 2 * (3 * maxDocLength + maxDTLength) * sizeof(int) / 1000000000.0;
    printf("DT memory usage:%f GB\n", DTMemory);
}
// Loads each chunk's per-document (count, offset) pairs from
// <prefix>/chunk<i>/DTCountOffset.txt into freshly pinned host buffers and
// stores the pointers in DTCountVec / DTOffsetVec.
// NOTE(review): ownership of the pinned buffers transfers to the vectors;
// nothing in this translation unit ever hipHostFree's them — confirm a
// destructor/teardown elsewhere releases them.
void DTChunk::loadDTCountOffset(string argFilePrefix) {
    /*chunkId = argChunkId;*/
    for (int chunkId = 0; chunkId < numChunks; chunkId++) {
        string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId);
        ifstream DTCountOffset((chunkFolderName + string("/DTCountOffset.txt")).c_str(), ios::binary);//store Word offset of TL
        /*int* DTCount = new int[docLengthVec[chunkId]];
        int* DTOffset = new int[docLengthVec[chunkId]];*/
        int* DTCount;
        int* DTOffset;
        // Pinned allocations so these tables can feed hipMemcpyAsync later.
        hipHostMalloc((void**)&DTCount, docLengthVec[chunkId] * sizeof(int));
        hipHostMalloc((void**)&DTOffset, docLengthVec[chunkId] * sizeof(int));
        memset(DTCount, 0, docLengthVec[chunkId] * sizeof(int));
        memset(DTOffset, 0, docLengthVec[chunkId] * sizeof(int));
        for (int i = 0; i < docLengthVec[chunkId]; i++)
        {
            DTCountOffset >> DTCount[i] >> DTOffset[i];
        }
        DTCountOffset.close();
        DTCountVec.push_back(DTCount);
        DTOffsetVec.push_back(DTOffset);
    }
}
//void DTChunk::CPU2GPU(int argChunkId, int argStreamId, hipStream_t& stream) {
// chunkId = argChunkId;
// //docLength = argDocLength;
// hipMemcpy(deviceNZDTCount, NZDTCount, (docLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice);
// hipMemcpy(deviceDTIndex, DTIndex, (DTLengthVec[chunkId]) * sizeof(unsigned short int), hipMemcpyHostToDevice);
// hipMemcpy(deviceDTValue, DTValue, (DTLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice);
//
//
//}
// Asynchronously zeroes the given stream's device DT buffers on `stream`.
// Clears the *maximum* buffer sizes (a safe upper bound for any chunk);
// the chunk id is only recorded for bookkeeping.
void DTChunk::GPUMemSet(int argChunkId, int argStreamId, hipStream_t& stream)
{
    chunkId = argChunkId;
    hipMemsetAsync(deviceNZDTCount[argStreamId], 0, (maxDocLength) * sizeof(int),stream);
    /*hipMemsetAsync(deviceDTIndex[argStreamId], 0, (maxDTLength) * sizeof(unsigned short int),stream);
    hipMemsetAsync(deviceDTValue[argStreamId], 0, (maxDTLength) * sizeof(int),stream);*/
    hipMemsetAsync(deviceDTIndexValue[argStreamId], 0, (maxDTLength) * sizeof(int), stream);
    /*hipMemcpyAsync(deviceNZDTCount[argStreamId], NZDTCount, (maxDocLength) * sizeof(int), hipMemcpyHostToDevice,stream);
    hipMemcpyAsync(deviceDTIndex[argStreamId], DTIndex, (maxDTLength) * sizeof(unsigned short int), hipMemcpyHostToDevice, stream);
    hipMemcpyAsync(deviceDTValue[argStreamId], DTValue, (maxDTLength) * sizeof(int), hipMemcpyHostToDevice,stream);*/
}
// Asynchronously uploads chunk argChunkId's host-side (count, offset)
// tables into the given stream's device buffers. The host buffers are
// pinned (see loadDTCountOffset), so the copies can genuinely overlap.
void DTChunk::CPU2GPUDTCountOffset(int argChunkId, int argStreamId, hipStream_t& stream) {
    chunkId = argChunkId;
    //docLength = argDocLength;
    hipMemcpyAsync(deviceDTCount[argStreamId], DTCountVec[chunkId], (docLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice,stream);
    hipMemcpyAsync(deviceDTOffset[argStreamId], DTOffsetVec[chunkId], (docLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice,stream);
}
//void DTChunk::GPU2CPU(int argChunkId, int argStreamId, hipStream_t& stream) {
// chunkId = argChunkId;
// //docLength = argDocLength;
// hipMemcpy(NZDTCount, deviceNZDTCount, (docLengthVec[chunkId]) * sizeof(int), hipMemcpyDeviceToHost);
// hipMemcpy(DTIndex, deviceDTIndex, (DTLengthVec[chunkId]) * sizeof(unsigned short int), hipMemcpyDeviceToHost);
// hipMemcpy(DTValue, deviceDTValue, (DTLengthVec[chunkId]) * sizeof(int), hipMemcpyDeviceToHost);
//
//}
// Dumps chunk argChunkId's host-side results to
// <prefix>/chunk<i>/NZDTCount.txt and .../DTIndexValue.txt.
// NOTE(review): this reads NZDTCount, DTIndex and DTValue, whose host
// allocations are commented out in the constructor, and the GPU2CPU
// download that would fill them is also commented out — as written these
// pointers look unallocated; confirm they are assigned elsewhere before
// this is called.
void DTChunk::CPU2Disk(string argFilePrefix,int argChunkId) {
    chunkId = argChunkId;
    //docLength = argDocLength;
    string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId);
    ofstream OutputNZDTCount((chunkFolderName + string("/NZDTCount.txt")).c_str(), ios::binary);
    for (int i = 0; i < docLengthVec[chunkId]; i++) {
        OutputNZDTCount << NZDTCount[i] << "\n";
    }
    OutputNZDTCount.close();
    ofstream OutputDTIndexValue((chunkFolderName + string("/DTIndexValue.txt")).c_str(), ios::binary);
    for (int i = 0; i < DTLengthVec[chunkId]; i++) {
        OutputDTIndexValue << DTIndex[i] <<" "<<DTValue[i]<< "\n";
    }
    OutputDTIndexValue.close();
}
| 770d9b3cfc70f59594b077ec59286f980acf3752.cu | #include "DT.cuh"
// Constructor: record the per-chunk size bounds and allocate pinned host
// arrays for the per-chunk DT/document length tables. The large per-element
// host buffers (NZDTCount / DTIndex / DTValue) are deliberately *not*
// allocated here — see the commented-out allocations below.
DTChunk::DTChunk(int argmaxDTLength, int argMaxDocLength, int argNumChunks) {
    maxDTLength = argmaxDTLength;    // upper bound on any chunk's DT length
    maxDocLength = argMaxDocLength;  // upper bound on any chunk's doc count
    numChunks = argNumChunks;
    //NZDTCount = new int[maxDocLength];
    //
    //DTIndex = new unsigned short int[maxDTLength];
    //DTValue = new int[maxDTLength];
    ////DTCount = new int[maxDocLength];
    ////DTOffset = new int[maxDocLength];
    //DTLengthVec = new int[numChunks];
    //docLengthVec = new int[numChunks];
    // cudaMallocHost((void**)&NZDTCount, maxDocLength * sizeof(int));
    // cudaMallocHost((void**)&DTIndex, maxDTLength * sizeof(unsigned short int));
    // cudaMallocHost((void**)&DTValue, maxDTLength * sizeof(int));
    // Pinned (page-locked) host memory so these tables can feed async copies.
    cudaMallocHost((void**)&DTLengthVec, numChunks * sizeof(int));
    cudaMallocHost((void**)&docLengthVec, numChunks * sizeof(int));
}
// Reads the per-chunk DT length and document count tables from
// <prefix>/DTLength.txt and <prefix>/docLength.txt into the pinned arrays
// allocated by the constructor.
// NOTE(review): opened ios::binary yet parsed with operator>>, and no
// is_open()/fail() check — a missing file silently leaves the tables
// unfilled; confirm the inputs always exist.
void DTChunk::loadDocDTLength(string argFilePrefix) {
    ifstream DTLength((argFilePrefix + string("/DTLength.txt")).c_str(), ios::binary);//store max Doc and DT length
    ifstream docLength((argFilePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length
    for (int chunkId = 0; chunkId < numChunks; chunkId++) {
        DTLength >> DTLengthVec[chunkId];
        docLength >> docLengthVec[chunkId];
    }
    DTLength.close();
    docLength.close();
    // memset(NZDTCount, 0, maxDocLength * sizeof(int));
    // memset(DTIndex, 0, maxDTLength * sizeof(unsigned short int));
    // memset(DTValue, 0, maxDTLength * sizeof(int));
}
// Intentionally a no-op: all host-side zeroing has been disabled, matching
// the constructor where the corresponding host buffers are no longer
// allocated. Kept so existing call sites still compile.
void DTChunk::CPUMemSet() {
    // memset(NZDTCount, 0, maxDocLength * sizeof(int));
    // memset(DTIndex, 0, maxDTLength * sizeof(unsigned short int));
    // memset(DTValue, 0, maxDTLength * sizeof(int));
    //memset(DTCount, 0, maxDocLength * sizeof(int));
    //memset(DTOffset, 0, maxDocLength * sizeof(int));
}
// Allocates one set of device-side DT scratch buffers per stream (so
// concurrent streams never share scratch space) and prints the projected
// device-memory footprint.
void DTChunk::GPUMemAllocate() {
    for (int i = 0; i < numStreams; i++) {
        cudaMalloc((void**)&deviceNZDTCount[i], (maxDocLength) * sizeof(int));
        /*cudaMalloc((void**)&deviceDTIndex[i], (maxDTLength) * sizeof(unsigned short int));
        cudaMalloc((void**)&deviceDTValue[i], (maxDTLength) * sizeof(int));*/
        cudaMalloc((void**)&deviceDTIndexValue[i], (maxDTLength) * sizeof(int));
        cudaMalloc((void**)&deviceDTCount[i], (maxDocLength) * sizeof(int));
        cudaMalloc((void**)&deviceDTOffset[i], (maxDocLength) * sizeof(int));
    }
    // NOTE(review): the leading factor 2 hard-codes numStreams == 2; the
    // loop above actually allocates numStreams * (3*maxDocLength +
    // maxDTLength) ints — keep in sync if numStreams ever changes.
    DTMemory = 2 * (3 * maxDocLength + maxDTLength) * sizeof(int) / 1000000000.0;
    printf("DT memory usage:%f GB\n", DTMemory);
}
// Loads each chunk's per-document (count, offset) pairs from
// <prefix>/chunk<i>/DTCountOffset.txt into freshly pinned host buffers and
// stores the pointers in DTCountVec / DTOffsetVec.
// NOTE(review): ownership of the pinned buffers transfers to the vectors;
// nothing in this translation unit ever cudaFreeHost's them — confirm a
// destructor/teardown elsewhere releases them.
void DTChunk::loadDTCountOffset(string argFilePrefix) {
    /*chunkId = argChunkId;*/
    for (int chunkId = 0; chunkId < numChunks; chunkId++) {
        string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId);
        ifstream DTCountOffset((chunkFolderName + string("/DTCountOffset.txt")).c_str(), ios::binary);//store Word offset of TL
        /*int* DTCount = new int[docLengthVec[chunkId]];
        int* DTOffset = new int[docLengthVec[chunkId]];*/
        int* DTCount;
        int* DTOffset;
        // Pinned allocations so these tables can feed cudaMemcpyAsync later.
        cudaMallocHost((void**)&DTCount, docLengthVec[chunkId] * sizeof(int));
        cudaMallocHost((void**)&DTOffset, docLengthVec[chunkId] * sizeof(int));
        memset(DTCount, 0, docLengthVec[chunkId] * sizeof(int));
        memset(DTOffset, 0, docLengthVec[chunkId] * sizeof(int));
        for (int i = 0; i < docLengthVec[chunkId]; i++)
        {
            DTCountOffset >> DTCount[i] >> DTOffset[i];
        }
        DTCountOffset.close();
        DTCountVec.push_back(DTCount);
        DTOffsetVec.push_back(DTOffset);
    }
}
//void DTChunk::CPU2GPU(int argChunkId, int argStreamId, cudaStream_t& stream) {
// chunkId = argChunkId;
// //docLength = argDocLength;
// cudaMemcpy(deviceNZDTCount, NZDTCount, (docLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(deviceDTIndex, DTIndex, (DTLengthVec[chunkId]) * sizeof(unsigned short int), cudaMemcpyHostToDevice);
// cudaMemcpy(deviceDTValue, DTValue, (DTLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice);
//
//
//}
// Asynchronously zeroes the given stream's device DT buffers on `stream`.
// Clears the *maximum* buffer sizes (a safe upper bound for any chunk);
// the chunk id is only recorded for bookkeeping.
void DTChunk::GPUMemSet(int argChunkId, int argStreamId, cudaStream_t& stream)
{
    chunkId = argChunkId;
    cudaMemsetAsync(deviceNZDTCount[argStreamId], 0, (maxDocLength) * sizeof(int),stream);
    /*cudaMemsetAsync(deviceDTIndex[argStreamId], 0, (maxDTLength) * sizeof(unsigned short int),stream);
    cudaMemsetAsync(deviceDTValue[argStreamId], 0, (maxDTLength) * sizeof(int),stream);*/
    cudaMemsetAsync(deviceDTIndexValue[argStreamId], 0, (maxDTLength) * sizeof(int), stream);
    /*cudaMemcpyAsync(deviceNZDTCount[argStreamId], NZDTCount, (maxDocLength) * sizeof(int), cudaMemcpyHostToDevice,stream);
    cudaMemcpyAsync(deviceDTIndex[argStreamId], DTIndex, (maxDTLength) * sizeof(unsigned short int), cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(deviceDTValue[argStreamId], DTValue, (maxDTLength) * sizeof(int), cudaMemcpyHostToDevice,stream);*/
}
// Asynchronously uploads chunk argChunkId's host-side (count, offset)
// tables into the given stream's device buffers. The host buffers are
// pinned (see loadDTCountOffset), so the copies can genuinely overlap.
void DTChunk::CPU2GPUDTCountOffset(int argChunkId, int argStreamId, cudaStream_t& stream) {
    chunkId = argChunkId;
    //docLength = argDocLength;
    cudaMemcpyAsync(deviceDTCount[argStreamId], DTCountVec[chunkId], (docLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice,stream);
    cudaMemcpyAsync(deviceDTOffset[argStreamId], DTOffsetVec[chunkId], (docLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice,stream);
}
//void DTChunk::GPU2CPU(int argChunkId, int argStreamId, cudaStream_t& stream) {
// chunkId = argChunkId;
// //docLength = argDocLength;
// cudaMemcpy(NZDTCount, deviceNZDTCount, (docLengthVec[chunkId]) * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(DTIndex, deviceDTIndex, (DTLengthVec[chunkId]) * sizeof(unsigned short int), cudaMemcpyDeviceToHost);
// cudaMemcpy(DTValue, deviceDTValue, (DTLengthVec[chunkId]) * sizeof(int), cudaMemcpyDeviceToHost);
//
//}
// Dumps chunk argChunkId's host-side results to
// <prefix>/chunk<i>/NZDTCount.txt and .../DTIndexValue.txt.
// NOTE(review): this reads NZDTCount, DTIndex and DTValue, whose host
// allocations are commented out in the constructor, and the GPU2CPU
// download that would fill them is also commented out — as written these
// pointers look unallocated; confirm they are assigned elsewhere before
// this is called.
void DTChunk::CPU2Disk(string argFilePrefix,int argChunkId) {
    chunkId = argChunkId;
    //docLength = argDocLength;
    string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId);
    ofstream OutputNZDTCount((chunkFolderName + string("/NZDTCount.txt")).c_str(), ios::binary);
    for (int i = 0; i < docLengthVec[chunkId]; i++) {
        OutputNZDTCount << NZDTCount[i] << "\n";
    }
    OutputNZDTCount.close();
    ofstream OutputDTIndexValue((chunkFolderName + string("/DTIndexValue.txt")).c_str(), ios::binary);
    for (int i = 0; i < DTLengthVec[chunkId]; i++) {
        OutputDTIndexValue << DTIndex[i] <<" "<<DTValue[i]<< "\n";
    }
    OutputDTIndexValue.close();
}
|
8d963b4eec5f4e7272b5332684dfea3156491251.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zsymmetrize_tiles.cu normal z -> c, Sat Nov 15 19:53:59 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/*
Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix.
Grid is ntile x ceil(m/NB).
Each tile is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
// Copies (with conjugation) the lower triangle of one m x m tile into its
// upper triangle; one thread handles one row i. Tiles are selected by
// blockIdx.x, row-blocks of NB rows by blockIdx.y (see file header).
// NOTE(review): cuConjf means the result is Hermitian rather than plain
// symmetric — presumably MAGMA's intended convention for the complex case.
__global__ void
csymmetrize_tiles_lower( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride )
{
    // shift dA to tile's top-left corner
    dA += blockIdx.x*(mstride + nstride*ldda);
    // dA iterates across row i and dAT iterates down column i.
    int i = blockIdx.y*NB + threadIdx.x;
    magmaFloatComplex *dAT = dA;
    if ( i < m ) {
        dA += i;
        dAT += i*ldda;
        magmaFloatComplex *dAend = dA + i*ldda;  // stop at the diagonal
        while( dA < dAend ) {
            *dAT = cuConjf(*dA);  // upper := lower
            dA += ldda;
            dAT += 1;
        }
    }
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
// Mirror of csymmetrize_tiles_lower: copies (with conjugation) the upper
// triangle of one m x m tile into its lower triangle. Same grid layout;
// the only difference is the direction of the assignment (dA := conj(dAT)).
__global__ void
csymmetrize_tiles_upper( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride )
{
    // shift dA to tile's top-left corner
    dA += blockIdx.x*(mstride + nstride*ldda);
    // dA iterates across row i and dAT iterates down column i.
    int i = blockIdx.y*NB + threadIdx.x;
    magmaFloatComplex *dAT = dA;
    if ( i < m ) {
        dA += i;
        dAT += i*ldda;
        magmaFloatComplex *dAend = dA + i*ldda;  // stop at the diagonal
        while( dA < dAend ) {
            *dA = cuConjf(*dAT);  // lower := upper
            dA += ldda;
            dAT += 1;
        }
    }
}
/**
Purpose
-------
CSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa,
to make some blocks of dA into general representations of a symmetric block.
This processes NTILE blocks, typically the diagonal blocks.
Each block is offset by mstride rows and nstride columns from the previous block.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows & columns of each square block of dA. M >= 0.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix dA. N = m + nstride*(ntile-1).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)).
@param[in]
ntile INTEGER
Number of blocks to symmetrize. ntile >= 0.
@param[in]
mstride INTEGER
Row offset from start of one block to start of next block. mstride >= 0.
Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles
from overlapping.
@param[in]
nstride INTEGER
Column offset from start of one block to start of next block. nstride >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_csymmetrize_tiles_q(
    magma_uplo_t uplo, magma_int_t m,
    magmaFloatComplex_ptr dA, magma_int_t ldda,
    magma_int_t ntile, magma_int_t mstride, magma_int_t nstride,
    magma_queue_t queue )
{
    // Validate arguments; a negative info identifies the offending argument
    // and is reported through magma_xerbla.
    magma_int_t info = 0;
    if ( uplo != MagmaLower && uplo != MagmaUpper )
        info = -1;
    else if ( m < 0 )
        info = -2;
    else if ( ldda < max(1,m + mstride*(ntile-1)) )
        info = -5;
    else if ( ntile < 0 )
        info = -6;
    else if ( mstride < 0 )
        info = -7;
    else if ( nstride < 0 )
        info = -8;
    else if ( mstride < m && nstride < m ) // only one must be >= m.
        info = -7;                         // overlap check reuses mstride's code
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // Quick return for empty work.
    if ( m == 0 || ntile == 0 )
        return;
    // One x-block per tile; y-blocks cover the tile's rows in chunks of NB.
    dim3 threads( NB );
    dim3 grid( ntile, (m + NB - 1)/NB );
    //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x );
    if ( uplo == MagmaUpper ) {
        hipLaunchKernelGGL(( csymmetrize_tiles_upper), dim3(grid), dim3(threads), 0, queue , m, dA, ldda, mstride, nstride );
    }
    else {
        hipLaunchKernelGGL(( csymmetrize_tiles_lower), dim3(grid), dim3(threads), 0, queue , m, dA, ldda, mstride, nstride );
    }
}
/**
@see magmablas_csymmetrize_tiles_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_csymmetrize_tiles(
    magma_uplo_t uplo, magma_int_t m,
    magmaFloatComplex_ptr dA, magma_int_t ldda,
    magma_int_t ntile, magma_int_t mstride, magma_int_t nstride )
{
    // Convenience overload: forwards to the queue-explicit variant on
    // MAGMA's current default stream.
    magmablas_csymmetrize_tiles_q( uplo, m, dA, ldda, ntile, mstride, nstride, magma_stream );
}
| 8d963b4eec5f4e7272b5332684dfea3156491251.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zsymmetrize_tiles.cu normal z -> c, Sat Nov 15 19:53:59 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/*
Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix.
Grid is ntile x ceil(m/NB).
Each tile is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
// Copies (with conjugation) the lower triangle of one m x m tile into its
// upper triangle; one thread handles one row i. Tiles are selected by
// blockIdx.x, row-blocks of NB rows by blockIdx.y (see file header).
// NOTE(review): cuConjf means the result is Hermitian rather than plain
// symmetric — presumably MAGMA's intended convention for the complex case.
__global__ void
csymmetrize_tiles_lower( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride )
{
    // shift dA to tile's top-left corner
    dA += blockIdx.x*(mstride + nstride*ldda);
    // dA iterates across row i and dAT iterates down column i.
    int i = blockIdx.y*NB + threadIdx.x;
    magmaFloatComplex *dAT = dA;
    if ( i < m ) {
        dA += i;
        dAT += i*ldda;
        magmaFloatComplex *dAend = dA + i*ldda;  // stop at the diagonal
        while( dA < dAend ) {
            *dAT = cuConjf(*dA);  // upper := lower
            dA += ldda;
            dAT += 1;
        }
    }
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
// Mirror of csymmetrize_tiles_lower: copies (with conjugation) the upper
// triangle of one m x m tile into its lower triangle. Same grid layout;
// the only difference is the direction of the assignment (dA := conj(dAT)).
__global__ void
csymmetrize_tiles_upper( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride )
{
    // shift dA to tile's top-left corner
    dA += blockIdx.x*(mstride + nstride*ldda);
    // dA iterates across row i and dAT iterates down column i.
    int i = blockIdx.y*NB + threadIdx.x;
    magmaFloatComplex *dAT = dA;
    if ( i < m ) {
        dA += i;
        dAT += i*ldda;
        magmaFloatComplex *dAend = dA + i*ldda;  // stop at the diagonal
        while( dA < dAend ) {
            *dA = cuConjf(*dAT);  // lower := upper
            dA += ldda;
            dAT += 1;
        }
    }
}
/**
Purpose
-------
CSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa,
to make some blocks of dA into general representations of a symmetric block.
This processes NTILE blocks, typically the diagonal blocks.
Each block is offset by mstride rows and nstride columns from the previous block.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows & columns of each square block of dA. M >= 0.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix dA. N = m + nstride*(ntile-1).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)).
@param[in]
ntile INTEGER
Number of blocks to symmetrize. ntile >= 0.
@param[in]
mstride INTEGER
Row offset from start of one block to start of next block. mstride >= 0.
Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles
from overlapping.
@param[in]
nstride INTEGER
Column offset from start of one block to start of next block. nstride >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_csymmetrize_tiles_q(
    magma_uplo_t uplo, magma_int_t m,
    magmaFloatComplex_ptr dA, magma_int_t ldda,
    magma_int_t ntile, magma_int_t mstride, magma_int_t nstride,
    magma_queue_t queue )
{
    // Validate arguments; a negative info identifies the offending argument
    // and is reported through magma_xerbla.
    magma_int_t info = 0;
    if ( uplo != MagmaLower && uplo != MagmaUpper )
        info = -1;
    else if ( m < 0 )
        info = -2;
    else if ( ldda < max(1,m + mstride*(ntile-1)) )
        info = -5;
    else if ( ntile < 0 )
        info = -6;
    else if ( mstride < 0 )
        info = -7;
    else if ( nstride < 0 )
        info = -8;
    else if ( mstride < m && nstride < m ) // only one must be >= m.
        info = -7;                         // overlap check reuses mstride's code
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // Quick return for empty work.
    if ( m == 0 || ntile == 0 )
        return;
    // One x-block per tile; y-blocks cover the tile's rows in chunks of NB.
    dim3 threads( NB );
    dim3 grid( ntile, (m + NB - 1)/NB );
    //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x );
    if ( uplo == MagmaUpper ) {
        csymmetrize_tiles_upper<<< grid, threads, 0, queue >>>( m, dA, ldda, mstride, nstride );
    }
    else {
        csymmetrize_tiles_lower<<< grid, threads, 0, queue >>>( m, dA, ldda, mstride, nstride );
    }
}
/**
@see magmablas_csymmetrize_tiles_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_csymmetrize_tiles(
    magma_uplo_t uplo, magma_int_t m,
    magmaFloatComplex_ptr dA, magma_int_t ldda,
    magma_int_t ntile, magma_int_t mstride, magma_int_t nstride )
{
    // Convenience overload: forwards to the queue-explicit variant on
    // MAGMA's current default stream.
    magmablas_csymmetrize_tiles_q( uplo, m, dA, ldda, ntile, mstride, nstride, magma_stream );
}
|
1874fd8e03bcaa6801d6afd7a3cef48cb6a68f18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<kernels/kernelDefines.cu>
// Clamps element zn's new relative volume vnew[zn] into [eosvmin, eosvmax]
// (a zero bound means "no bound"), returning the clamped value in vnewc.
// Also clamps the reference volume v[zn] the same way and flags element i
// in *bad_vol if it is non-positive (volume error).
// NOTE(review): the *bad_vol store is an unsynchronized plain write — if
// several elements are bad, an arbitrary one wins; confirm that is
// acceptable to callers.
static
__device__
__forceinline__
void ApplyMaterialPropertiesForElems_device(
    Real_t& eosvmin, Real_t& eosvmax,
    Real_t* vnew, Real_t *v,
    Real_t& vnewc, Index_t* bad_vol, Index_t i, Index_t zn)
{
    vnewc = vnew[zn] ;
    if (eosvmin != Real_t(0.)) {
        if (vnewc < eosvmin)
            vnewc = eosvmin ;
    }
    if (eosvmax != Real_t(0.)) {
        if (vnewc > eosvmax)
            vnewc = eosvmax ;
    }
    // Now check for valid volume
    Real_t vc = v[zn];
    if (eosvmin != Real_t(0.)) {
        if (vc < eosvmin)
            vc = eosvmin ;
    }
    if (eosvmax != Real_t(0.)) {
        if (vc > eosvmax)
            vc = eosvmax ;
    }
    if (vc <= 0.) {
        *bad_vol = i;
    }
}
// Computes the element sound speed from the EOS derivatives (pbvc, bvc),
// energy and pressure, and writes it to ss[iz]. A tiny positive floor
// (1.111111e-36) guards against taking SQRT of a non-positive value.
// NOTE(review): parameters ss4o3, nz and i are unused here — presumably the
// signature is kept for parity with the CPU reference; confirm.
static
__device__ __forceinline__
void CalcSoundSpeedForElems_device(Real_t& vnewc, Real_t rho0, Real_t &enewc,
    Real_t &pnewc, Real_t &pbvc,
    Real_t &bvc, Real_t ss4o3, Index_t nz,
    Real_t *ss, Index_t i, Index_t iz)
{
    Real_t ssTmp = (pbvc * enewc + vnewc * vnewc *
            bvc * pnewc) / rho0;
    if (ssTmp <= Real_t(1.111111e-36)) {
        ssTmp = Real_t(1.111111e-36);
    }
    else {
        ssTmp = SQRT(ssTmp) ;
    }
    ss[iz] = ssTmp;
}
// Pressure update for one element: p = (2/3)*(compression + 1) * e, with
// small-pressure cutoff p_cut, floor pmin, and forced zero at/above the
// eosvmax volume bound. Also returns the EOS derivatives bvc and pbvc used
// later by the energy and sound-speed updates.
static
__device__ __forceinline__
void CalcPressureForElems_device(
    Real_t& p_new, Real_t& bvc,
    Real_t& pbvc, Real_t& e_old,
    Real_t& compression, Real_t& vnewc,
    Real_t pmin,
    Real_t p_cut, Real_t eosvmax)
{
    Real_t c1s = Real_t(2.0)/Real_t(3.0);
    // Work in a local, then publish once (p_new is a reference).
    Real_t p_temp = p_new;
    bvc = c1s * (compression + Real_t(1.));
    pbvc = c1s;
    p_temp = bvc * e_old ;
    if ( FABS(p_temp) < p_cut )
        p_temp = Real_t(0.0) ;
    if ( vnewc >= eosvmax ) /* impossible condition here? */
        p_temp = Real_t(0.0) ;
    if (p_temp < pmin)
        p_temp = pmin ;
    p_new = p_temp;
}
// Snaps element i's new relative volume to exactly 1.0 when it is within
// v_cut of 1.0 (suppresses round-off drift), then commits it to v[i].
// NOTE(review): parameter numElem is unused — signature kept for parity
// with the CPU reference; confirm.
static
__device__
__forceinline__
void UpdateVolumesForElems_device(Index_t numElem, Real_t& v_cut,
    Real_t *vnew,
    Real_t *v,
    int i)
{
    Real_t tmpV ;
    tmpV = vnew[i] ;
    if ( FABS(tmpV - Real_t(1.0)) < v_cut )
        tmpV = Real_t(1.0) ;
    v[i] = tmpV ;
}
// Three-stage predictor/corrector energy update for one element, with the
// pressure (and its derivatives bvc/pbvc) recomputed after each stage and
// the artificial-viscosity term q rebuilt from the linear/quadratic
// coefficients ql/qq scaled by a sound-speed estimate. Cutoffs: |e| < e_cut
// and |q| < q_cut snap to zero; e is floored at emin.
// NOTE(review): parameters eosvmax and length are forwarded/unused in parts
// of this routine — kept for parity with the CPU reference; confirm.
static
__device__
__forceinline__
void CalcEnergyForElems_device(Real_t& p_new, Real_t& e_new, Real_t& q_new,
    Real_t& bvc, Real_t& pbvc,
    Real_t& p_old, Real_t& e_old, Real_t& q_old,
    Real_t& compression, Real_t& compHalfStep,
    Real_t& vnewc, Real_t& work, Real_t& delvc, Real_t pmin,
    Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
    Real_t& qq, Real_t& ql,
    Real_t& rho0,
    Real_t& eosvmax,
    Index_t length)
{
    const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
    Real_t pHalfStep;
    // Stage 1: predictor — advance e a half step with the old p and q.
    e_new = e_old - Real_t(0.5) * delvc * (p_old + q_old)
        + Real_t(0.5) * work;
    if (e_new < emin ) {
        e_new = emin ;
    }
    CalcPressureForElems_device(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
        pmin, p_cut, eosvmax);
    Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep) ;
    // Artificial viscosity is only active under compression (delvc <= 0).
    if ( delvc > Real_t(0.) ) {
        q_new /* = qq = ql */ = Real_t(0.) ;
    }
    else {
        Real_t ssc = ( pbvc * e_new
            + vhalf * vhalf * bvc * pHalfStep ) / rho0 ;
        if ( ssc <= Real_t(0.) ) {
            ssc =Real_t(.333333e-36) ;
        } else {
            ssc = SQRT(ssc) ;
        }
        q_new = (ssc*ql + qq) ;
    }
    // Stage 2: corrector using half-step pressure and viscosity.
    e_new = e_new + Real_t(0.5) * delvc
        * ( Real_t(3.0)*(p_old + q_old)
            - Real_t(4.0)*(pHalfStep + q_new)) ;
    e_new += Real_t(0.5) * work;
    if (FABS(e_new) < e_cut) {
        e_new = Real_t(0.) ;
    }
    if ( e_new < emin ) {
        e_new = emin ;
    }
    CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
        pmin, p_cut, eosvmax);
    // Stage 3: final correction with a provisional full-step viscosity.
    Real_t q_tilde ;
    if (delvc > Real_t(0.)) {
        q_tilde = Real_t(0.) ;
    }
    else {
        Real_t ssc = ( pbvc * e_new
            + vnewc * vnewc * bvc * p_new ) / rho0 ;
        if ( ssc <= Real_t(0.) ) {
            ssc = Real_t(.333333e-36) ;
        } else {
            ssc = SQRT(ssc) ;
        }
        q_tilde = (ssc*ql + qq) ;
    }
    e_new = e_new - ( Real_t(7.0)*(p_old + q_old)
        - Real_t(8.0)*(pHalfStep + q_new)
        + (p_new + q_tilde)) * delvc*sixth ;
    if (FABS(e_new) < e_cut) {
        e_new = Real_t(0.) ;
    }
    if ( e_new < emin ) {
        e_new = emin ;
    }
    CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
        pmin, p_cut, eosvmax);
    // Final q consistent with the final e/p (compression branch only).
    if ( delvc <= Real_t(0.) ) {
        Real_t ssc = ( pbvc * e_new
            + vnewc * vnewc * bvc * p_new ) / rho0 ;
        if ( ssc <= Real_t(0.) ) {
            ssc = Real_t(.333333e-36) ;
        } else {
            ssc = SQRT(ssc) ;
        }
        q_new = (ssc*ql + qq) ;
        if (FABS(q_new) < q_cut) q_new = Real_t(0.) ;
    }
    return ;
}
// Per-element EOS driver (one thread per material element, indexed
// indirectly through matElemlist): clamp volumes, build the full- and
// half-step compressions, run the energy/pressure update, recompute the
// sound speed, and snap the committed volume. Guarded by i < length, so
// any 1-D launch covering length threads is valid.
extern "C" __global__
void ApplyMaterialPropertiesAndUpdateVolume_kernel(occaKernelInfoArg,
    Index_t length,
    Real_t rho0,
    Real_t e_cut,
    Real_t emin,
    Real_t* ql,
    Real_t* qq,
    Real_t* vnew,
    Real_t* v,
    Real_t pmin,
    Real_t p_cut,
    Real_t q_cut,
    Real_t eosvmin,
    Real_t eosvmax,
    Index_t* matElemlist,
    Real_t* e,
    Real_t* delv,
    Real_t* p,
    Real_t* q,
    Real_t ss4o3,
    Real_t* ss,
    Real_t v_cut,
    Index_t* bad_vol
    )
{
    Real_t e_old, delvc, p_old, q_old;
    Real_t compression, compHalfStep;
    Real_t qq_old, ql_old, work;
    Real_t p_new, e_new, q_new;
    Real_t bvc, pbvc, vnewc;
    Index_t i=blockDim.x*blockIdx.x + threadIdx.x;
    if (i<length) {
        // Indirect index: thread i handles material element matElemlist[i].
        Index_t zidx = matElemlist[i] ;
        ApplyMaterialPropertiesForElems_device
            (eosvmin,eosvmax,vnew,v,vnewc,bad_vol,i,zidx);
        e_old = e[zidx];
        delvc = delv[zidx];
        p_old = p[zidx];
        q_old = q[zidx];
        Real_t vchalf ;
        compression = Real_t(1.) / vnewc - Real_t(1.);
        vchalf = vnewc - delvc * Real_t(.5);
        compHalfStep = Real_t(1.) / vchalf - Real_t(1.);
        if ( eosvmin != Real_t(0.) ) {
            if (vnewc <= eosvmin) { /* impossible due to calling func? */
                compHalfStep = compression ;
            }
        }
        if ( eosvmax != Real_t(0.) ) {
            if (vnewc >= eosvmax) { /* impossible due to calling func? */
                p_old = Real_t(0.) ;
                compression = Real_t(0.) ;
                compHalfStep = Real_t(0.) ;
            }
        }
        qq_old = qq[zidx] ;
        ql_old = ql[zidx] ;
        work = Real_t(0.) ;  // no external work term in this formulation
        CalcEnergyForElems_device(p_new, e_new, q_new, bvc, pbvc,
            p_old, e_old, q_old, compression, compHalfStep,
            vnewc, work, delvc, pmin,
            p_cut, e_cut, q_cut, emin,
            qq_old, ql_old, rho0, eosvmax, length);
        p[zidx] = p_new ;
        e[zidx] = e_new ;
        q[zidx] = q_new ;
        CalcSoundSpeedForElems_device
            (vnewc,rho0,e_new,p_new,pbvc,bvc,ss4o3,length,ss,i,zidx);
        UpdateVolumesForElems_device(length,v_cut,vnew,v,i);
    }
}
| 1874fd8e03bcaa6801d6afd7a3cef48cb6a68f18.cu | #include<kernels/kernelDefines.cu>
// Clamps element zn's new relative volume vnew[zn] into [eosvmin, eosvmax]
// (a zero bound means "no bound"), returning the clamped value in vnewc.
// Also clamps the reference volume v[zn] the same way and flags element i
// in *bad_vol if it is non-positive (volume error).
// NOTE(review): the *bad_vol store is an unsynchronized plain write — if
// several elements are bad, an arbitrary one wins; confirm that is
// acceptable to callers.
static
__device__
__forceinline__
void ApplyMaterialPropertiesForElems_device(
    Real_t& eosvmin, Real_t& eosvmax,
    Real_t* vnew, Real_t *v,
    Real_t& vnewc, Index_t* bad_vol, Index_t i, Index_t zn)
{
    vnewc = vnew[zn] ;
    if (eosvmin != Real_t(0.)) {
        if (vnewc < eosvmin)
            vnewc = eosvmin ;
    }
    if (eosvmax != Real_t(0.)) {
        if (vnewc > eosvmax)
            vnewc = eosvmax ;
    }
    // Now check for valid volume
    Real_t vc = v[zn];
    if (eosvmin != Real_t(0.)) {
        if (vc < eosvmin)
            vc = eosvmin ;
    }
    if (eosvmax != Real_t(0.)) {
        if (vc > eosvmax)
            vc = eosvmax ;
    }
    if (vc <= 0.) {
        *bad_vol = i;
    }
}
// Computes the element sound speed from the EOS derivatives (pbvc, bvc),
// energy and pressure, and writes it to ss[iz]. A tiny positive floor
// (1.111111e-36) guards against taking SQRT of a non-positive value.
// NOTE(review): parameters ss4o3, nz and i are unused here — presumably the
// signature is kept for parity with the CPU reference; confirm.
static
__device__ __forceinline__
void CalcSoundSpeedForElems_device(Real_t& vnewc, Real_t rho0, Real_t &enewc,
    Real_t &pnewc, Real_t &pbvc,
    Real_t &bvc, Real_t ss4o3, Index_t nz,
    Real_t *ss, Index_t i, Index_t iz)
{
    Real_t ssTmp = (pbvc * enewc + vnewc * vnewc *
            bvc * pnewc) / rho0;
    if (ssTmp <= Real_t(1.111111e-36)) {
        ssTmp = Real_t(1.111111e-36);
    }
    else {
        ssTmp = SQRT(ssTmp) ;
    }
    ss[iz] = ssTmp;
}
// Pressure update for one element: p = (2/3)*(compression + 1) * e, with
// small-pressure cutoff p_cut, floor pmin, and forced zero at/above the
// eosvmax volume bound. Also returns the EOS derivatives bvc and pbvc used
// later by the energy and sound-speed updates.
static
__device__ __forceinline__
void CalcPressureForElems_device(
    Real_t& p_new, Real_t& bvc,
    Real_t& pbvc, Real_t& e_old,
    Real_t& compression, Real_t& vnewc,
    Real_t pmin,
    Real_t p_cut, Real_t eosvmax)
{
    Real_t c1s = Real_t(2.0)/Real_t(3.0);
    // Work in a local, then publish once (p_new is a reference).
    Real_t p_temp = p_new;
    bvc = c1s * (compression + Real_t(1.));
    pbvc = c1s;
    p_temp = bvc * e_old ;
    if ( FABS(p_temp) < p_cut )
        p_temp = Real_t(0.0) ;
    if ( vnewc >= eosvmax ) /* impossible condition here? */
        p_temp = Real_t(0.0) ;
    if (p_temp < pmin)
        p_temp = pmin ;
    p_new = p_temp;
}
// Snaps element i's new relative volume to exactly 1.0 when it is within
// v_cut of 1.0 (suppresses round-off drift), then commits it to v[i].
// NOTE(review): parameter numElem is unused — signature kept for parity
// with the CPU reference; confirm.
static
__device__
__forceinline__
void UpdateVolumesForElems_device(Index_t numElem, Real_t& v_cut,
    Real_t *vnew,
    Real_t *v,
    int i)
{
    Real_t tmpV ;
    tmpV = vnew[i] ;
    if ( FABS(tmpV - Real_t(1.0)) < v_cut )
        tmpV = Real_t(1.0) ;
    v[i] = tmpV ;
}
// Three-stage predictor/corrector energy update for one element, with the
// pressure (and its derivatives bvc/pbvc) recomputed after each stage and
// the artificial-viscosity term q rebuilt from the linear/quadratic
// coefficients ql/qq scaled by a sound-speed estimate. Cutoffs: |e| < e_cut
// and |q| < q_cut snap to zero; e is floored at emin.
// NOTE(review): parameters eosvmax and length are forwarded/unused in parts
// of this routine — kept for parity with the CPU reference; confirm.
static
__device__
__forceinline__
void CalcEnergyForElems_device(Real_t& p_new, Real_t& e_new, Real_t& q_new,
    Real_t& bvc, Real_t& pbvc,
    Real_t& p_old, Real_t& e_old, Real_t& q_old,
    Real_t& compression, Real_t& compHalfStep,
    Real_t& vnewc, Real_t& work, Real_t& delvc, Real_t pmin,
    Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
    Real_t& qq, Real_t& ql,
    Real_t& rho0,
    Real_t& eosvmax,
    Index_t length)
{
    const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
    Real_t pHalfStep;
    // Stage 1: predictor — advance e a half step with the old p and q.
    e_new = e_old - Real_t(0.5) * delvc * (p_old + q_old)
        + Real_t(0.5) * work;
    if (e_new < emin ) {
        e_new = emin ;
    }
    CalcPressureForElems_device(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
        pmin, p_cut, eosvmax);
    Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep) ;
    // Artificial viscosity is only active under compression (delvc <= 0).
    if ( delvc > Real_t(0.) ) {
        q_new /* = qq = ql */ = Real_t(0.) ;
    }
    else {
        Real_t ssc = ( pbvc * e_new
            + vhalf * vhalf * bvc * pHalfStep ) / rho0 ;
        if ( ssc <= Real_t(0.) ) {
            ssc =Real_t(.333333e-36) ;
        } else {
            ssc = SQRT(ssc) ;
        }
        q_new = (ssc*ql + qq) ;
    }
    // Stage 2: corrector using half-step pressure and viscosity.
    e_new = e_new + Real_t(0.5) * delvc
        * ( Real_t(3.0)*(p_old + q_old)
            - Real_t(4.0)*(pHalfStep + q_new)) ;
    e_new += Real_t(0.5) * work;
    if (FABS(e_new) < e_cut) {
        e_new = Real_t(0.) ;
    }
    if ( e_new < emin ) {
        e_new = emin ;
    }
    CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
        pmin, p_cut, eosvmax);
    // Stage 3: final correction with a provisional full-step viscosity.
    Real_t q_tilde ;
    if (delvc > Real_t(0.)) {
        q_tilde = Real_t(0.) ;
    }
    else {
        Real_t ssc = ( pbvc * e_new
            + vnewc * vnewc * bvc * p_new ) / rho0 ;
        if ( ssc <= Real_t(0.) ) {
            ssc = Real_t(.333333e-36) ;
        } else {
            ssc = SQRT(ssc) ;
        }
        q_tilde = (ssc*ql + qq) ;
    }
    e_new = e_new - ( Real_t(7.0)*(p_old + q_old)
        - Real_t(8.0)*(pHalfStep + q_new)
        + (p_new + q_tilde)) * delvc*sixth ;
    if (FABS(e_new) < e_cut) {
        e_new = Real_t(0.) ;
    }
    if ( e_new < emin ) {
        e_new = emin ;
    }
    CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
        pmin, p_cut, eosvmax);
    // Final q consistent with the final e/p (compression branch only).
    if ( delvc <= Real_t(0.) ) {
        Real_t ssc = ( pbvc * e_new
            + vnewc * vnewc * bvc * p_new ) / rho0 ;
        if ( ssc <= Real_t(0.) ) {
            ssc = Real_t(.333333e-36) ;
        } else {
            ssc = SQRT(ssc) ;
        }
        q_new = (ssc*ql + qq) ;
        if (FABS(q_new) < q_cut) q_new = Real_t(0.) ;
    }
    return ;
}
// One thread per material element: clamp the new volume to the EOS range,
// run the energy/pressure/viscosity update, store the results back through
// the indirection list matElemlist, then update the sound speed and the
// committed volume.  `occaKernelInfoArg` is an OCCA launch-info placeholder
// parameter — presumably expanded by the OCCA toolchain; confirm before
// changing the signature.
extern "C" __global__
void ApplyMaterialPropertiesAndUpdateVolume_kernel(occaKernelInfoArg,
Index_t length,
Real_t rho0,
Real_t e_cut,
Real_t emin,
Real_t* ql,
Real_t* qq,
Real_t* vnew,
Real_t* v,
Real_t pmin,
Real_t p_cut,
Real_t q_cut,
Real_t eosvmin,
Real_t eosvmax,
Index_t* matElemlist,
Real_t* e,
Real_t* delv,
Real_t* p,
Real_t* q,
Real_t ss4o3,
Real_t* ss,
Real_t v_cut,
Index_t* bad_vol
)
{
Real_t e_old, delvc, p_old, q_old;
Real_t compression, compHalfStep;
Real_t qq_old, ql_old, work;
Real_t p_new, e_new, q_new;
Real_t bvc, pbvc, vnewc;
// Flat 1-D global thread index; guarded against the rounded-up grid tail.
Index_t i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
// zidx is the real element id behind this material-list slot.
Index_t zidx = matElemlist[i] ;
// Clamps vnewc into [eosvmin, eosvmax] and flags invalid volumes.
ApplyMaterialPropertiesForElems_device
(eosvmin,eosvmax,vnew,v,vnewc,bad_vol,i,zidx);
e_old = e[zidx];
delvc = delv[zidx];
p_old = p[zidx];
q_old = q[zidx];
Real_t vchalf ;
// Compression at the full and half step (relative volume -> strain).
compression = Real_t(1.) / vnewc - Real_t(1.);
vchalf = vnewc - delvc * Real_t(.5);
compHalfStep = Real_t(1.) / vchalf - Real_t(1.);
// At the volume bounds, fall back to degenerate compression values.
if ( eosvmin != Real_t(0.) ) {
if (vnewc <= eosvmin) { /* impossible due to calling func? */
compHalfStep = compression ;
}
}
if ( eosvmax != Real_t(0.) ) {
if (vnewc >= eosvmax) { /* impossible due to calling func? */
p_old = Real_t(0.) ;
compression = Real_t(0.) ;
compHalfStep = Real_t(0.) ;
}
}
qq_old = qq[zidx] ;
ql_old = ql[zidx] ;
work = Real_t(0.) ;
// Predictor/corrector EOS step producing p_new, e_new, q_new.
CalcEnergyForElems_device(p_new, e_new, q_new, bvc, pbvc,
p_old, e_old, q_old, compression, compHalfStep,
vnewc, work, delvc, pmin,
p_cut, e_cut, q_cut, emin,
qq_old, ql_old, rho0, eosvmax, length);
p[zidx] = p_new ;
e[zidx] = e_new ;
q[zidx] = q_new ;
// Sound speed stored per element; then commit the (snapped) volume.
CalcSoundSpeedForElems_device
(vnewc,rho0,e_new,p_new,pbvc,bvc,ss4o3,length,ss,i,zidx);
UpdateVolumesForElems_device(length,v_cut,vnew,v,i);
}
}
|
566fafdcf9013aa09b6182c0bcab4585aee2abad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <layer/padding_layer.h>
#include <device_atomic_functions.h>
#include <math_functions.h>
#include <stream_singleton.h>
namespace SuperNeurons {
// Forward zero-padding kernel: one thread per sample n builds the padded
// output of shape (C+2*padC, H+2*padH, W+2*padW), copying src into the
// interior and writing zeros into every pad region.
// NOTE(review): index(...) is presumably a flat NCHW offset helper declared
// in padding_layer.h with argument order (n, c, h, w, C, H, W) — confirm.
// The trailing __syncthreads() is outside all divergent code, so it is
// reached uniformly; no shared memory is used here.
template<class value_type>
__global__ void padding_fkernel(size_t N, size_t C, size_t H, size_t W, size_t padC, size_t padH, size_t padW,
const value_type *src, value_type *dst) {
size_t n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < N) {
size_t CC = C + 2 * padC, HH = H + 2 * padH, WW = W + 2 * padW;
// Zero the padC leading channels.
for (size_t c = 0; c < padC; ++c) {
for (size_t hw = 0; hw < HH * WW; ++hw) {
dst[(n * CC + c) * HH * WW + hw] = 0.0;
}
}
for (size_t c = 0; c < C; ++c) {
// Zero the padH leading rows of this channel.
for (size_t h = 0; h < padH; ++h) {
for (size_t w = 0; w < WW; ++w) {
dst[index(n, c + padC, h, w, CC, HH, WW)] = 0.0;
}
}
for (size_t h = 0; h < H; ++h) {
// pad before w
for (size_t w = 0; w < padW; ++w) {
dst[index(n, c + padC, h + padH, w, CC, HH, WW)] = 0;
}
// copy it
for (size_t w = 0; w < W; ++w) {
dst[index(n, c + padC, h + padH, w + padW, CC, HH, WW)] = src[index(n, c, h, w, C, H, W)];
}
// pad after
for (size_t w = 0; w < padW; ++w) {
dst[index(n, c + padC, h + padH, w + padW + W, CC, HH, WW)] = 0;
}
}
// pad after
for (size_t h = 0; h < padH; ++h) {
for (size_t w = 0; w < WW; ++w) {
dst[index(n, c + padC, h + padH + H, w, CC, HH, WW)] = 0.0;
}
}
}
// Zero the padC trailing channels.
for (size_t c = 0; c < padC; ++c) {
for (size_t hw = 0; hw < HH * WW; ++hw) {
dst[(n * CC + c + padC + C) * HH * WW + hw] = 0.0;
}
}
}
__syncthreads();
}
// Backward of zero padding: one thread per sample n copies the interior
// (C x H x W) region out of the padded (C+2padC) x (H+2padH) x (W+2padW)
// source tensor; the pad gradients are simply dropped.
// NOTE(review): index(...) is presumably a flat NCHW offset helper declared
// in padding_layer.h — confirm its argument order (n, c, h, w, C, H, W).
template<class value_type>
__global__ void padding_bkernel(size_t N, size_t C, size_t H, size_t W, size_t padC, size_t padH, size_t padW,
                                const value_type *src, value_type *dst) {
    size_t n = blockIdx.x * blockDim.x + threadIdx.x;
    if (n < N) {
        size_t CC = C + 2 * padC, HH = H + 2 * padH, WW = W + 2 * padW;
        for (size_t c = 0; c < C; ++c) {
            for (size_t h = 0; h < H; ++h) {
                for (size_t w = 0; w < W; ++w) {
                    dst[index(n, c, h, w, C, H, W)] =
                        src[index(n, c + padC, h + padH, w + padW, CC, HH, WW)];
                }
            }
        }
    }
    // Bug fix: this barrier used to live inside the `if (n < N)` branch.
    // __syncthreads() must be reached by every thread of the block, so a
    // barrier in divergent control flow is undefined behavior.  The kernel
    // uses no shared memory, so the barrier is not strictly required; it is
    // kept here, outside the branch, where all threads reach it.
    __syncthreads();
}
// Host wrapper: launch the forward padding kernel with one thread per
// sample (256 threads per block, rounded-up grid) on the project's
// singleton compute stream.  No error check after the launch.
template<class value_type>
void padding_forward(size_t N, size_t C, size_t H, size_t W, size_t padC, size_t padH, size_t padW,
const value_type *src, value_type *dst) {
padding_fkernel<value_type> << < (N + 255) / 256, 256, 0, stream_singleton::get_compute_stream() >> > (N, C, H, W, padC, padH, padW, src, dst);
}
// Host wrapper: launch the backward padding kernel with one thread per
// sample (256 threads per block, rounded-up grid) on the project's
// singleton compute stream.  No error check after the launch.
template<class value_type>
void padding_backward(size_t N, size_t C, size_t H, size_t W, size_t padC, size_t padH, size_t padW,
const value_type *src, value_type *dst) {
padding_bkernel<value_type> << < (N + 255) / 256, 256, 0, stream_singleton::get_compute_stream() >> > (N, C, H, W, padC, padH, padW, src, dst);
}
template void padding_forward<float>(size_t, size_t, size_t, size_t, size_t, size_t, size_t, const float*, float*);
template void padding_forward<double>(size_t, size_t, size_t, size_t, size_t, size_t, size_t, const double *, double*);
template void padding_backward<float>(size_t, size_t, size_t, size_t, size_t, size_t, size_t, const float*, float*);
template void padding_backward<double>(size_t, size_t, size_t, size_t, size_t, size_t, size_t, const double *, double*);
} // namespace SuperNeurons | 566fafdcf9013aa09b6182c0bcab4585aee2abad.cu | #include <layer/padding_layer.h>
#include <device_atomic_functions.h>
#include <math_functions.h>
#include <stream_singleton.h>
namespace SuperNeurons {
// Forward zero-padding kernel: one thread per sample n builds the padded
// output of shape (C+2*padC, H+2*padH, W+2*padW), copying src into the
// interior and writing zeros into every pad region.
// NOTE(review): index(...) is presumably a flat NCHW offset helper declared
// in padding_layer.h with argument order (n, c, h, w, C, H, W) — confirm.
// The trailing __syncthreads() is outside all divergent code, so it is
// reached uniformly; no shared memory is used here.
template<class value_type>
__global__ void padding_fkernel(size_t N, size_t C, size_t H, size_t W, size_t padC, size_t padH, size_t padW,
const value_type *src, value_type *dst) {
size_t n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < N) {
size_t CC = C + 2 * padC, HH = H + 2 * padH, WW = W + 2 * padW;
// Zero the padC leading channels.
for (size_t c = 0; c < padC; ++c) {
for (size_t hw = 0; hw < HH * WW; ++hw) {
dst[(n * CC + c) * HH * WW + hw] = 0.0;
}
}
for (size_t c = 0; c < C; ++c) {
// Zero the padH leading rows of this channel.
for (size_t h = 0; h < padH; ++h) {
for (size_t w = 0; w < WW; ++w) {
dst[index(n, c + padC, h, w, CC, HH, WW)] = 0.0;
}
}
for (size_t h = 0; h < H; ++h) {
// pad before w
for (size_t w = 0; w < padW; ++w) {
dst[index(n, c + padC, h + padH, w, CC, HH, WW)] = 0;
}
// copy it
for (size_t w = 0; w < W; ++w) {
dst[index(n, c + padC, h + padH, w + padW, CC, HH, WW)] = src[index(n, c, h, w, C, H, W)];
}
// pad after
for (size_t w = 0; w < padW; ++w) {
dst[index(n, c + padC, h + padH, w + padW + W, CC, HH, WW)] = 0;
}
}
// pad after
for (size_t h = 0; h < padH; ++h) {
for (size_t w = 0; w < WW; ++w) {
dst[index(n, c + padC, h + padH + H, w, CC, HH, WW)] = 0.0;
}
}
}
// Zero the padC trailing channels.
for (size_t c = 0; c < padC; ++c) {
for (size_t hw = 0; hw < HH * WW; ++hw) {
dst[(n * CC + c + padC + C) * HH * WW + hw] = 0.0;
}
}
}
__syncthreads();
}
// Backward of zero padding: one thread per sample n copies the interior
// (C x H x W) region out of the padded (C+2padC) x (H+2padH) x (W+2padW)
// source tensor; the pad gradients are simply dropped.
// NOTE(review): index(...) is presumably a flat NCHW offset helper declared
// in padding_layer.h — confirm its argument order (n, c, h, w, C, H, W).
template<class value_type>
__global__ void padding_bkernel(size_t N, size_t C, size_t H, size_t W, size_t padC, size_t padH, size_t padW,
                                const value_type *src, value_type *dst) {
    size_t n = blockIdx.x * blockDim.x + threadIdx.x;
    if (n < N) {
        size_t CC = C + 2 * padC, HH = H + 2 * padH, WW = W + 2 * padW;
        for (size_t c = 0; c < C; ++c) {
            for (size_t h = 0; h < H; ++h) {
                for (size_t w = 0; w < W; ++w) {
                    dst[index(n, c, h, w, C, H, W)] =
                        src[index(n, c + padC, h + padH, w + padW, CC, HH, WW)];
                }
            }
        }
    }
    // Bug fix: this barrier used to live inside the `if (n < N)` branch.
    // __syncthreads() must be reached by every thread of the block, so a
    // barrier in divergent control flow is undefined behavior.  The kernel
    // uses no shared memory, so the barrier is not strictly required; it is
    // kept here, outside the branch, where all threads reach it.
    __syncthreads();
}
// Host wrapper: launch the forward padding kernel with one thread per
// sample (256 threads per block, rounded-up grid) on the project's
// singleton compute stream.  No error check after the launch.
template<class value_type>
void padding_forward(size_t N, size_t C, size_t H, size_t W, size_t padC, size_t padH, size_t padW,
const value_type *src, value_type *dst) {
padding_fkernel<value_type> << < (N + 255) / 256, 256, 0, stream_singleton::get_compute_stream() >> > (N, C, H, W, padC, padH, padW, src, dst);
}
// Host wrapper: launch the backward padding kernel with one thread per
// sample (256 threads per block, rounded-up grid) on the project's
// singleton compute stream.  No error check after the launch.
template<class value_type>
void padding_backward(size_t N, size_t C, size_t H, size_t W, size_t padC, size_t padH, size_t padW,
const value_type *src, value_type *dst) {
padding_bkernel<value_type> << < (N + 255) / 256, 256, 0, stream_singleton::get_compute_stream() >> > (N, C, H, W, padC, padH, padW, src, dst);
}
template void padding_forward<float>(size_t, size_t, size_t, size_t, size_t, size_t, size_t, const float*, float*);
template void padding_forward<double>(size_t, size_t, size_t, size_t, size_t, size_t, size_t, const double *, double*);
template void padding_backward<float>(size_t, size_t, size_t, size_t, size_t, size_t, size_t, const float*, float*);
template void padding_backward<double>(size_t, size_t, size_t, size_t, size_t, size_t, size_t, const double *, double*);
} // namespace SuperNeurons |
5fcfa73896c65a7c54c5f5e4f42b000a603574b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
using namespace std;
// In-place pairwise tree reduction: leaves the sum of a[0..2*blockDim.x-1]
// in a[0].  Must be launched as a single block of len/2 threads.
__global__ void add(int *a)
{
    int tid = threadIdx.x;
    int no_of_threads = blockDim.x;
    int step = 1;
    while (no_of_threads > 0)
    {
        if (tid < no_of_threads)
        {
            int first = tid * step * 2;
            int second = first + step;
            a[first] += a[second];
        }
        // Bug fix: without a barrier, threads in different warps can read
        // a[second] before the previous round has written it (data race).
        // Every thread iterates the loop the same number of times, so the
        // barrier is reached uniformly by the whole block.
        __syncthreads();
        step <<= 1;
        no_of_threads >>= 1;
    }
}
// In-place pairwise tree reduction: leaves the maximum of
// a[0..2*blockDim.x-1] in a[0].  Launch as one block of len/2 threads.
__global__ void max(int *a)
{
    int tid = threadIdx.x;
    int step = 1;
    int no_of_threads = blockDim.x;
    while (no_of_threads > 0)
    {
        if (tid < no_of_threads)
        {
            int first = tid * step * 2;
            int second = first + step;
            a[first] = a[first] > a[second] ? a[first] : a[second];
        }
        // Bug fix: barrier so each round completes before the next one
        // reads its inputs; loop trip count is uniform across the block.
        __syncthreads();
        step <<= 1;
        no_of_threads >>= 1;
    }
}
// In-place pairwise tree reduction: leaves the minimum of
// a[0..2*blockDim.x-1] in a[0].  Launch as one block of len/2 threads.
__global__ void min(int *a)
{
    int tid = threadIdx.x;
    int step = 1;
    int no_of_threads = blockDim.x;
    while (no_of_threads > 0)
    {
        if (tid < no_of_threads)
        {
            int first = tid * step * 2;
            int second = first + step;
            a[first] = a[first] < a[second] ? a[first] : a[second];
        }
        // Bug fix: barrier so each round completes before the next one
        // reads its inputs; loop trip count is uniform across the block.
        __syncthreads();
        step <<= 1;
        no_of_threads >>= 1;
    }
}
// Replace each element by its squared deviation from the mean
// (one thread per element; used as the first step of a std-dev pipeline).
__global__ void stdDev(int *a,int mean){
    const int t = threadIdx.x;
    const int d = a[t] - mean;
    a[t] = d * d;
}
int main()
{
int host_arr[]={1,2,3,4,5,6,7,8};
int *dev_arr;
int SIZE=8;
hipMalloc((void**)&dev_arr,SIZE*sizeof(int));
//SUM AND AVERAGE
hipMemcpy(dev_arr,host_arr,SIZE*sizeof(int),hipMemcpyHostToDevice);hipLaunchKernelGGL((
add), dim3(1),dim3(SIZE/2), 0, 0, dev_arr);
int sum;
hipMemcpy(&sum,dev_arr,sizeof(int),hipMemcpyDeviceToHost);
int mean=sum/SIZE;
cout<<"Sum is : "<<sum;
cout<<"Average is : "<<mean;
//MAX
hipMemcpy(dev_arr,host_arr,SIZE*sizeof(int),hipMemcpyHostToDevice);hipLaunchKernelGGL((
max), dim3(1),dim3(SIZE/2), 0, 0, dev_arr);
int max;
hipMemcpy(&max,dev_arr,sizeof(int),hipMemcpyDeviceToHost);
cout<<"Max is : "<<max;
//MIN
hipMemcpy(dev_arr,host_arr,SIZE*sizeof(int),hipMemcpyHostToDevice);hipLaunchKernelGGL((
min), dim3(1),dim3(SIZE/2), 0, 0, dev_arr);
int min;
hipMemcpy(&min,dev_arr,sizeof(int),hipMemcpyDeviceToHost);
cout<<"Min is : "<<min;
cout<<"\n\n";
//STDDV
hipMemcpy(dev_arr,host_arr,SIZE*sizeof(int),hipMemcpyHostToDevice);hipLaunchKernelGGL((
stdDev), dim3(1),dim3(SIZE), 0, 0, dev_arr,mean);
hipMemcpy(host_arr,dev_arr,SIZE*sizeof(int),hipMemcpyDeviceToHost);
cout<<host_arr[0];
cout<<host_arr[1];
cout<<host_arr[2];
cout<<host_arr[3];
cout<<host_arr[4];
cout<<host_arr[5];
cout<<host_arr[6];
cout<<host_arr[7];
cout<<"\n\n";
hipMemcpy(dev_arr,host_arr,SIZE*sizeof(int),hipMemcpyHostToDevice);hipLaunchKernelGGL((
add), dim3(1),dim3(SIZE/2), 0, 0, dev_arr);
int stdDeviation;
hipMemcpy(&stdDeviation,dev_arr,sizeof(int),hipMemcpyDeviceToHost);
cout<<"STDDEV:"<<sqrt(stdDeviation/SIZE);
}
| 5fcfa73896c65a7c54c5f5e4f42b000a603574b4.cu | #include <iostream>
#include <cuda.h>
using namespace std;
// In-place pairwise tree reduction: leaves the sum of a[0..2*blockDim.x-1]
// in a[0].  Must be launched as a single block of len/2 threads.
__global__ void add(int *a)
{
    int tid = threadIdx.x;
    int no_of_threads = blockDim.x;
    int step = 1;
    while (no_of_threads > 0)
    {
        if (tid < no_of_threads)
        {
            int first = tid * step * 2;
            int second = first + step;
            a[first] += a[second];
        }
        // Bug fix: without a barrier, threads in different warps can read
        // a[second] before the previous round has written it (data race).
        // Every thread iterates the loop the same number of times, so the
        // barrier is reached uniformly by the whole block.
        __syncthreads();
        step <<= 1;
        no_of_threads >>= 1;
    }
}
// In-place pairwise tree reduction: leaves the maximum of
// a[0..2*blockDim.x-1] in a[0].  Launch as one block of len/2 threads.
__global__ void max(int *a)
{
    int tid = threadIdx.x;
    int step = 1;
    int no_of_threads = blockDim.x;
    while (no_of_threads > 0)
    {
        if (tid < no_of_threads)
        {
            int first = tid * step * 2;
            int second = first + step;
            a[first] = a[first] > a[second] ? a[first] : a[second];
        }
        // Bug fix: barrier so each round completes before the next one
        // reads its inputs; loop trip count is uniform across the block.
        __syncthreads();
        step <<= 1;
        no_of_threads >>= 1;
    }
}
// In-place pairwise tree reduction: leaves the minimum of
// a[0..2*blockDim.x-1] in a[0].  Launch as one block of len/2 threads.
__global__ void min(int *a)
{
    int tid = threadIdx.x;
    int step = 1;
    int no_of_threads = blockDim.x;
    while (no_of_threads > 0)
    {
        if (tid < no_of_threads)
        {
            int first = tid * step * 2;
            int second = first + step;
            a[first] = a[first] < a[second] ? a[first] : a[second];
        }
        // Bug fix: barrier so each round completes before the next one
        // reads its inputs; loop trip count is uniform across the block.
        __syncthreads();
        step <<= 1;
        no_of_threads >>= 1;
    }
}
// Replace each element by its squared deviation from the mean
// (one thread per element; used as the first step of a std-dev pipeline).
__global__ void stdDev(int *a,int mean){
    const int t = threadIdx.x;
    const int d = a[t] - mean;
    a[t] = d * d;
}
int main()
{
int host_arr[]={1,2,3,4,5,6,7,8};
int *dev_arr;
int SIZE=8;
cudaMalloc((void**)&dev_arr,SIZE*sizeof(int));
//SUM AND AVERAGE
cudaMemcpy(dev_arr,host_arr,SIZE*sizeof(int),cudaMemcpyHostToDevice);
add<<<1,SIZE/2>>>(dev_arr);
int sum;
cudaMemcpy(&sum,dev_arr,sizeof(int),cudaMemcpyDeviceToHost);
int mean=sum/SIZE;
cout<<"Sum is : "<<sum;
cout<<"Average is : "<<mean;
//MAX
cudaMemcpy(dev_arr,host_arr,SIZE*sizeof(int),cudaMemcpyHostToDevice);
max<<<1,SIZE/2>>>(dev_arr);
int max;
cudaMemcpy(&max,dev_arr,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"Max is : "<<max;
//MIN
cudaMemcpy(dev_arr,host_arr,SIZE*sizeof(int),cudaMemcpyHostToDevice);
min<<<1,SIZE/2>>>(dev_arr);
int min;
cudaMemcpy(&min,dev_arr,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"Min is : "<<min;
cout<<"\n\n";
//STDDV
cudaMemcpy(dev_arr,host_arr,SIZE*sizeof(int),cudaMemcpyHostToDevice);
stdDev<<<1,SIZE>>>(dev_arr,mean);
cudaMemcpy(host_arr,dev_arr,SIZE*sizeof(int),cudaMemcpyDeviceToHost);
cout<<host_arr[0];
cout<<host_arr[1];
cout<<host_arr[2];
cout<<host_arr[3];
cout<<host_arr[4];
cout<<host_arr[5];
cout<<host_arr[6];
cout<<host_arr[7];
cout<<"\n\n";
cudaMemcpy(dev_arr,host_arr,SIZE*sizeof(int),cudaMemcpyHostToDevice);
add<<<1,SIZE/2>>>(dev_arr);
int stdDeviation;
cudaMemcpy(&stdDeviation,dev_arr,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"STDDEV:"<<sqrt(stdDeviation/SIZE);
}
|
8201658624bb8699d4d251c685a6210b8c7ae450.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Debug kernel: each launched thread prints its 3-D thread index.
// Device-side printf output is buffered and serialized — debugging only.
__global__ void print_threadIds()
{
printf("threadIdx.x : %d, threadIdx.y : %d, threadIdx.z : %d \n",
threadIdx.x,threadIdx.y,threadIdx.z);
}
//int main()
//{
// int nx, ny;
// nx = 16;
// ny = 16;
//
// dim3 block(8,8,8);
// dim3 grid(nx/ block.x, ny/block.y);
//
// print_threadIds << <grid,block >> > ();
// hipDeviceSynchronize();
//
// hipDeviceReset();
// return 0;
//}
| 8201658624bb8699d4d251c685a6210b8c7ae450.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Debug kernel: each launched thread prints its 3-D thread index.
// Device-side printf output is buffered and serialized — debugging only.
__global__ void print_threadIds()
{
printf("threadIdx.x : %d, threadIdx.y : %d, threadIdx.z : %d \n",
threadIdx.x,threadIdx.y,threadIdx.z);
}
//int main()
//{
// int nx, ny;
// nx = 16;
// ny = 16;
//
// dim3 block(8,8,8);
// dim3 grid(nx/ block.x, ny/block.y);
//
// print_threadIds << <grid,block >> > ();
// cudaDeviceSynchronize();
//
// cudaDeviceReset();
// return 0;
//}
|
112d50caafd1646dd0612807151b4dbbb7de70c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
// One weighted-Jacobi sweep over the interior of an n x m grid, plus a
// block-level max-reduction of the per-point change into
// lchange[blockIdx].  Expects blockDim.x * blockDim.y == 256 (size of the
// shared reduction buffer).
__global__ void
jacobikernel( float* a, float* newa, float* lchange, int n, int m, float w0, float w1, float w2 )
{
    int ti = threadIdx.x;
    int tj = threadIdx.y;
    int i = blockIdx.x * blockDim.x + ti + 1;
    int j = blockIdx.y * blockDim.y + tj + 1;
    // Bug fix: the host rounds the grid up, so tail threads can land
    // outside the interior.  Without this guard they read and write past
    // the end of a/newa.  Out-of-range threads contribute 0.0f to the
    // max-reduction, which cannot change the maximum of absolute values.
    float mydiff = 0.0f;
    if( i < m-1 && j < n-1 ){
        newa[j*m+i] = w0*a[j*m+i] +
            w1 * (a[j*m+i-1] + a[(j-1)*m+i] +
                  a[j*m+i+1] + a[(j+1)*m+i]) +
            w2 * (a[(j-1)*m+i-1] + a[(j+1)*m+i-1] +
                  a[(j-1)*m+i+1] + a[(j+1)*m+i+1]);
        mydiff = fabsf( newa[j*m+i] - a[j*m+i] );
    }
    __shared__ float mychange[256];
    int ii = ti+blockDim.x*tj;
    mychange[ii] = mydiff;
    __syncthreads();
    // Shared-memory max-reduction across the block.
    int nn = blockDim.x * blockDim.y;
    while( (nn>>=1) > 0 ){
        if( ii < nn )
            mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] );
        __syncthreads();
    }
    if( ii == 0 )
        lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0];
}
// Reduce lchange[0..n-1] to a single maximum stored in lchange[0].
// Launch as one block; blockDim.x must be <= 256 (shared buffer size).
__global__ void
reductionkernel( float* lchange, int n )
{
    __shared__ float mychange[256];
    float mych = 0.0f;
    int ii = threadIdx.x, m;
    if( ii < n ) mych = lchange[ii];
    m = blockDim.x;
    // Bug fix: guard on ii+m — the index actually dereferenced.  The old
    // test `m <= n` let threads read lchange[ii+m] past the end of the
    // array (up to n + blockDim.x - 1).
    while( ii + m < n ){
        mych = fmaxf( mych, lchange[ii+m] );
        m += blockDim.x;
    }
    mychange[ii] = mych;
    __syncthreads();
    // Standard shared-memory tree reduction down to mychange[0].
    int nn = blockDim.x;
    while( (nn>>=1) > 0 ){
        if( ii < nn )
            mychange[ii] = fmaxf(mychange[ii],mychange[ii+nn]);
        __syncthreads();
    }
    if( ii == 0 )
        lchange[0] = mychange[0];
}
static float sumtime;
void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol )
{
float change;
int iters;
size_t memsize;
int bx, by, gx, gy;
float *da, *dnewa, *lchange;
hipEvent_t e1, e2;
float changeCheck = 0, oldchange = 0;
bx = 16;
by = 16;
gx = (n-2)/bx + ((n-2)%bx == 0?0:1);
gy = (m-2)/by + ((m-2)%by == 0?0:1);
sumtime = 0.0f;
memsize = sizeof(float) * n * m;
hipMalloc( &da, memsize );
hipMalloc( &dnewa, memsize );
hipMalloc( &lchange, gx * gy * sizeof(float) );
hipEventCreate( &e1 );
hipEventCreate( &e2 );
dim3 block( bx, by );
dim3 grid( gx, gy );
iters = 0;
hipMemcpy( da, a, memsize, hipMemcpyHostToDevice );
hipMemcpy( dnewa, a, memsize, hipMemcpyHostToDevice );
do{
float msec;
++iters;
hipEventRecord( e1 );
hipLaunchKernelGGL(( jacobikernel), dim3(grid), dim3(block) , 0, 0, da, dnewa, lchange, n, m, w0, w1, w2 );
hipLaunchKernelGGL(( reductionkernel), dim3(1), dim3(bx*by) , 0, 0, lchange, gx*gy );
hipEventRecord( e2 );
hipMemcpy( &change, lchange, sizeof(float), hipMemcpyDeviceToHost );
hipEventElapsedTime( &msec, e1, e2 );
sumtime += msec;
float *ta;
ta = da;
da = dnewa;
dnewa = ta;
//printf("iters = %d, change = %f\n", iters, change);
if(change == oldchange)
{
changeCheck++;
}
oldchange = change;
if(changeCheck > sqrt(m))
{
change = (tol - .01);
}
printf("iters = %d, change = %f, changeCheck = %f, oldchange = %f\n", iters, change, changeCheck, oldchange);
}while( change > tol );
printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change );
printf( "JacobiGPU used %f seconds total\n", sumtime/1000.0f );
hipMemcpy( a, dnewa, memsize, hipMemcpyDeviceToHost );
hipFree( da );
hipFree( dnewa );
hipFree( lchange );
hipEventDestroy( e1 );
hipEventDestroy( e2 );
}
// Initialize the n x m grid: interior and remaining boundary are zero,
// the last column gets its row index, the last row gets its column index,
// and the far corner is set to m + n.
static void init( float* a, int n, int m )
{
    int row, col;
    // Clear everything first (0.0f has an all-zero bit pattern).
    for( row = 0; row < n; ++row )
        for( col = 0; col < m; ++col )
            a[row*m + col] = 0.0f;
    /* boundary conditions */
    for( row = 0; row < n; ++row )
        a[row*m + (n-1)] = (float)row;
    for( col = 0; col < m; ++col )
        a[(n-1)*m + col] = (float)col;
    a[(n-1)*m + (m-1)] = (float)(m + n);
}
int
main( int argc, char* argv[] )
{
int n, m;
float *a;
struct timeval tt1, tt2;
int ms;
float fms;
if( argc <= 1 ){
fprintf( stderr, "%s sizen [sizem]\n", argv[0] );
return 1;
}
n = atoi( argv[1] );
if( n <= 0 ) n = 100;
m = n;
if( argc > 2 ){
m = atoi( argv[2] );
if( m <= 0 ) m = 100;
}
printf( "Jacobi %d x %d\n", n, m );
a = (float*)malloc( sizeof(float) * n * m );
init( a, n, m );
gettimeofday( &tt1, NULL );
JacobiGPU( a, n, m, .2, .1, .1, .1 );
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = (float)ms / 1000000.0f;
printf( "time(gpu ) = %f seconds\n", fms );
}
| 112d50caafd1646dd0612807151b4dbbb7de70c6.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
// One weighted-Jacobi sweep over the interior of an n x m grid, plus a
// block-level max-reduction of the per-point change into
// lchange[blockIdx].  Expects blockDim.x * blockDim.y == 256 (size of the
// shared reduction buffer).
__global__ void
jacobikernel( float* a, float* newa, float* lchange, int n, int m, float w0, float w1, float w2 )
{
    int ti = threadIdx.x;
    int tj = threadIdx.y;
    int i = blockIdx.x * blockDim.x + ti + 1;
    int j = blockIdx.y * blockDim.y + tj + 1;
    // Bug fix: the host rounds the grid up, so tail threads can land
    // outside the interior.  Without this guard they read and write past
    // the end of a/newa.  Out-of-range threads contribute 0.0f to the
    // max-reduction, which cannot change the maximum of absolute values.
    float mydiff = 0.0f;
    if( i < m-1 && j < n-1 ){
        newa[j*m+i] = w0*a[j*m+i] +
            w1 * (a[j*m+i-1] + a[(j-1)*m+i] +
                  a[j*m+i+1] + a[(j+1)*m+i]) +
            w2 * (a[(j-1)*m+i-1] + a[(j+1)*m+i-1] +
                  a[(j-1)*m+i+1] + a[(j+1)*m+i+1]);
        mydiff = fabsf( newa[j*m+i] - a[j*m+i] );
    }
    __shared__ float mychange[256];
    int ii = ti+blockDim.x*tj;
    mychange[ii] = mydiff;
    __syncthreads();
    // Shared-memory max-reduction across the block.
    int nn = blockDim.x * blockDim.y;
    while( (nn>>=1) > 0 ){
        if( ii < nn )
            mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] );
        __syncthreads();
    }
    if( ii == 0 )
        lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0];
}
// Reduce lchange[0..n-1] to a single maximum stored in lchange[0].
// Launch as one block; blockDim.x must be <= 256 (shared buffer size).
__global__ void
reductionkernel( float* lchange, int n )
{
    __shared__ float mychange[256];
    float mych = 0.0f;
    int ii = threadIdx.x, m;
    if( ii < n ) mych = lchange[ii];
    m = blockDim.x;
    // Bug fix: guard on ii+m — the index actually dereferenced.  The old
    // test `m <= n` let threads read lchange[ii+m] past the end of the
    // array (up to n + blockDim.x - 1).
    while( ii + m < n ){
        mych = fmaxf( mych, lchange[ii+m] );
        m += blockDim.x;
    }
    mychange[ii] = mych;
    __syncthreads();
    // Standard shared-memory tree reduction down to mychange[0].
    int nn = blockDim.x;
    while( (nn>>=1) > 0 ){
        if( ii < nn )
            mychange[ii] = fmaxf(mychange[ii],mychange[ii+nn]);
        __syncthreads();
    }
    if( ii == 0 )
        lchange[0] = mychange[0];
}
static float sumtime;
void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol )
{
float change;
int iters;
size_t memsize;
int bx, by, gx, gy;
float *da, *dnewa, *lchange;
cudaEvent_t e1, e2;
float changeCheck = 0, oldchange = 0;
bx = 16;
by = 16;
gx = (n-2)/bx + ((n-2)%bx == 0?0:1);
gy = (m-2)/by + ((m-2)%by == 0?0:1);
sumtime = 0.0f;
memsize = sizeof(float) * n * m;
cudaMalloc( &da, memsize );
cudaMalloc( &dnewa, memsize );
cudaMalloc( &lchange, gx * gy * sizeof(float) );
cudaEventCreate( &e1 );
cudaEventCreate( &e2 );
dim3 block( bx, by );
dim3 grid( gx, gy );
iters = 0;
cudaMemcpy( da, a, memsize, cudaMemcpyHostToDevice );
cudaMemcpy( dnewa, a, memsize, cudaMemcpyHostToDevice );
do{
float msec;
++iters;
cudaEventRecord( e1 );
jacobikernel<<< grid, block >>>( da, dnewa, lchange, n, m, w0, w1, w2 );
reductionkernel<<< 1, bx*by >>>( lchange, gx*gy );
cudaEventRecord( e2 );
cudaMemcpy( &change, lchange, sizeof(float), cudaMemcpyDeviceToHost );
cudaEventElapsedTime( &msec, e1, e2 );
sumtime += msec;
float *ta;
ta = da;
da = dnewa;
dnewa = ta;
//printf("iters = %d, change = %f\n", iters, change);
if(change == oldchange)
{
changeCheck++;
}
oldchange = change;
if(changeCheck > sqrt(m))
{
change = (tol - .01);
}
printf("iters = %d, change = %f, changeCheck = %f, oldchange = %f\n", iters, change, changeCheck, oldchange);
}while( change > tol );
printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change );
printf( "JacobiGPU used %f seconds total\n", sumtime/1000.0f );
cudaMemcpy( a, dnewa, memsize, cudaMemcpyDeviceToHost );
cudaFree( da );
cudaFree( dnewa );
cudaFree( lchange );
cudaEventDestroy( e1 );
cudaEventDestroy( e2 );
}
// Initialize the n x m grid: interior and remaining boundary are zero,
// the last column gets its row index, the last row gets its column index,
// and the far corner is set to m + n.
static void init( float* a, int n, int m )
{
    int row, col;
    // Clear everything first (0.0f has an all-zero bit pattern).
    for( row = 0; row < n; ++row )
        for( col = 0; col < m; ++col )
            a[row*m + col] = 0.0f;
    /* boundary conditions */
    for( row = 0; row < n; ++row )
        a[row*m + (n-1)] = (float)row;
    for( col = 0; col < m; ++col )
        a[(n-1)*m + col] = (float)col;
    a[(n-1)*m + (m-1)] = (float)(m + n);
}
int
main( int argc, char* argv[] )
{
int n, m;
float *a;
struct timeval tt1, tt2;
int ms;
float fms;
if( argc <= 1 ){
fprintf( stderr, "%s sizen [sizem]\n", argv[0] );
return 1;
}
n = atoi( argv[1] );
if( n <= 0 ) n = 100;
m = n;
if( argc > 2 ){
m = atoi( argv[2] );
if( m <= 0 ) m = 100;
}
printf( "Jacobi %d x %d\n", n, m );
a = (float*)malloc( sizeof(float) * n * m );
init( a, n, m );
gettimeofday( &tt1, NULL );
JacobiGPU( a, n, m, .2, .1, .1, .1 );
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = (float)ms / 1000000.0f;
printf( "time(gpu ) = %f seconds\n", fms );
}
|
c8201670d32204669da39a1a987eaccc8d889df7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Invert the laplace equation with boundary conditions in x using the cusparse tridiagonal solver with the new datatype
*
* Invert
* g(x,y) = exp(-(x^2 + y^2) / 2)
* \nabla^2 g(x,y) = f(x,y)
* where
* f(x,y) = exp(-(x^2 + y^2) / 2) (-2 + x^2 + y^2)
*
* Goal: Given f(x,y) find g(x,y)
*/
#include <iostream>
#include <sstream>
#include "slab_bc.h"
using namespace std;
int main(void){
size_t Nx{128};
size_t My{128};
constexpr twodads::real_t x_l{-10.0};
constexpr twodads::real_t Lx{20.0};
constexpr twodads::real_t y_l{-10.0};
constexpr twodads::real_t Ly{20.0};
constexpr size_t tlevs{1}; // time levels for all arrays
constexpr size_t tsrc{0}; // the time level we operate on
cout << "Enter Nx: ";
cin >> Nx;
cout << "Enter My: ";
cin >> My;
stringstream fname;
twodads::slab_layout_t my_geom(x_l, Lx / double(Nx), y_l, Ly / double(My), Nx, 0, My, 2, twodads::grid_t::cell_centered);
twodads::bvals_t<double> my_bvals{twodads::bc_t::bc_dirichlet, twodads::bc_t::bc_dirichlet, twodads::bc_t::bc_periodic, twodads::bc_t::bc_periodic,
0.0, 0.0, 0.0, 0.0};
twodads::stiff_params_t params(0.001, 20.0, 20.0, 0.001, 0.0, my_geom.get_nx(), (my_geom.get_my() + my_geom.get_pad_y()) / 2, tlevs);
{
slab_bc my_slab(my_geom, my_bvals, params);
my_slab.initialize_invlaplace(twodads::field_t::f_omega, tsrc);
fname << "test_laplace_input_" << Nx << "_device.dat";
utility :: print((*my_slab.get_array_ptr(twodads::field_t::f_omega)), tsrc, fname.str());
cuda_array_bc_nogp<twodads::real_t, allocator_device> sol_an(my_geom, my_bvals, tlevs);
sol_an.apply([] __device__ (twodads::real_t dummy, size_t n, size_t m, twodads::slab_layout_t geom) -> twodads::real_t
{
const twodads::real_t x{geom.get_x(n)};
const twodads::real_t y{geom.get_y(m)};
return(exp(-0.5 * (x * x + y * y)));
},
tsrc);
fname.str(string(""));
fname << "test_laplace_solan_" << Nx << "_device.dat";
utility :: print(sol_an, tsrc, fname.str());
my_slab.invert_laplace(twodads::field_t::f_omega, twodads::field_t::f_strmf, tsrc, tsrc);
// Write numerical solution to file
fname.str(string(""));
fname << "test_laplace_solnum_" << Nx << "_device.dat";
utility :: print((*my_slab.get_array_ptr(twodads::field_t::f_strmf)), tsrc, fname.str());
// Get the analytic solution
sol_an -= my_slab.get_array_ptr(twodads::field_t::f_strmf);
cout << "Nx = " << Nx << ", My = " << My << ", L2 = " << utility :: L2(sol_an, tsrc) << endl;
} // Let managed memory go out of scope before calling hipDeviceReset()
// However cublas_handle_t survivs this scoping and we get a segfault from its destructor
/*
warning: Cuda API error detected: hipFree returned (0x11)
warning: Cuda API error detected: hipEventDestroy returned (0x1e)
warning: Cuda API error detected: hipEventDestroy returned (0x1e)
Program received signal SIGSEGV, Segmentation fault.
0x00007fffeb3e903a in cuMemGetAttribute_v2 () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
(cuda-gdb) bt
#0 0x00007fffeb3e903a in cuMemGetAttribute_v2 () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#1 0x00007fffeb3598a6 in hipVDPAUCtxCreate () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#2 0x00007fffeb33293a in hipEventDestroy () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#3 0x00007fffefad7724 in cublasSgemmEx () from /usr/local/cuda/lib64/libcublas.so.7.5
#4 0x00007fffefb0b984 in cublasSgemmEx () from /usr/local/cuda/lib64/libcublas.so.7.5
#5 0x00007fffef8e4797 in ?? () from /usr/local/cuda/lib64/libcublas.so.7.5
#6 0x00007fffef91932d in hipblasDestroy () from /usr/local/cuda/lib64/libcublas.so.7.5
#7 0x0000000000414e38 in solvers::cublas_handle_t::~cublas_handle_t (this=0x728100 <solvers::cublas_handle_t::get_handle()::h>, __in_chrg=<optimized out>) at /home/rku000/source/2dads/include/solvers.h:49
#8 0x00007fffec1d8b29 in secure_getenv () from /lib/x86_64-linux-gnu/libc.so.6
#9 0x00007fffec1d8b75 in exit () from /lib/x86_64-linux-gnu/libc.so.6
#10 0x00007fffec1c2b4c in __libc_start_main () from /lib/x86_64-linux-gnu/libc.so.6
*/
//hipDeviceReset();
} | c8201670d32204669da39a1a987eaccc8d889df7.cu | /*
* Invert the laplace equation with boundary conditions in x using the cusparse tridiagonal solver with the new datatype
*
* Invert
* g(x,y) = exp(-(x^2 + y^2) / 2)
* \nabla^2 g(x,y) = f(x,y)
* where
* f(x,y) = exp(-(x^2 + y^2) / 2) (-2 + x^2 + y^2)
*
* Goal: Given f(x,y) find g(x,y)
*/
#include <iostream>
#include <sstream>
#include "slab_bc.h"
using namespace std;
int main(void){
size_t Nx{128};
size_t My{128};
constexpr twodads::real_t x_l{-10.0};
constexpr twodads::real_t Lx{20.0};
constexpr twodads::real_t y_l{-10.0};
constexpr twodads::real_t Ly{20.0};
constexpr size_t tlevs{1}; // time levels for all arrays
constexpr size_t tsrc{0}; // the time level we operate on
cout << "Enter Nx: ";
cin >> Nx;
cout << "Enter My: ";
cin >> My;
stringstream fname;
twodads::slab_layout_t my_geom(x_l, Lx / double(Nx), y_l, Ly / double(My), Nx, 0, My, 2, twodads::grid_t::cell_centered);
twodads::bvals_t<double> my_bvals{twodads::bc_t::bc_dirichlet, twodads::bc_t::bc_dirichlet, twodads::bc_t::bc_periodic, twodads::bc_t::bc_periodic,
0.0, 0.0, 0.0, 0.0};
twodads::stiff_params_t params(0.001, 20.0, 20.0, 0.001, 0.0, my_geom.get_nx(), (my_geom.get_my() + my_geom.get_pad_y()) / 2, tlevs);
{
slab_bc my_slab(my_geom, my_bvals, params);
my_slab.initialize_invlaplace(twodads::field_t::f_omega, tsrc);
fname << "test_laplace_input_" << Nx << "_device.dat";
utility :: print((*my_slab.get_array_ptr(twodads::field_t::f_omega)), tsrc, fname.str());
cuda_array_bc_nogp<twodads::real_t, allocator_device> sol_an(my_geom, my_bvals, tlevs);
sol_an.apply([] __device__ (twodads::real_t dummy, size_t n, size_t m, twodads::slab_layout_t geom) -> twodads::real_t
{
const twodads::real_t x{geom.get_x(n)};
const twodads::real_t y{geom.get_y(m)};
return(exp(-0.5 * (x * x + y * y)));
},
tsrc);
fname.str(string(""));
fname << "test_laplace_solan_" << Nx << "_device.dat";
utility :: print(sol_an, tsrc, fname.str());
my_slab.invert_laplace(twodads::field_t::f_omega, twodads::field_t::f_strmf, tsrc, tsrc);
// Write numerical solution to file
fname.str(string(""));
fname << "test_laplace_solnum_" << Nx << "_device.dat";
utility :: print((*my_slab.get_array_ptr(twodads::field_t::f_strmf)), tsrc, fname.str());
// Get the analytic solution
sol_an -= my_slab.get_array_ptr(twodads::field_t::f_strmf);
cout << "Nx = " << Nx << ", My = " << My << ", L2 = " << utility :: L2(sol_an, tsrc) << endl;
} // Let managed memory go out of scope before calling cudaDeviceReset()
// However cublas_handle_t survivs this scoping and we get a segfault from its destructor
/*
warning: Cuda API error detected: cudaFree returned (0x11)
warning: Cuda API error detected: cudaEventDestroy returned (0x1e)
warning: Cuda API error detected: cudaEventDestroy returned (0x1e)
Program received signal SIGSEGV, Segmentation fault.
0x00007fffeb3e903a in cuMemGetAttribute_v2 () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
(cuda-gdb) bt
#0 0x00007fffeb3e903a in cuMemGetAttribute_v2 () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#1 0x00007fffeb3598a6 in cuVDPAUCtxCreate () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#2 0x00007fffeb33293a in cuEventDestroy_v2 () from /usr/lib/x86_64-linux-gnu/libcuda.so.1
#3 0x00007fffefad7724 in cublasSgemmEx () from /usr/local/cuda/lib64/libcublas.so.7.5
#4 0x00007fffefb0b984 in cublasSgemmEx () from /usr/local/cuda/lib64/libcublas.so.7.5
#5 0x00007fffef8e4797 in ?? () from /usr/local/cuda/lib64/libcublas.so.7.5
#6 0x00007fffef91932d in cublasDestroy_v2 () from /usr/local/cuda/lib64/libcublas.so.7.5
#7 0x0000000000414e38 in solvers::cublas_handle_t::~cublas_handle_t (this=0x728100 <solvers::cublas_handle_t::get_handle()::h>, __in_chrg=<optimized out>) at /home/rku000/source/2dads/include/solvers.h:49
#8 0x00007fffec1d8b29 in secure_getenv () from /lib/x86_64-linux-gnu/libc.so.6
#9 0x00007fffec1d8b75 in exit () from /lib/x86_64-linux-gnu/libc.so.6
#10 0x00007fffec1c2b4c in __libc_start_main () from /lib/x86_64-linux-gnu/libc.so.6
*/
//cudaDeviceReset();
} |
c8b6cf11cf82ff20fa155f3467d4eecf76b98aa6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/topk_grad_hook_op.h"
#include <algorithm>
#include <array>
#include <functional>
#include <limits>
#include <numeric>
#include <vector>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/top_k_heap_selection.cuh"
#include "caffe2/operators/top_k_radix_selection.cuh"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
// Selects the top-k values of each row using per-warp heaps in shared
// memory (efficient for small k). Launches one 256-thread block per row.
// input:   outer_size x inner_size row-major matrix (device memory).
// values:  outer_size x k output values (device memory).
// indices: outer_size x k output column indices (device memory).
// NOTE(review): presumably requires k <= kHeapSize -- confirm at call sites.
template <typename T, int kHeapSize, bool kSelectMax = true>
void RunHeapSelectionImpl(
const T* input,
const TIndex outer_size,
const TIndex inner_size,
const int k,
T* values,
TIndex* indices,
CUDAContext* context) {
constexpr int kBlockSize = 256;
constexpr int kNumWarps = kBlockSize / kWarpSize;
// One heap of kHeapSize (value, index) pairs per warp in shared memory.
constexpr int smem = kNumWarps * kHeapSize * (sizeof(T) + sizeof(TIndex));
// Sentinel that loses every comparison so real elements displace it.
constexpr T kInitVal = kSelectMax ? std::numeric_limits<T>::lowest()
: std::numeric_limits<T>::max();
hipLaunchKernelGGL(( selectRowsViaHeap<T, TIndex, TIndex, kBlockSize, kHeapSize, kSelectMax>)
, dim3(outer_size), dim3(kBlockSize), smem, context->cuda_stream(),
input,
values,
indices,
kInitVal,
std::numeric_limits<TIndex>::max(),
outer_size,
inner_size,
k);
}
// Selects the top-k values of each row via radix selection (preferred for
// large k, where the heap variant would exhaust shared memory). One block
// per row; the block size is inner_size rounded up to a warp multiple and
// capped at CAFFE_CUDA_NUM_THREADS.
template <typename T, bool kSelectMax = true>
void RunRadixSelectionImpl(
const T* input,
const TIndex outer_size,
const TIndex inner_size,
const int k,
T* values,
TIndex* indices,
CUDAContext* context) {
const int block = ::min(
math::roundUp(static_cast<int>(inner_size), kWarpSize),
CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( gatherTopK<T, kSelectMax, TIndex>)
, dim3(outer_size), dim3(block), 0, context->cuda_stream(),
input, inner_size, k, outer_size, values, indices);
// Unfortunately the output is not currently sorted, and there is no batch
// sorting utility available. Iterate over all of the slices and sort them
// in-place using Thrust.
// NOTE(review): this issues one thrust::sort_by_key per row on the same
// stream, i.e. O(outer_size) kernel launches -- fine for small batches.
for (int i = 0; i < outer_size; ++i) {
thrust::sort_by_key(
thrust::hip::par.on(context->cuda_stream()),
values + i * k,
values + i * k + k,
indices + i * k,
thrust::greater<T>());
}
}
// Dispatches a per-row top-k to the heap- or radix-based implementation.
// Small k favors heap selection (the per-warp heap fits in shared memory);
// k >= 512 falls back to radix selection.
template <typename T>
void RunTopKOnLastDimCUDAImpl(
const T* input,
const TIndex outer_size,
const TIndex inner_size,
const int k,
T* values,
TIndex* indices,
CUDAContext* context) {
if (k >= 512) {
// Heap selection no longer pays off for large k.
RunRadixSelectionImpl<T>(
input, outer_size, inner_size, k, values, indices, context);
return;
}
// Pick the smallest heap capacity that can hold k candidates.
if (k < 32) {
RunHeapSelectionImpl<T, 32>(
input, outer_size, inner_size, k, values, indices, context);
} else if (k < 128) {
RunHeapSelectionImpl<T, 128>(
input, outer_size, inner_size, k, values, indices, context);
} else {
RunHeapSelectionImpl<T, 512>(
input, outer_size, inner_size, k, values, indices, context);
}
}
// Reduces a 4D NxCxHxW gradient tensor to a 2D NxC saliency tensor:
// dY_reduction_data[n * C + c] = sum over (h, w) of |dY_data[n, c, h, w]|.
// One thread of the 1D grid handles one (n, c) slice with a sequential
// thrust reduction over its H*W elements.
__global__ void GradsReduction4D(
const int N,
const int C,
const int H,
const int W,
const float* dY_data,
float* dY_reduction_data) {
/// 4D NxCxHxW grads tensor reduction to 2D NxC grads tensor
CUDA_1D_KERNEL_LOOP(i, N * C) {
int begin_offset = i * H * W;
int end_offset = (i + 1) * H * W;
// BUG FIX: the initial value must be a float literal. With the int
// literal 0 thrust deduces an int accumulator (the result type of
// transform_reduce is the type of init), truncating every partial sum
// of |grad| toward zero and corrupting the channel saliency scores.
dY_reduction_data[i] = thrust::transform_reduce(
thrust::device,
dY_data + begin_offset,
dY_data + end_offset,
fabsf,
0.0f,
thrust::plus<float>());
}
}
// Elementwise magnitude for an already-2D NxC gradient tensor:
// dY_reduction_data[i] = |dY_data[i]|. There are no spatial dims to sum
// over, so despite the name this is a pure map, not a reduction.
__global__ void GradsReduction2D(
const int N,
const int C,
const float* dY_data,
float* dY_reduction_data) {
/// 2D NxC grads tensor: take the absolute value of each entry.
CUDA_1D_KERNEL_LOOP(i, N * C) {
dY_reduction_data[i] = fabsf(dY_data[i]);
}
}
// Copies the k selected HxW channel planes of each sample from dY_data
// into dY_topk_data; every other channel of the output is left untouched.
// The "zero out" in the name describes the net effect only when the output
// buffer starts out zero-filled.
// NOTE(review): nothing in this kernel writes zeros -- it relies on the
// caller pre-zeroing dY_topk_data. Verify the caller actually does so,
// otherwise the non-selected channels hold stale memory.
// indices_data: for each sample ii, the k channel indices to keep.
__global__ void ChannelZeroOut4D(
const int N,
const int C,
const int k,
const int H,
const int W,
const float* dY_data,
float* dY_topk_data,
TIndex* indices_data) {
/// copy the k kept channel planes of a 4D NxCxHxW tensor
CUDA_1D_KERNEL_LOOP(i, N * k) {
int ii = i / k;  // sample index
// int jj = i % k;
int zero_channel = indices_data[i];  // channel to keep for this sample
int begin_offset = ii * (C * H * W) + zero_channel * (H * W);
// int end_offset = ii * (C * H * W) + (zero_channel + 1) * (H * W);
// Device-side memcpy of one contiguous HxW channel plane.
memcpy(dY_topk_data + begin_offset,
dY_data + begin_offset,
(H * W) * sizeof(float));
}
}
// Copies the k selected per-channel gradient scalars of each sample from
// dY_data into dY_topk_data (2D NxC layout); other entries are untouched.
// NOTE(review): like the 4D variant, this never writes zeros -- it assumes
// the caller pre-zeroed dY_topk_data; confirm.
__global__ void ChannelZeroOut2D(
const int N,
const int C,
const int k,
const float* dY_data,
float* dY_topk_data,
TIndex* indices_data) {
/// copy the k kept channel entries of a 2D NxC tensor
CUDA_1D_KERNEL_LOOP(i, N * k) {
int ii = i / k;  // sample index
int zero_channel = indices_data[i];  // channel to keep
dY_topk_data[ii * C + zero_channel] = dY_data[ii * C + zero_channel];
}
}
} // namespace
// Forward pass of the top-k gradient hook. The op is an identity on the
// forward path (output = input); the interesting work happens in the
// gradient operator below.
template <>
bool TopKGradHookOp<float, CUDAContext>::RunOnDevice() {
const auto& input = Input(0);
auto* output = Output(0);
output->ResizeLike(input);
output->CopyFrom(input);
return true;
}
// Gradient operator for TopKGradHook: for each sample it keeps the
// gradient of the k channels whose aggregate |gradient| is largest and
// suppresses the rest. The "k" operator argument is required (>= 1).
template <typename T>
class TopKGradHookGradientOp<T, CUDAContext> : public Operator<CUDAContext> {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
TopKGradHookGradientOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CUDAContext>(operator_def, ws),
OP_SINGLE_ARG(int, "k", k_, -1) {
// -1 default guarantees the enforce fires when "k" was not supplied.
CAFFE_ENFORCE(k_ >= 1, "k argument must be >= 1");
}
~TopKGradHookGradientOp(){};
bool RunOnDevice() override;
private:
// Number of channels to keep per sample.
const int k_;
// Buffers for CUDAContext.
// NOTE(review): the transposed buffers and shape tensors below are not
// referenced by the RunOnDevice implementation in this file -- presumably
// leftovers from the generic TopK op; confirm before relying on them.
TensorCUDA input_transposed_buffer_;
TensorCUDA values_transposed_buffer_;
TensorCUDA indices_transposed_buffer_;
// Shape tensors on device for CUDAContext.
TensorCUDA input_dims_device_;
TensorCUDA input_transposed_dims_device_;
TensorCUDA input_axes_device_;
TensorCUDA output_dims_device_;
TensorCUDA output_transposed_dims_device_;
TensorCUDA output_transposed_axes_device_;
};
// Gradient pass: keeps only the k_ channels per sample with the largest
// aggregate |gradient| and drops the gradient of the rest.
// Pipeline:
//   #1 validate the 2D/4D input gradient dY and size the output like it;
//   #2 reduce dY to an NxC per-channel saliency tensor (sum of |grad|);
//   #3 run top-k over the channel axis of that NxC tensor;
//   #4 copy only the winning channels of dY into the output dY_topk.
// NOTE(review): dY_topk's buffer is never zero-filled before step #4, so
// the non-selected channels may contain stale device memory -- confirm a
// zero-fill happens elsewhere or that consumers ignore those channels.
// NOTE(review): the std::cout blocks below are debug instrumentation that
// synchronously copies device data to the host on every invocation; they
// should be removed or gated before production use.
template <typename T>
bool TopKGradHookGradientOp<T, CUDAContext>::RunOnDevice() {
std::cout << "========================= In TopKGradHookGradientOp op (GPU) ======================" << std::endl;
/// #1 --- input check
const auto& dY = Input(0);
auto* dY_topk = Output(0);
const std::vector<TIndex>& input_dims = dY.dims();  // currently unused
// The input tensor must be a 4D (NxCxHxW) or 2D (NxC) tensor.
CAFFE_ENFORCE(dY.ndim() == 2 || dY.ndim() == 4,
"The dimession of input tensor must be 2 or 4");
dY_topk->ResizeLike(dY);
/// #2 --- if the input is a 4D tensor (NxCxHxW), reduce it to a 2D tensor
/// (NxC) by summing |grad| over the last two dimensions
int N = dY.dim32(0); // batchsize
int C = dY.dim32(1); // channel
std::vector<TIndex> output_dims = {N, C};
Tensor<CUDAContext> dY_reduction(output_dims);
const float* dY_original_data = dY.template data<float>();
if (dY.ndim() == 4) {
int H = dY.dim32(2);
int W = dY.dim32(3);
float* dY_reduction_data = dY_reduction.template mutable_data<float>();  // unused; the launch re-fetches the pointer
hipLaunchKernelGGL(( GradsReduction4D),
dim3(CAFFE_GET_BLOCKS(dY_reduction.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, C, H, W,
dY_original_data,
dY_reduction.template mutable_data<float>());
}else {
// dY_reduction.CopyFrom(dY);
hipLaunchKernelGGL(( GradsReduction2D),
dim3(CAFFE_GET_BLOCKS(dY_reduction.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, C,
dY_original_data,
dY_reduction.template mutable_data<float>());
}
// DEBUG PRINT: pull the NxC saliency tensor back to the host and dump it.
Tensor<CPUContext> dY_reduction_CPU(output_dims);
context_.Copy<float, CUDAContext, CPUContext>(
dY_reduction.size(),
dY_reduction.data<float>(),
dY_reduction_CPU.mutable_data<float>());
std::cout << "dY reduction data (only first two dimensions)\n";
const float* dY_reduction_cpu_data = dY_reduction_CPU.data<float>();
for (TIndex i = 0; i < N; ++i) {
for (TIndex j = 0; j < C; ++j) {
std::cout << dY_reduction_cpu_data[i * C + j] << ", ";
}
std::cout << std::endl;
}
std::cout << "print over" << std::endl;
/// #3 --- get topk in channel level
int AXIS_ = 1; // hard-coded, only for channel dimension topk
CAFFE_ENFORCE_LE(
k_,
output_dims[AXIS_],
"k argument should not be greater than the channel dim.");
std::vector<TIndex> topk_dims = output_dims;
topk_dims[AXIS_] = k_;
Tensor<CUDAContext> values(topk_dims);
Tensor<CUDAContext> indices(topk_dims);
float* values_data = values.template mutable_data<float>();
TIndex* indices_data = indices.template mutable_data<TIndex>();
// prev_size/next_size are computed but currently unused: AXIS_ is the
// last axis of the NxC tensor, so no transpose is needed.
const TIndex prev_size = std::accumulate(
output_dims.cbegin(),
output_dims.cbegin() + AXIS_,
TIndex(1),
std::multiplies<TIndex>());
const TIndex next_size = std::accumulate(
output_dims.cbegin() + AXIS_ + 1,
output_dims.cend(),
TIndex(1),
std::multiplies<TIndex>());
const TIndex outer_size = dY_reduction.size() / output_dims[AXIS_];  // = N
const TIndex inner_size = output_dims[AXIS_];  // = C
RunTopKOnLastDimCUDAImpl<T>(
// dY_reduction_data,
dY_reduction.template data<float>(),
outer_size,
inner_size,
k_,
values_data,
indices_data,
&context_);
// DEBUG PRINT: dump the per-sample top-k values.
Tensor<CPUContext> values_cpu(topk_dims);
context_.Copy<float, CUDAContext, CPUContext>(
values.size(),
values.data<float>(),
values_cpu.mutable_data<float>());
std::cout << "dY reduction topk values\n";
const float* values_cpu_data = values_cpu.data<float>();
for (TIndex i = 0; i < N; ++i) {
for (TIndex j = 0; j < k_; ++j) {
std::cout << values_cpu_data[i * k_ + j] << ", ";
}
std::cout << std::endl;
}
std::cout << "print over\n" << std::endl;
// DEBUG PRINT: dump the per-sample top-k channel indices.
Tensor<CPUContext> indices_cpu(topk_dims);
context_.Copy<TIndex, CUDAContext, CPUContext>(
indices.size(),
indices.data<TIndex>(),
indices_cpu.mutable_data<TIndex>());
std::cout << "dY reduction topk indices\n";
const TIndex* indices_cpu_data = indices_cpu.data<TIndex>();
for (TIndex i = 0; i < N; ++i) {
for (TIndex j = 0; j < k_; ++j) {
std::cout << indices_cpu_data[i * k_ + j] << ", ";
}
std::cout << std::endl;
}
std::cout << "print over\n" << std::endl;
/// #4 --- copy the selected channels into dY_topk (see the NOTE above
/// about zero-initialization of the remaining channels)
float* dY_topk_data = dY_topk->template mutable_data<float>();
if (dY.ndim() == 4) {
int H = dY.dim32(2);
int W = dY.dim32(3);
hipLaunchKernelGGL(( ChannelZeroOut4D),
dim3(CAFFE_GET_BLOCKS(indices.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, C, k_, H, W,
dY_original_data,
dY_topk_data,
indices_data);
}else {
hipLaunchKernelGGL(( ChannelZeroOut2D),
dim3(CAFFE_GET_BLOCKS(indices.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, C, k_,
dY_original_data,
dY_topk_data,
indices_data);
}
std::cout << "========================= In TopKGradHookGradientOp op (GPU) ======================" << std::endl;
return true;
}
REGISTER_CUDA_OPERATOR(TopKGradHook, TopKGradHookOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(TopKGradHookGradient,
TopKGradHookGradientOp<float, CUDAContext>);
} // namespace caffe2
| c8b6cf11cf82ff20fa155f3467d4eecf76b98aa6.cu | #include "caffe2/operators/topk_grad_hook_op.h"
#include <algorithm>
#include <array>
#include <functional>
#include <limits>
#include <numeric>
#include <vector>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/top_k_heap_selection.cuh"
#include "caffe2/operators/top_k_radix_selection.cuh"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T, int kHeapSize, bool kSelectMax = true>
void RunHeapSelectionImpl(
const T* input,
const TIndex outer_size,
const TIndex inner_size,
const int k,
T* values,
TIndex* indices,
CUDAContext* context) {
constexpr int kBlockSize = 256;
constexpr int kNumWarps = kBlockSize / kWarpSize;
constexpr int smem = kNumWarps * kHeapSize * (sizeof(T) + sizeof(TIndex));
constexpr T kInitVal = kSelectMax ? std::numeric_limits<T>::lowest()
: std::numeric_limits<T>::max();
selectRowsViaHeap<T, TIndex, TIndex, kBlockSize, kHeapSize, kSelectMax>
<<<outer_size, kBlockSize, smem, context->cuda_stream()>>>(
input,
values,
indices,
kInitVal,
std::numeric_limits<TIndex>::max(),
outer_size,
inner_size,
k);
}
template <typename T, bool kSelectMax = true>
void RunRadixSelectionImpl(
const T* input,
const TIndex outer_size,
const TIndex inner_size,
const int k,
T* values,
TIndex* indices,
CUDAContext* context) {
const int block = std::min(
math::roundUp(static_cast<int>(inner_size), kWarpSize),
CAFFE_CUDA_NUM_THREADS);
gatherTopK<T, kSelectMax, TIndex>
<<<outer_size, block, 0, context->cuda_stream()>>>(
input, inner_size, k, outer_size, values, indices);
// Unfortunately the output is not currently sorted, and there is no batch
// sorting utility available. Iterate over all of the slices and sort them
// in-place using Thrust.
for (int i = 0; i < outer_size; ++i) {
thrust::sort_by_key(
thrust::cuda::par.on(context->cuda_stream()),
values + i * k,
values + i * k + k,
indices + i * k,
thrust::greater<T>());
}
}
template <typename T>
void RunTopKOnLastDimCUDAImpl(
const T* input,
const TIndex outer_size,
const TIndex inner_size,
const int k,
T* values,
TIndex* indices,
CUDAContext* context) {
// If k is small, uses heap selection, otherwise uses radix selection.
if (k < 32) {
RunHeapSelectionImpl<T, 32>(
input, outer_size, inner_size, k, values, indices, context);
} else if (k < 128) {
RunHeapSelectionImpl<T, 128>(
input, outer_size, inner_size, k, values, indices, context);
} else if (k < 512) {
RunHeapSelectionImpl<T, 512>(
input, outer_size, inner_size, k, values, indices, context);
} else {
RunRadixSelectionImpl<T>(
input, outer_size, inner_size, k, values, indices, context);
}
}
// Reduces a 4D NxCxHxW gradient tensor to a 2D NxC saliency tensor:
// dY_reduction_data[n * C + c] = sum over (h, w) of |dY_data[n, c, h, w]|.
// One thread of the 1D grid handles one (n, c) slice with a sequential
// thrust reduction over its H*W elements.
__global__ void GradsReduction4D(
const int N,
const int C,
const int H,
const int W,
const float* dY_data,
float* dY_reduction_data) {
/// 4D NxCxHxW grads tensor reduction to 2D NxC grads tensor
CUDA_1D_KERNEL_LOOP(i, N * C) {
int begin_offset = i * H * W;
int end_offset = (i + 1) * H * W;
// BUG FIX: the initial value must be a float literal. With the int
// literal 0 thrust deduces an int accumulator (the result type of
// transform_reduce is the type of init), truncating every partial sum
// of |grad| toward zero and corrupting the channel saliency scores.
dY_reduction_data[i] = thrust::transform_reduce(
thrust::device,
dY_data + begin_offset,
dY_data + end_offset,
fabsf,
0.0f,
thrust::plus<float>());
}
}
__global__ void GradsReduction2D(
const int N,
const int C,
const float* dY_data,
float* dY_reduction_data) {
/// 4D NxCxHxW grads tensor reduction to 2D NxC grads tensor
CUDA_1D_KERNEL_LOOP(i, N * C) {
dY_reduction_data[i] = fabsf(dY_data[i]);
}
}
// Copies the k selected HxW channel planes of each sample from dY_data
// into dY_topk_data; every other channel of the output is left untouched.
// The "zero out" in the name describes the net effect only when the output
// buffer starts out zero-filled.
// NOTE(review): nothing in this kernel writes zeros -- it relies on the
// caller pre-zeroing dY_topk_data. Verify the caller actually does so,
// otherwise the non-selected channels hold stale memory.
// indices_data: for each sample ii, the k channel indices to keep.
__global__ void ChannelZeroOut4D(
const int N,
const int C,
const int k,
const int H,
const int W,
const float* dY_data,
float* dY_topk_data,
TIndex* indices_data) {
/// copy the k kept channel planes of a 4D NxCxHxW tensor
CUDA_1D_KERNEL_LOOP(i, N * k) {
int ii = i / k;  // sample index
// int jj = i % k;
int zero_channel = indices_data[i];  // channel to keep for this sample
int begin_offset = ii * (C * H * W) + zero_channel * (H * W);
// int end_offset = ii * (C * H * W) + (zero_channel + 1) * (H * W);
// Device-side memcpy of one contiguous HxW channel plane.
memcpy(dY_topk_data + begin_offset,
dY_data + begin_offset,
(H * W) * sizeof(float));
}
}
__global__ void ChannelZeroOut2D(
const int N,
const int C,
const int k,
const float* dY_data,
float* dY_topk_data,
TIndex* indices_data) {
/// zero out specific channel dimention of 4D NxCxHxW
CUDA_1D_KERNEL_LOOP(i, N * k) {
int ii = i / k;
int zero_channel = indices_data[i];
dY_topk_data[ii * C + zero_channel] = dY_data[ii * C + zero_channel];
}
}
} // namespace
template <>
bool TopKGradHookOp<float, CUDAContext>::RunOnDevice() {
// input check
auto& X = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X);
Y->CopyFrom(X);
return true;
} //TopKGradHookOp::RunOnDevice
template <typename T>
class TopKGradHookGradientOp<T, CUDAContext> : public Operator<CUDAContext> {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
TopKGradHookGradientOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CUDAContext>(operator_def, ws),
OP_SINGLE_ARG(int, "k", k_, -1) {
CAFFE_ENFORCE(k_ >= 1, "k argument must be >= 1");
}
~TopKGradHookGradientOp(){};
bool RunOnDevice() override;
private:
const int k_;
// Buffers for CUDAContext.
TensorCUDA input_transposed_buffer_;
TensorCUDA values_transposed_buffer_;
TensorCUDA indices_transposed_buffer_;
// Shape tensors on device for CUDAContext.
TensorCUDA input_dims_device_;
TensorCUDA input_transposed_dims_device_;
TensorCUDA input_axes_device_;
TensorCUDA output_dims_device_;
TensorCUDA output_transposed_dims_device_;
TensorCUDA output_transposed_axes_device_;
};
template <typename T>
bool TopKGradHookGradientOp<T, CUDAContext>::RunOnDevice() {
std::cout << "========================= In TopKGradHookGradientOp op (GPU) ======================" << std::endl;
/// #1 --- input check
const auto& dY = Input(0);
auto* dY_topk = Output(0);
const std::vector<TIndex>& input_dims = dY.dims();
// The input tensor must be 4D or 2D tensor
CAFFE_ENFORCE(dY.ndim() == 2 || dY.ndim() == 4,
"The dimession of input tensor must be 2 or 4");
dY_topk->ResizeLike(dY);
/// #2 --- if input is 4D tensor (NxCxHxW), reduce it back to 2D tensor
/// (NxC) by summing the last two dimentions
int N = dY.dim32(0); // batchsize
int C = dY.dim32(1); // channel
std::vector<TIndex> output_dims = {N, C};
Tensor<CUDAContext> dY_reduction(output_dims);
const float* dY_original_data = dY.template data<float>();
if (dY.ndim() == 4) {
int H = dY.dim32(2);
int W = dY.dim32(3);
float* dY_reduction_data = dY_reduction.template mutable_data<float>();
GradsReduction4D<<<
CAFFE_GET_BLOCKS(dY_reduction.size()),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, C, H, W,
dY_original_data,
dY_reduction.template mutable_data<float>());
}else {
// dY_reduction.CopyFrom(dY);
GradsReduction2D<<<
CAFFE_GET_BLOCKS(dY_reduction.size()),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, C,
dY_original_data,
dY_reduction.template mutable_data<float>());
}
// DEBUG PRINT
Tensor<CPUContext> dY_reduction_CPU(output_dims);
context_.Copy<float, CUDAContext, CPUContext>(
dY_reduction.size(),
dY_reduction.data<float>(),
dY_reduction_CPU.mutable_data<float>());
std::cout << "dY reduction data (only first two dimensions)\n";
const float* dY_reduction_cpu_data = dY_reduction_CPU.data<float>();
for (TIndex i = 0; i < N; ++i) {
for (TIndex j = 0; j < C; ++j) {
std::cout << dY_reduction_cpu_data[i * C + j] << ", ";
}
std::cout << std::endl;
}
std::cout << "print over" << std::endl;
/// #3 --- get topk in channel level
int AXIS_ = 1; // hard-coded, only for channel dimension topk
CAFFE_ENFORCE_LE(
k_,
output_dims[AXIS_],
"k argument should not be greater than the channel dim.");
std::vector<TIndex> topk_dims = output_dims;
topk_dims[AXIS_] = k_;
Tensor<CUDAContext> values(topk_dims);
Tensor<CUDAContext> indices(topk_dims);
float* values_data = values.template mutable_data<float>();
TIndex* indices_data = indices.template mutable_data<TIndex>();
const TIndex prev_size = std::accumulate(
output_dims.cbegin(),
output_dims.cbegin() + AXIS_,
TIndex(1),
std::multiplies<TIndex>());
const TIndex next_size = std::accumulate(
output_dims.cbegin() + AXIS_ + 1,
output_dims.cend(),
TIndex(1),
std::multiplies<TIndex>());
const TIndex outer_size = dY_reduction.size() / output_dims[AXIS_];
const TIndex inner_size = output_dims[AXIS_];
RunTopKOnLastDimCUDAImpl<T>(
// dY_reduction_data,
dY_reduction.template data<float>(),
outer_size,
inner_size,
k_,
values_data,
indices_data,
&context_);
// DEBUG PRINT
Tensor<CPUContext> values_cpu(topk_dims);
context_.Copy<float, CUDAContext, CPUContext>(
values.size(),
values.data<float>(),
values_cpu.mutable_data<float>());
std::cout << "dY reduction topk values\n";
const float* values_cpu_data = values_cpu.data<float>();
for (TIndex i = 0; i < N; ++i) {
for (TIndex j = 0; j < k_; ++j) {
std::cout << values_cpu_data[i * k_ + j] << ", ";
}
std::cout << std::endl;
}
std::cout << "print over\n" << std::endl;
Tensor<CPUContext> indices_cpu(topk_dims);
context_.Copy<TIndex, CUDAContext, CPUContext>(
indices.size(),
indices.data<TIndex>(),
indices_cpu.mutable_data<TIndex>());
std::cout << "dY reduction topk indices\n";
const TIndex* indices_cpu_data = indices_cpu.data<TIndex>();
for (TIndex i = 0; i < N; ++i) {
for (TIndex j = 0; j < k_; ++j) {
std::cout << indices_cpu_data[i * k_ + j] << ", ";
}
std::cout << std::endl;
}
std::cout << "print over\n" << std::endl;
/// #4 --- zero out channels smaller than topk value
float* dY_topk_data = dY_topk->template mutable_data<float>();
if (dY.ndim() == 4) {
int H = dY.dim32(2);
int W = dY.dim32(3);
ChannelZeroOut4D<<<
CAFFE_GET_BLOCKS(indices.size()),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, C, k_, H, W,
dY_original_data,
dY_topk_data,
indices_data);
}else {
ChannelZeroOut2D<<<
CAFFE_GET_BLOCKS(indices.size()),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, C, k_,
dY_original_data,
dY_topk_data,
indices_data);
}
std::cout << "========================= In TopKGradHookGradientOp op (GPU) ======================" << std::endl;
return true;
}
REGISTER_CUDA_OPERATOR(TopKGradHook, TopKGradHookOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(TopKGradHookGradient,
TopKGradHookGradientOp<float, CUDAContext>);
} // namespace caffe2
|
dcdbc85440174389d873ecf1236490ba737fb51d.hip | // !!! This is a file automatically generated by hipify!!!
#include <unittest/unittest.h>
#include <thrust/copy.h>
#include <list>
#include <iterator>
#include <thrust/sequence.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
// Verifies thrust::copy accepts std::vector const_iterators as the source
// range: copies into both a host_vector and a device_vector, checks every
// element, and checks the returned iterator is one past the last write.
void TestCopyFromConstIterator(void)
{
typedef int T;
std::vector<T> v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
std::vector<int>::const_iterator begin = v.begin();
std::vector<int>::const_iterator end = v.end();
// copy to host_vector (pre-filled with 10s so overwrites are visible)
thrust::host_vector<T> h(5, (T) 10);
thrust::host_vector<T>::iterator h_result = thrust::copy(begin, end, h.begin());
ASSERT_EQUAL(h[0], 0);
ASSERT_EQUAL(h[1], 1);
ASSERT_EQUAL(h[2], 2);
ASSERT_EQUAL(h[3], 3);
ASSERT_EQUAL(h[4], 4);
ASSERT_EQUAL_QUIET(h_result, h.end());
// copy to device_vector
thrust::device_vector<T> d(5, (T) 10);
thrust::device_vector<T>::iterator d_result = thrust::copy(begin, end, d.begin());
ASSERT_EQUAL(d[0], 0);
ASSERT_EQUAL(d[1], 1);
ASSERT_EQUAL(d[2], 2);
ASSERT_EQUAL(d[3], 3);
ASSERT_EQUAL(d[4], 4);
ASSERT_EQUAL_QUIET(d_result, d.end());
}
DECLARE_UNITTEST(TestCopyFromConstIterator);
void TestCopyToDiscardIterator(void)
{
typedef int T;
thrust::host_vector<T> h_input(5,1);
thrust::device_vector<T> d_input = h_input;
thrust::discard_iterator<> reference(5);
// copy from host_vector
thrust::discard_iterator<> h_result =
thrust::copy(h_input.begin(), h_input.end(), thrust::make_discard_iterator());
// copy from device_vector
thrust::discard_iterator<> d_result =
thrust::copy(d_input.begin(), d_input.end(), thrust::make_discard_iterator());
ASSERT_EQUAL_QUIET(reference, h_result);
ASSERT_EQUAL_QUIET(reference, d_result);
}
DECLARE_UNITTEST(TestCopyToDiscardIterator);
void TestCopyToDiscardIteratorZipped(void)
{
typedef int T;
thrust::host_vector<T> h_input(5,1);
thrust::device_vector<T> d_input = h_input;
thrust::host_vector<T> h_output(5);
thrust::device_vector<T> d_output(5);
thrust::discard_iterator<> reference(5);
typedef thrust::tuple<thrust::discard_iterator<>,thrust::host_vector<T>::iterator> Tuple1;
typedef thrust::tuple<thrust::discard_iterator<>,thrust::device_vector<T>::iterator> Tuple2;
typedef thrust::zip_iterator<Tuple1> ZipIterator1;
typedef thrust::zip_iterator<Tuple2> ZipIterator2;
// copy from host_vector
ZipIterator1 h_result =
thrust::copy(thrust::make_zip_iterator(thrust::make_tuple(h_input.begin(), h_input.begin())),
thrust::make_zip_iterator(thrust::make_tuple(h_input.end(), h_input.end())),
thrust::make_zip_iterator(thrust::make_tuple(thrust::make_discard_iterator(), h_output.begin())));
// copy from device_vector
ZipIterator2 d_result =
thrust::copy(thrust::make_zip_iterator(thrust::make_tuple(d_input.begin(), d_input.begin())),
thrust::make_zip_iterator(thrust::make_tuple(d_input.end(), d_input.end())),
thrust::make_zip_iterator(thrust::make_tuple(thrust::make_discard_iterator(), d_output.begin())));
ASSERT_EQUAL(h_output, h_input);
ASSERT_EQUAL(d_output, d_input);
ASSERT_EQUAL_QUIET(reference, thrust::get<0>(h_result.get_iterator_tuple()));
ASSERT_EQUAL_QUIET(reference, thrust::get<0>(d_result.get_iterator_tuple()));
}
DECLARE_UNITTEST(TestCopyToDiscardIteratorZipped);
template <class Vector>
void TestCopyMatchingTypes(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
// copy to host_vector
thrust::host_vector<T> h(5, (T) 10);
typename thrust::host_vector<T>::iterator h_result = thrust::copy(v.begin(), v.end(), h.begin());
ASSERT_EQUAL(h[0], 0);
ASSERT_EQUAL(h[1], 1);
ASSERT_EQUAL(h[2], 2);
ASSERT_EQUAL(h[3], 3);
ASSERT_EQUAL(h[4], 4);
ASSERT_EQUAL_QUIET(h_result, h.end());
// copy to device_vector
thrust::device_vector<T> d(5, (T) 10);
typename thrust::device_vector<T>::iterator d_result = thrust::copy(v.begin(), v.end(), d.begin());
ASSERT_EQUAL(d[0], 0);
ASSERT_EQUAL(d[1], 1);
ASSERT_EQUAL(d[2], 2);
ASSERT_EQUAL(d[3], 3);
ASSERT_EQUAL(d[4], 4);
ASSERT_EQUAL_QUIET(d_result, d.end());
}
DECLARE_VECTOR_UNITTEST(TestCopyMatchingTypes);
template <class Vector>
void TestCopyMixedTypes(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
// copy to host_vector with different type
thrust::host_vector<float> h(5, (float) 10);
typename thrust::host_vector<float>::iterator h_result = thrust::copy(v.begin(), v.end(), h.begin());
ASSERT_EQUAL(h[0], 0);
ASSERT_EQUAL(h[1], 1);
ASSERT_EQUAL(h[2], 2);
ASSERT_EQUAL(h[3], 3);
ASSERT_EQUAL(h[4], 4);
ASSERT_EQUAL_QUIET(h_result, h.end());
// copy to device_vector with different type
thrust::device_vector<float> d(5, (float) 10);
typename thrust::device_vector<float>::iterator d_result = thrust::copy(v.begin(), v.end(), d.begin());
ASSERT_EQUAL(d[0], 0);
ASSERT_EQUAL(d[1], 1);
ASSERT_EQUAL(d[2], 2);
ASSERT_EQUAL(d[3], 3);
ASSERT_EQUAL(d[4], 4);
ASSERT_EQUAL_QUIET(d_result, d.end());
}
DECLARE_VECTOR_UNITTEST(TestCopyMixedTypes);
// Copies from a std::vector<bool> (a proxy-reference container) into host
// and device vectors and checks each element survives the round trip.
void TestCopyVectorBool(void)
{
std::vector<bool> flags(3);
flags[0] = true;
flags[1] = false;
flags[2] = true;
thrust::host_vector<bool> on_host(3);
thrust::device_vector<bool> on_device(3);
thrust::copy(flags.begin(), flags.end(), on_host.begin());
thrust::copy(flags.begin(), flags.end(), on_device.begin());
ASSERT_EQUAL(on_host[0], true);
ASSERT_EQUAL(on_host[1], false);
ASSERT_EQUAL(on_host[2], true);
ASSERT_EQUAL(on_device[0], true);
ASSERT_EQUAL(on_device[1], false);
ASSERT_EQUAL(on_device[2], true);
}
DECLARE_UNITTEST(TestCopyVectorBool);
template <class Vector>
void TestCopyListTo(void)
{
typedef typename Vector::value_type T;
// copy from list to Vector
std::list<T> l;
l.push_back(0);
l.push_back(1);
l.push_back(2);
l.push_back(3);
l.push_back(4);
Vector v(l.size());
typename Vector::iterator v_result = thrust::copy(l.begin(), l.end(), v.begin());
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
ASSERT_EQUAL(v[3], 3);
ASSERT_EQUAL(v[4], 4);
ASSERT_EQUAL_QUIET(v_result, v.end());
l.clear();
std::back_insert_iterator< std::list<T> > l_result = thrust::copy(v.begin(), v.end(), std::back_insert_iterator< std::list<T> >(l));
ASSERT_EQUAL(l.size(), 5);
typename std::list<T>::const_iterator iter = l.begin();
ASSERT_EQUAL(*iter, 0); iter++;
ASSERT_EQUAL(*iter, 1); iter++;
ASSERT_EQUAL(*iter, 2); iter++;
ASSERT_EQUAL(*iter, 3); iter++;
ASSERT_EQUAL(*iter, 4); iter++;
}
DECLARE_VECTOR_UNITTEST(TestCopyListTo);
template<typename T>
struct is_even
{
__host__ __device__
bool operator()(T x) { return (static_cast<unsigned int>(x) & 1) == 0; }
};
template<typename T>
struct is_true
{
__host__ __device__
bool operator()(T x) { return x ? true : false; }
};
template<typename T>
struct mod_3
{
__host__ __device__
unsigned int operator()(T x) { return static_cast<unsigned int>(x) % 3; }
};
template <class Vector>
void TestCopyIfSimple(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
Vector dest(3);
typename Vector::iterator dest_end = thrust::copy_if(v.begin(), v.end(), dest.begin(), is_even<T>());
ASSERT_EQUAL(0, dest[0]);
ASSERT_EQUAL(2, dest[1]);
ASSERT_EQUAL(4, dest[2]);
ASSERT_EQUAL_QUIET(dest.end(), dest_end);
}
DECLARE_VECTOR_UNITTEST(TestCopyIfSimple);
template <typename T>
void TestCopyIf(const size_t n)
{
thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
thrust::device_vector<T> d_data = h_data;
typename thrust::host_vector<T>::iterator h_new_end;
typename thrust::device_vector<T>::iterator d_new_end;
// test with Predicate that returns a bool
{
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), is_even<T>());
d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_result.begin(), is_even<T>());
h_result.resize(h_new_end - h_result.begin());
d_result.resize(d_new_end - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
// test with Predicate that returns a non-bool
{
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), mod_3<T>());
d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_result.begin(), mod_3<T>());
h_result.resize(h_new_end - h_result.begin());
d_result.resize(d_new_end - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
}
DECLARE_VARIABLE_UNITTEST(TestCopyIf);
template <class Vector>
void TestCopyIfStencilSimple(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
Vector s(5);
s[0] = 1; s[1] = 1; s[2] = 0; s[3] = 1; s[4] = 0;
Vector dest(3);
typename Vector::iterator dest_end = thrust::copy_if(v.begin(), v.end(), s.begin(), dest.begin(), is_true<T>());
ASSERT_EQUAL(0, dest[0]);
ASSERT_EQUAL(1, dest[1]);
ASSERT_EQUAL(3, dest[2]);
ASSERT_EQUAL_QUIET(dest.end(), dest_end);
}
DECLARE_VECTOR_UNITTEST(TestCopyIfStencilSimple);
// Exercises the stencil overload of copy_if on random data: element
// data[i] is copied iff pred(stencil[i]) holds. The host and device
// backends must produce identical results.
template <typename T>
void TestCopyIfStencil(const size_t n)
{
thrust::host_vector<T> h_data(n); thrust::sequence(h_data.begin(), h_data.end());
thrust::device_vector<T> d_data(n); thrust::sequence(d_data.begin(), d_data.end());
// BUG FIX: use one random stencil for both backends. Previously the host
// and device stencils were generated independently, so their contents
// could differ and host/device results would not be comparable.
thrust::host_vector<T> h_stencil = unittest::random_integers<T>(n);
thrust::device_vector<T> d_stencil = h_stencil;
typename thrust::host_vector<T>::iterator h_new_end;
typename thrust::device_vector<T>::iterator d_new_end;
// test with Predicate that returns a bool
{
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
// BUG FIX: pass the stencil iterators. The previous code called the
// non-stencil copy_if overload, silently duplicating TestCopyIf and
// never exercising the stencil path this test exists for.
h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_stencil.begin(), h_result.begin(), is_even<T>());
d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_stencil.begin(), d_result.begin(), is_even<T>());
h_result.resize(h_new_end - h_result.begin());
d_result.resize(d_new_end - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
// test with Predicate that returns a non-bool
{
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_stencil.begin(), h_result.begin(), mod_3<T>());
d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_stencil.begin(), d_result.begin(), mod_3<T>());
h_result.resize(h_new_end - h_result.begin());
d_result.resize(d_new_end - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
}
DECLARE_VARIABLE_UNITTEST(TestCopyIfStencil);
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// do we really want to test this ever?
void TestCopyDeviceThrow(void)
{
typedef int T;
thrust::device_ptr<T> null_device_ptr((int*)0);
bool caught_exception = false;
try
{
thrust::copy(null_device_ptr, null_device_ptr + 1, null_device_ptr);
} // end try
catch(std::runtime_error)
{
caught_exception = true;
// kill the context so it can revive later
hipDeviceReset();
} // end catch
ASSERT_EQUAL(true, caught_exception);
}
DECLARE_UNITTEST(TestCopyDeviceThrow);
#endif
template <typename Vector>
void TestCopyCountingIterator(void)
{
typedef typename Vector::value_type T;
thrust::counting_iterator<T> iter(1);
Vector vec(4);
thrust::copy(iter, iter + 4, vec.begin());
ASSERT_EQUAL(vec[0], 1);
ASSERT_EQUAL(vec[1], 2);
ASSERT_EQUAL(vec[2], 3);
ASSERT_EQUAL(vec[3], 4);
}
DECLARE_VECTOR_UNITTEST(TestCopyCountingIterator);
// Copies a pair of vectors through zip_iterators and checks that both
// components arrive unchanged in the destination pair (v1->v3, v2->v4).
template <typename Vector>
void TestCopyZipIterator(void)
{
    typedef typename Vector::value_type T;
    Vector v1(3); v1[0] = 1; v1[1] = 2; v1[2] = 3;
    Vector v2(3); v2[0] = 4; v2[1] = 5; v2[2] = 6;
    Vector v3(3, T(0));
    Vector v4(3, T(0));
    thrust::copy(thrust::make_zip_iterator(thrust::make_tuple(v1.begin(),v2.begin())),
    thrust::make_zip_iterator(thrust::make_tuple(v1.end(),v2.end())),
    thrust::make_zip_iterator(thrust::make_tuple(v3.begin(),v4.begin())));
    ASSERT_EQUAL(v1, v3);
    ASSERT_EQUAL(v2, v4);
};
DECLARE_VECTOR_UNITTEST(TestCopyZipIterator);
// Broadcasts the constant tuple (4,7) through a constant_iterator into a
// zip_iterator destination; v1 must fill with 4s and v2 with 7s.
template <typename Vector>
void TestCopyConstantIteratorToZipIterator(void)
{
    typedef typename Vector::value_type T;
    Vector v1(3,T(0));
    Vector v2(3,T(0));
    thrust::copy(thrust::make_constant_iterator(thrust::tuple<T,T>(4,7)),
    thrust::make_constant_iterator(thrust::tuple<T,T>(4,7)) + v1.size(),
    thrust::make_zip_iterator(thrust::make_tuple(v1.begin(),v2.begin())));
    ASSERT_EQUAL(v1[0], 4);
    ASSERT_EQUAL(v1[1], 4);
    ASSERT_EQUAL(v1[2], 4);
    ASSERT_EQUAL(v2[0], 7);
    ASSERT_EQUAL(v2[1], 7);
    ASSERT_EQUAL(v2[2], 7);
};
DECLARE_VECTOR_UNITTEST(TestCopyConstantIteratorToZipIterator);
| dcdbc85440174389d873ecf1236490ba737fb51d.cu | #include <unittest/unittest.h>
#include <thrust/copy.h>
#include <list>
#include <iterator>
#include <thrust/sequence.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
// Copies from std::vector const_iterators into both a host_vector and a
// device_vector, checking element values and that the returned iterator is
// the destination's end().
void TestCopyFromConstIterator(void)
{
    typedef int T;
    std::vector<T> v(5);
    v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
    std::vector<int>::const_iterator begin = v.begin();
    std::vector<int>::const_iterator end = v.end();
    // copy to host_vector
    thrust::host_vector<T> h(5, (T) 10);
    thrust::host_vector<T>::iterator h_result = thrust::copy(begin, end, h.begin());
    ASSERT_EQUAL(h[0], 0);
    ASSERT_EQUAL(h[1], 1);
    ASSERT_EQUAL(h[2], 2);
    ASSERT_EQUAL(h[3], 3);
    ASSERT_EQUAL(h[4], 4);
    ASSERT_EQUAL_QUIET(h_result, h.end());
    // copy to device_vector
    thrust::device_vector<T> d(5, (T) 10);
    thrust::device_vector<T>::iterator d_result = thrust::copy(begin, end, d.begin());
    ASSERT_EQUAL(d[0], 0);
    ASSERT_EQUAL(d[1], 1);
    ASSERT_EQUAL(d[2], 2);
    ASSERT_EQUAL(d[3], 3);
    ASSERT_EQUAL(d[4], 4);
    ASSERT_EQUAL_QUIET(d_result, d.end());
}
DECLARE_UNITTEST(TestCopyFromConstIterator);
// Copies host and device ranges into a discard_iterator; nothing is stored,
// but the returned discard_iterator must have advanced by the input size (5).
void TestCopyToDiscardIterator(void)
{
    typedef int T;
    thrust::host_vector<T> h_input(5,1);
    thrust::device_vector<T> d_input = h_input;
    thrust::discard_iterator<> reference(5);
    // copy from host_vector
    thrust::discard_iterator<> h_result =
    thrust::copy(h_input.begin(), h_input.end(), thrust::make_discard_iterator());
    // copy from device_vector
    thrust::discard_iterator<> d_result =
    thrust::copy(d_input.begin(), d_input.end(), thrust::make_discard_iterator());
    ASSERT_EQUAL_QUIET(reference, h_result);
    ASSERT_EQUAL_QUIET(reference, d_result);
}
DECLARE_UNITTEST(TestCopyToDiscardIterator);
// Copies zipped (input, input) pairs into a zipped (discard, output)
// destination: the real output component must receive the data while the
// discarded component only advances its position counter.
void TestCopyToDiscardIteratorZipped(void)
{
    typedef int T;
    thrust::host_vector<T> h_input(5,1);
    thrust::device_vector<T> d_input = h_input;
    thrust::host_vector<T> h_output(5);
    thrust::device_vector<T> d_output(5);
    thrust::discard_iterator<> reference(5);
    typedef thrust::tuple<thrust::discard_iterator<>,thrust::host_vector<T>::iterator> Tuple1;
    typedef thrust::tuple<thrust::discard_iterator<>,thrust::device_vector<T>::iterator> Tuple2;
    typedef thrust::zip_iterator<Tuple1> ZipIterator1;
    typedef thrust::zip_iterator<Tuple2> ZipIterator2;
    // copy from host_vector
    ZipIterator1 h_result =
    thrust::copy(thrust::make_zip_iterator(thrust::make_tuple(h_input.begin(), h_input.begin())),
    thrust::make_zip_iterator(thrust::make_tuple(h_input.end(), h_input.end())),
    thrust::make_zip_iterator(thrust::make_tuple(thrust::make_discard_iterator(), h_output.begin())));
    // copy from device_vector
    ZipIterator2 d_result =
    thrust::copy(thrust::make_zip_iterator(thrust::make_tuple(d_input.begin(), d_input.begin())),
    thrust::make_zip_iterator(thrust::make_tuple(d_input.end(), d_input.end())),
    thrust::make_zip_iterator(thrust::make_tuple(thrust::make_discard_iterator(), d_output.begin())));
    ASSERT_EQUAL(h_output, h_input);
    ASSERT_EQUAL(d_output, d_input);
    // The discard component of the returned zip_iterator must sit at index 5.
    ASSERT_EQUAL_QUIET(reference, thrust::get<0>(h_result.get_iterator_tuple()));
    ASSERT_EQUAL_QUIET(reference, thrust::get<0>(d_result.get_iterator_tuple()));
}
DECLARE_UNITTEST(TestCopyToDiscardIteratorZipped);
// Copies a Vector into host and device vectors of the SAME value_type and
// checks the values and the returned end iterators.
template <class Vector>
void TestCopyMatchingTypes(void)
{
    typedef typename Vector::value_type T;
    Vector v(5);
    v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
    // copy to host_vector
    thrust::host_vector<T> h(5, (T) 10);
    typename thrust::host_vector<T>::iterator h_result = thrust::copy(v.begin(), v.end(), h.begin());
    ASSERT_EQUAL(h[0], 0);
    ASSERT_EQUAL(h[1], 1);
    ASSERT_EQUAL(h[2], 2);
    ASSERT_EQUAL(h[3], 3);
    ASSERT_EQUAL(h[4], 4);
    ASSERT_EQUAL_QUIET(h_result, h.end());
    // copy to device_vector
    thrust::device_vector<T> d(5, (T) 10);
    typename thrust::device_vector<T>::iterator d_result = thrust::copy(v.begin(), v.end(), d.begin());
    ASSERT_EQUAL(d[0], 0);
    ASSERT_EQUAL(d[1], 1);
    ASSERT_EQUAL(d[2], 2);
    ASSERT_EQUAL(d[3], 3);
    ASSERT_EQUAL(d[4], 4);
    ASSERT_EQUAL_QUIET(d_result, d.end());
}
DECLARE_VECTOR_UNITTEST(TestCopyMatchingTypes);
// Copies a Vector<T> into host and device vectors of a DIFFERENT value_type
// (float), exercising the implicit element conversion path of thrust::copy.
template <class Vector>
void TestCopyMixedTypes(void)
{
    typedef typename Vector::value_type T;
    Vector v(5);
    v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
    // copy to host_vector with different type
    thrust::host_vector<float> h(5, (float) 10);
    typename thrust::host_vector<float>::iterator h_result = thrust::copy(v.begin(), v.end(), h.begin());
    ASSERT_EQUAL(h[0], 0);
    ASSERT_EQUAL(h[1], 1);
    ASSERT_EQUAL(h[2], 2);
    ASSERT_EQUAL(h[3], 3);
    ASSERT_EQUAL(h[4], 4);
    ASSERT_EQUAL_QUIET(h_result, h.end());
    // copy to device_vector with different type
    thrust::device_vector<float> d(5, (float) 10);
    typename thrust::device_vector<float>::iterator d_result = thrust::copy(v.begin(), v.end(), d.begin());
    ASSERT_EQUAL(d[0], 0);
    ASSERT_EQUAL(d[1], 1);
    ASSERT_EQUAL(d[2], 2);
    ASSERT_EQUAL(d[3], 3);
    ASSERT_EQUAL(d[4], 4);
    ASSERT_EQUAL_QUIET(d_result, d.end());
}
DECLARE_VECTOR_UNITTEST(TestCopyMixedTypes);
// Copies from std::vector<bool> (a packed, proxy-reference container) into
// host and device vectors, a case that exercises copy with proxy iterators.
void TestCopyVectorBool(void)
{
    std::vector<bool> v(3);
    v[0] = true; v[1] = false; v[2] = true;
    thrust::host_vector<bool> h(3);
    thrust::device_vector<bool> d(3);
    thrust::copy(v.begin(), v.end(), h.begin());
    thrust::copy(v.begin(), v.end(), d.begin());
    ASSERT_EQUAL(h[0], true);
    ASSERT_EQUAL(h[1], false);
    ASSERT_EQUAL(h[2], true);
    ASSERT_EQUAL(d[0], true);
    ASSERT_EQUAL(d[1], false);
    ASSERT_EQUAL(d[2], true);
}
DECLARE_UNITTEST(TestCopyVectorBool);
// Round-trips data between a std::list (bidirectional iterators only) and a
// Vector: list -> Vector via thrust::copy, then Vector -> list via a
// back_insert_iterator.
template <class Vector>
void TestCopyListTo(void)
{
    typedef typename Vector::value_type T;
    // copy from list to Vector
    std::list<T> l;
    l.push_back(0);
    l.push_back(1);
    l.push_back(2);
    l.push_back(3);
    l.push_back(4);
    Vector v(l.size());
    typename Vector::iterator v_result = thrust::copy(l.begin(), l.end(), v.begin());
    ASSERT_EQUAL(v[0], 0);
    ASSERT_EQUAL(v[1], 1);
    ASSERT_EQUAL(v[2], 2);
    ASSERT_EQUAL(v[3], 3);
    ASSERT_EQUAL(v[4], 4);
    ASSERT_EQUAL_QUIET(v_result, v.end());
    // copy back into the (cleared) list through an inserting output iterator
    l.clear();
    std::back_insert_iterator< std::list<T> > l_result = thrust::copy(v.begin(), v.end(), std::back_insert_iterator< std::list<T> >(l));
    ASSERT_EQUAL(l.size(), 5);
    typename std::list<T>::const_iterator iter = l.begin();
    ASSERT_EQUAL(*iter, 0); iter++;
    ASSERT_EQUAL(*iter, 1); iter++;
    ASSERT_EQUAL(*iter, 2); iter++;
    ASSERT_EQUAL(*iter, 3); iter++;
    ASSERT_EQUAL(*iter, 4); iter++;
}
DECLARE_VECTOR_UNITTEST(TestCopyListTo);
// Predicate returning bool: true for even values (low bit of the unsigned
// representation is clear).
template<typename T>
struct is_even
{
    __host__ __device__
    bool operator()(T x) { return (static_cast<unsigned int>(x) & 1) == 0; }
};
// Predicate returning bool: true for any nonzero value.
template<typename T>
struct is_true
{
    __host__ __device__
    bool operator()(T x) { return x ? true : false; }
};
// Predicate returning a NON-bool (unsigned int): x mod 3. Used to verify that
// copy_if accepts predicates whose result is merely convertible to bool.
template<typename T>
struct mod_3
{
    __host__ __device__
    unsigned int operator()(T x) { return static_cast<unsigned int>(x) % 3; }
};
// copy_if on the fixed sequence 0..4 with an is_even predicate must yield
// exactly {0, 2, 4} and return an iterator to the destination's end.
template <class Vector>
void TestCopyIfSimple(void)
{
    typedef typename Vector::value_type T;
    Vector v(5);
    v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
    Vector dest(3);
    typename Vector::iterator dest_end = thrust::copy_if(v.begin(), v.end(), dest.begin(), is_even<T>());
    ASSERT_EQUAL(0, dest[0]);
    ASSERT_EQUAL(2, dest[1]);
    ASSERT_EQUAL(4, dest[2]);
    ASSERT_EQUAL_QUIET(dest.end(), dest_end);
}
DECLARE_VECTOR_UNITTEST(TestCopyIfSimple);
// copy_if on random data of size n: host and device results must agree, both
// for a bool-returning predicate and for one returning a non-bool.
template <typename T>
void TestCopyIf(const size_t n)
{
    thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
    thrust::device_vector<T> d_data = h_data;
    typename thrust::host_vector<T>::iterator h_new_end;
    typename thrust::device_vector<T>::iterator d_new_end;
    // test with Predicate that returns a bool
    {
        thrust::host_vector<T> h_result(n);
        thrust::device_vector<T> d_result(n);
        h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), is_even<T>());
        d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_result.begin(), is_even<T>());
        // trim to the number of elements actually copied before comparing
        h_result.resize(h_new_end - h_result.begin());
        d_result.resize(d_new_end - d_result.begin());
        ASSERT_EQUAL(h_result, d_result);
    }
    // test with Predicate that returns a non-bool
    {
        thrust::host_vector<T> h_result(n);
        thrust::device_vector<T> d_result(n);
        h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), mod_3<T>());
        d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_result.begin(), mod_3<T>());
        h_result.resize(h_new_end - h_result.begin());
        d_result.resize(d_new_end - d_result.begin());
        ASSERT_EQUAL(h_result, d_result);
    }
}
DECLARE_VARIABLE_UNITTEST(TestCopyIf);
// Stencil overload of copy_if on fixed data: elements of v whose matching
// stencil entry is nonzero (indices 0, 1, 3) must be copied, yielding {0,1,3}.
template <class Vector>
void TestCopyIfStencilSimple(void)
{
    typedef typename Vector::value_type T;
    Vector v(5);
    v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
    Vector s(5);
    s[0] = 1; s[1] = 1; s[2] = 0; s[3] = 1; s[4] = 0;
    Vector dest(3);
    typename Vector::iterator dest_end = thrust::copy_if(v.begin(), v.end(), s.begin(), dest.begin(), is_true<T>());
    ASSERT_EQUAL(0, dest[0]);
    ASSERT_EQUAL(1, dest[1]);
    ASSERT_EQUAL(3, dest[2]);
    ASSERT_EQUAL_QUIET(dest.end(), dest_end);
}
DECLARE_VECTOR_UNITTEST(TestCopyIfStencilSimple);
// Tests the stencil overload of copy_if on data of size n, comparing host and
// device results for a bool-returning and a non-bool-returning predicate.
template <typename T>
void TestCopyIfStencil(const size_t n)
{
    thrust::host_vector<T> h_data(n); thrust::sequence(h_data.begin(), h_data.end());
    thrust::device_vector<T> d_data(n); thrust::sequence(d_data.begin(), d_data.end());
    // Generate the stencil once and mirror it on the device so both sides
    // select exactly the same elements (previously d_stencil was drawn
    // independently from the RNG).
    thrust::host_vector<T> h_stencil = unittest::random_integers<T>(n);
    thrust::device_vector<T> d_stencil = h_stencil;
    typename thrust::host_vector<T>::iterator h_new_end;
    typename thrust::device_vector<T>::iterator d_new_end;
    // test with Predicate that returns a bool
    {
        thrust::host_vector<T> h_result(n);
        thrust::device_vector<T> d_result(n);
        // BUG FIX: the stencil was generated but never passed, so the
        // non-stencil overload of copy_if was exercised twice; apply the
        // predicate to the stencil range as this test intends.
        h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_stencil.begin(), h_result.begin(), is_even<T>());
        d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_stencil.begin(), d_result.begin(), is_even<T>());
        h_result.resize(h_new_end - h_result.begin());
        d_result.resize(d_new_end - d_result.begin());
        ASSERT_EQUAL(h_result, d_result);
    }
    // test with Predicate that returns a non-bool
    {
        thrust::host_vector<T> h_result(n);
        thrust::device_vector<T> d_result(n);
        h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_stencil.begin(), h_result.begin(), mod_3<T>());
        d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_stencil.begin(), d_result.begin(), mod_3<T>());
        h_result.resize(h_new_end - h_result.begin());
        d_result.resize(d_new_end - d_result.begin());
        ASSERT_EQUAL(h_result, d_result);
    }
}
DECLARE_VARIABLE_UNITTEST(TestCopyIfStencil);
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// do we really want to test this ever?
// Verifies that copying through a null device pointer surfaces on the host as
// a std::runtime_error, and resets the device afterwards so later tests run
// in a fresh context.
void TestCopyDeviceThrow(void)
{
    typedef int T;

    thrust::device_ptr<T> null_device_ptr((int*)0);

    bool caught_exception = false;
    try
    {
        thrust::copy(null_device_ptr, null_device_ptr + 1, null_device_ptr);
    } // end try
    catch(const std::runtime_error &)  // catch by const reference, not by value (avoids slicing / -Wcatch-value)
    {
        caught_exception = true;

        // kill the context so it can revive later
        // (cudaThreadExit is deprecated since CUDA 4.0; cudaDeviceReset is its replacement)
        cudaDeviceReset();
    } // end catch

    ASSERT_EQUAL(true, caught_exception);
}
DECLARE_UNITTEST(TestCopyDeviceThrow);
#endif
// A counting_iterator starting at 1 enumerates 1,2,3,...; copying four of
// those values into a vector must reproduce that sequence exactly.
template <typename Vector>
void TestCopyCountingIterator(void)
{
    typedef typename Vector::value_type T;

    thrust::counting_iterator<T> first(1);

    Vector result(4);
    thrust::copy(first, first + 4, result.begin());

    ASSERT_EQUAL(result[0], 1);
    ASSERT_EQUAL(result[1], 2);
    ASSERT_EQUAL(result[2], 3);
    ASSERT_EQUAL(result[3], 4);
}
DECLARE_VECTOR_UNITTEST(TestCopyCountingIterator);
// Copies a pair of vectors through zip_iterators and checks that both
// components arrive unchanged in the destination pair (v1->v3, v2->v4).
template <typename Vector>
void TestCopyZipIterator(void)
{
    typedef typename Vector::value_type T;
    Vector v1(3); v1[0] = 1; v1[1] = 2; v1[2] = 3;
    Vector v2(3); v2[0] = 4; v2[1] = 5; v2[2] = 6;
    Vector v3(3, T(0));
    Vector v4(3, T(0));
    thrust::copy(thrust::make_zip_iterator(thrust::make_tuple(v1.begin(),v2.begin())),
    thrust::make_zip_iterator(thrust::make_tuple(v1.end(),v2.end())),
    thrust::make_zip_iterator(thrust::make_tuple(v3.begin(),v4.begin())));
    ASSERT_EQUAL(v1, v3);
    ASSERT_EQUAL(v2, v4);
};
DECLARE_VECTOR_UNITTEST(TestCopyZipIterator);
// Broadcasts the constant tuple (4,7) through a constant_iterator into a
// zip_iterator destination; v1 must fill with 4s and v2 with 7s.
template <typename Vector>
void TestCopyConstantIteratorToZipIterator(void)
{
    typedef typename Vector::value_type T;
    Vector v1(3,T(0));
    Vector v2(3,T(0));
    thrust::copy(thrust::make_constant_iterator(thrust::tuple<T,T>(4,7)),
    thrust::make_constant_iterator(thrust::tuple<T,T>(4,7)) + v1.size(),
    thrust::make_zip_iterator(thrust::make_tuple(v1.begin(),v2.begin())));
    ASSERT_EQUAL(v1[0], 4);
    ASSERT_EQUAL(v1[1], 4);
    ASSERT_EQUAL(v1[2], 4);
    ASSERT_EQUAL(v2[0], 7);
    ASSERT_EQUAL(v2[1], 7);
    ASSERT_EQUAL(v2[2], 7);
};
DECLARE_VECTOR_UNITTEST(TestCopyConstantIteratorToZipIterator);
|
4cdc3a9817d21351321a98339869ae382cf0337b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution) based on gettimeofday.
double wtime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1E-6;
}
// Kernel: each thread writes its own flat global index into a[]. No bounds
// check, so the launch must cover exactly the allocated length of a.
__global__ void gTest(float* a)
{
    a[threadIdx.x + blockDim.x * blockIdx.x]
    = (float)(threadIdx.x + blockDim.x * blockIdx.x);
}
// Smoke test: fill a 64-element device array with its indices via gTest,
// copy it back and print every value.
// NOTE(review): none of the HIP API return codes are checked here.
void firstStart()
{
    float *da, *ha;
    int num_of_blocks = 2, threads_per_block = 32;
    int N = num_of_blocks * threads_per_block;
    ha = (float*) calloc(N, sizeof(float));
    hipMalloc((void**) &da, N * sizeof(float));
    hipLaunchKernelGGL(( gTest), dim3(dim3(num_of_blocks)), dim3(dim3(threads_per_block)), 0, 0, da);
    hipDeviceSynchronize();
    hipMemcpy(ha, da, N * sizeof(float), hipMemcpyDeviceToHost);
    for(int i = 0; i < N; i++)
    printf("%g\n", ha[i]);
    free(ha);
    hipFree(da);
}
// Kernel: elementwise vector add, c = a + b, one element per thread.
// No bounds check: the grid must cover exactly the array length.
__global__ void gSGEVV(float* a, float* b, float* c)
{
    c[threadIdx.x + blockDim.x * blockIdx.x] =
    a[threadIdx.x + blockDim.x * blockIdx.x] +
    b[threadIdx.x + blockDim.x * blockIdx.x];
}
// Kernel: fill a[] with the constant x, one element per thread (no bounds check).
__global__ void gInitArray(float* a, float x)
{
    a[threadIdx.x + blockDim.x * blockIdx.x] = x;
}
// Kernel: elementwise add c = a + b over n elements, with each thread
// striding by N (the caller passes the total thread count as N), so any
// launch size can cover any n.
__global__ void gSGEVV_iter(float* a, float* b, float* c, int n, int N)
{
    for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < n; i += N)
    c[i] = a[i] + b[i];
}
// Kernel: fill a[0..n) with x; each thread strides by N (total thread count
// supplied by the caller), so any launch size can cover any n.
__global__ void gInitArray_iter(float* a, float x, int n, int N)
{
    for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < n; i += N)
    a[i] = x;
}
// Abort with a file/line diagnostic when a HIP runtime call fails.
// Usage: CUDA_CHECK_RETURN(hipMalloc(...));
#define CUDA_CHECK_RETURN(value)\
{\
hipError_t _m_cudaStat = value;\
if (_m_cudaStat != hipSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n",\
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}\
}
// Benchmark driver for the vector-add kernels. argv[1]: number of blocks
// (default 1024); argv[2]==1: print the result vector; argv[3]: exponent used
// only in the size banner. Only the "#if 1" section is active; the "#if 0"
// sections are retained experiments.
int main(int argc, char *argv[])
{
    float *a, *b, *c, *d;
    int num_of_blocks = (argc > 1) ? atoi(argv[1]) : 1024;
    // int threads_per_block = (argc > 2) ? atoi(argv[2]) : 32;
    int threads_per_block = 1;
    int N = num_of_blocks * threads_per_block;
    int size_array;
    double time;
    float elapsedTime;
    hipEvent_t stop, start;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    #if 0
    size_array = N;
    hipMalloc((void**) &a, size_array * sizeof(float));
    hipMalloc((void**) &b, size_array * sizeof(float));
    hipMalloc((void**) &c, size_array * sizeof(float));
    d = (float*) calloc(size_array, sizeof(float));
    for (int i = num_of_blocks; threads_per_block <= 128; i /= 2, threads_per_block *= 2) {
    hipLaunchKernelGGL(( gInitArray), dim3(dim3(i)), dim3(dim3(threads_per_block)), 0, 0, a, 1.0);
    hipLaunchKernelGGL(( gInitArray), dim3(dim3(i)), dim3(dim3(threads_per_block)), 0, 0, b, 2.0);
    time = wtime();
    hipLaunchKernelGGL(( gSGEVV), dim3(dim3(i)), dim3(dim3(threads_per_block)), 0, 0, a, b, c);
    hipDeviceSynchronize();
    time = wtime() - time;
    hipMemcpy(d, c, size_array * sizeof(float), hipMemcpyDeviceToHost);
    if (argc > 2 && atoi(argv[2]) == 1) {
    for (int j = 0; j < N; j++)
    printf("%g ", d[j]);
    printf("\n");
    }
    printf("Blocks: %d,\tThreads: %d,\t", i, threads_per_block);
    printf("Time: %.8f sec.\n", time);
    hipLaunchKernelGGL(( gInitArray), dim3(dim3(i)), dim3(dim3(threads_per_block)), 0, 0, c, 0.0);
    }
    free(d);
    hipFree(a);
    hipFree(b);
    hipFree(c);
    #endif
    #if 1
    int rank = (argc > 3) ? atoi(argv[3]) : 10;
    // size_array = 1 << rank;
    size_array = 1 << 10;
    CUDA_CHECK_RETURN(hipMalloc((void**) &a, size_array * sizeof(float)));
    CUDA_CHECK_RETURN(hipMalloc((void**) &b, size_array * sizeof(float)));
    CUDA_CHECK_RETURN(hipMalloc((void**) &c, size_array * sizeof(float)));
    d = (float*) calloc(size_array, sizeof(float));
    printf("Size vector: %d(10^%d)\n", size_array, rank);
    // int blocks = num_of_blocks;
    // int threads = threads_per_block;
    int blocks = 1;
    // NOTE(review): 1025 exceeds the 1024 threads-per-block limit, so this
    // launch fails and CUDA_CHECK_RETURN aborts — presumably a deliberate
    // error-handling demo; confirm before "fixing".
    int threads = 1025;
    // for (; threads <= 128; blocks /= 2, threads *= 2) {
    hipLaunchKernelGGL(( gInitArray_iter), dim3(dim3(blocks)), dim3(dim3(threads)), 0, 0, a, 1.0, size_array, N);
    hipLaunchKernelGGL(( gInitArray_iter), dim3(dim3(blocks)), dim3(dim3(threads)), 0, 0, b, 2.0, size_array, N);
    time = wtime();
    hipEventRecord(start, 0);
    hipLaunchKernelGGL(( gSGEVV_iter), dim3(dim3(blocks)), dim3(dim3(threads)), 0, 0, a, b, c, size_array, N);
    CUDA_CHECK_RETURN(hipDeviceSynchronize());
    CUDA_CHECK_RETURN(hipGetLastError());
    time = wtime() - time;
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);
    CUDA_CHECK_RETURN(hipMemcpy(d, c, size_array * sizeof(float), hipMemcpyDeviceToHost));
    if (argc > 2 && atoi(argv[2]) == 1) {
    for (int j = 0; j < size_array; j++)
    printf("%g ", d[j]);
    printf("\n");
    }
    printf("Blocks: %d\tThreads: %d\t", blocks, threads);
    // printf("Time: %.8f sec.\t", time);
    // BUG FIX: hipEventElapsedTime reports milliseconds, not seconds.
    printf("Time(2): %.8f msec.\n", elapsedTime);
    hipLaunchKernelGGL(( gInitArray_iter), dim3(dim3(blocks)), dim3(dim3(threads)), 0, 0, c, 0.0, size_array, N);
    // }
    free(d);
    hipFree(a);
    hipFree(b);
    hipFree(c);
    #endif
    hipEventDestroy(start);
    hipEventDestroy(stop);
    #if 0
    FILE *out_1024_1 = fopen("1024_1.txt", "w");
    FILE *out_512_2 = fopen("512_2.txt", "w");
    FILE *out_128_8 = fopen("128_8.txt", "w");
    FILE *out_32_32 = fopen("32_32.txt", "w");
    FILE *out_8_128 = fopen("8_128.txt", "w");
    for (int rank = 10; rank <= 23; rank++) {
    size_array = 1 << rank;
    hipMalloc((void**) &a, size_array * sizeof(float));
    hipMalloc((void**) &b, size_array * sizeof(float));
    hipMalloc((void**) &c, size_array * sizeof(float));
    d = (float*) calloc(size_array, sizeof(float));
    printf("Size vector: %d(10^%d)\n", size_array, rank);
    int blocks = num_of_blocks;
    int threads = threads_per_block;
    for (; threads <= 128; blocks /= 2, threads *= 2) {
    hipLaunchKernelGGL(( gInitArray_iter), dim3(dim3(blocks)), dim3(dim3(threads)), 0, 0, a, 1.0, size_array, N);
    hipLaunchKernelGGL(( gInitArray_iter), dim3(dim3(blocks)), dim3(dim3(threads)), 0, 0, b, 2.0, size_array, N);
    time = wtime();
    hipLaunchKernelGGL(( gSGEVV_iter), dim3(dim3(blocks)), dim3(dim3(threads)), 0, 0, a, b, c, size_array, N);
    hipDeviceSynchronize();
    time = wtime() - time;
    hipMemcpy(d, c, size_array * sizeof(float), hipMemcpyDeviceToHost);
    if (argc > 2 && atoi(argv[2]) == 1) {
    for (int j = 0; j < size_array; j++)
    printf("%g ", d[j]);
    printf("\n");
    }
    printf("Blocks: %d\tThreads: %d\t", blocks, threads);
    printf("Time: %.8f sec.\n", time);
    switch (threads) {
    case 1:
    fprintf(out_1024_1, "%d %.8f\n", size_array, time);
    break;
    case 2:
    fprintf(out_512_2, "%d %.8f\n", size_array, time);
    break;
    case 8:
    fprintf(out_128_8, "%d %.8f\n", size_array, time);
    break;
    case 32:
    fprintf(out_32_32, "%d %.8f\n", size_array, time);
    break;
    case 128:
    fprintf(out_8_128, "%d %.8f\n", size_array, time);
    break;
    default:
    break;
    }
    hipLaunchKernelGGL(( gInitArray_iter), dim3(dim3(blocks)), dim3(dim3(threads)), 0, 0, c, 0.0, size_array, N);
    }
    free(d);
    hipFree(a);
    hipFree(b);
    hipFree(c);
    }
    fclose(out_1024_1);
    fclose(out_512_2);
    fclose(out_128_8);
    fclose(out_32_32);
    fclose(out_8_128);
    #endif
    #if 0
    time = wtime();
    hipLaunchKernelGGL(( gSGEVV), dim3(dim3(num_of_blocks)), dim3(dim3(threads_per_block)), 0, 0, a, b, c);
    hipDeviceSynchronize();
    time = wtime() - time;
    hipMemcpy(d, c, size_array * sizeof(float), hipMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
    printf("%g\n", d[i]);
    printf("Blocks: %d, Threads: %d, ", num_of_blocks, threads_per_block);
    printf("Time: %.6f sec.\n", time);
    #endif
    #if 0
    firstStart();
    free(d);
    hipFree(a);
    hipFree(b);
    hipFree(c);
    #endif
    return 0;
}
| 4cdc3a9817d21351321a98339869ae382cf0337b.cu | #include <cuda.h>
#include <stdio.h>
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution) based on gettimeofday.
double wtime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1E-6;
}
// Kernel: each thread writes its own flat global index into a[]. No bounds
// check, so the launch must cover exactly the allocated length of a.
__global__ void gTest(float* a)
{
    a[threadIdx.x + blockDim.x * blockIdx.x]
    = (float)(threadIdx.x + blockDim.x * blockIdx.x);
}
// Smoke test: fill a 64-element device array with its indices via gTest,
// copy it back and print every value.
// NOTE(review): none of the CUDA API return codes are checked here.
void firstStart()
{
    float *da, *ha;
    int num_of_blocks = 2, threads_per_block = 32;
    int N = num_of_blocks * threads_per_block;
    ha = (float*) calloc(N, sizeof(float));
    cudaMalloc((void**) &da, N * sizeof(float));
    gTest<<<dim3(num_of_blocks), dim3(threads_per_block)>>>(da);
    cudaDeviceSynchronize();
    cudaMemcpy(ha, da, N * sizeof(float), cudaMemcpyDeviceToHost);
    for(int i = 0; i < N; i++)
    printf("%g\n", ha[i]);
    free(ha);
    cudaFree(da);
}
// Kernel: elementwise vector add, c = a + b, one element per thread.
// No bounds check: the grid must cover exactly the array length.
__global__ void gSGEVV(float* a, float* b, float* c)
{
    c[threadIdx.x + blockDim.x * blockIdx.x] =
    a[threadIdx.x + blockDim.x * blockIdx.x] +
    b[threadIdx.x + blockDim.x * blockIdx.x];
}
// Kernel: fill a[] with the constant x, one element per thread (no bounds check).
__global__ void gInitArray(float* a, float x)
{
    a[threadIdx.x + blockDim.x * blockIdx.x] = x;
}
// Kernel: elementwise add c = a + b over n elements, with each thread
// striding by N (the caller passes the total thread count as N), so any
// launch size can cover any n.
__global__ void gSGEVV_iter(float* a, float* b, float* c, int n, int N)
{
    for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < n; i += N)
    c[i] = a[i] + b[i];
}
// Kernel: fill a[0..n) with x; each thread strides by N (total thread count
// supplied by the caller), so any launch size can cover any n.
__global__ void gInitArray_iter(float* a, float x, int n, int N)
{
    for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < n; i += N)
    a[i] = x;
}
// Abort with a file/line diagnostic when a CUDA runtime call fails.
// Usage: CUDA_CHECK_RETURN(cudaMalloc(...));
#define CUDA_CHECK_RETURN(value)\
{\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n",\
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}\
}
// Benchmark driver for the vector-add kernels. argv[1]: number of blocks
// (default 1024); argv[2]==1: print the result vector; argv[3]: exponent used
// only in the size banner. Only the "#if 1" section is active; the "#if 0"
// sections are retained experiments.
int main(int argc, char *argv[])
{
    float *a, *b, *c, *d;
    int num_of_blocks = (argc > 1) ? atoi(argv[1]) : 1024;
    // int threads_per_block = (argc > 2) ? atoi(argv[2]) : 32;
    int threads_per_block = 1;
    int N = num_of_blocks * threads_per_block;
    int size_array;
    double time;
    float elapsedTime;
    cudaEvent_t stop, start;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    #if 0
    size_array = N;
    cudaMalloc((void**) &a, size_array * sizeof(float));
    cudaMalloc((void**) &b, size_array * sizeof(float));
    cudaMalloc((void**) &c, size_array * sizeof(float));
    d = (float*) calloc(size_array, sizeof(float));
    for (int i = num_of_blocks; threads_per_block <= 128; i /= 2, threads_per_block *= 2) {
    gInitArray<<<dim3(i), dim3(threads_per_block)>>>(a, 1.0);
    gInitArray<<<dim3(i), dim3(threads_per_block)>>>(b, 2.0);
    time = wtime();
    gSGEVV<<<dim3(i), dim3(threads_per_block)>>>(a, b, c);
    cudaDeviceSynchronize();
    time = wtime() - time;
    cudaMemcpy(d, c, size_array * sizeof(float), cudaMemcpyDeviceToHost);
    if (argc > 2 && atoi(argv[2]) == 1) {
    for (int j = 0; j < N; j++)
    printf("%g ", d[j]);
    printf("\n");
    }
    printf("Blocks: %d,\tThreads: %d,\t", i, threads_per_block);
    printf("Time: %.8f sec.\n", time);
    gInitArray<<<dim3(i), dim3(threads_per_block)>>>(c, 0.0);
    }
    free(d);
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    #endif
    #if 1
    int rank = (argc > 3) ? atoi(argv[3]) : 10;
    // size_array = 1 << rank;
    size_array = 1 << 10;
    CUDA_CHECK_RETURN(cudaMalloc((void**) &a, size_array * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void**) &b, size_array * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void**) &c, size_array * sizeof(float)));
    d = (float*) calloc(size_array, sizeof(float));
    printf("Size vector: %d(10^%d)\n", size_array, rank);
    // int blocks = num_of_blocks;
    // int threads = threads_per_block;
    int blocks = 1;
    // NOTE(review): 1025 exceeds the 1024 threads-per-block limit, so this
    // launch fails and CUDA_CHECK_RETURN aborts — presumably a deliberate
    // error-handling demo; confirm before "fixing".
    int threads = 1025;
    // for (; threads <= 128; blocks /= 2, threads *= 2) {
    gInitArray_iter<<<dim3(blocks), dim3(threads)>>>(a, 1.0, size_array, N);
    gInitArray_iter<<<dim3(blocks), dim3(threads)>>>(b, 2.0, size_array, N);
    time = wtime();
    cudaEventRecord(start, 0);
    gSGEVV_iter<<<dim3(blocks), dim3(threads)>>>(a, b, c, size_array, N);
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    CUDA_CHECK_RETURN(cudaGetLastError());
    time = wtime() - time;
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    CUDA_CHECK_RETURN(cudaMemcpy(d, c, size_array * sizeof(float), cudaMemcpyDeviceToHost));
    if (argc > 2 && atoi(argv[2]) == 1) {
    for (int j = 0; j < size_array; j++)
    printf("%g ", d[j]);
    printf("\n");
    }
    printf("Blocks: %d\tThreads: %d\t", blocks, threads);
    // printf("Time: %.8f sec.\t", time);
    // BUG FIX: cudaEventElapsedTime reports milliseconds, not seconds.
    printf("Time(2): %.8f msec.\n", elapsedTime);
    gInitArray_iter<<<dim3(blocks), dim3(threads)>>>(c, 0.0, size_array, N);
    // }
    free(d);
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    #endif
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    #if 0
    FILE *out_1024_1 = fopen("1024_1.txt", "w");
    FILE *out_512_2 = fopen("512_2.txt", "w");
    FILE *out_128_8 = fopen("128_8.txt", "w");
    FILE *out_32_32 = fopen("32_32.txt", "w");
    FILE *out_8_128 = fopen("8_128.txt", "w");
    for (int rank = 10; rank <= 23; rank++) {
    size_array = 1 << rank;
    cudaMalloc((void**) &a, size_array * sizeof(float));
    cudaMalloc((void**) &b, size_array * sizeof(float));
    cudaMalloc((void**) &c, size_array * sizeof(float));
    d = (float*) calloc(size_array, sizeof(float));
    printf("Size vector: %d(10^%d)\n", size_array, rank);
    int blocks = num_of_blocks;
    int threads = threads_per_block;
    for (; threads <= 128; blocks /= 2, threads *= 2) {
    gInitArray_iter<<<dim3(blocks), dim3(threads)>>>(a, 1.0, size_array, N);
    gInitArray_iter<<<dim3(blocks), dim3(threads)>>>(b, 2.0, size_array, N);
    time = wtime();
    gSGEVV_iter<<<dim3(blocks), dim3(threads)>>>(a, b, c, size_array, N);
    cudaDeviceSynchronize();
    time = wtime() - time;
    cudaMemcpy(d, c, size_array * sizeof(float), cudaMemcpyDeviceToHost);
    if (argc > 2 && atoi(argv[2]) == 1) {
    for (int j = 0; j < size_array; j++)
    printf("%g ", d[j]);
    printf("\n");
    }
    printf("Blocks: %d\tThreads: %d\t", blocks, threads);
    printf("Time: %.8f sec.\n", time);
    switch (threads) {
    case 1:
    fprintf(out_1024_1, "%d %.8f\n", size_array, time);
    break;
    case 2:
    fprintf(out_512_2, "%d %.8f\n", size_array, time);
    break;
    case 8:
    fprintf(out_128_8, "%d %.8f\n", size_array, time);
    break;
    case 32:
    fprintf(out_32_32, "%d %.8f\n", size_array, time);
    break;
    case 128:
    fprintf(out_8_128, "%d %.8f\n", size_array, time);
    break;
    default:
    break;
    }
    gInitArray_iter<<<dim3(blocks), dim3(threads)>>>(c, 0.0, size_array, N);
    }
    free(d);
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    }
    fclose(out_1024_1);
    fclose(out_512_2);
    fclose(out_128_8);
    fclose(out_32_32);
    fclose(out_8_128);
    #endif
    #if 0
    time = wtime();
    gSGEVV<<<dim3(num_of_blocks), dim3(threads_per_block)>>>(a, b, c);
    cudaDeviceSynchronize();
    time = wtime() - time;
    cudaMemcpy(d, c, size_array * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
    printf("%g\n", d[i]);
    printf("Blocks: %d, Threads: %d, ", num_of_blocks, threads_per_block);
    printf("Time: %.6f sec.\n", time);
    #endif
    #if 0
    firstStart();
    free(d);
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    #endif
    return 0;
}
|
MSECriterion.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MSECriterion.cu"
#else
// Forward pass of the MSE criterion.
// reduction != None: output becomes a 1-element tensor holding
// sum((input-target)^2), divided by the element count for Reduction::Mean.
// reduction == None: output gets the elementwise squared differences.
void THNN_(MSECriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *output,
int64_t reduction)
{
    THCUNN_check_shape(state, input, target);
    THCUNN_assertSameGPU(state, 3, input, target, output);
    if (reduction != Reduction::None) {
        THCTensor_(resize1d)(state, output, 1);
        ptrdiff_t size = THCTensor_(nElement)(state, input);
        // newContiguous may copy; the matching free calls below release them.
        input = THCTensor_(newContiguous)(state, input);
        target = THCTensor_(newContiguous)(state, target);
        THCThrustAllocator thrustAlloc(state);
        thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input));
        thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target));
        // Fused squared-difference + sum via inner_product with mse_functor.
        accreal sum = thrust::inner_product(
        #if TORCH_HIP_VERSION >= 7000
        thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
        #endif
        input_data, input_data+size, target_data, (accreal) 0,
        thrust::plus<accreal>(), mse_functor<scalar_t, accreal>());
        if (reduction == Reduction::Mean)
        sum /= size;
        THCTensor_(free)(state, input);
        THCTensor_(free)(state, target);
        THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(sum));
        return;
    }
    // Unreduced path: one squared difference per element.
    THCTensor_(resizeAs)(state, output, input);
    THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(
    state,
    input,
    target,
    output,
    mse_updateOutput_functor<scalar_t>());
}
// Backward pass of the MSE criterion: gradInput = 2*(input-target), scaled by
// gradOutput, and additionally divided by the element count for
// Reduction::Mean.
void THNN_(MSECriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
int64_t reduction)
{
    THCUNN_check_shape(state, input, target);
    THCUNN_assertSameGPU(state, 4, input, target, gradInput, gradOutput);
    if (reduction != Reduction::None) {
        // Reduced case: gradOutput is a single scalar folded into the norm.
        ptrdiff_t size = THCTensor_(nElement)(state, input);
        THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
        accreal norm = reduction == Reduction::Mean ? (accreal)(2)/size : (accreal)(2);
        norm *= ScalarConvert<scalar_t, accreal>::to(THCTensor_(get1d)(state, gradOutput, 0));
        input = THCTensor_(newContiguous)(state, input);
        target = THCTensor_(newContiguous)(state, target);
        THCTensor_(resizeAs)(state, gradInput, input);
        THCThrustAllocator thrustAlloc(state);
        thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input));
        thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target));
        thrust::device_ptr<scalar_t> gradInput_data(THCTensor_(data)(state, gradInput));
        // gradInput = norm * (input - target)
        thrust::transform(
        #if TORCH_HIP_VERSION >= 7000
        thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
        #endif
        input_data, input_data+size, target_data, gradInput_data,
        mse_updateGradInput_functor<scalar_t, accreal>(norm));
        THCTensor_(free)(state, input);
        THCTensor_(free)(state, target);
        return;
    }
    // Unreduced case: gradOutput has input's shape and scales elementwise.
    THCUNN_check_shape(state, input, gradOutput);
    ptrdiff_t size = THCTensor_(nElement)(state, input);
    input = THCTensor_(newContiguous)(state, input);
    target = THCTensor_(newContiguous)(state, target);
    gradOutput = THCTensor_(newContiguous)(state, gradOutput);
    THCTensor_(resizeAs)(state, gradInput, input);
    THCThrustAllocator thrustAlloc(state);
    thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input));
    thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target));
    thrust::device_ptr<scalar_t> gradOutput_data(THCTensor_(data)(state, gradOutput));
    thrust::device_ptr<scalar_t> gradInput_data(THCTensor_(data)(state, gradInput));
    // gradInput = 2 * (input - target)
    thrust::transform(
    #if TORCH_HIP_VERSION >= 7000
    thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
    #endif
    input_data, input_data+size, target_data, gradInput_data,
    mse_updateGradInput_functor<scalar_t, accreal>(2));
    // gradInput *= gradOutput (elementwise)
    thrust::transform(
    #if TORCH_HIP_VERSION >= 7000
    thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
    #endif
    gradInput_data, gradInput_data+size, gradOutput_data, gradInput_data,
    thrust::multiplies<scalar_t>());
    THCTensor_(free)(state, input);
    THCTensor_(free)(state, target);
    THCTensor_(free)(state, gradOutput);
}
#endif
| MSECriterion.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MSECriterion.cu"
#else
// Forward pass of the MSE criterion.
// reduction != None: output becomes a 1-element tensor holding
// sum((input-target)^2), divided by the element count for Reduction::Mean.
// reduction == None: output gets the elementwise squared differences.
void THNN_(MSECriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *output,
int64_t reduction)
{
    THCUNN_check_shape(state, input, target);
    THCUNN_assertSameGPU(state, 3, input, target, output);
    if (reduction != Reduction::None) {
        THCTensor_(resize1d)(state, output, 1);
        ptrdiff_t size = THCTensor_(nElement)(state, input);
        // newContiguous may copy; the matching free calls below release them.
        input = THCTensor_(newContiguous)(state, input);
        target = THCTensor_(newContiguous)(state, target);
        THCThrustAllocator thrustAlloc(state);
        thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input));
        thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target));
        // Fused squared-difference + sum via inner_product with mse_functor.
        accreal sum = thrust::inner_product(
        #if CUDA_VERSION >= 7000
        thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
        #endif
        input_data, input_data+size, target_data, (accreal) 0,
        thrust::plus<accreal>(), mse_functor<scalar_t, accreal>());
        if (reduction == Reduction::Mean)
        sum /= size;
        THCTensor_(free)(state, input);
        THCTensor_(free)(state, target);
        THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(sum));
        return;
    }
    // Unreduced path: one squared difference per element.
    THCTensor_(resizeAs)(state, output, input);
    THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(
    state,
    input,
    target,
    output,
    mse_updateOutput_functor<scalar_t>());
}
// MSE criterion backward pass.
// Reduced path:   gradInput = norm * (input - target), where
//   norm = 2 * gradOutput[0]          for Reduction::Sum
//   norm = (2 / size) * gradOutput[0] for Reduction::Mean
// Unreduced path: gradInput = 2 * (input - target) * gradOutput, elementwise.
void THNN_(MSECriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
int64_t reduction)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 4, input, target, gradInput, gradOutput);
if (reduction != Reduction::None) {
ptrdiff_t size = THCTensor_(nElement)(state, input);
// Reduced forward produced a scalar, so gradOutput must be a 1-element
// tensor here.
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
accreal norm = reduction == Reduction::Mean ? (accreal)(2)/size : (accreal)(2);
norm *= ScalarConvert<scalar_t, accreal>::to(THCTensor_(get1d)(state, gradOutput, 0));
// newContiguous may allocate copies; freed below.
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
THCTensor_(resizeAs)(state, gradInput, input);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<scalar_t> gradInput_data(THCTensor_(data)(state, gradInput));
// gradInput = norm * (input - target)
thrust::transform(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, gradInput_data,
mse_updateGradInput_functor<scalar_t, accreal>(norm));
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
return;
}
// Unreduced path: gradOutput has the same shape as input.
THCUNN_check_shape(state, input, gradOutput);
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<scalar_t> gradOutput_data(THCTensor_(data)(state, gradOutput));
thrust::device_ptr<scalar_t> gradInput_data(THCTensor_(data)(state, gradInput));
// First pass: gradInput = 2 * (input - target)
thrust::transform(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, gradInput_data,
mse_updateGradInput_functor<scalar_t, accreal>(2));
// Second pass: gradInput *= gradOutput (elementwise, in place)
thrust::transform(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
gradInput_data, gradInput_data+size, gradOutput_data, gradInput_data,
thrust::multiplies<scalar_t>());
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(free)(state, gradOutput);
}
#endif
|
a97369169e67fa8af73791c6d91ad4c71903b068.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <chrono>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
using namespace std;
// Print the first `size` entries of `mat` to stdout, each followed by a
// single space. No trailing newline is emitted.
void print(int *mat, const int size)
{
    int idx = 0;
    while (idx < size)
    {
        cout << mat[idx] << " ";
        ++idx;
    }
}
// Fill `mat` with `size` pseudo-random ints uniformly drawn from [1, 10].
// Fix: the original called srand(time(0)) on every invocation, so two calls
// made within the same second (as main does for h_A and h_B) re-seeded with
// the same value and produced identical arrays. Seed exactly once instead.
void initData(int *mat, const int size)
{
    static bool seeded = false;   // one-time RNG seed guard
    if (!seeded)
    {
        srand (time(0));
        seeded = true;
    }
    for (int i = 0; i < size; i++)
    {
        mat[i] = rand() % 10 + 1;
    }
    return;
}
// CPU reference matrix product, column-major layout: element (r, c) of C
// lives at C[c * rows + r]. The shared/reduction dimension is `cols`, so the
// inputs are effectively assumed square (rows == cols), which is how this
// program always calls it.
void multMatrixOnHost(int *A, int *B, int *C, const int cols,
const int rows)
{
    for (int c = 0; c < cols; c++)
    {
        for (int r = 0; r < rows; r++)
        {
            // Dot product of row r of A with column c of B.
            int acc = 0;
            for (int k = 0; k < cols; k++)
            {
                acc += A[k * rows + r] * B[c * rows + k];
            }
            C[c * rows + r] = acc;
        }
    }
    return;
}
// Compare CPU and GPU result arrays element by element and print the
// outcome. The first mismatching pair is printed before giving up.
// epsilon stays a double threshold even though the data are ints, so any
// absolute difference >= 1 counts as a mismatch.
void checkResult(int *hostRef, int *gpuRef, const int N)
{
    const double epsilon = 1.0E-8;
    bool match = true;
    int i = 0;
    while (i < N)
    {
        if (abs(hostRef[i] - gpuRef[i]) > epsilon)
        {
            match = false;
            printf("host %d gpu %d\n", hostRef[i], gpuRef[i]);
            break;
        }
        ++i;
    }
    if (match)
        printf("Arrays match.\n\n");
    else
        printf("Arrays do not match.\n\n");
}
// grid 1D block 1D
// Each thread owns one output column (ix_cols) and walks every row,
// computing a full dot product per element. Loads of A are coalesced:
// adjacent threads read adjacent elements of A.
// NOTE(review): C is indexed as iy_rows * cols + ix_cols while the host
// reference writes C[j * rows + i]; the two only agree because this program
// always uses square matrices (rows == cols) — confirm before reusing with
// rows != cols. The reduction dimension is likewise assumed to be `cols`.
__global__ void multMatrixOnGPU1D(int *A, int *B, int *C, const int cols,
const int rows)
{
unsigned int ix_cols = threadIdx.x + blockIdx.x * blockDim.x;
// Guard the grid tail: the grid may over-cover cols.
if (ix_cols < cols )
for (int iy_rows = 0; iy_rows < rows; iy_rows++)
{
int idx = iy_rows * cols + ix_cols;
C[idx] = 0;
for (int shared_dim = 0; shared_dim < cols; shared_dim++)
{
//dot product
C[idx] += A[shared_dim * rows + ix_cols] * B[iy_rows * rows + shared_dim];
}
}
}
// Driver: builds two random n x n int matrices, multiplies them on the CPU
// (timed reference) and on the GPU (1D-grid / 1D-block kernel, one thread
// per column), then verifies both results agree.
// Usage: ./prog [n]   (default n = 2; matrices are always square)
int main(int argc, char **argv)
{
    // set up data size of matrix
    int nx = 0;
    int ny = 0;
    if (argc < 2)
    {
        nx = ny = 2;
    }
    else
    {
        nx = ny = stoi(argv[1]);
    }
    int nxy = nx * ny;
    // Compute the byte count in size_t so nx*ny*sizeof(int) cannot overflow
    // int for large matrices (the original used int here).
    size_t nBytes = (size_t)nxy * sizeof(int);
    printf("Matrix size: nx %d ny %d\n", nx, ny);

    // malloc host memory
    int *h_A, *h_B, *hostRef, *gpuRef;
    h_A = (int *)malloc(nBytes);
    h_B = (int *)malloc(nBytes);
    hostRef = (int *)malloc(nBytes);
    gpuRef = (int *)malloc(nBytes);

    // initialize data at host side
    initData(h_A, nxy);
    initData(h_B, nxy);
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);

    // CPU reference, timed. (The original ran an extra untimed host multiply
    // before the memsets and discarded its result; that redundant call has
    // been removed.)
    auto start_cpu = chrono::high_resolution_clock::now();
    multMatrixOnHost(h_A, h_B, hostRef, nx, ny);
    auto end_cpu = chrono::high_resolution_clock::now();
    chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
    printf("multMatrixOnHost elapsed %f ms\n", duration_ms.count());

    // malloc device global memory
    int *d_MatA, *d_MatB, *d_MatC;
    SAFE_CALL(hipMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA");
    SAFE_CALL(hipMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB");
    SAFE_CALL(hipMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC");

    // transfer data from host to device
    SAFE_CALL(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice), "Error copying d_MatA");
    SAFE_CALL(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice), "Error copying d_MatB");

    // invoke kernel at host side: 1D grid, ceil-div so the tail is covered
    int dimx = 256;
    dim3 block(dimx, 1);
    dim3 grid((nx + block.x - 1) / block.x, 1);
    start_cpu = chrono::high_resolution_clock::now();
    hipLaunchKernelGGL(( multMatrixOnGPU1D), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx, ny);
    // Synchronize so the timing below covers the kernel, and so execution
    // errors surface here.
    SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel");
    end_cpu = chrono::high_resolution_clock::now();
    duration_ms = end_cpu - start_cpu;
    printf("multMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f ms\n", grid.x,
           grid.y,
           block.x, block.y, duration_ms.count());

    // check for kernel launch errors
    SAFE_CALL(hipGetLastError(), "Error with last error");

    // copy kernel result back to host side
    SAFE_CALL(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost), "Error copying d_MatC");

    // check device results
    checkResult(hostRef, gpuRef, nxy);

    // free device global memory
    SAFE_CALL(hipFree(d_MatA), "Error freeing memory");
    SAFE_CALL(hipFree(d_MatB), "Error freeing memory");
    SAFE_CALL(hipFree(d_MatC), "Error freeing memory");

    // free host memory
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);

    // reset device
    SAFE_CALL(hipDeviceReset(), "Error reseting");
    return (0);
}
| a97369169e67fa8af73791c6d91ad4c71903b068.cu | #include "common.h"
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <chrono>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
using namespace std;
// Print the first `size` entries of `mat` to stdout, each followed by a
// single space. No trailing newline is emitted.
void print(int *mat, const int size)
{
    int idx = 0;
    while (idx < size)
    {
        cout << mat[idx] << " ";
        ++idx;
    }
}
// Fill `mat` with `size` pseudo-random ints uniformly drawn from [1, 10].
// Fix: the original called srand(time(0)) on every invocation, so two calls
// made within the same second (as main does for h_A and h_B) re-seeded with
// the same value and produced identical arrays. Seed exactly once instead.
void initData(int *mat, const int size)
{
    static bool seeded = false;   // one-time RNG seed guard
    if (!seeded)
    {
        srand (time(0));
        seeded = true;
    }
    for (int i = 0; i < size; i++)
    {
        mat[i] = rand() % 10 + 1;
    }
    return;
}
// CPU reference matrix product, column-major layout: element (r, c) of C
// lives at C[c * rows + r]. The shared/reduction dimension is `cols`, so the
// inputs are effectively assumed square (rows == cols), which is how this
// program always calls it.
void multMatrixOnHost(int *A, int *B, int *C, const int cols,
const int rows)
{
    for (int c = 0; c < cols; c++)
    {
        for (int r = 0; r < rows; r++)
        {
            // Dot product of row r of A with column c of B.
            int acc = 0;
            for (int k = 0; k < cols; k++)
            {
                acc += A[k * rows + r] * B[c * rows + k];
            }
            C[c * rows + r] = acc;
        }
    }
    return;
}
// Compare CPU and GPU result arrays element by element and print the
// outcome. The first mismatching pair is printed before giving up.
// epsilon stays a double threshold even though the data are ints, so any
// absolute difference >= 1 counts as a mismatch.
void checkResult(int *hostRef, int *gpuRef, const int N)
{
    const double epsilon = 1.0E-8;
    bool match = true;
    int i = 0;
    while (i < N)
    {
        if (abs(hostRef[i] - gpuRef[i]) > epsilon)
        {
            match = false;
            printf("host %d gpu %d\n", hostRef[i], gpuRef[i]);
            break;
        }
        ++i;
    }
    if (match)
        printf("Arrays match.\n\n");
    else
        printf("Arrays do not match.\n\n");
}
// grid 1D block 1D
// Each thread owns one output column (ix_cols) and walks every row,
// computing a full dot product per element. Loads of A are coalesced:
// adjacent threads read adjacent elements of A.
// NOTE(review): C is indexed as iy_rows * cols + ix_cols while the host
// reference writes C[j * rows + i]; the two only agree because this program
// always uses square matrices (rows == cols) — confirm before reusing with
// rows != cols. The reduction dimension is likewise assumed to be `cols`.
__global__ void multMatrixOnGPU1D(int *A, int *B, int *C, const int cols,
const int rows)
{
unsigned int ix_cols = threadIdx.x + blockIdx.x * blockDim.x;
// Guard the grid tail: the grid may over-cover cols.
if (ix_cols < cols )
for (int iy_rows = 0; iy_rows < rows; iy_rows++)
{
int idx = iy_rows * cols + ix_cols;
C[idx] = 0;
for (int shared_dim = 0; shared_dim < cols; shared_dim++)
{
//dot product
C[idx] += A[shared_dim * rows + ix_cols] * B[iy_rows * rows + shared_dim];
}
}
}
// Driver: builds two random n x n int matrices, multiplies them on the CPU
// (timed reference) and on the GPU (1D-grid / 1D-block kernel, one thread
// per column), then verifies both results agree.
// Usage: ./prog [n]   (default n = 2; matrices are always square)
int main(int argc, char **argv)
{
    // set up data size of matrix
    int nx = 0;
    int ny = 0;
    if (argc < 2)
    {
        nx = ny = 2;
    }
    else
    {
        nx = ny = stoi(argv[1]);
    }
    int nxy = nx * ny;
    // Compute the byte count in size_t so nx*ny*sizeof(int) cannot overflow
    // int for large matrices (the original used int here).
    size_t nBytes = (size_t)nxy * sizeof(int);
    printf("Matrix size: nx %d ny %d\n", nx, ny);

    // malloc host memory
    int *h_A, *h_B, *hostRef, *gpuRef;
    h_A = (int *)malloc(nBytes);
    h_B = (int *)malloc(nBytes);
    hostRef = (int *)malloc(nBytes);
    gpuRef = (int *)malloc(nBytes);

    // initialize data at host side
    initData(h_A, nxy);
    initData(h_B, nxy);
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);

    // CPU reference, timed. (The original ran an extra untimed host multiply
    // before the memsets and discarded its result; that redundant call has
    // been removed.)
    auto start_cpu = chrono::high_resolution_clock::now();
    multMatrixOnHost(h_A, h_B, hostRef, nx, ny);
    auto end_cpu = chrono::high_resolution_clock::now();
    chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
    printf("multMatrixOnHost elapsed %f ms\n", duration_ms.count());

    // malloc device global memory
    int *d_MatA, *d_MatB, *d_MatC;
    SAFE_CALL(cudaMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA");
    SAFE_CALL(cudaMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB");
    SAFE_CALL(cudaMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC");

    // transfer data from host to device
    SAFE_CALL(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatA");
    SAFE_CALL(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatB");

    // invoke kernel at host side: 1D grid, ceil-div so the tail is covered
    int dimx = 256;
    dim3 block(dimx, 1);
    dim3 grid((nx + block.x - 1) / block.x, 1);
    start_cpu = chrono::high_resolution_clock::now();
    multMatrixOnGPU1D<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny);
    // Synchronize so the timing below covers the kernel, and so execution
    // errors surface here.
    SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel");
    end_cpu = chrono::high_resolution_clock::now();
    duration_ms = end_cpu - start_cpu;
    printf("multMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f ms\n", grid.x,
           grid.y,
           block.x, block.y, duration_ms.count());

    // check for kernel launch errors
    SAFE_CALL(cudaGetLastError(), "Error with last error");

    // copy kernel result back to host side
    SAFE_CALL(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatC");

    // check device results
    checkResult(hostRef, gpuRef, nxy);

    // free device global memory
    SAFE_CALL(cudaFree(d_MatA), "Error freeing memory");
    SAFE_CALL(cudaFree(d_MatB), "Error freeing memory");
    SAFE_CALL(cudaFree(d_MatC), "Error freeing memory");

    // free host memory
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);

    // reset device
    SAFE_CALL(cudaDeviceReset(), "Error reseting");
    return (0);
}
|
b3f501fe08a43aa01081c47e7bcf65bb9600e44e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// For each column i of the h x w row-major matrix B, add A[i] to the even
// rows and subtract A[i] from the odd rows. Launch with a 1D grid covering
// at least w threads; blockDim.x must not exceed 1024.
__global__ void addSubArray3 (int *A, int *B, int w, int h) {
    // Fix: the original declared `__shared__ int As[blockDim.x]`, which does
    // not compile (blockDim.x is not a compile-time constant). Use the
    // hardware maximum threads-per-block as a fixed bound instead.
    __shared__ int As[1024];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= w) return;                  // guard the grid tail
    // Fix: the original later read As[i] (global index) — out of bounds for
    // any block but the first. Each thread stages and reads only its own
    // slot, so no __syncthreads() is needed.
    As[threadIdx.x] = A[i];
    // Fix: with odd h the original wrote B[h * w + i], one row past the end.
    // Process rows in add/subtract pairs while both rows exist.
    for (int j = 0; j + 1 < h; j += 2) {
        B[j * w + i] += As[threadIdx.x];
        B[(j + 1) * w + i] -= As[threadIdx.x];
    }
    if (h % 2 != 0)                      // leftover last row still gets the add
        B[(h - 1) * w + i] += As[threadIdx.x];
}
| b3f501fe08a43aa01081c47e7bcf65bb9600e44e.cu | __global__ void addSubArray3 (int *A, int *B, int w, int h) {
__shared__ int As[blockDim.x];
int i = blockIdx.x * blockDim.x + threadIdx.x;
As[threadIdx.x] = A[i];
for (int j = 0; j < h; j += 2) {
B[j * w + i] += As[i];
B[(j + 1) * w + i] -= As[i];
}
}
|
56caa913965df846f52f6517161a5ef77a1e5fcf.hip | // !!! This is a file automatically generated by hipify!!!
#include <chrono>
#include <iomanip>
#include <memory>
#include <stdexcept>
#include <tuple>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include "tensor.h"
#include "cudnn_helper.h"
#include "rnn_problems.h"
/*
Usage:
The default precision is set based on the architecture and mode.
By default, the program runs the benchmark in training mode.
bin/rnn_bench
To run inference mode, use the following command:
bin/rnn_bench inference
To change the precision for training/inference, use:
bin/rnn_bench train <precision>
bin/rnn_bench inference <precision>
Supported precision types:
For Maxwell GPUS:
float for training and inference
For Pascal GPUS:
float, half for training
float, half, int8 for inference
*/
#ifndef USE_TENSOR_CORES
#if CUDNN_MAJOR >= 7
#define USE_TENSOR_CORES 1
#else
#define USE_TENSOR_CORES 0
#endif
#endif
cudnnHandle_t cudnn_handle;
hiprandGenerator_t curand_gen;
// Wraps a cudnnDropoutDescriptor_t together with the GPU state buffer cuDNN
// requires for it. The descriptor lives in a shared_ptr with a custom
// deleter, so copies of this object share one descriptor and it is destroyed
// exactly once.
class cudnnDropout {
std::shared_ptr<cudnnDropoutDescriptor_t> dropout_desc_;
std::shared_ptr<Tensor<uint8_t>> dropout_state_;
struct DropoutDeleter {
void operator()(cudnnDropoutDescriptor_t * dropout_desc) {
cudnnDestroyDropoutDescriptor(*dropout_desc);
delete dropout_desc;
}
};
public:
// Queries the required state-buffer size, allocates it on the device, and
// initializes the descriptor with a fixed seed of 0.
cudnnDropout(float dropout_percentage) : dropout_desc_(new cudnnDropoutDescriptor_t,
DropoutDeleter()) {
size_t dropoutStateSize;
CHECK_CUDNN_ERROR(cudnnCreateDropoutDescriptor(dropout_desc_.get()));
CHECK_CUDNN_ERROR(cudnnDropoutGetStatesSize(cudnn_handle, &dropoutStateSize));
dropout_state_.reset(new Tensor<uint8_t>(std::vector<int>{static_cast<int>(dropoutStateSize), 1}));
CHECK_CUDNN_ERROR(cudnnSetDropoutDescriptor(*dropout_desc_,
cudnn_handle,
dropout_percentage,
dropout_state_->begin(),
dropoutStateSize,
0ULL) );
}
// Raw descriptor accessor for passing into cuDNN RNN setup.
cudnnDropoutDescriptor_t desc() const { return *dropout_desc_; }
};
// Benchmark wrapper around the cuDNN RNN API: a single-layer, unidirectional
// RNN of the requested cell type, built with CUDNN_SKIP_INPUT (no input
// projection, so the input must already be hidden_size wide). Owns the
// weights, the workspace, and the training reserve buffer.
template <typename T>
class cudnnRNN {
RNNDescriptor<T> rnn_desc_;
FilterDescriptorNd<T> wDesc_;
cudnnDropout dropout_;
int time_steps_;
// Per-time-step input/output (and gradient) descriptor arrays.
TensorDescriptorNdArray<T> xDescArray_;
TensorDescriptorNdArray<T> yDescArray_;
TensorDescriptorNdArray<T> dxDescArray_;
TensorDescriptorNdArray<T> dyDescArray_;
// Hidden/cell state descriptors (and their gradients), one layer deep.
TensorDescriptorNd<T> hx_desc_;
TensorDescriptorNd<T> hy_desc_;
TensorDescriptorNd<T> dhx_desc_;
TensorDescriptorNd<T> dhy_desc_;
TensorDescriptorNd<T> cx_desc_;
TensorDescriptorNd<T> cy_desc_;
TensorDescriptorNd<T> dcx_desc_;
TensorDescriptorNd<T> dcy_desc_;
size_t weight_size_;
size_t workspace_size_;
size_t train_size_;
Tensor<T> weights_;
Tensor<float> workspace_;
Tensor<float> trainspace_;
public:
// Builds all descriptors, queries buffer sizes from cuDNN, and allocates
// randomly-initialized weights plus workspace/reserve space. Dropout is
// fixed at 0 (benchmark only).
cudnnRNN(int hidden_size, int batch_size, int time_steps, const std::string& rnn_type) :
dropout_(0.f), time_steps_(time_steps),
xDescArray_({batch_size, hidden_size, 1}, {hidden_size, 1, 1}, time_steps),
yDescArray_({batch_size, hidden_size, 1}, {hidden_size, 1, 1}, time_steps),
dxDescArray_({batch_size, hidden_size, 1}, {hidden_size, 1, 1}, time_steps),
dyDescArray_({batch_size, hidden_size, 1}, {hidden_size, 1, 1}, time_steps),
hx_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
hy_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
dhx_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
dhy_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
cx_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
cy_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
dcx_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
dcy_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1})
{
rnn_desc_ = RNNDescriptor<T>(hidden_size,
1,
dropout_.desc(),
CUDNN_SKIP_INPUT,
CUDNN_UNIDIRECTIONAL,
rnn_type,
cudnn_handle);
// Map the template parameter to the matching cuDNN data type.
cudnnDataType_t type;
if (std::is_same<T, float>::value)
type = CUDNN_DATA_FLOAT;
#if CUDNN_MAJOR >= 6
else if (std::is_same<T, uint8_t>::value)
type = CUDNN_DATA_INT8;
#endif
else if (std::is_same<T, uint16_t>::value)
type= CUDNN_DATA_HALF;
else
throw std::runtime_error("Unknown type in cudnnRNN constructor.");
// Weight size depends on the RNN descriptor and a sample input descriptor.
CHECK_CUDNN_ERROR( cudnnGetRNNParamsSize(cudnn_handle,
rnn_desc_.desc(),
xDescArray_.ptr()[0],
&weight_size_,
type) );
#if (CUDNN_MAJOR >= 7) && (USE_TENSOR_CORES)
// Opt in to Tensor Core math where cuDNN 7+ supports it.
CHECK_CUDNN_ERROR( cudnnSetRNNMatrixMathType(rnn_desc_.desc(), CUDNN_TENSOR_OP_MATH) );
#endif
weights_ = rand<T>(std::vector<int>{static_cast<int>(weight_size_ / sizeof(T)), 1}, curand_gen);
std::vector<int> dim = {weights_.size(), 1, 1};
wDesc_ = FilterDescriptorNd<T>(CUDNN_TENSOR_NCHW, dim);
CHECK_CUDNN_ERROR( cudnnGetRNNWorkspaceSize(cudnn_handle,
rnn_desc_.desc(),
time_steps,
xDescArray_.ptr(),
&workspace_size_) );
workspace_ = zeros<float>(std::vector<int>{static_cast<int>(workspace_size_ / sizeof(float)), 1});
// The reserve buffer carries forward-pass state needed by backward.
CHECK_CUDNN_ERROR( cudnnGetRNNTrainingReserveSize(cudnn_handle,
rnn_desc_.desc(),
time_steps,
xDescArray_.ptr(),
&train_size_) );
trainspace_ = zeros<float>(std::vector<int>{static_cast<int>(train_size_ / sizeof(float)), 1});
}
// One training-mode forward pass over all time steps (asynchronous on the
// current stream; caller synchronizes).
void forward(Tensor<T> x, Tensor<T> hx, Tensor<T> cx,
Tensor<T> y, Tensor<T> hy, Tensor<T> cy) {
CHECK_CUDNN_ERROR( cudnnRNNForwardTraining(cudnn_handle,
rnn_desc_.desc(),
time_steps_,
xDescArray_.ptr(),
(void *)x.begin(),
hx_desc_.desc(),
(void *)hx.begin(),
cx_desc_.desc(),
(void *)cx.begin(),
wDesc_.desc(),
(void *)weights_.begin(),
yDescArray_.ptr(),
(void *)y.begin(),
hy_desc_.desc(),
(void *)hy.begin(),
cy_desc_.desc(),
(void *)cy.begin(),
(void *)workspace_.begin(),
workspace_size_,
(void *)trainspace_.begin(),
train_size_) );
}
// Data-gradient backward pass (weight gradients are not benchmarked here).
// Must follow a forward() call that filled the reserve buffer.
void backward_data(Tensor<T> y, Tensor<T> dy, Tensor<T> dhy,
Tensor<T> dcy, Tensor<T> hx, Tensor<T> cx,
Tensor<T> dx, Tensor<T> dhx, Tensor<T> dcx) {
CHECK_CUDNN_ERROR( cudnnRNNBackwardData(cudnn_handle,
rnn_desc_.desc(),
time_steps_,
yDescArray_.ptr(),
(void *)y.begin(),
dyDescArray_.ptr(),
(void *)dy.begin(),
dhy_desc_.desc(),
(void *)dhy.begin(),
dcy_desc_.desc(),
(void *)dcy.begin(),
wDesc_.desc(),
(void *)weights_.begin(),
hx_desc_.desc(),
(void *)hx.begin(),
cx_desc_.desc(),
(void *)cx.begin(),
dxDescArray_.ptr(),
(void *)dx.begin(),
dhx_desc_.desc(),
(void *)dhx.begin(),
dcx_desc_.desc(),
(void *)dcx.begin(),
(void *)workspace_.begin(),
workspace_size_,
(void *)trainspace_.begin(),
train_size_) );
}
};
// Times one forward pass (and, unless `inference` is set, one backward-data
// pass) of the requested RNN configuration. Returns (forward_us, backward_us);
// backward is 0 in inference mode. Each phase does an untimed warm-up run,
// and device synchronization brackets the timed region so wall-clock time
// covers the GPU work. numRepeats is 1, so the average is a single run.
template <typename T>
std::tuple<int, int> time_rnn(int hidden_size,
int batch_size,
int time_steps,
const std::string& type,
int inference) {
cudnnRNN<T> rnn(hidden_size, batch_size, time_steps, type);
// Random inputs, states, and gradient seeds for the benchmark.
auto x = rand<T>({hidden_size, batch_size * time_steps}, curand_gen);
auto y = rand<T>({hidden_size, batch_size * time_steps}, curand_gen);
auto dx = rand<T>({hidden_size, batch_size * time_steps}, curand_gen);
auto dy = rand<T>({hidden_size, batch_size * time_steps}, curand_gen);
auto hx = rand<T>({hidden_size, batch_size}, curand_gen);
auto hy = rand<T>({hidden_size, batch_size}, curand_gen);
auto cx = rand<T>({hidden_size, batch_size}, curand_gen);
auto cy = rand<T>({hidden_size, batch_size}, curand_gen);
auto dhx = rand<T>({hidden_size, batch_size}, curand_gen);
auto dhy = rand<T>({hidden_size, batch_size}, curand_gen);
auto dcx = rand<T>({hidden_size, batch_size}, curand_gen);
auto dcy = rand<T>({hidden_size, batch_size}, curand_gen);
int numRepeats = 1;
//Warm up
rnn.forward(x, hx, cx, y, hy, cy);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < numRepeats; ++i) {
rnn.forward(x, hx, cx, y, hy, cy);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto forward_time = std::chrono::duration<double, std::micro>(end - start).count() / numRepeats;
int backward_time = 0;
if (!inference) {
//Warm up
rnn.backward_data(y, dy, dhy, dcy,
hx, cx, dx, dhx, dcx);
hipDeviceSynchronize();
start = std::chrono::steady_clock::now();
for (int i = 0; i < numRepeats; ++i) {
rnn.backward_data(y, dy, dhy, dcy,
hx, cx, dx, dhx, dcx);
}
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
backward_time = std::chrono::duration<double, std::micro>(end - start).count() / numRepeats;
}
return std::make_tuple(static_cast<int>(forward_time),
static_cast<int>(backward_time));
}
// Entry point. Usage:
//   rnn_bench [train|inference] [precision] [hidden batch timesteps type]
// Sets up the cuDNN and cuRAND handles, picks the problem list (built-in
// sets, or the single problem given on the command line), runs time_rnn for
// each at the requested precision, and prints a formatted timing table.
int main(int argc, char **argv) {
// hipFree(0): touching the runtime here forces device/context
// initialization before any timing happens (common warm-up idiom).
hipFree(0);
CHECK_CUDNN_ERROR( cudnnCreate(&cudnn_handle) );
// Fixed RNG seed so benchmark inputs are reproducible across runs.
hiprandCreateGenerator(&curand_gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(curand_gen, 123ULL);
int inference = 0;
if (argc > 1) {
std::string inf = "inference";
inference = argv[1] == inf ? 1 : 0;
}
// Optional single problem from the command line: hidden, batch, steps, type.
std::vector<std::tuple<int, int, int, std::string>> dataset;
if (argc > 3) {
assert (argc == 7);
dataset.push_back(
std::make_tuple(atoi(argv[3]), atoi(argv[4]), atoi(argv[5]), argv[6])
);
}
// Default precision depends on cuDNN version and mode; argv[2] overrides.
#if CUDNN_MAJOR >= 6
std::string precision;
if (inference)
precision = "int8";
else
precision = "half";
#else
std::string precision = "float";
#endif
if (argc > 2) {
precision = argv[2];
}
if (inference) {
std::cout << std::setw(45) << "Running inference benchmark " << std::endl;
} else {
std::cout << std::setw(45) << "Running training benchmark " << std::endl;
}
std::cout << std::setw(30) << "Times" << std::endl;
std::cout << std::setfill('-') << std::setw(88) << "-" << std::endl;
std::cout << std::setfill(' ');
std::cout << " type hidden N timesteps precision fwd_time (usec) ";
if (!inference)
std::cout << "bwd_time (usec)";
std::cout << std::endl;
// Use the built-in problem set unless one was supplied on the command line.
for (const auto &problem : (dataset.empty() ? (inference ? inference_server_set : training_set) : dataset)) {
int hidden_state, batch_size, time_steps;
std::string type;
std::tie(hidden_state, batch_size, time_steps, type) = problem;
std::cout << std::setw(8) << type;
std::cout << std::setw(8) << hidden_state;
std::cout << std::setw(8) << batch_size;
std::cout << std::setw(8) << time_steps;
std::cout << std::setw(14) << precision;
int fwd_time, bwd_time;
std::stringstream ss;
ss << "Unsupported precision requested. Precision: " << precision << " Inference: " << inference;
// Dispatch on precision; int8 is only accepted for inference.
#if CUDNN_MAJOR >= 6
if (inference) {
if (precision == "float") {
std::tie(fwd_time, bwd_time) = time_rnn<float>(hidden_state,
batch_size,
time_steps,
type,
inference);
} else if (precision == "half") {
std::tie(fwd_time, bwd_time) = time_rnn<uint16_t>(hidden_state,
batch_size,
time_steps,
type,
inference);
} else if (precision == "int8") {
std::tie(fwd_time, bwd_time) = time_rnn<uint8_t>(hidden_state,
batch_size,
time_steps,
type,
inference);
} else {
throw std::runtime_error(ss.str());
}
} else {
if (precision == "float") {
std::tie(fwd_time, bwd_time) = time_rnn<float>(hidden_state,
batch_size,
time_steps,
type,
inference);
} else if (precision == "half") {
std::tie(fwd_time, bwd_time) = time_rnn<uint16_t>(hidden_state,
batch_size,
time_steps,
type,
inference);
} else {
throw std::runtime_error(ss.str());
}
}
#else
// Pre-6 cuDNN: only float is supported in this benchmark.
if (precision != "float")
throw std::runtime_error(ss.str());
std::tie(fwd_time, bwd_time) = time_rnn<float>(hidden_state,
batch_size,
time_steps,
type,
inference);
#endif
std::cout << std::setw(18) << fwd_time;
if (!inference)
std::cout << std::setw(18) << bwd_time;
std::cout << std::endl;
}
cudnnDestroy(cudnn_handle);
hiprandDestroyGenerator(curand_gen);
return 0;
}
| 56caa913965df846f52f6517161a5ef77a1e5fcf.cu | #include <chrono>
#include <iomanip>
#include <memory>
#include <stdexcept>
#include <tuple>
#include <cuda.h>
#include <curand.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include "tensor.h"
#include "cudnn_helper.h"
#include "rnn_problems.h"
/*
Usage:
The default precision is set based on the architecture and mode.
By default, the program runs the benchmark in training mode.
bin/rnn_bench
To run inference mode, use the following command:
bin/rnn_bench inference
To change the precision for training/inference, use:
bin/rnn_bench train <precision>
bin/rnn_bench inference <precision>
Supported precision types:
For Maxwell GPUS:
float for training and inference
For Pascal GPUS:
float, half for training
float, half, int8 for inference
*/
#ifndef USE_TENSOR_CORES
#if CUDNN_MAJOR >= 7
#define USE_TENSOR_CORES 1
#else
#define USE_TENSOR_CORES 0
#endif
#endif
cudnnHandle_t cudnn_handle;
curandGenerator_t curand_gen;
// Wraps a cudnnDropoutDescriptor_t together with the GPU state buffer cuDNN
// requires for it. The descriptor lives in a shared_ptr with a custom
// deleter, so copies of this object share one descriptor and it is destroyed
// exactly once.
class cudnnDropout {
std::shared_ptr<cudnnDropoutDescriptor_t> dropout_desc_;
std::shared_ptr<Tensor<uint8_t>> dropout_state_;
struct DropoutDeleter {
void operator()(cudnnDropoutDescriptor_t * dropout_desc) {
cudnnDestroyDropoutDescriptor(*dropout_desc);
delete dropout_desc;
}
};
public:
// Queries the required state-buffer size, allocates it on the device, and
// initializes the descriptor with a fixed seed of 0.
cudnnDropout(float dropout_percentage) : dropout_desc_(new cudnnDropoutDescriptor_t,
DropoutDeleter()) {
size_t dropoutStateSize;
CHECK_CUDNN_ERROR(cudnnCreateDropoutDescriptor(dropout_desc_.get()));
CHECK_CUDNN_ERROR(cudnnDropoutGetStatesSize(cudnn_handle, &dropoutStateSize));
dropout_state_.reset(new Tensor<uint8_t>(std::vector<int>{static_cast<int>(dropoutStateSize), 1}));
CHECK_CUDNN_ERROR(cudnnSetDropoutDescriptor(*dropout_desc_,
cudnn_handle,
dropout_percentage,
dropout_state_->begin(),
dropoutStateSize,
0ULL) );
}
// Raw descriptor accessor for passing into cuDNN RNN setup.
cudnnDropoutDescriptor_t desc() const { return *dropout_desc_; }
};
// Benchmark wrapper around the cuDNN RNN API: a single-layer, unidirectional
// RNN of the requested cell type, built with CUDNN_SKIP_INPUT (no input
// projection, so the input must already be hidden_size wide). Owns the
// weights, the workspace, and the training reserve buffer.
template <typename T>
class cudnnRNN {
RNNDescriptor<T> rnn_desc_;
FilterDescriptorNd<T> wDesc_;
cudnnDropout dropout_;
int time_steps_;
// Per-time-step input/output (and gradient) descriptor arrays.
TensorDescriptorNdArray<T> xDescArray_;
TensorDescriptorNdArray<T> yDescArray_;
TensorDescriptorNdArray<T> dxDescArray_;
TensorDescriptorNdArray<T> dyDescArray_;
// Hidden/cell state descriptors (and their gradients), one layer deep.
TensorDescriptorNd<T> hx_desc_;
TensorDescriptorNd<T> hy_desc_;
TensorDescriptorNd<T> dhx_desc_;
TensorDescriptorNd<T> dhy_desc_;
TensorDescriptorNd<T> cx_desc_;
TensorDescriptorNd<T> cy_desc_;
TensorDescriptorNd<T> dcx_desc_;
TensorDescriptorNd<T> dcy_desc_;
size_t weight_size_;
size_t workspace_size_;
size_t train_size_;
Tensor<T> weights_;
Tensor<float> workspace_;
Tensor<float> trainspace_;
public:
// Builds all descriptors, queries buffer sizes from cuDNN, and allocates
// randomly-initialized weights plus workspace/reserve space. Dropout is
// fixed at 0 (benchmark only).
cudnnRNN(int hidden_size, int batch_size, int time_steps, const std::string& rnn_type) :
dropout_(0.f), time_steps_(time_steps),
xDescArray_({batch_size, hidden_size, 1}, {hidden_size, 1, 1}, time_steps),
yDescArray_({batch_size, hidden_size, 1}, {hidden_size, 1, 1}, time_steps),
dxDescArray_({batch_size, hidden_size, 1}, {hidden_size, 1, 1}, time_steps),
dyDescArray_({batch_size, hidden_size, 1}, {hidden_size, 1, 1}, time_steps),
hx_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
hy_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
dhx_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
dhy_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
cx_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
cy_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
dcx_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1}),
dcy_desc_({1, batch_size, hidden_size}, {hidden_size * batch_size, hidden_size, 1})
{
rnn_desc_ = RNNDescriptor<T>(hidden_size,
1,
dropout_.desc(),
CUDNN_SKIP_INPUT,
CUDNN_UNIDIRECTIONAL,
rnn_type,
cudnn_handle);
// Map the template parameter to the matching cuDNN data type.
cudnnDataType_t type;
if (std::is_same<T, float>::value)
type = CUDNN_DATA_FLOAT;
#if CUDNN_MAJOR >= 6
else if (std::is_same<T, uint8_t>::value)
type = CUDNN_DATA_INT8;
#endif
else if (std::is_same<T, uint16_t>::value)
type= CUDNN_DATA_HALF;
else
throw std::runtime_error("Unknown type in cudnnRNN constructor.");
// Weight size depends on the RNN descriptor and a sample input descriptor.
CHECK_CUDNN_ERROR( cudnnGetRNNParamsSize(cudnn_handle,
rnn_desc_.desc(),
xDescArray_.ptr()[0],
&weight_size_,
type) );
#if (CUDNN_MAJOR >= 7) && (USE_TENSOR_CORES)
// Opt in to Tensor Core math where cuDNN 7+ supports it.
CHECK_CUDNN_ERROR( cudnnSetRNNMatrixMathType(rnn_desc_.desc(), CUDNN_TENSOR_OP_MATH) );
#endif
weights_ = rand<T>(std::vector<int>{static_cast<int>(weight_size_ / sizeof(T)), 1}, curand_gen);
std::vector<int> dim = {weights_.size(), 1, 1};
wDesc_ = FilterDescriptorNd<T>(CUDNN_TENSOR_NCHW, dim);
CHECK_CUDNN_ERROR( cudnnGetRNNWorkspaceSize(cudnn_handle,
rnn_desc_.desc(),
time_steps,
xDescArray_.ptr(),
&workspace_size_) );
workspace_ = zeros<float>(std::vector<int>{static_cast<int>(workspace_size_ / sizeof(float)), 1});
// The reserve buffer carries forward-pass state needed by backward.
CHECK_CUDNN_ERROR( cudnnGetRNNTrainingReserveSize(cudnn_handle,
rnn_desc_.desc(),
time_steps,
xDescArray_.ptr(),
&train_size_) );
trainspace_ = zeros<float>(std::vector<int>{static_cast<int>(train_size_ / sizeof(float)), 1});
}
// One training-mode forward pass over all time steps (asynchronous on the
// current stream; caller synchronizes).
void forward(Tensor<T> x, Tensor<T> hx, Tensor<T> cx,
Tensor<T> y, Tensor<T> hy, Tensor<T> cy) {
CHECK_CUDNN_ERROR( cudnnRNNForwardTraining(cudnn_handle,
rnn_desc_.desc(),
time_steps_,
xDescArray_.ptr(),
(void *)x.begin(),
hx_desc_.desc(),
(void *)hx.begin(),
cx_desc_.desc(),
(void *)cx.begin(),
wDesc_.desc(),
(void *)weights_.begin(),
yDescArray_.ptr(),
(void *)y.begin(),
hy_desc_.desc(),
(void *)hy.begin(),
cy_desc_.desc(),
(void *)cy.begin(),
(void *)workspace_.begin(),
workspace_size_,
(void *)trainspace_.begin(),
train_size_) );
}
// Data-gradient backward pass (weight gradients are not benchmarked here).
// Must follow a forward() call that filled the reserve buffer.
void backward_data(Tensor<T> y, Tensor<T> dy, Tensor<T> dhy,
Tensor<T> dcy, Tensor<T> hx, Tensor<T> cx,
Tensor<T> dx, Tensor<T> dhx, Tensor<T> dcx) {
CHECK_CUDNN_ERROR( cudnnRNNBackwardData(cudnn_handle,
rnn_desc_.desc(),
time_steps_,
yDescArray_.ptr(),
(void *)y.begin(),
dyDescArray_.ptr(),
(void *)dy.begin(),
dhy_desc_.desc(),
(void *)dhy.begin(),
dcy_desc_.desc(),
(void *)dcy.begin(),
wDesc_.desc(),
(void *)weights_.begin(),
hx_desc_.desc(),
(void *)hx.begin(),
cx_desc_.desc(),
(void *)cx.begin(),
dxDescArray_.ptr(),
(void *)dx.begin(),
dhx_desc_.desc(),
(void *)dhx.begin(),
dcx_desc_.desc(),
(void *)dcx.begin(),
(void *)workspace_.begin(),
workspace_size_,
(void *)trainspace_.begin(),
train_size_) );
}
};
// Times one forward pass (and, unless `inference` is set, one backward-data
// pass) of the requested RNN configuration. Returns (forward_us, backward_us);
// backward is 0 in inference mode. Each phase does an untimed warm-up run,
// and device synchronization brackets the timed region so wall-clock time
// covers the GPU work. numRepeats is 1, so the average is a single run.
template <typename T>
std::tuple<int, int> time_rnn(int hidden_size,
int batch_size,
int time_steps,
const std::string& type,
int inference) {
cudnnRNN<T> rnn(hidden_size, batch_size, time_steps, type);
// Random inputs, states, and gradient seeds for the benchmark.
auto x = rand<T>({hidden_size, batch_size * time_steps}, curand_gen);
auto y = rand<T>({hidden_size, batch_size * time_steps}, curand_gen);
auto dx = rand<T>({hidden_size, batch_size * time_steps}, curand_gen);
auto dy = rand<T>({hidden_size, batch_size * time_steps}, curand_gen);
auto hx = rand<T>({hidden_size, batch_size}, curand_gen);
auto hy = rand<T>({hidden_size, batch_size}, curand_gen);
auto cx = rand<T>({hidden_size, batch_size}, curand_gen);
auto cy = rand<T>({hidden_size, batch_size}, curand_gen);
auto dhx = rand<T>({hidden_size, batch_size}, curand_gen);
auto dhy = rand<T>({hidden_size, batch_size}, curand_gen);
auto dcx = rand<T>({hidden_size, batch_size}, curand_gen);
auto dcy = rand<T>({hidden_size, batch_size}, curand_gen);
int numRepeats = 1;
//Warm up
rnn.forward(x, hx, cx, y, hy, cy);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < numRepeats; ++i) {
rnn.forward(x, hx, cx, y, hy, cy);
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto forward_time = std::chrono::duration<double, std::micro>(end - start).count() / numRepeats;
int backward_time = 0;
if (!inference) {
//Warm up
rnn.backward_data(y, dy, dhy, dcy,
hx, cx, dx, dhx, dcx);
cudaDeviceSynchronize();
start = std::chrono::steady_clock::now();
for (int i = 0; i < numRepeats; ++i) {
rnn.backward_data(y, dy, dhy, dcy,
hx, cx, dx, dhx, dcx);
}
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
backward_time = std::chrono::duration<double, std::micro>(end - start).count() / numRepeats;
}
return std::make_tuple(static_cast<int>(forward_time),
static_cast<int>(backward_time));
}
int main(int argc, char **argv) {
    // Force lazy CUDA context creation now so it is not charged to the timings below.
    cudaFree(0);
    CHECK_CUDNN_ERROR( cudnnCreate(&cudnn_handle) );
    // Fixed seed so every run benchmarks identical pseudo-random inputs.
    // NOTE(review): curand return codes are not checked -- consider a CHECK macro.
    curandCreateGenerator(&curand_gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(curand_gen, 123ULL);
    // argv[1] == "inference" selects forward-only benchmarking.
    int inference = 0;
    if (argc > 1) {
        std::string inf = "inference";
        inference = argv[1] == inf ? 1 : 0;
    }
    // Optional single custom problem: argv[3..6] = hidden, batch, timesteps, cell type.
    std::vector<std::tuple<int, int, int, std::string>> dataset;
    if (argc > 3) {
        assert (argc == 7);
        dataset.push_back(
            std::make_tuple(atoi(argv[3]), atoi(argv[4]), atoi(argv[5]), argv[6])
        );
    }
    // Default precision depends on the cuDNN version: int8 inference and
    // half-precision training require cuDNN >= 6.
#if CUDNN_MAJOR >= 6
    std::string precision;
    if (inference)
        precision = "int8";
    else
        precision = "half";
#else
    std::string precision = "float";
#endif
    if (argc > 2) {
        precision = argv[2];
    }
    if (inference) {
        std::cout << std::setw(45) << "Running inference benchmark " << std::endl;
    } else {
        std::cout << std::setw(45) << "Running training benchmark " << std::endl;
    }
    // Table header for the per-problem results printed in the loop below.
    std::cout << std::setw(30) << "Times" << std::endl;
    std::cout << std::setfill('-') << std::setw(88) << "-" << std::endl;
    std::cout << std::setfill(' ');
    std::cout << " type hidden N timesteps precision fwd_time (usec) ";
    if (!inference)
        std::cout << "bwd_time (usec)";
    std::cout << std::endl;
    // Benchmark either the user-supplied problem or the built-in problem set.
    for (const auto &problem : (dataset.empty() ? (inference ? inference_server_set : training_set) : dataset)) {
        int hidden_state, batch_size, time_steps;
        std::string type;
        std::tie(hidden_state, batch_size, time_steps, type) = problem;
        std::cout << std::setw(8) << type;
        std::cout << std::setw(8) << hidden_state;
        std::cout << std::setw(8) << batch_size;
        std::cout << std::setw(8) << time_steps;
        std::cout << std::setw(14) << precision;
        int fwd_time, bwd_time;
        // Pre-built error message reused by every unsupported-precision branch.
        std::stringstream ss;
        ss << "Unsupported precision requested. Precision: " << precision << " Inference: " << inference;
#if CUDNN_MAJOR >= 6
        // uint16_t / uint8_t act as storage stand-ins for cuDNN half / int8 types.
        if (inference) {
            if (precision == "float") {
                std::tie(fwd_time, bwd_time) = time_rnn<float>(hidden_state,
                                                               batch_size,
                                                               time_steps,
                                                               type,
                                                               inference);
            } else if (precision == "half") {
                std::tie(fwd_time, bwd_time) = time_rnn<uint16_t>(hidden_state,
                                                                  batch_size,
                                                                  time_steps,
                                                                  type,
                                                                  inference);
            } else if (precision == "int8") {
                std::tie(fwd_time, bwd_time) = time_rnn<uint8_t>(hidden_state,
                                                                 batch_size,
                                                                 time_steps,
                                                                 type,
                                                                 inference);
            } else {
                throw std::runtime_error(ss.str());
            }
        } else {
            // Training path: int8 is not supported, only float and half.
            if (precision == "float") {
                std::tie(fwd_time, bwd_time) = time_rnn<float>(hidden_state,
                                                               batch_size,
                                                               time_steps,
                                                               type,
                                                               inference);
            } else if (precision == "half") {
                std::tie(fwd_time, bwd_time) = time_rnn<uint16_t>(hidden_state,
                                                                  batch_size,
                                                                  time_steps,
                                                                  type,
                                                                  inference);
            } else {
                throw std::runtime_error(ss.str());
            }
        }
#else
        if (precision != "float")
            throw std::runtime_error(ss.str());
        std::tie(fwd_time, bwd_time) = time_rnn<float>(hidden_state,
                                                       batch_size,
                                                       time_steps,
                                                       type,
                                                       inference);
#endif
        std::cout << std::setw(18) << fwd_time;
        if (!inference)
            std::cout << std::setw(18) << bwd_time;
        std::cout << std::endl;
    }
    // Release library handles before exit.
    cudnnDestroy(cudnn_handle);
    curandDestroyGenerator(curand_gen);
    return 0;
}
|
7a5f597955b80986f6dd0b57cbddbda048cb69f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
\file gridding.cu
\brief Implementation of the S2G computation.
\author Iakovidis Ioannis
\date 2021-06-14
*/
#include "matrix_indexing.hpp"
#include "gridding.cuh"
#include "utils_hip.cuh"
extern int Blocks;
extern int Threads;
extern hipStream_t streamRep;
//#define MIXED_PREC_SUM
#define idx2(i, j, d) (SUB2IND2D(i, j, d))
#define idx4(i, j, k, l, m, n, o) (SUB2IND4D(i, j, k, l, m, n, o))
#define y(i, j) y[i * nDim + j]
#ifdef MIXED_PREC_SUM
/*! Scatter-to-grid (S2G) dispatcher, mixed-precision variant: values are
    accumulated into a double-precision staging grid and then copied back to
    the caller's grid, to reduce round-off from many atomic adds.
    \param VGrid    output grid, size (nGridDim + 2)^d * m
    \param y        point coordinates in grid units
    \param VScat    scattered values per point
    \param nGridDim interior grid size per dimension (kernels use nGridDim + 2)
    \param n        number of points
    \param d        embedding dimension (1, 2 or 3)
    \param m        number of value vectors per point */
template <class dataPoint>
void s2g(dataPoint *VGrid, dataPoint *y, dataPoint *VScat, uint32_t nGridDim,
         uint32_t n, uint32_t d, uint32_t m) {
  double *VGridD;
  // NOTE(review): pow() computes in double; assumed exact for realistic grid
  // sizes -- TODO confirm for very large nGridDim / d.
  int szV = pow(nGridDim + 2, d) * m;
  gpuErrchk(hipMallocManaged(&VGridD, szV * sizeof(double)));
  // Zero the staging grid before accumulation.
  hipLaunchKernelGGL(( initKernel), dim3(Blocks), dim3(Threads),0,streamRep, VGridD, (double)0, szV);
  switch (d) {
  case 1:
    hipLaunchKernelGGL(( s2g1d), dim3(Blocks), dim3(Threads),0,streamRep, VGridD, y, VScat, nGridDim + 2, n, d, m);
    break;
  case 2:
    hipLaunchKernelGGL(( s2g2d), dim3(Blocks), dim3(Threads),0,streamRep, VGridD, y, VScat, nGridDim + 2, n, d, m);
    break;
  case 3:
    hipLaunchKernelGGL(( s2g3d), dim3(Blocks), dim3(Threads),0,streamRep, VGridD, y, VScat, nGridDim + 2, n, d, m);
    break;
  }
  // Downcast the double accumulator back to the caller's precision.
  hipLaunchKernelGGL(( copymixed), dim3(Blocks), dim3(Threads),0,streamRep, VGrid, VGridD, szV);
  hipFree(VGridD);
}
#else
/*! Scatter-to-grid (S2G) dispatcher: selects the 1-D, 2-D or 3-D kernel and
    accumulates directly into VGrid in the caller's precision.
    Dimensions other than 1..3 are silently ignored (no default case). */
template <class dataPoint>
void s2g(dataPoint *VGrid, dataPoint *y, dataPoint *VScat, uint32_t nGridDim,
         uint32_t n, uint32_t d, uint32_t m) {
  switch (d) {
  case 1:
    hipLaunchKernelGGL(( s2g1d), dim3(Blocks), dim3(Threads),0,streamRep, VGrid, y, VScat, nGridDim + 2, n, d, m);
    break;
  case 2:
    hipLaunchKernelGGL(( s2g2d), dim3(Blocks), dim3(Threads),0,streamRep, VGrid, y, VScat, nGridDim + 2, n, d, m);
    break;
  case 3:
    hipLaunchKernelGGL(( s2g3d), dim3(Blocks), dim3(Threads),0,streamRep, VGrid, y, VScat, nGridDim + 2, n, d, m);
    break;
  }
}
#endif
/*! Grid-to-scatter (G2S) dispatcher: interpolates grid values PhiGrid back to
    the scattered points y, writing per-point results to PhiScat.
    Mirrors s2g's dimension dispatch (1-D, 2-D, 3-D only). */
template <class dataPoint>
void g2s(dataPoint *PhiScat, dataPoint *PhiGrid, dataPoint *y,
         uint32_t nGridDim, uint32_t n, uint32_t d, uint32_t m) {
  switch (d) {
  case 1:
    hipLaunchKernelGGL(( g2s1d), dim3(Blocks), dim3(Threads),0,streamRep, PhiScat, PhiGrid, y, nGridDim + 2, n, d, m);
    break;
  case 2:
    hipLaunchKernelGGL(( g2s2d), dim3(Blocks), dim3(Threads),0,streamRep, PhiScat, PhiGrid, y, nGridDim + 2, n, d, m);
    break;
  case 3:
    hipLaunchKernelGGL(( g2s3d), dim3(Blocks), dim3(Threads),0,streamRep, PhiScat, PhiGrid, y, nGridDim + 2, n, d, m);
    break;
  }
}
/*! S2G accumulation kernel, d = 1. Grid-stride loop over points; each point
    contributes to its 4 neighbouring grid nodes with weights produced by the
    l1/l2 macros (defined in gridding.cuh). atomicAdd is required because
    multiple points hit the same node.
    Layout: V is (ng x nVec); q is column-major per vector: q[nPts*j + TID]. */
template <class dataPoint, class sumType>
__global__ void s2g1d(sumType *__restrict__ V, const dataPoint *const y,
                      const dataPoint *const q, const uint32_t ng,
                      const uint32_t nPts, const uint32_t nDim,
                      const uint32_t nVec) {
  dataPoint v1[4];
  register uint32_t f1;
  register dataPoint d;
  for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts;
       TID += gridDim.x * blockDim.x) {
    // f1 = left grid node of the point, d = fractional offset inside the cell.
    f1 = (uint32_t)floor(y(TID, 0));
    d = y(TID, 0) - (dataPoint)f1;
    v1[0] = l2(1 + d);
    v1[1] = l1(d);
    v1[2] = l1(1 - d);
    v1[3] = l2(2 - d);
    for (int j = 0; j < nVec; j++) {
      dataPoint qv = q[nPts * j + TID];
      for (int idx1 = 0; idx1 < 4; idx1++) {
        atomicAdd(&V[f1 + idx1 + j * ng], (sumType)qv * v1[idx1]);
      }
    }
  }
}
/*! G2S interpolation kernel, d = 1. For each point, gathers the 4
    neighbouring grid values weighted by l1/l2 and writes the result to
    Phi[TID + j*nPts]. No atomics needed: each thread owns its outputs. */
template <class dataPoint>
__global__ void g2s1d(volatile dataPoint *__restrict__ Phi,
                      const dataPoint *const V, const dataPoint *const y,
                      const uint32_t ng, const uint32_t nPts,
                      const uint32_t nDim, const uint32_t nVec) {
  dataPoint v1[4];
  uint32_t f1;
  dataPoint d;
  for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts;
       TID += gridDim.x * blockDim.x) {
    // Same node/weight computation as s2g1d.
    f1 = (uint32_t)floor(y(TID, 0));
    d = y(TID, 0) - (dataPoint)f1;
    v1[0] = l2(1 + d);
    v1[1] = l1(d);
    v1[2] = l1(1 - d);
    v1[3] = l2(2 - d);
    for (uint32_t j = 0; j < nVec; j++) {
      dataPoint accum = 0;
      for (uint32_t idx1 = 0; idx1 < 4; idx1++) {
        accum += V[f1 + idx1 + j * ng] * v1[idx1];
      }
      Phi[TID + j * nPts] = accum;
    }
  }
}
/*! S2G accumulation kernel, d = 2. Each point scatters into a 4x4 patch of
    grid nodes (separable weights v1 x v2).
    NOTE(review): q is indexed row-major here (q[j + nVec*TID]) while s2g1d
    uses column-major (q[nPts*j + TID]) -- presumably the caller prepares a
    different layout per dimension; TODO confirm. */
template <class dataPoint, class sumType>
__global__ void s2g2d(sumType *__restrict__ V, const dataPoint *const y,
                      const dataPoint *const q, const uint32_t ng,
                      const uint32_t nPts, const uint32_t nDim,
                      const uint32_t nVec) {
  dataPoint v1[4];
  dataPoint v2[4];
  register uint32_t f1;
  register uint32_t f2;
  register dataPoint d;
  for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts;
       TID += gridDim.x * blockDim.x) {
    // Per-dimension node index and 4-tap weights.
    f1 = (uint32_t)floorf(y(TID, 0));
    d = y(TID, 0) - (dataPoint)f1;
    v1[0] = l2(1 + d);
    v1[1] = l1(d);
    v1[2] = l1(1 - d);
    v1[3] = l2(2 - d);
    f2 = (uint32_t)floorf(y(TID, 1));
    d = y(TID, 1) - (dataPoint)f2;
    v2[0] = l2(1 + d);
    v2[1] = l1(d);
    v2[2] = l1(1 - d);
    v2[3] = l2(2 - d);
    for (int j = 0; j < nVec; j++) {
      for (int idx2 = 0; idx2 < 4; idx2++) {
        for (int idx1 = 0; idx1 < 4; idx1++) {
          atomicAdd(&V[f1 + idx1 + (f2 + idx2) * ng + j * ng * ng],
                    (sumType)((q[j + nVec * TID] * v2[idx2]) * v1[idx1]));
        }
      }
    }
  }
}
/*! G2S interpolation kernel, d = 2. Gathers a 4x4 patch of grid values with
    separable weights and writes one value per (point, vector). */
template <class dataPoint>
__global__ void g2s2d(volatile dataPoint *__restrict__ Phi,
                      const dataPoint *const V, const dataPoint *const y,
                      const uint32_t ng, const uint32_t nPts,
                      const uint32_t nDim, const uint32_t nVec) {
  dataPoint v1[4];
  dataPoint v2[4];
  register uint32_t f1;
  register uint32_t f2;
  register dataPoint d;
  // Declared once, reset to 0 per output vector inside the loop.
  register dataPoint accum = 0;
  for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts;
       TID += gridDim.x * blockDim.x) {
    f1 = (uint32_t)floor(y(TID, 0));
    d = y(TID, 0) - (dataPoint)f1;
    v1[0] = l2(1 + d);
    v1[1] = l1(d);
    v1[2] = l1(1 - d);
    v1[3] = l2(2 - d);
    f2 = (uint32_t)floor(y(TID, 1));
    d = y(TID, 1) - (dataPoint)f2;
    v2[0] = l2(1 + d);
    v2[1] = l1(d);
    v2[2] = l1(1 - d);
    v2[3] = l2(2 - d);
    for (int j = 0; j < nVec; j++) {
      accum = 0;
      for (int idx2 = 0; idx2 < 4; idx2++) {
        dataPoint qv = v2[idx2];
        for (int idx1 = 0; idx1 < 4; idx1++) {
          accum +=
              V[f1 + idx1 + (f2 + idx2) * ng + j * ng * ng] * qv * v1[idx1];
        }
      }
      Phi[TID + j * nPts] = accum;
    }
  }
}
/*! S2G accumulation kernel, d = 3. Each point scatters into a 4x4x4 patch
    of grid nodes with separable weights v1 x v2 x v3.
    NOTE(review): the vector loop is hard-coded to 4 (j < 4, q[j + 4*TID])
    instead of using nVec -- assumes nVec == 4 in the 3-D case (m = d + 1);
    TODO confirm against the callers. */
template <class dataPoint, class sumType>
__global__ void s2g3d(sumType *__restrict__ V, dataPoint *y, dataPoint *q,
                      uint32_t ng, uint32_t nPts, uint32_t nDim,
                      uint32_t nVec) {
  dataPoint v1[4];
  dataPoint v2[4];
  dataPoint v3[4];
  register uint32_t f1, f2, f3;
  register dataPoint d;
  register dataPoint y1, y2, y3;
  for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts;
       TID += gridDim.x * blockDim.x) {
    y1 = y(TID, 0);
    y2 = y(TID, 1);
    y3 = y(TID, 2);
    f1 = (uint32_t)floor(y1);
    d = y1 - (dataPoint)f1;
    v1[0] = l2(1 + d);
    v1[1] = l1(d);
    v1[2] = l1(1 - d);
    v1[3] = l2(2 - d);
    f2 = (uint32_t)floor(y2);
    d = y2 - (dataPoint)f2;
    v2[0] = l2(1 + d);
    v2[1] = l1(d);
    v2[2] = l1(1 - d);
    v2[3] = l2(2 - d);
    f3 = (uint32_t)floor(y3);
    d = y3 - (dataPoint)f3;
    v3[0] = l2(1 + d);
    v3[1] = l1(d);
    v3[2] = l1(1 - d);
    v3[3] = l2(2 - d);
    for (int j = 0; j < 4; j++) {
      for (int idx3 = 0; idx3 < 4; idx3++) {
        for (int idx2 = 0; idx2 < 4; idx2++) {
          dataPoint qv = q[j + 4 * TID] * v2[idx2] * v3[idx3];
          for (int idx1 = 0; idx1 < 4; idx1++) {
            atomicAdd(&V[idx4(f1 + idx1, f2 + idx2, f3 + idx3, j, ng, ng, ng)],
                      (sumType)qv * v1[idx1]);
          }
        }
      }
    }
  }
}
/*! G2S interpolation kernel, d = 3. Gathers a 4x4x4 patch of grid values
    with separable weights and writes one value per (point, vector).
    Fix: the store to Phi was inside the idx3 loop (misplaced brace), which
    wrote the partial sum to volatile global memory 4 times per output; the
    final value was correct, so hoisting the store after the idx3 loop
    preserves behavior (and matches g2s2d). */
template <class dataPoint>
__global__ void g2s3d(volatile dataPoint *__restrict__ Phi,
                      const dataPoint *const V, const dataPoint *const y,
                      const uint32_t ng, const uint32_t nPts,
                      const uint32_t nDim, const uint32_t nVec) {
  dataPoint v1[4];
  dataPoint v2[4];
  dataPoint v3[4];
  register uint32_t f1;
  register uint32_t f2;
  register uint32_t f3;
  register dataPoint d;
  for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts;
       TID += gridDim.x * blockDim.x) {
    // Per-dimension node index and 4-tap weights.
    f1 = (uint32_t)floor(y(TID, 0));
    d = y(TID, 0) - (dataPoint)f1;
    v1[0] = l2(1 + d);
    v1[1] = l1(d);
    v1[2] = l1(1 - d);
    v1[3] = l2(2 - d);
    f2 = (uint32_t)floor(y(TID, 1));
    d = y(TID, 1) - (dataPoint)f2;
    v2[0] = l2(1 + d);
    v2[1] = l1(d);
    v2[2] = l1(1 - d);
    v2[3] = l2(2 - d);
    f3 = (uint32_t)floor(y(TID, 2));
    d = y(TID, 2) - (dataPoint)f3;
    v3[0] = l2(1 + d);
    v3[1] = l1(d);
    v3[2] = l1(1 - d);
    v3[3] = l2(2 - d);
    for (int j = 0; j < nVec; j++) {
      dataPoint accum = 0;
      for (int idx3 = 0; idx3 < 4; idx3++) {
        for (int idx2 = 0; idx2 < 4; idx2++) {
          dataPoint qv = v2[idx2] * v3[idx3];
          for (int idx1 = 0; idx1 < 4; idx1++) {
            accum += V[idx4(f1 + idx1, f2 + idx2, f3 + idx3, j, ng, ng, ng)] *
                     qv * v1[idx1];
          }
        }
      }
      // Single store per output vector, once the full 4x4x4 sum is ready.
      Phi[TID + j * nPts] = accum;
    }
  }
}
template void s2g(float *VGrid, float *y, float *VScat, uint32_t nGridDim,
uint32_t n, uint32_t d, uint32_t m);
template void g2s(float *PhiScat, float *PhiGrid, float *y, uint32_t nGridDim,
uint32_t n, uint32_t d, uint32_t m);
template void s2g(double *VGrid, double *y, double *VScat, uint32_t nGridDim,
uint32_t n, uint32_t d, uint32_t m);
template void g2s(double *PhiScat, double *PhiGrid, double *y,
uint32_t nGridDim, uint32_t n, uint32_t d, uint32_t m);
| 7a5f597955b80986f6dd0b57cbddbda048cb69f1.cu | /*!
\file gridding.cu
\brief Implementation of the S2G computation.
\author Iakovidis Ioannis
\date 2021-06-14
*/
#include "matrix_indexing.hpp"
#include "gridding.cuh"
#include "utils_cuda.cuh"
extern int Blocks;
extern int Threads;
extern cudaStream_t streamRep;
//#define MIXED_PREC_SUM
#define idx2(i, j, d) (SUB2IND2D(i, j, d))
#define idx4(i, j, k, l, m, n, o) (SUB2IND4D(i, j, k, l, m, n, o))
#define y(i, j) y[i * nDim + j]
#ifdef MIXED_PREC_SUM
template <class dataPoint>
void s2g(dataPoint *VGrid, dataPoint *y, dataPoint *VScat, uint32_t nGridDim,
uint32_t n, uint32_t d, uint32_t m) {
double *VGridD;
int szV = pow(nGridDim + 2, d) * m;
gpuErrchk(cudaMallocManaged(&VGridD, szV * sizeof(double)));
initKernel<<<Blocks, Threads,0,streamRep>>>(VGridD, (double)0, szV);
switch (d) {
case 1:
s2g1d<<<Blocks, Threads,0,streamRep>>>(VGridD, y, VScat, nGridDim + 2, n, d, m);
break;
case 2:
s2g2d<<<Blocks, Threads,0,streamRep>>>(VGridD, y, VScat, nGridDim + 2, n, d, m);
break;
case 3:
s2g3d<<<Blocks, Threads,0,streamRep>>>(VGridD, y, VScat, nGridDim + 2, n, d, m);
break;
}
copymixed<<<Blocks, Threads,0,streamRep>>>(VGrid, VGridD, szV);
cudaFree(VGridD);
}
#else
template <class dataPoint>
void s2g(dataPoint *VGrid, dataPoint *y, dataPoint *VScat, uint32_t nGridDim,
uint32_t n, uint32_t d, uint32_t m) {
switch (d) {
case 1:
s2g1d<<<Blocks, Threads,0,streamRep>>>(VGrid, y, VScat, nGridDim + 2, n, d, m);
break;
case 2:
s2g2d<<<Blocks, Threads,0,streamRep>>>(VGrid, y, VScat, nGridDim + 2, n, d, m);
break;
case 3:
s2g3d<<<Blocks, Threads,0,streamRep>>>(VGrid, y, VScat, nGridDim + 2, n, d, m);
break;
}
}
#endif
template <class dataPoint>
void g2s(dataPoint *PhiScat, dataPoint *PhiGrid, dataPoint *y,
uint32_t nGridDim, uint32_t n, uint32_t d, uint32_t m) {
switch (d) {
case 1:
g2s1d<<<Blocks, Threads,0,streamRep>>>(PhiScat, PhiGrid, y, nGridDim + 2, n, d, m);
break;
case 2:
g2s2d<<<Blocks, Threads,0,streamRep>>>(PhiScat, PhiGrid, y, nGridDim + 2, n, d, m);
break;
case 3:
g2s3d<<<Blocks, Threads,0,streamRep>>>(PhiScat, PhiGrid, y, nGridDim + 2, n, d, m);
break;
}
}
template <class dataPoint, class sumType>
__global__ void s2g1d(sumType *__restrict__ V, const dataPoint *const y,
const dataPoint *const q, const uint32_t ng,
const uint32_t nPts, const uint32_t nDim,
const uint32_t nVec) {
dataPoint v1[4];
register uint32_t f1;
register dataPoint d;
for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts;
TID += gridDim.x * blockDim.x) {
f1 = (uint32_t)floor(y(TID, 0));
d = y(TID, 0) - (dataPoint)f1;
v1[0] = l2(1 + d);
v1[1] = l1(d);
v1[2] = l1(1 - d);
v1[3] = l2(2 - d);
for (int j = 0; j < nVec; j++) {
dataPoint qv = q[nPts * j + TID];
for (int idx1 = 0; idx1 < 4; idx1++) {
atomicAdd(&V[f1 + idx1 + j * ng], (sumType)qv * v1[idx1]);
}
}
}
}
template <class dataPoint>
__global__ void g2s1d(volatile dataPoint *__restrict__ Phi,
const dataPoint *const V, const dataPoint *const y,
const uint32_t ng, const uint32_t nPts,
const uint32_t nDim, const uint32_t nVec) {
dataPoint v1[4];
uint32_t f1;
dataPoint d;
for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts;
TID += gridDim.x * blockDim.x) {
f1 = (uint32_t)floor(y(TID, 0));
d = y(TID, 0) - (dataPoint)f1;
v1[0] = l2(1 + d);
v1[1] = l1(d);
v1[2] = l1(1 - d);
v1[3] = l2(2 - d);
for (uint32_t j = 0; j < nVec; j++) {
dataPoint accum = 0;
for (uint32_t idx1 = 0; idx1 < 4; idx1++) {
accum += V[f1 + idx1 + j * ng] * v1[idx1];
}
Phi[TID + j * nPts] = accum;
}
}
}
template <class dataPoint, class sumType>
__global__ void s2g2d(sumType *__restrict__ V, const dataPoint *const y,
const dataPoint *const q, const uint32_t ng,
const uint32_t nPts, const uint32_t nDim,
const uint32_t nVec) {
dataPoint v1[4];
dataPoint v2[4];
register uint32_t f1;
register uint32_t f2;
register dataPoint d;
for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts;
TID += gridDim.x * blockDim.x) {
f1 = (uint32_t)floorf(y(TID, 0));
d = y(TID, 0) - (dataPoint)f1;
v1[0] = l2(1 + d);
v1[1] = l1(d);
v1[2] = l1(1 - d);
v1[3] = l2(2 - d);
f2 = (uint32_t)floorf(y(TID, 1));
d = y(TID, 1) - (dataPoint)f2;
v2[0] = l2(1 + d);
v2[1] = l1(d);
v2[2] = l1(1 - d);
v2[3] = l2(2 - d);
for (int j = 0; j < nVec; j++) {
for (int idx2 = 0; idx2 < 4; idx2++) {
for (int idx1 = 0; idx1 < 4; idx1++) {
atomicAdd(&V[f1 + idx1 + (f2 + idx2) * ng + j * ng * ng],
(sumType)((q[j + nVec * TID] * v2[idx2]) * v1[idx1]));
}
}
}
}
}
template <class dataPoint>
__global__ void g2s2d(volatile dataPoint *__restrict__ Phi,
const dataPoint *const V, const dataPoint *const y,
const uint32_t ng, const uint32_t nPts,
const uint32_t nDim, const uint32_t nVec) {
dataPoint v1[4];
dataPoint v2[4];
register uint32_t f1;
register uint32_t f2;
register dataPoint d;
register dataPoint accum = 0;
for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts;
TID += gridDim.x * blockDim.x) {
f1 = (uint32_t)floor(y(TID, 0));
d = y(TID, 0) - (dataPoint)f1;
v1[0] = l2(1 + d);
v1[1] = l1(d);
v1[2] = l1(1 - d);
v1[3] = l2(2 - d);
f2 = (uint32_t)floor(y(TID, 1));
d = y(TID, 1) - (dataPoint)f2;
v2[0] = l2(1 + d);
v2[1] = l1(d);
v2[2] = l1(1 - d);
v2[3] = l2(2 - d);
for (int j = 0; j < nVec; j++) {
accum = 0;
for (int idx2 = 0; idx2 < 4; idx2++) {
dataPoint qv = v2[idx2];
for (int idx1 = 0; idx1 < 4; idx1++) {
accum +=
V[f1 + idx1 + (f2 + idx2) * ng + j * ng * ng] * qv * v1[idx1];
}
}
Phi[TID + j * nPts] = accum;
}
}
}
template <class dataPoint, class sumType>
__global__ void s2g3d(sumType *__restrict__ V, dataPoint *y, dataPoint *q,
uint32_t ng, uint32_t nPts, uint32_t nDim,
uint32_t nVec) {
dataPoint v1[4];
dataPoint v2[4];
dataPoint v3[4];
register uint32_t f1, f2, f3;
register dataPoint d;
register dataPoint y1, y2, y3;
for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts;
TID += gridDim.x * blockDim.x) {
y1 = y(TID, 0);
y2 = y(TID, 1);
y3 = y(TID, 2);
f1 = (uint32_t)floor(y1);
d = y1 - (dataPoint)f1;
v1[0] = l2(1 + d);
v1[1] = l1(d);
v1[2] = l1(1 - d);
v1[3] = l2(2 - d);
f2 = (uint32_t)floor(y2);
d = y2 - (dataPoint)f2;
v2[0] = l2(1 + d);
v2[1] = l1(d);
v2[2] = l1(1 - d);
v2[3] = l2(2 - d);
f3 = (uint32_t)floor(y3);
d = y3 - (dataPoint)f3;
v3[0] = l2(1 + d);
v3[1] = l1(d);
v3[2] = l1(1 - d);
v3[3] = l2(2 - d);
for (int j = 0; j < 4; j++) {
for (int idx3 = 0; idx3 < 4; idx3++) {
for (int idx2 = 0; idx2 < 4; idx2++) {
dataPoint qv = q[j + 4 * TID] * v2[idx2] * v3[idx3];
for (int idx1 = 0; idx1 < 4; idx1++) {
atomicAdd(&V[idx4(f1 + idx1, f2 + idx2, f3 + idx3, j, ng, ng, ng)],
(sumType)qv * v1[idx1]);
}
}
}
}
}
}
/*! G2S interpolation kernel, d = 3. Gathers a 4x4x4 patch of grid values
    with separable weights and writes one value per (point, vector).
    Fix: the store to Phi was inside the idx3 loop (misplaced brace), which
    wrote the partial sum to volatile global memory 4 times per output; the
    final value was correct, so hoisting the store after the idx3 loop
    preserves behavior (and matches g2s2d). */
template <class dataPoint>
__global__ void g2s3d(volatile dataPoint *__restrict__ Phi,
                      const dataPoint *const V, const dataPoint *const y,
                      const uint32_t ng, const uint32_t nPts,
                      const uint32_t nDim, const uint32_t nVec) {
  dataPoint v1[4];
  dataPoint v2[4];
  dataPoint v3[4];
  register uint32_t f1;
  register uint32_t f2;
  register uint32_t f3;
  register dataPoint d;
  for (register int TID = threadIdx.x + blockIdx.x * blockDim.x; TID < nPts;
       TID += gridDim.x * blockDim.x) {
    // Per-dimension node index and 4-tap weights.
    f1 = (uint32_t)floor(y(TID, 0));
    d = y(TID, 0) - (dataPoint)f1;
    v1[0] = l2(1 + d);
    v1[1] = l1(d);
    v1[2] = l1(1 - d);
    v1[3] = l2(2 - d);
    f2 = (uint32_t)floor(y(TID, 1));
    d = y(TID, 1) - (dataPoint)f2;
    v2[0] = l2(1 + d);
    v2[1] = l1(d);
    v2[2] = l1(1 - d);
    v2[3] = l2(2 - d);
    f3 = (uint32_t)floor(y(TID, 2));
    d = y(TID, 2) - (dataPoint)f3;
    v3[0] = l2(1 + d);
    v3[1] = l1(d);
    v3[2] = l1(1 - d);
    v3[3] = l2(2 - d);
    for (int j = 0; j < nVec; j++) {
      dataPoint accum = 0;
      for (int idx3 = 0; idx3 < 4; idx3++) {
        for (int idx2 = 0; idx2 < 4; idx2++) {
          dataPoint qv = v2[idx2] * v3[idx3];
          for (int idx1 = 0; idx1 < 4; idx1++) {
            accum += V[idx4(f1 + idx1, f2 + idx2, f3 + idx3, j, ng, ng, ng)] *
                     qv * v1[idx1];
          }
        }
      }
      // Single store per output vector, once the full 4x4x4 sum is ready.
      Phi[TID + j * nPts] = accum;
    }
  }
}
template void s2g(float *VGrid, float *y, float *VScat, uint32_t nGridDim,
uint32_t n, uint32_t d, uint32_t m);
template void g2s(float *PhiScat, float *PhiGrid, float *y, uint32_t nGridDim,
uint32_t n, uint32_t d, uint32_t m);
template void s2g(double *VGrid, double *y, double *VScat, uint32_t nGridDim,
uint32_t n, uint32_t d, uint32_t m);
template void g2s(double *PhiScat, double *PhiGrid, double *y,
uint32_t nGridDim, uint32_t n, uint32_t d, uint32_t m);
|
86a6cc700fef8ad167c2035920d53e1fc81bd4f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "split_pairwise.cuh"
#include "split_properties_helpers.cuh"
#include <hip/hip_cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel {
    // Adds `sum` to element (row, col) of a symmetric matrix stored as a
    // packed lower triangle: element (r, c), r >= c, lives at r*(r+1)/2 + c.
    // NOTE(review): plain += (no atomics) -- callers must guarantee that each
    // (row, col) cell is updated by a single thread at a time.
    __forceinline__ __device__ void AddToMatrices(int row, int col, float sum,
                                                  float* matrix) {
        const int ind = col < row ? (row * (row + 1) >> 1) + col : (col * (col + 1) >> 1) + row;
        matrix[ind] += sum;
    }
template <int BLOCK_SIZE, int PartCount>
__global__ void MakePairwiseDerivatives(const float* pairwiseHistogram,
int matrixOffset,
int matCount,
int histLineSize /* 4 * totalBinFeatureCount */,
float* linearSystem) {
const int logicalWarpSize = PartCount > 32 ? 32 : PartCount;
const int matricesPerBlock = BLOCK_SIZE / logicalWarpSize;
int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / logicalWarpSize;
int localTid = threadIdx.x & (logicalWarpSize - 1);
const int inBlockOffset = threadIdx.x / logicalWarpSize;
if (matrixIdx >= matCount)
return;
{
const size_t rowSize = PartCount * 2;
const size_t linearSystemSize = (rowSize + rowSize * (rowSize + 1) / 2);
linearSystem += matrixIdx * linearSystemSize;
}
pairwiseHistogram += (matrixOffset + matrixIdx) * 4;
__shared__ float lineData[BLOCK_SIZE * 2];
const int N = PartCount / logicalWarpSize;
const int logicalWarpId = threadIdx.x / logicalWarpSize;
const int logicalWarpCount = BLOCK_SIZE / logicalWarpSize;
thread_block_tile<logicalWarpSize> groupTile = tiled_partition<logicalWarpSize>(this_thread_block());
float sum0[N];
float sum1[N];
for (int i = 0; i < N; ++i) {
sum0[i] = 0;
sum1[i] = 0;
}
#pragma unroll 16
for (int y = 0; y < PartCount; ++y) {
#pragma unroll
for (int i = 0; i < N; ++i) {
const int x = localTid + 32 * i;
const int partIdx = ConvertBlockToPart(x, y);
ui64 offset = ((ui64) partIdx * histLineSize * 4ULL);
float4 hist = __ldg((float4*)(pairwiseHistogram + offset));
const float w00 = max((x != y ? hist.x : 0.0f), 0.0f);
const float w01 = max(hist.y, 0.0f);
const float w10 = max(hist.z, 0.0f);
const float w11 = max((x != y ? hist.w : 0.0f), 0.0f);
// sync for row write done in reduce if we need it
const int nextRow = 2 * y;
const int nextCol = 2 * x;
sum0[i] += w00 + w10;
sum1[i] += w01 + w11;
if (x == y) {
AddToMatrices(nextRow + 1, nextRow, -(w01 + w10), linearSystem);
} else if (x < y) {
AddToMatrices(nextRow, nextCol, -w00, linearSystem);
AddToMatrices(nextRow, nextCol + 1, -w01, linearSystem);
AddToMatrices(nextRow + 1, nextCol, -w10, linearSystem);
AddToMatrices(nextRow + 1, nextCol + 1, -w11, linearSystem);
}
groupTile.sync();
}
groupTile.sync();
}
#pragma unroll 16
for (int x = 0; x < PartCount; ++x) {
#pragma unroll
for (int i = 0; i < N; ++i) {
const int y = localTid + 32 * i;
const int partIdx = ConvertBlockToPart(x, y);
ui64 offset = ((ui64) partIdx * histLineSize * 4ULL);
float4 hist = __ldg((float4*)(pairwiseHistogram + offset));
const float w00 = max((x != y ? hist.x : 0.0f), 0.0f);
const float w01 = max(hist.y, 0.0f);
const float w10 = max(hist.z, 0.0f);
const float w11 = max((x != y ? hist.w : 0.0f), 0.0f);
// sync for row write done in reduce if we need it
const int nextRow = 2 * y;
const int nextCol = 2 * x;
sum0[i] += w01 + w00;
sum1[i] += w10 + w11;
if (x > y) {
AddToMatrices(nextRow, nextCol, -w00, linearSystem);
AddToMatrices(nextRow, nextCol + 1, -w01, linearSystem);
AddToMatrices(nextRow + 1, nextCol, -w10, linearSystem);
AddToMatrices(nextRow + 1, nextCol + 1, -w11, linearSystem);
}
groupTile.sync();
}
groupTile.sync();
}
#pragma unroll
for (int i = 0; i < N; ++i) {
const int x = localTid + 32 * i;
const int nextRow = 2 * x;
linearSystem[nextRow * (nextRow + 1) / 2 + nextRow] += sum0[i];
linearSystem[(nextRow + 1) * (nextRow + 2) / 2 + nextRow + 1] += sum1[i];
}
}
    // Host dispatcher: launches MakePairwiseDerivatives with the compile-time
    // PartCount matching the runtime leaf count (must be a power of two in
    // [1, 256]); anything else is a hard failure.
    template <int BLOCK_SIZE>
    void RunMakeMatrices(const float* histogram, int partCount, int histLineSize, int firstMatrix, int matricesCount, float* linearSystem, TCudaStream stream) {
        if (matricesCount > 0) {
            // One logical warp of min(partCount, 32) lanes per matrix.
            const int numBlocks = (((size_t) matricesCount) * min(partCount, 32) + BLOCK_SIZE - 1) / BLOCK_SIZE;
    #define RUN(PartCount)\
        MakePairwiseDerivatives<BLOCK_SIZE, PartCount> << < numBlocks, BLOCK_SIZE, 0, stream >> > (histogram, firstMatrix, matricesCount, histLineSize, linearSystem);
            if (partCount == 1) {
                RUN(1)
            } else if (partCount == 2) {
                RUN(2)
            } else if (partCount == 4) {
                RUN(4)
            } else if (partCount == 8) {
                RUN(8)
            } else if (partCount == 16) {
                RUN(16)
            } else if (partCount == 32) {
                RUN(32)
            } else if (partCount == 64) {
                RUN(64)
            } else if (partCount == 128) {
                RUN(128)
            } else if (partCount == 256) {
                RUN(256)
            } else {
                Y_VERIFY(false);
            }
        }
    }
    // Public entry point: builds matricesCount pairwise linear systems,
    // starting at firstMatrix, for a tree with leavesCount leaves.
    // Note the argument reorder: this signature is (..., matricesCount,
    // histLineSize, ...) while RunMakeMatrices takes histLineSize earlier.
    void MakePairwiseDerivatives(const float* histogram, int leavesCount, int firstMatrix, int matricesCount, int histLineSize, float* linearSystem,
                                 TCudaStream stream) {
        RunMakeMatrices<256>(histogram, leavesCount, histLineSize, firstMatrix, matricesCount, linearSystem, stream);
    }
    // Fills the targets (right-hand side) and, when weights are present, the
    // diagonal of each linear system from the pointwise histograms.
    // Column layout: even columns take the leaf value, odd columns the
    // complement (partition total minus leaf).
    template <int BLOCK_SIZE>
    __global__ void MakePointwiseDerivatives(const float* pointwiseHist, ui64 pointwiseHistSize,
                                             const TPartitionStatistics* partStats,
                                             bool hasPointwiseWeights,
                                             int rowSize,
                                             int firstMatrixIdx,
                                             int matCount,
                                             float* linearSystem) {
        // Up to 32 threads cooperate on one matrix's row.
        const int lineSize = min(rowSize, 32);
        const int matricesPerBlock = BLOCK_SIZE / lineSize;
        const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / lineSize;
        // Histogram stores (weight, sum) pairs when hasPointwiseWeights, else sums only.
        pointwiseHist += (firstMatrixIdx + matrixIdx) * (hasPointwiseWeights ? 2 : 1);
        linearSystem += ((size_t)matrixIdx) * (rowSize + rowSize * (rowSize + 1) / 2);
        const int x = threadIdx.x & (lineSize - 1);
        // Targets live after the packed lower-triangular matrix.
        float* targets = linearSystem + rowSize * (rowSize + 1) / 2;
        if (matrixIdx < matCount) {
            for (int col = x; col < rowSize; col += 32) {
                const int i = col / 2;
                ui64 offset = pointwiseHistSize * i;
                if (hasPointwiseWeights) {
                    const float leafWeight = pointwiseHist[offset];
                    const float weight = (col & 1) ? partStats[i].Weight - leafWeight : leafWeight;
                    linearSystem[col * (col + 1) / 2 + col] += max(weight, 0.0f);
                }
                const float leafSum = pointwiseHist[offset + hasPointwiseWeights];
                const float sum = (col & 1) ? partStats[i].Sum - leafSum : leafSum;
                targets[col] = sum;
            }
        }
    }
    // Host dispatcher for MakePointwiseDerivatives: computes the histogram
    // stride (doubled when per-leaf weights are stored) and the launch size.
    template <int BLOCK_SIZE>
    void RunMakePointwiseDerivatives(const float* pointwiseHist, int binFeatureCount,
                                     const TPartitionStatistics* partStats,
                                     bool hasPointwiseWeights,
                                     int rowSize,
                                     int firstMatrixIdx,
                                     int matricesCount,
                                     float* linearSystem,
                                     TCudaStream stream
    ) {
        if (matricesCount > 0) {
            const ui32 pointwiseHistSize = binFeatureCount * (hasPointwiseWeights ? 2 : 1);
            const int lineSize = min(32, rowSize);
            const int numBlocks = (((size_t) matricesCount) * lineSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
            MakePointwiseDerivatives<BLOCK_SIZE> << < numBlocks, BLOCK_SIZE, 0, stream >> > (pointwiseHist, pointwiseHistSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem);
        }
    }
    // Public entry point: thin wrapper fixing BLOCK_SIZE = 128.
    void MakePointwiseDerivatives(const float* pointwiseHist, int pointwiseHistLineSize,
                                  const TPartitionStatistics* partStats,
                                  bool hasPointwiseWeights,
                                  int rowSize,
                                  int firstMatrixIdx,
                                  int matricesCount,
                                  float* linearSystem,
                                  TCudaStream stream) {
        RunMakePointwiseDerivatives<128> (pointwiseHist, pointwiseHistLineSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem, stream);
    }
__global__ void UpdateBinsPairs(TCFeature feature, ui32 binIdx,
const ui32* cindex,
const uint2* pairs,
ui32 pairCount,
ui32 depth,
ui32* bins) {
ui32 idx = blockIdx.x * blockDim.x + threadIdx.x;
cindex += feature.Offset;
const ui32 value = binIdx << feature.Shift;
const ui32 mask = feature.Mask << feature.Shift;
while (idx < pairCount) {
const uint2 p = pairs[idx];
const ui32 d1 = (cindex[p.x] & mask);
const ui32 d2 = (cindex[p.y] & mask);
ui32 bit1 = feature.OneHotFeature ? d1 == value : d1 > value;
ui32 bit2 = feature.OneHotFeature ? d2 == value : d2 > value;
ui32 bin = bins[idx];
bin = ((bit1 * 2 + bit2) << (depth * 2)) | bin;
bins[idx] = bin;
idx += blockDim.x * gridDim.x;
}
}
    // Host launcher for the UpdateBinsPairs kernel; grid size is capped at
    // the architecture's maximum block count (the kernel grid-strides).
    void UpdateBinsPairs(TCFeature feature, ui32 bin,
                         const ui32* compressedIndex,
                         const uint2* pairs,
                         ui32 pairCount,
                         ui32 depth,
                         ui32* bins,
                         TCudaStream stream) {
        const int blockSize = 256;
        const int numBlocks = min((pairCount + blockSize - 1) / blockSize,
                                  TArchProps::MaxBlockCount());
        hipLaunchKernelGGL(( UpdateBinsPairs), dim3(numBlocks), dim3(blockSize), 0, stream, feature, bin, compressedIndex, pairs, pairCount, depth, bins);
    }
    // Single-block argmax over `scores` with index tie-breaking (smaller
    // index wins on equal scores), followed by a shared-memory tree reduction.
    // BLOCK_SIZE must be a power of two for the halving loop to be correct.
    // The written Score is negated (scores hold negated gains).
    template <int BLOCK_SIZE>
    __global__ void SelectBestSplitImpl(const float* scores,
                                        const TCBinFeature* binFeature, int size,
                                        int bestIndexBias, TBestSplitPropertiesWithIndex* best) {
        // Sentinel: any real score above -5e6 replaces it; maxIdx == -1 means
        // "no candidate seen" (e.g. size == 0).
        float maxScore = -5000000.0f;
        int maxIdx = -1;
        int tid = threadIdx.x;
        #pragma unroll 8
        for (int i = tid; i < size; i += BLOCK_SIZE) {
            float score = scores[i];
            if (score > maxScore) {
                maxScore = score;
                maxIdx = i;
            }
        }
        __shared__ float vals[BLOCK_SIZE];
        __shared__ int inds[BLOCK_SIZE];
        vals[tid] = maxScore;
        inds[tid] = maxIdx;
        __syncthreads();
        // Standard tree reduction; the barrier is outside the divergent branch.
        for (int s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
            if (tid < s) {
                if ( vals[tid] < vals[tid + s] || (vals[tid] == vals[tid + s] && inds[tid] > inds[tid + s]) ) {
                    vals[tid] = vals[tid + s];
                    inds[tid] = inds[tid + s];
                }
            }
            __syncthreads();
        }
        if (tid == 0) {
            TCBinFeature bestFeature;
            const int bestIdx = inds[0];
            const float bestScore = vals[0];
            if (bestIdx != -1) {
                bestFeature = binFeature[bestIdx];
            } else {
                // No candidate: report feature/bin 0 with the sentinel score.
                bestFeature.BinId = 0;
                bestFeature.FeatureId = 0;
            }
            best->Index = bestIndexBias + bestIdx;
            best->Score = -bestScore;
            best->BinId = bestFeature.BinId;
            best->FeatureId = bestFeature.FeatureId;
        }
    }
    // Host launcher: single 1024-thread block scans all candidate scores.
    void SelectBestSplit(const float* scores,
                         const TCBinFeature* binFeature, int size,
                         int bestIndexBias, TBestSplitPropertiesWithIndex* best,
                         TCudaStream stream) {
        const int blockSize = 1024;
        hipLaunchKernelGGL(( SelectBestSplitImpl<blockSize>), dim3(1), dim3(blockSize), 0, stream, scores, binFeature, size, bestIndexBias, best);
    }
__global__ void ZeroSameLeafBinWeightsImpl(const uint2* pairs,
const ui32* bins,
ui32 pairCount,
float* pairWeights) {
const ui32 i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < pairCount) {
uint2 pair = pairs[i];
const ui32 binx = bins[pair.x];
const ui32 biny = bins[pair.y];
if (binx == biny) {
pairWeights[i] = 0;
}
}
}
    // Host launcher for ZeroSameLeafBinWeightsImpl; no-op for pairCount == 0.
    void ZeroSameLeafBinWeights(const uint2* pairs,
                                const ui32* bins,
                                ui32 pairCount,
                                float* pairWeights,
                                TCudaStream stream
    ) {
        if (pairCount > 0) {
            const int blockSize = 256;
            const ui32 numBlocks = (pairCount + blockSize - 1) / blockSize;
            hipLaunchKernelGGL(( ZeroSameLeafBinWeightsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, pairs, bins, pairCount, pairWeights);
        }
    }
__global__ void FillPairBinsImpl(const uint2* pairs,
const ui32* bins,
ui32 rowSize,
ui32 pairCount,
ui32* pairBins) {
const ui32 i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < pairCount) {
uint2 pair = pairs[i];
const ui32 binx = bins[pair.x];
const ui32 biny = bins[pair.y];
pairBins[i] = binx * rowSize + biny;
}
}
    // Host launcher for FillPairBinsImpl.
    // NOTE(review): binCount here is forwarded as the kernel's rowSize
    // (stride of the flattened pair-bin matrix) -- presumably the number of
    // leaf bins per level; confirm the naming against callers.
    void FillPairBins(const uint2* pairs,
                      const ui32* bins,
                      ui32 binCount,
                      ui32 pairCount,
                      ui32* pairBins,
                      TCudaStream stream) {
        if (pairCount > 0) {
            const int blockSize = 256;
            const ui32 numBlocks = (pairCount + blockSize - 1) / blockSize;
            hipLaunchKernelGGL(( FillPairBinsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, pairs, bins, binCount, pairCount, pairBins);
        }
    }
//for leaves estimation
    // For leaves estimation: pair second derivative from the per-document
    // second derivatives normalized by the query-group total:
    //   pairDer2 = der2(x) * der2(y) / groupDer2, guarded against ~zero groups.
    // NOTE(review): the group id is taken from pair.x only -- assumes both
    // pair members belong to the same query; confirm upstream invariant.
    __global__ void FillPairDer2OnlyImpl(const float* ders2,
                                         const float* groupDers2,
                                         const ui32* qids,
                                         const uint2* pairs,
                                         ui32 pairCount,
                                         float* pairDer2) {
        const int tid = threadIdx.x;
        const int i = blockIdx.x * blockDim.x + tid;
        if (i < pairCount) {
            uint2 pair = Ldg(pairs + i);
            const float der2x = Ldg(ders2 + pair.x);
            const float der2y = Ldg(ders2 + pair.y);
            const int qid = Ldg(qids + pair.x);
            const float groupDer2 = Ldg(groupDers2 + qid);
            // Epsilon in the denominator avoids division blow-up just above the threshold.
            pairDer2[i] = groupDer2 > 1e-20f ? der2x * der2y / (groupDer2 + 1e-20f) : 0;
        }
    }
    // Host launcher for FillPairDer2OnlyImpl; skips the launch when pairCount == 0.
    void FillPairDer2Only(const float* ders2,
                          const float* groupDers2,
                          const ui32* qids,
                          const uint2* pairs,
                          ui32 pairCount,
                          float* pairDer2,
                          TCudaStream stream
    ) {
        const int blockSize = 256;
        const int numBlocks = (pairCount + blockSize - 1) / blockSize;
        if (numBlocks > 0) {
            hipLaunchKernelGGL(( FillPairDer2OnlyImpl), dim3(numBlocks), dim3(blockSize), 0, stream , ders2, groupDers2, qids, pairs, pairCount, pairDer2);
        }
    }
}
| 86a6cc700fef8ad167c2035920d53e1fc81bd4f6.cu | #include "split_pairwise.cuh"
#include "split_properties_helpers.cuh"
#include <cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel {
// Accumulates `sum` into the packed lower-triangular storage of a symmetric
// matrix: element (row, col) lives at index r*(r+1)/2 + c with r = max, c = min.
__forceinline__ __device__ void AddToMatrices(int row, int col, float sum,
                                              float* matrix) {
    const int r = row > col ? row : col;
    const int c = row > col ? col : row;
    matrix[(r * (r + 1) >> 1) + c] += sum;
}
// Accumulates the pairwise weight contributions of the per-matrix linear
// systems from the 2x2 pairwise histograms.
//
// Layout: one logical warp of min(PartCount, 32) lanes processes one matrix.
// Each matrix is a symmetric (2*PartCount) x (2*PartCount) system stored as a
// packed lower triangle, followed by the RHS (filled by the pointwise kernel).
//
// NOTE(review): the original declared an unused __shared__ float[2*BLOCK_SIZE]
// buffer and three unused locals; they are removed here, which frees shared
// memory (better occupancy) and changes no observable behavior.
template <int BLOCK_SIZE, int PartCount>
__global__ void MakePairwiseDerivatives(const float* pairwiseHistogram,
                                        int matrixOffset,
                                        int matCount,
                                        int histLineSize /* 4 * totalBinFeatureCount */,
                                        float* linearSystem) {
    const int logicalWarpSize = PartCount > 32 ? 32 : PartCount;
    const int matricesPerBlock = BLOCK_SIZE / logicalWarpSize;
    int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / logicalWarpSize;
    int localTid = threadIdx.x & (logicalWarpSize - 1);
    if (matrixIdx >= matCount)
        return;
    {
        // Advance to this matrix's system: rowSize targets + packed triangle.
        const size_t rowSize = PartCount * 2;
        const size_t linearSystemSize = (rowSize + rowSize * (rowSize + 1) / 2);
        linearSystem += matrixIdx * linearSystemSize;
    }
    pairwiseHistogram += (matrixOffset + matrixIdx) * 4;
    // Each lane covers N columns, 32 apart (N == 1 when PartCount <= 32).
    const int N = PartCount / logicalWarpSize;
    thread_block_tile<logicalWarpSize> groupTile = tiled_partition<logicalWarpSize>(this_thread_block());
    // Per-lane running sums for the two diagonal entries of each covered part.
    float sum0[N];
    float sum1[N];
    for (int i = 0; i < N; ++i) {
        sum0[i] = 0;
        sum1[i] = 0;
    }
    // Pass 1: rows y, lanes cover columns x; writes diagonal cross terms and
    // the strictly-lower (x < y) off-diagonal 2x2 blocks.
    #pragma unroll 16
    for (int y = 0; y < PartCount; ++y) {
        #pragma unroll
        for (int i = 0; i < N; ++i) {
            const int x = localTid + 32 * i;
            const int partIdx = ConvertBlockToPart(x, y);
            ui64 offset = ((ui64) partIdx * histLineSize * 4ULL);
            float4 hist = __ldg((float4*)(pairwiseHistogram + offset));
            // Clamp at zero; the x == y self-terms of w00/w11 are excluded.
            const float w00 = max((x != y ? hist.x : 0.0f), 0.0f);
            const float w01 = max(hist.y, 0.0f);
            const float w10 = max(hist.z, 0.0f);
            const float w11 = max((x != y ? hist.w : 0.0f), 0.0f);
            // sync for row write done in reduce if we need it
            const int nextRow = 2 * y;
            const int nextCol = 2 * x;
            sum0[i] += w00 + w10;
            sum1[i] += w01 + w11;
            if (x == y) {
                AddToMatrices(nextRow + 1, nextRow, -(w01 + w10), linearSystem);
            } else if (x < y) {
                AddToMatrices(nextRow, nextCol, -w00, linearSystem);
                AddToMatrices(nextRow, nextCol + 1, -w01, linearSystem);
                AddToMatrices(nextRow + 1, nextCol, -w10, linearSystem);
                AddToMatrices(nextRow + 1, nextCol + 1, -w11, linearSystem);
            }
            groupTile.sync();
        }
        groupTile.sync();
    }
    // Pass 2: transposed traversal (lanes cover rows y) for the remaining
    // x > y blocks and the mirrored diagonal sums.
    #pragma unroll 16
    for (int x = 0; x < PartCount; ++x) {
        #pragma unroll
        for (int i = 0; i < N; ++i) {
            const int y = localTid + 32 * i;
            const int partIdx = ConvertBlockToPart(x, y);
            ui64 offset = ((ui64) partIdx * histLineSize * 4ULL);
            float4 hist = __ldg((float4*)(pairwiseHistogram + offset));
            const float w00 = max((x != y ? hist.x : 0.0f), 0.0f);
            const float w01 = max(hist.y, 0.0f);
            const float w10 = max(hist.z, 0.0f);
            const float w11 = max((x != y ? hist.w : 0.0f), 0.0f);
            // sync for row write done in reduce if we need it
            const int nextRow = 2 * y;
            const int nextCol = 2 * x;
            sum0[i] += w01 + w00;
            sum1[i] += w10 + w11;
            if (x > y) {
                AddToMatrices(nextRow, nextCol, -w00, linearSystem);
                AddToMatrices(nextRow, nextCol + 1, -w01, linearSystem);
                AddToMatrices(nextRow + 1, nextCol, -w10, linearSystem);
                AddToMatrices(nextRow + 1, nextCol + 1, -w11, linearSystem);
            }
            groupTile.sync();
        }
        groupTile.sync();
    }
    // Flush the accumulated diagonal entries into packed storage.
    #pragma unroll
    for (int i = 0; i < N; ++i) {
        const int x = localTid + 32 * i;
        const int nextRow = 2 * x;
        linearSystem[nextRow * (nextRow + 1) / 2 + nextRow] += sum0[i];
        linearSystem[(nextRow + 1) * (nextRow + 2) / 2 + nextRow + 1] += sum1[i];
    }
}
// Dispatches MakePairwiseDerivatives for the supported part counts (powers of
// two up to 256). One logical warp of min(partCount, 32) lanes per matrix,
// hence the grid sizing below. No-op when matricesCount == 0.
template <int BLOCK_SIZE>
void RunMakeMatrices(const float* histogram, int partCount, int histLineSize, int firstMatrix, int matricesCount, float* linearSystem, TCudaStream stream) {
    if (matricesCount > 0) {
        const int numBlocks = (((size_t) matricesCount) * min(partCount, 32) + BLOCK_SIZE - 1) / BLOCK_SIZE;
#define RUN(PartCount)\
    MakePairwiseDerivatives<BLOCK_SIZE, PartCount> << < numBlocks, BLOCK_SIZE, 0, stream >> > (histogram, firstMatrix, matricesCount, histLineSize, linearSystem);
        if (partCount == 1) {
            RUN(1)
        } else if (partCount == 2) {
            RUN(2)
        } else if (partCount == 4) {
            RUN(4)
        } else if (partCount == 8) {
            RUN(8)
        } else if (partCount == 16) {
            RUN(16)
        } else if (partCount == 32) {
            RUN(32)
        } else if (partCount == 64) {
            RUN(64)
        } else if (partCount == 128) {
            RUN(128)
        } else if (partCount == 256) {
            RUN(256)
        } else {
            // Unsupported part count: must be a power of two <= 256.
            Y_VERIFY(false);
        }
#undef RUN  // keep the helper macro from leaking into the rest of the TU
    }
}
// Public entry: forwards to RunMakeMatrices<256>. Note the argument reorder --
// this function takes (..., firstMatrix, matricesCount, histLineSize, ...)
// while RunMakeMatrices takes (..., histLineSize, firstMatrix, matricesCount, ...).
void MakePairwiseDerivatives(const float* histogram, int leavesCount, int firstMatrix, int matricesCount, int histLineSize, float* linearSystem,
TCudaStream stream) {
RunMakeMatrices<256>(histogram, leavesCount, histLineSize, firstMatrix, matricesCount, linearSystem, stream);
}
// Fills the diagonal weight entries and the RHS (targets) of each packed
// linear system from the pointwise histogram and the partition statistics.
// One logical line of min(rowSize, 32) lanes works on one matrix.
template <int BLOCK_SIZE>
__global__ void MakePointwiseDerivatives(const float* pointwiseHist, ui64 pointwiseHistSize,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matCount,
float* linearSystem) {
const int lineSize = min(rowSize, 32);
const int matricesPerBlock = BLOCK_SIZE / lineSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / lineSize;
// NOTE(review): pointers are advanced before the matrixIdx bound check below,
// but only dereferenced inside the guarded branch.
pointwiseHist += (firstMatrixIdx + matrixIdx) * (hasPointwiseWeights ? 2 : 1);
linearSystem += ((size_t)matrixIdx) * (rowSize + rowSize * (rowSize + 1) / 2);
const int x = threadIdx.x & (lineSize - 1);
// The RHS lives immediately after the packed lower triangle.
float* targets = linearSystem + rowSize * (rowSize + 1) / 2;
if (matrixIdx < matCount) {
for (int col = x; col < rowSize; col += 32) {
// Each part i owns two columns: even = leaf stays, odd = complement
// (partition total minus leaf value).
const int i = col / 2;
ui64 offset = pointwiseHistSize * i;
if (hasPointwiseWeights) {
const float leafWeight = pointwiseHist[offset];
const float weight = (col & 1) ? partStats[i].Weight - leafWeight : leafWeight;
linearSystem[col * (col + 1) / 2 + col] += max(weight, 0.0f);
}
// When weights are present they occupy slot 0, sums slot 1; bool indexes it.
const float leafSum = pointwiseHist[offset + hasPointwiseWeights];
const float sum = (col & 1) ? partStats[i].Sum - leafSum : leafSum;
targets[col] = sum;
}
}
}
// Host launcher for MakePointwiseDerivatives: sizes the grid so that each
// matrix gets one logical line of min(32, rowSize) lanes.
// No-op when matricesCount == 0.
template <int BLOCK_SIZE>
void RunMakePointwiseDerivatives(const float* pointwiseHist, int binFeatureCount,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matricesCount,
float* linearSystem,
TCudaStream stream
) {
if (matricesCount > 0) {
// Histogram stride per part: doubled when per-point weights are stored too.
const ui32 pointwiseHistSize = binFeatureCount * (hasPointwiseWeights ? 2 : 1);
const int lineSize = min(32, rowSize);
const int numBlocks = (((size_t) matricesCount) * lineSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
MakePointwiseDerivatives<BLOCK_SIZE> << < numBlocks, BLOCK_SIZE, 0, stream >> > (pointwiseHist, pointwiseHistSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem);
}
}
// Public entry: forwards to RunMakePointwiseDerivatives with BLOCK_SIZE = 128.
void MakePointwiseDerivatives(const float* pointwiseHist, int pointwiseHistLineSize,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matricesCount,
float* linearSystem,
TCudaStream stream) {
RunMakePointwiseDerivatives<128> (pointwiseHist, pointwiseHistLineSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem, stream);
}
// Appends this depth's split-decision bits (2 bits per pair: one per member)
// to every pair's accumulated bin. Grid-stride loop, so any grid size works.
__global__ void UpdateBinsPairs(TCFeature feature, ui32 binIdx,
                                const ui32* cindex,
                                const uint2* pairs,
                                ui32 pairCount,
                                ui32 depth,
                                ui32* bins) {
    cindex += feature.Offset;
    const ui32 value = binIdx << feature.Shift;
    const ui32 mask = feature.Mask << feature.Shift;
    const ui32 stride = blockDim.x * gridDim.x;
    for (ui32 idx = blockIdx.x * blockDim.x + threadIdx.x; idx < pairCount; idx += stride) {
        const uint2 p = pairs[idx];
        const ui32 d1 = cindex[p.x] & mask;
        const ui32 d2 = cindex[p.y] & mask;
        // One-hot features split on equality, ordered features on strict greater.
        const ui32 bit1 = feature.OneHotFeature ? (d1 == value) : (d1 > value);
        const ui32 bit2 = feature.OneHotFeature ? (d2 == value) : (d2 > value);
        bins[idx] |= (bit1 * 2 + bit2) << (depth * 2);
    }
}
// Host launcher: appends the current depth's split bits to every pair's bin.
// The grid is capped at TArchProps::MaxBlockCount(); the kernel uses a
// grid-stride loop, so a capped grid still covers all pairs.
void UpdateBinsPairs(TCFeature feature, ui32 bin,
                     const ui32* compressedIndex,
                     const uint2* pairs,
                     ui32 pairCount,
                     ui32 depth,
                     ui32* bins,
                     TCudaStream stream) {
    // Guard added: pairCount == 0 previously produced a zero-block launch,
    // which is an invalid configuration (the sibling launchers all guard this).
    if (pairCount > 0) {
        const int blockSize = 256;
        const int numBlocks = min((pairCount + blockSize - 1) / blockSize,
                                  TArchProps::MaxBlockCount());
        UpdateBinsPairs<<<numBlocks, blockSize, 0, stream>>>(feature, bin, compressedIndex, pairs, pairCount, depth, bins);
    }
}
// Single-block argmax over all candidate split scores, followed by a shared-
// memory tree reduction. Ties are broken in favor of the smaller index.
// Output: best->Score is negated on write; best->Index is biased by
// bestIndexBias.
template <int BLOCK_SIZE>
__global__ void SelectBestSplitImpl(const float* scores,
const TCBinFeature* binFeature, int size,
int bestIndexBias, TBestSplitPropertiesWithIndex* best) {
// NOTE(review): -5e6 is a sentinel "no score found"; scores below it would be
// silently dropped -- confirm scores are always above this bound.
float maxScore = -5000000.0f;
int maxIdx = -1;
int tid = threadIdx.x;
// Each thread scans a strided slice of the score array.
#pragma unroll 8
for (int i = tid; i < size; i += BLOCK_SIZE) {
float score = scores[i];
if (score > maxScore) {
maxScore = score;
maxIdx = i;
}
}
__shared__ float vals[BLOCK_SIZE];
__shared__ int inds[BLOCK_SIZE];
vals[tid] = maxScore;
inds[tid] = maxIdx;
__syncthreads();
// Standard tree reduction; comparison prefers higher score, then lower index.
for (int s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
if (tid < s) {
if ( vals[tid] < vals[tid + s] || (vals[tid] == vals[tid + s] && inds[tid] > inds[tid + s]) ) {
vals[tid] = vals[tid + s];
inds[tid] = inds[tid + s];
}
}
__syncthreads();
}
if (tid == 0) {
TCBinFeature bestFeature;
const int bestIdx = inds[0];
const float bestScore = vals[0];
if (bestIdx != -1) {
bestFeature = binFeature[bestIdx];
} else {
// size == 0 (or all-sentinel): emit a zeroed feature.
// NOTE(review): Index then becomes bestIndexBias - 1 -- confirm callers
// handle that value.
bestFeature.BinId = 0;
bestFeature.FeatureId = 0;
}
best->Index = bestIndexBias + bestIdx;
best->Score = -bestScore;
best->BinId = bestFeature.BinId;
best->FeatureId = bestFeature.FeatureId;
}
}
// Host launcher: a single 1024-thread block scans all candidate scores and
// writes the winning split (score negated, index biased) to *best.
void SelectBestSplit(const float* scores,
const TCBinFeature* binFeature, int size,
int bestIndexBias, TBestSplitPropertiesWithIndex* best,
TCudaStream stream) {
const int blockSize = 1024;
SelectBestSplitImpl<blockSize><<<1, blockSize, 0, stream>>>(scores, binFeature, size, bestIndexBias, best);
}
// Zeroes the weight of any pair whose two members fall into the same leaf bin.
// One thread per pair.
__global__ void ZeroSameLeafBinWeightsImpl(const uint2* pairs,
                                           const ui32* bins,
                                           ui32 pairCount,
                                           float* pairWeights) {
    const ui32 idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= pairCount) {
        return;
    }
    const uint2 p = pairs[idx];
    if (bins[p.x] == bins[p.y]) {
        pairWeights[idx] = 0;
    }
}
// Host launcher for ZeroSameLeafBinWeightsImpl; no-op when there are no pairs.
void ZeroSameLeafBinWeights(const uint2* pairs,
                            const ui32* bins,
                            ui32 pairCount,
                            float* pairWeights,
                            TCudaStream stream
                            ) {
    if (pairCount == 0) {
        return;
    }
    const int blockSize = 256;
    const ui32 numBlocks = (pairCount + blockSize - 1) / blockSize;
    ZeroSameLeafBinWeightsImpl<<<numBlocks, blockSize, 0, stream>>>(pairs, bins, pairCount, pairWeights);
}
// Kernel: map every pair to a flat bin index in a rowSize x rowSize grid.
// One thread per pair: pairBins[i] = bins[pair.x] * rowSize + bins[pair.y].
__global__ void FillPairBinsImpl(const uint2* pairs,
const ui32* bins,
ui32 rowSize,
ui32 pairCount,
ui32* pairBins) {
const ui32 i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < pairCount) {
uint2 pair = pairs[i];
const ui32 binx = bins[pair.x];
const ui32 biny = bins[pair.y];
pairBins[i] = binx * rowSize + biny;
}
}
// Host launcher: computes a flat bin index for every pair; binCount is the
// row size of the (binCount x binCount) leaf-pair grid. No-op for zero pairs.
void FillPairBins(const uint2* pairs,
                  const ui32* bins,
                  ui32 binCount,
                  ui32 pairCount,
                  ui32* pairBins,
                  TCudaStream stream) {
    if (pairCount == 0) {
        return;
    }
    const int blockSize = 256;
    const ui32 numBlocks = (pairCount + blockSize - 1) / blockSize;
    FillPairBinsImpl<<<numBlocks, blockSize, 0, stream>>>(pairs, bins, binCount, pairCount, pairBins);
}
//for leaves estimation
// Kernel: per-pair second-derivative weight for pairwise leaves estimation.
// pairDer2[i] = der2[x] * der2[y] / groupDer2[qid], forced to 0 when the
// group's total der2 is not strictly above 1e-20.
__global__ void FillPairDer2OnlyImpl(const float* ders2,
const float* groupDers2,
const ui32* qids,
const uint2* pairs,
ui32 pairCount,
float* pairDer2) {
const int tid = threadIdx.x;
const int i = blockIdx.x * blockDim.x + tid;
if (i < pairCount) {
// Ldg: read-only cached loads.
uint2 pair = Ldg(pairs + i);
const float der2x = Ldg(ders2 + pair.x);
const float der2y = Ldg(ders2 + pair.y);
// qid taken from pair.x only -- assumes both docs of a pair share a query;
// TODO(review): confirm this invariant for all pair generators.
const int qid = Ldg(qids + pair.x);
const float groupDer2 = Ldg(groupDers2 + qid);
// +1e-20f guards the division even on the taken branch.
pairDer2[i] = groupDer2 > 1e-20f ? der2x * der2y / (groupDer2 + 1e-20f) : 0;
}
}
// Host launcher for FillPairDer2OnlyImpl; skips the launch when there are no
// pairs (numBlocks would be zero).
void FillPairDer2Only(const float* ders2,
                      const float* groupDers2,
                      const ui32* qids,
                      const uint2* pairs,
                      ui32 pairCount,
                      float* pairDer2,
                      TCudaStream stream
                      ) {
    const int blockSize = 256;
    const int numBlocks = (pairCount + blockSize - 1) / blockSize;
    if (numBlocks == 0) {
        return;
    }
    FillPairDer2OnlyImpl<<<numBlocks, blockSize, 0, stream>>>(ders2, groupDers2, qids, pairs, pairCount, pairDer2);
}
}
|
2a07a80d6c9b84e0752212c97825b3087e67cae7.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <list>
#include "cudaDeviceBuffer.h"
#include <hip/hip_runtime.h>
#include "Parser.h"
#include "PhysicalVariable.h"
#include "Type.h"
#include "kernel_hip.cuh"
#include <string>
#include "VTKWriter.h"
#include <iomanip>
#include "Time.hpp"
// Particle simulation driver (HIP): reads a configuration, integrates particle
// motion with a velocity-Verlet-style scheme (position update, force copy,
// force recompute, velocity update), and periodically writes VTK output.
// Presumably a Lennard-Jones potential given sigma/epsilon -- confirm against
// the calcForces kernel.
// NOTE(review): argv[1] is used unchecked, no HIP error checking after
// launches, and the final state is only written if it lands on a vtk_out_freq
// boundary -- all worth confirming/fixing upstream.
int main(int argc, const char * argv[]) {
// Reading from file
Parser p(10,argv[1]);
p.readParameters();
p.readInputConfiguration();
// number of Particles
const real_l numparticles = p.num_particles ;
// Creating the device Buffers
cudaDeviceBuffer<real_d> mass(numparticles,PhysicalQuantity::Scalar) ;
cudaDeviceBuffer<real_d> position(numparticles,PhysicalQuantity::Vector) ;
cudaDeviceBuffer<real_d> velocity(numparticles,PhysicalQuantity::Vector) ;
cudaDeviceBuffer<real_d> forceold(numparticles,PhysicalQuantity::Vector) ;
cudaDeviceBuffer<real_d> forcenew(numparticles,PhysicalQuantity::Vector) ;
p.fillBuffers(mass,velocity,position);
// Allocating memory on Device
mass.allocateOnDevice();
position.allocateOnDevice();
velocity.allocateOnDevice();
forceold.allocateOnDevice();
forcenew.allocateOnDevice();
//Copy to Device
mass.copyToDevice();
position.copyToDevice();
velocity.copyToDevice();
forceold.copyToDevice();
forcenew.copyToDevice();
// Parameters from the file
real_d time_end = std::stod(p.params["time_end"]) ;
real_d timestep_length = std::stod(p.params["timestep_length"]) ;
real_d epsilon = std::stod(p.params["epsilon"]) ;
real_d sigma = std::stod(p.params["sigma"]) ;
real_l vtk_out_freq = std::stol(p.params["vtk_out_freq"]) ;
real_l threads_per_blocks = std::stol(p.params["cl_workgroup_1dsize"]) ;
std::string vtk_name = p.params["vtk_out_name_base"] ;
VTKWriter writer(vtk_name) ;
//Calculate the number of blocks
// Ceil-div so a partial block covers the tail particles.
real_l num_blocks ;
if(numparticles % threads_per_blocks ==0) num_blocks = numparticles / threads_per_blocks ;
else num_blocks = (numparticles / threads_per_blocks) + 1 ;
//std::cout<<num_blocks<<" "<<threads_per_blocks<<std::endl;
real_d time_taken = 0.0 ;
HESPA::Timer time ;
// Algorithm to follow
{
real_l iter = 0 ;
// calculate Initial forces
hipLaunchKernelGGL(( calcForces), dim3(num_blocks) ,dim3(threads_per_blocks), 0, 0, forcenew.devicePtr,position.devicePtr,numparticles,sigma,epsilon) ;
for(real_d t =0.0 ; t < time_end ; t+= timestep_length ) {
time.reset();
// Update the Position
hipLaunchKernelGGL(( updatePosition), dim3(num_blocks),dim3(threads_per_blocks), 0, 0, forcenew.devicePtr,position.devicePtr,velocity.devicePtr,mass.devicePtr,numparticles,timestep_length);
// Copy the forces
hipLaunchKernelGGL(( copyForces), dim3(num_blocks),dim3(threads_per_blocks), 0, 0, forceold.devicePtr,forcenew.devicePtr, numparticles);
// Calculate New forces
hipLaunchKernelGGL(( calcForces), dim3(num_blocks),dim3(threads_per_blocks), 0, 0, forcenew.devicePtr,position.devicePtr,numparticles, sigma,epsilon);
// Update the velocity
hipLaunchKernelGGL(( updateVelocity), dim3(num_blocks),dim3(threads_per_blocks), 0, 0, forcenew.devicePtr,forceold.devicePtr,velocity.devicePtr,mass.devicePtr,numparticles,timestep_length);
// Block until the step's kernels finish so the timing below is meaningful.
hipDeviceSynchronize();
time_taken += time.elapsed();
if(iter % vtk_out_freq == 0){
// copy to host back
forcenew.copyToHost();
forceold.copyToHost();
position.copyToHost();
velocity.copyToHost();
writer.writeVTKOutput(mass,position,velocity,numparticles);
}
// Iterator count
++iter ;
}
}
std::cout<<"The time taken for "<<numparticles<<" is:= "<<time_taken<<std::endl ;
return 0;
}
| 2a07a80d6c9b84e0752212c97825b3087e67cae7.cu | #include <iostream>
#include <list>
#include "cudaDeviceBuffer.h"
#include <cuda_runtime.h>
#include "Parser.h"
#include "PhysicalVariable.h"
#include "Type.h"
#include "kernel.cuh"
#include <string>
#include "VTKWriter.h"
#include <iomanip>
#include "Time.hpp"
// Particle simulation driver (CUDA): reads a configuration, integrates
// particle motion with a velocity-Verlet-style scheme (position update, force
// copy, force recompute, velocity update), and periodically writes VTK output.
// Presumably a Lennard-Jones potential given sigma/epsilon -- confirm against
// the calcForces kernel.
// NOTE(review): argv[1] is used unchecked, no CUDA error checking after
// launches, and the final state is only written if it lands on a vtk_out_freq
// boundary -- all worth confirming/fixing upstream.
int main(int argc, const char * argv[]) {
// Reading from file
Parser p(10,argv[1]);
p.readParameters();
p.readInputConfiguration();
// number of Particles
const real_l numparticles = p.num_particles ;
// Creating the device Buffers
cudaDeviceBuffer<real_d> mass(numparticles,PhysicalQuantity::Scalar) ;
cudaDeviceBuffer<real_d> position(numparticles,PhysicalQuantity::Vector) ;
cudaDeviceBuffer<real_d> velocity(numparticles,PhysicalQuantity::Vector) ;
cudaDeviceBuffer<real_d> forceold(numparticles,PhysicalQuantity::Vector) ;
cudaDeviceBuffer<real_d> forcenew(numparticles,PhysicalQuantity::Vector) ;
p.fillBuffers(mass,velocity,position);
// Allocating memory on Device
mass.allocateOnDevice();
position.allocateOnDevice();
velocity.allocateOnDevice();
forceold.allocateOnDevice();
forcenew.allocateOnDevice();
//Copy to Device
mass.copyToDevice();
position.copyToDevice();
velocity.copyToDevice();
forceold.copyToDevice();
forcenew.copyToDevice();
// Parameters from the file
real_d time_end = std::stod(p.params["time_end"]) ;
real_d timestep_length = std::stod(p.params["timestep_length"]) ;
real_d epsilon = std::stod(p.params["epsilon"]) ;
real_d sigma = std::stod(p.params["sigma"]) ;
real_l vtk_out_freq = std::stol(p.params["vtk_out_freq"]) ;
real_l threads_per_blocks = std::stol(p.params["cl_workgroup_1dsize"]) ;
std::string vtk_name = p.params["vtk_out_name_base"] ;
VTKWriter writer(vtk_name) ;
//Calculate the number of blocks
// Ceil-div so a partial block covers the tail particles.
real_l num_blocks ;
if(numparticles % threads_per_blocks ==0) num_blocks = numparticles / threads_per_blocks ;
else num_blocks = (numparticles / threads_per_blocks) + 1 ;
//std::cout<<num_blocks<<" "<<threads_per_blocks<<std::endl;
real_d time_taken = 0.0 ;
HESPA::Timer time ;
// Algorithm to follow
{
real_l iter = 0 ;
// calculate Initial forces
calcForces<<<num_blocks ,threads_per_blocks>>>(forcenew.devicePtr,position.devicePtr,numparticles,sigma,epsilon) ;
for(real_d t =0.0 ; t < time_end ; t+= timestep_length ) {
time.reset();
// Update the Position
updatePosition<<<num_blocks,threads_per_blocks>>>(forcenew.devicePtr,position.devicePtr,velocity.devicePtr,mass.devicePtr,numparticles,timestep_length);
// Copy the forces
copyForces<<<num_blocks,threads_per_blocks>>>(forceold.devicePtr,forcenew.devicePtr, numparticles);
// Calculate New forces
calcForces<<<num_blocks,threads_per_blocks>>>(forcenew.devicePtr,position.devicePtr,numparticles, sigma,epsilon);
// Update the velocity
updateVelocity<<<num_blocks,threads_per_blocks>>>(forcenew.devicePtr,forceold.devicePtr,velocity.devicePtr,mass.devicePtr,numparticles,timestep_length);
// Block until the step's kernels finish so the timing below is meaningful.
cudaDeviceSynchronize();
time_taken += time.elapsed();
if(iter % vtk_out_freq == 0){
// copy to host back
forcenew.copyToHost();
forceold.copyToHost();
position.copyToHost();
velocity.copyToHost();
writer.writeVTKOutput(mass,position,velocity,numparticles);
}
// Iterator count
++iter ;
}
}
std::cout<<"The time taken for "<<numparticles<<" is:= "<<time_taken<<std::endl ;
return 0;
}
|
25301744d47f02647a3f83bb5cbe97e975601e64.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <common/err_common.hpp>
#include <solve.hpp>
#include <copy.hpp>
#include <rocblas.h>
#include <identity.hpp>
#include <memory.hpp>
#include <platform.hpp>
#include <transpose.hpp>
#include <common/err_common.hpp>
#include <math.hpp>
#include <blas.hpp>
#include <lu.hpp>
#include <qr.hpp>
#include <cstdio>
namespace cuda {
// cusolverStatus_t cusolverDn<>getrs(
// hipsolverDnHandle_t handle,
// hipblasOperation_t trans,
// int n, int nrhs,
// const <> *A, int lda,
// const int *devIpiv,
// <> *B, int ldb,
// int *devInfo );
// Function-pointer type for the cusolverDn <t>getrs entry points (LU-based
// solve using a precomputed factorization and pivot array).
template<typename T>
struct getrs_func_def_t {
typedef cusolverStatus_t (*getrs_func_def)(hipsolverDnHandle_t,
hipblasOperation_t, int, int,
const T *, int, const int *, T *,
int, int *);
};
// Declares a typed accessor FUNC_func<T>() ...
#define SOLVE_FUNC_DEF(FUNC) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def FUNC##_func();
// ... and specializes it to return the S/D/C/Z cusolverDn entry point for T.
#define SOLVE_FUNC(FUNC, TYPE, PREFIX) \
template<> \
typename FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() { \
return (FUNC##_func_def_t<TYPE>::FUNC##_func_def) & \
cusolverDn##PREFIX##FUNC; \
}
SOLVE_FUNC_DEF(getrs)
SOLVE_FUNC(getrs, float, S)
SOLVE_FUNC(getrs, double, D)
SOLVE_FUNC(getrs, cfloat, C)
SOLVE_FUNC(getrs, cdouble, Z)
// cusolverStatus_t cusolverDn<>geqrf_bufferSize(
// hipsolverDnHandle_t handle,
// int m, int n,
// <> *A,
// int lda,
// int *Lwork );
//
// cusolverStatus_t cusolverDn<>geqrf(
// hipsolverDnHandle_t handle,
// int m, int n,
// <> *A, int lda,
// <> *TAU,
// <> *Workspace,
// int Lwork, int *devInfo );
//
// cusolverStatus_t cusolverDn<>mqr(
// hipsolverDnHandle_t handle,
// hipblasSideMode_t side, hipblasOperation_t trans,
// int m, int n, int k,
// const double *A, int lda,
// const double *tau,
// double *C, int ldc,
// double *work,
// int lwork, int *devInfo);
// Function-pointer type for <t>geqrf (in-place QR factorization).
template<typename T>
struct geqrf_solve_func_def_t {
typedef cusolverStatus_t (*geqrf_solve_func_def)(hipsolverDnHandle_t, int,
int, T *, int, T *, T *,
int, int *);
};
// Function-pointer type for <t>geqrf_bufferSize (workspace query).
template<typename T>
struct geqrf_solve_buf_func_def_t {
typedef cusolverStatus_t (*geqrf_solve_buf_func_def)(hipsolverDnHandle_t,
int, int, T *, int,
int *);
};
// Function-pointer type for <t>ormqr/<t>unmqr (multiply by Q from a QR).
template<typename T>
struct mqr_solve_func_def_t {
typedef cusolverStatus_t (*mqr_solve_func_def)(
hipsolverDnHandle_t, hipblasSideMode_t, hipblasOperation_t, int, int, int,
const T *, int, const T *, T *, int, T *, int, int *);
};
// Declares typed accessors for the factorization and its workspace query.
#define QR_FUNC_DEF(FUNC) \
template<typename T> \
static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \
FUNC##_solve_func(); \
\
template<typename T> \
static typename FUNC##_solve_buf_func_def_t<T>::FUNC##_solve_buf_func_def \
FUNC##_solve_buf_func();
// Specializes both accessors to the S/D/C/Z cusolverDn entry points.
#define QR_FUNC(FUNC, TYPE, PREFIX) \
template<> \
typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def \
FUNC##_solve_func<TYPE>() { \
return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def) & \
cusolverDn##PREFIX##FUNC; \
} \
\
template<> \
typename FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def \
FUNC##_solve_buf_func<TYPE>() { \
return (FUNC##_solve_buf_func_def_t< \
TYPE>::FUNC##_solve_buf_func_def) & \
cusolverDn##PREFIX##FUNC##_bufferSize; \
}
QR_FUNC_DEF(geqrf)
QR_FUNC(geqrf, float, S)
QR_FUNC(geqrf, double, D)
QR_FUNC(geqrf, cfloat, C)
QR_FUNC(geqrf, cdouble, Z)
// Same idea for the Q-multiply, but the full routine name is given per type
// (ormqr for real, unmqr for complex).
#define MQR_FUNC_DEF(FUNC) \
template<typename T> \
static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \
FUNC##_solve_func();
#define MQR_FUNC(FUNC, TYPE, PREFIX) \
template<> \
typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def \
FUNC##_solve_func<TYPE>() { \
return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def) & \
cusolverDn##PREFIX; \
}
MQR_FUNC_DEF(mqr)
MQR_FUNC(mqr, float, Sormqr)
MQR_FUNC(mqr, double, Dormqr)
MQR_FUNC(mqr, cfloat, Cunmqr)
MQR_FUNC(mqr, cdouble, Zunmqr)
// Solves A X = B given an already-computed LU factorization of A (factors in
// A, pivots in `pivot`). b is copied so the caller's array is untouched.
// `options` is currently ignored.
template<typename T>
Array<T> solveLU(const Array<T> &A, const Array<int> &pivot, const Array<T> &b,
const af_mat_prop options) {
UNUSED(options);
int N = A.dims()[0];
int NRHS = b.dims()[1];
Array<T> B = copyArray<T>(b);
// devInfo output demanded by the API; its value is not inspected here.
auto info = memAlloc<int>(1);
CUSOLVER_CHECK(getrs_func<T>()(solverDnHandle(), HIPBLAS_OP_N, N, NRHS,
A.get(), A.strides()[1], pivot.get(),
B.get(), B.strides()[1], info.get()));
return B;
}
// Solves a square system A X = B via in-place LU factorization of a copy of A
// followed by getrs. Inputs are copied; neither argument is modified.
template<typename T>
Array<T> generalSolve(const Array<T> &a, const Array<T> &b) {
// NOTE(review): M is unused (generalSolve is only reached for square A).
int M = a.dims()[0];
int N = a.dims()[1];
int K = b.dims()[1];
Array<T> A = copyArray<T>(a);
Array<T> B = copyArray<T>(b);
Array<int> pivot = lu_inplace(A, false);
// devInfo output demanded by the API; its value is not inspected here.
auto info = memAlloc<int>(1);
CUSOLVER_CHECK(getrs_func<T>()(solverDnHandle(), HIPBLAS_OP_N, N, K, A.get(),
A.strides()[1], pivot.get(), B.get(),
B.strides()[1], info.get()));
return B;
}
// BLAS operation used for Q^H in the QR-based least-squares path:
// plain transpose for real types, conjugate transpose for complex types.
template<typename T>
hipblasOperation_t trans() {
return HIPBLAS_OP_T;
}
template<>
hipblasOperation_t trans<cfloat>() {
return HIPBLAS_OP_C;
}
template<>
hipblasOperation_t trans<cdouble>() {
return HIPBLAS_OP_C;
}
// QR-based least-squares solve of a non-square system A X = B.
// Underdetermined (M < N): QR of A^T, triangular solve, pad, multiply by Q.
// Overdetermined (M > N): multiply by Q^H, then triangular solve with R1.
// NOTE(review): when M == N neither branch runs and the default-constructed
// empty array is returned -- callers are expected to route square systems to
// generalSolve; confirm.
template<typename T>
Array<T> leastSquares(const Array<T> &a, const Array<T> &b) {
int M = a.dims()[0];
int N = a.dims()[1];
int K = b.dims()[1];
Array<T> B = createEmptyArray<T>(dim4());
if (M < N) {
// Least squres for this case is solved using the following
// solve(A, B) == matmul(Q, Xpad);
// Where:
// Xpad == pad(Xt, N - M, 1);
// Xt == tri_solve(R1, B);
// R1 == R(seq(M), seq(M));
// transpose(A) == matmul(Q, R);
// QR is performed on the transpose of A
Array<T> A = transpose<T>(a, true);
B = padArray<T, T>(b, dim4(N, K), scalar<T>(0));
int lwork = 0;
// Get workspace needed for QR
CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(solverDnHandle(), A.dims()[0],
A.dims()[1], A.get(),
A.strides()[1], &lwork));
auto workspace = memAlloc<T>(lwork);
// Householder scalars produced by geqrf, consumed by mqr below.
Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1));
auto info = memAlloc<int>(1);
// In place Perform in place QR
CUSOLVER_CHECK(geqrf_solve_func<T>()(
solverDnHandle(), A.dims()[0], A.dims()[1], A.get(), A.strides()[1],
t.get(), workspace.get(), lwork, info.get()));
// R1 = R(seq(M), seq(M));
A.resetDims(dim4(M, M));
// Bt = tri_solve(R1, B);
B.resetDims(dim4(M, K));
trsm<T>(A, B, AF_MAT_CTRANS, true, true, false);
// Bpad = pad(Bt, ..)
B.resetDims(dim4(N, K));
// matmul(Q, Bpad)
CUSOLVER_CHECK(mqr_solve_func<T>()(
solverDnHandle(), HIPBLAS_SIDE_LEFT, HIPBLAS_OP_N, B.dims()[0],
B.dims()[1], A.dims()[0], A.get(), A.strides()[1], t.get(), B.get(),
B.strides()[1], workspace.get(), lwork, info.get()));
} else if (M > N) {
// Least squres for this case is solved using the following
// solve(A, B) == tri_solve(R1, Bt);
// Where:
// R1 == R(seq(N), seq(N));
// Bt == matmul(transpose(Q1), B);
// Q1 == Q(span, seq(N));
// A == matmul(Q, R);
Array<T> A = copyArray<T>(a);
B = copyArray(b);
int lwork = 0;
// Get workspace needed for QR
CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(solverDnHandle(), A.dims()[0],
A.dims()[1], A.get(),
A.strides()[1], &lwork));
auto workspace = memAlloc<T>(lwork);
// Householder scalars produced by geqrf, consumed by mqr below.
Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1));
auto info = memAlloc<int>(1);
// In place Perform in place QR
CUSOLVER_CHECK(geqrf_solve_func<T>()(
solverDnHandle(), A.dims()[0], A.dims()[1], A.get(), A.strides()[1],
t.get(), workspace.get(), lwork, info.get()));
// matmul(Q1, B) -- trans<T>() applies Q^T (real) or Q^H (complex).
CUSOLVER_CHECK(mqr_solve_func<T>()(
solverDnHandle(), HIPBLAS_SIDE_LEFT, trans<T>(), M, K, N, A.get(),
A.strides()[1], t.get(), B.get(), B.strides()[1], workspace.get(),
lwork, info.get()));
// tri_solve(R1, Bt)
A.resetDims(dim4(N, N));
B.resetDims(dim4(N, K));
trsm(A, B, AF_MAT_NONE, true, true, false);
}
return B;
}
// Solves a triangular system A X = B by forwarding to trsm on a copy of b.
// `options` selects upper vs. lower storage and unit-diagonal handling.
template<typename T>
Array<T> triangleSolve(const Array<T> &A, const Array<T> &b,
                       const af_mat_prop options) {
    Array<T> B = copyArray<T>(b);
    const bool isUpper = (options & AF_MAT_UPPER) != 0;
    const bool isUnitDiag = (options & AF_MAT_DIAG_UNIT) != 0;
    trsm(A, B,
         AF_MAT_NONE,  // transpose flag
         isUpper,
         true,  // is_left
         isUnitDiag);
    return B;
}
// Dispatches to the appropriate solver: triangular when the options request
// it, LU for square systems, QR least-squares otherwise.
template<typename T>
Array<T> solve(const Array<T> &a, const Array<T> &b,
               const af_mat_prop options) {
    if (options & AF_MAT_UPPER || options & AF_MAT_LOWER) {
        return triangleSolve<T>(a, b, options);
    }
    const bool isSquare = a.dims()[0] == a.dims()[1];
    return isSquare ? generalSolve<T>(a, b) : leastSquares<T>(a, b);
}
// Explicit instantiations of the public solve entry points for the four
// supported element types.
#define INSTANTIATE_SOLVE(T) \
template Array<T> solve<T>(const Array<T> &a, const Array<T> &b, \
const af_mat_prop options); \
template Array<T> solveLU<T>(const Array<T> &A, const Array<int> &pivot, \
const Array<T> &b, \
const af_mat_prop options);
INSTANTIATE_SOLVE(float)
INSTANTIATE_SOLVE(cfloat)
INSTANTIATE_SOLVE(double)
INSTANTIATE_SOLVE(cdouble)
} // namespace cuda
| 25301744d47f02647a3f83bb5cbe97e975601e64.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <common/err_common.hpp>
#include <solve.hpp>
#include <copy.hpp>
#include <cublas_v2.h>
#include <identity.hpp>
#include <memory.hpp>
#include <platform.hpp>
#include <transpose.hpp>
#include <common/err_common.hpp>
#include <math.hpp>
#include <blas.hpp>
#include <lu.hpp>
#include <qr.hpp>
#include <cstdio>
namespace cuda {
// cusolverStatus_t cusolverDn<>getrs(
// cusolverDnHandle_t handle,
// cublasOperation_t trans,
// int n, int nrhs,
// const <> *A, int lda,
// const int *devIpiv,
// <> *B, int ldb,
// int *devInfo );
// Function-pointer type for the cusolverDn <t>getrs entry points (LU-based
// solve using a precomputed factorization and pivot array).
template<typename T>
struct getrs_func_def_t {
typedef cusolverStatus_t (*getrs_func_def)(cusolverDnHandle_t,
cublasOperation_t, int, int,
const T *, int, const int *, T *,
int, int *);
};
// Declares a typed accessor FUNC_func<T>() ...
#define SOLVE_FUNC_DEF(FUNC) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def FUNC##_func();
// ... and specializes it to return the S/D/C/Z cusolverDn entry point for T.
#define SOLVE_FUNC(FUNC, TYPE, PREFIX) \
template<> \
typename FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() { \
return (FUNC##_func_def_t<TYPE>::FUNC##_func_def) & \
cusolverDn##PREFIX##FUNC; \
}
SOLVE_FUNC_DEF(getrs)
SOLVE_FUNC(getrs, float, S)
SOLVE_FUNC(getrs, double, D)
SOLVE_FUNC(getrs, cfloat, C)
SOLVE_FUNC(getrs, cdouble, Z)
// cusolverStatus_t cusolverDn<>geqrf_bufferSize(
// cusolverDnHandle_t handle,
// int m, int n,
// <> *A,
// int lda,
// int *Lwork );
//
// cusolverStatus_t cusolverDn<>geqrf(
// cusolverDnHandle_t handle,
// int m, int n,
// <> *A, int lda,
// <> *TAU,
// <> *Workspace,
// int Lwork, int *devInfo );
//
// cusolverStatus_t cusolverDn<>mqr(
// cusolverDnHandle_t handle,
// cublasSideMode_t side, cublasOperation_t trans,
// int m, int n, int k,
// const double *A, int lda,
// const double *tau,
// double *C, int ldc,
// double *work,
// int lwork, int *devInfo);
// Function-pointer type for <t>geqrf (in-place QR factorization).
template<typename T>
struct geqrf_solve_func_def_t {
typedef cusolverStatus_t (*geqrf_solve_func_def)(cusolverDnHandle_t, int,
int, T *, int, T *, T *,
int, int *);
};
// Function-pointer type for <t>geqrf_bufferSize (workspace query).
template<typename T>
struct geqrf_solve_buf_func_def_t {
typedef cusolverStatus_t (*geqrf_solve_buf_func_def)(cusolverDnHandle_t,
int, int, T *, int,
int *);
};
// Function-pointer type for <t>ormqr/<t>unmqr (multiply by Q from a QR).
template<typename T>
struct mqr_solve_func_def_t {
typedef cusolverStatus_t (*mqr_solve_func_def)(
cusolverDnHandle_t, cublasSideMode_t, cublasOperation_t, int, int, int,
const T *, int, const T *, T *, int, T *, int, int *);
};
// Declares typed accessors for the factorization and its workspace query.
#define QR_FUNC_DEF(FUNC) \
template<typename T> \
static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \
FUNC##_solve_func(); \
\
template<typename T> \
static typename FUNC##_solve_buf_func_def_t<T>::FUNC##_solve_buf_func_def \
FUNC##_solve_buf_func();
// Specializes both accessors to the S/D/C/Z cusolverDn entry points.
#define QR_FUNC(FUNC, TYPE, PREFIX) \
template<> \
typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def \
FUNC##_solve_func<TYPE>() { \
return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def) & \
cusolverDn##PREFIX##FUNC; \
} \
\
template<> \
typename FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def \
FUNC##_solve_buf_func<TYPE>() { \
return (FUNC##_solve_buf_func_def_t< \
TYPE>::FUNC##_solve_buf_func_def) & \
cusolverDn##PREFIX##FUNC##_bufferSize; \
}
QR_FUNC_DEF(geqrf)
QR_FUNC(geqrf, float, S)
QR_FUNC(geqrf, double, D)
QR_FUNC(geqrf, cfloat, C)
QR_FUNC(geqrf, cdouble, Z)
// Same idea for the Q-multiply, but the full routine name is given per type
// (ormqr for real, unmqr for complex).
#define MQR_FUNC_DEF(FUNC) \
template<typename T> \
static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \
FUNC##_solve_func();
#define MQR_FUNC(FUNC, TYPE, PREFIX) \
template<> \
typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def \
FUNC##_solve_func<TYPE>() { \
return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def) & \
cusolverDn##PREFIX; \
}
MQR_FUNC_DEF(mqr)
MQR_FUNC(mqr, float, Sormqr)
MQR_FUNC(mqr, double, Dormqr)
MQR_FUNC(mqr, cfloat, Cunmqr)
MQR_FUNC(mqr, cdouble, Zunmqr)
// Solves A X = B given an already-computed LU factorization of A (factors in
// A, pivots in `pivot`). b is copied so the caller's array is untouched.
// `options` is currently ignored.
template<typename T>
Array<T> solveLU(const Array<T> &A, const Array<int> &pivot, const Array<T> &b,
const af_mat_prop options) {
UNUSED(options);
int N = A.dims()[0];
int NRHS = b.dims()[1];
Array<T> B = copyArray<T>(b);
// devInfo output demanded by the API; its value is not inspected here.
auto info = memAlloc<int>(1);
CUSOLVER_CHECK(getrs_func<T>()(solverDnHandle(), CUBLAS_OP_N, N, NRHS,
A.get(), A.strides()[1], pivot.get(),
B.get(), B.strides()[1], info.get()));
return B;
}
template<typename T>
Array<T> generalSolve(const Array<T> &a, const Array<T> &b) {
    // Solves a*X = b for a square coefficient matrix `a` by LU factorization
    // (lu_inplace) followed by a cusolver getrs solve. The caller (solve())
    // only routes square systems here, so dims()[0] == dims()[1].
    // Fix: removed the unused local `M` (was a.dims()[0], never read).
    int N = a.dims()[1];  // order of the (square) coefficient matrix
    int K = b.dims()[1];  // number of right-hand-side columns
    Array<T> A       = copyArray<T>(a);  // lu_inplace overwrites its input
    Array<T> B       = copyArray<T>(b);  // getrs overwrites the RHS with X
    Array<int> pivot = lu_inplace(A, false);
    auto info        = memAlloc<int>(1);  // device-side cusolver status word
    CUSOLVER_CHECK(getrs_func<T>()(solverDnHandle(), CUBLAS_OP_N, N, K, A.get(),
                                   A.strides()[1], pivot.get(), B.get(),
                                   B.strides()[1], info.get()));
    return B;
}
// Operation used when applying Q^H (the adjoint of Q) in the overdetermined
// least-squares path: plain transpose for real element types...
template<typename T>
cublasOperation_t trans() {
    return CUBLAS_OP_T;
}
// ...and conjugate transpose for the complex element types.
template<>
cublasOperation_t trans<cfloat>() {
    return CUBLAS_OP_C;
}
template<>
cublasOperation_t trans<cdouble>() {
    return CUBLAS_OP_C;
}
// Least-squares / minimum-norm solve of a*X = b for a non-square `a`
// (M x N) using QR factorization:
//   * M < N (underdetermined): QR of transpose(a), triangular solve with
//     R1^H, then multiply by Q to obtain the minimum-norm solution.
//   * M > N (overdetermined): QR of a, form Q^H * b, then triangular solve
//     with R1.
// NOTE(review): when M == N this returns the empty array B unchanged;
// callers are expected to route square systems to generalSolve instead.
template<typename T>
Array<T> leastSquares(const Array<T> &a, const Array<T> &b) {
    int M = a.dims()[0];
    int N = a.dims()[1];
    int K = b.dims()[1];
    Array<T> B = createEmptyArray<T>(dim4());
    if (M < N) {
        // Least squares for this case is solved using the following
        // solve(A, B) == matmul(Q, Xpad);
        // Where:
        // Xpad == pad(Xt, N - M, 1);
        // Xt   == tri_solve(R1, B);
        // R1   == R(seq(M), seq(M));
        // transpose(A) == matmul(Q, R);
        // QR is performed on the transpose of A
        Array<T> A = transpose<T>(a, true);
        B = padArray<T, T>(b, dim4(N, K), scalar<T>(0));
        int lwork = 0;
        // Get workspace needed for QR
        CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(solverDnHandle(), A.dims()[0],
                                                 A.dims()[1], A.get(),
                                                 A.strides()[1], &lwork));
        auto workspace = memAlloc<T>(lwork);
        Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1));
        auto info = memAlloc<int>(1);
        // Perform the QR factorization in place (A becomes R + Householder
        // vectors; t receives the Householder scalars)
        CUSOLVER_CHECK(geqrf_solve_func<T>()(
            solverDnHandle(), A.dims()[0], A.dims()[1], A.get(), A.strides()[1],
            t.get(), workspace.get(), lwork, info.get()));
        // R1 = R(seq(M), seq(M));  (view of the leading M x M block)
        A.resetDims(dim4(M, M));
        // Bt = tri_solve(R1, B);   (solve with the conjugate transpose of R1)
        B.resetDims(dim4(M, K));
        trsm<T>(A, B, AF_MAT_CTRANS, true, true, false);
        // Bpad = pad(Bt, ..)  (re-expose the zero-padded N x K buffer)
        B.resetDims(dim4(N, K));
        // matmul(Q, Bpad) -- reuses the geqrf workspace for ormqr/unmqr
        CUSOLVER_CHECK(mqr_solve_func<T>()(
            solverDnHandle(), CUBLAS_SIDE_LEFT, CUBLAS_OP_N, B.dims()[0],
            B.dims()[1], A.dims()[0], A.get(), A.strides()[1], t.get(), B.get(),
            B.strides()[1], workspace.get(), lwork, info.get()));
    } else if (M > N) {
        // Least squares for this case is solved using the following
        // solve(A, B) == tri_solve(R1, Bt);
        // Where:
        // R1 == R(seq(N), seq(N));
        // Bt == matmul(transpose(Q1), B);
        // Q1 == Q(span, seq(N));
        // A  == matmul(Q, R);
        Array<T> A = copyArray<T>(a);
        B = copyArray(b);
        int lwork = 0;
        // Get workspace needed for QR
        CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(solverDnHandle(), A.dims()[0],
                                                 A.dims()[1], A.get(),
                                                 A.strides()[1], &lwork));
        auto workspace = memAlloc<T>(lwork);
        Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1));
        auto info = memAlloc<int>(1);
        // Perform the QR factorization in place
        CUSOLVER_CHECK(geqrf_solve_func<T>()(
            solverDnHandle(), A.dims()[0], A.dims()[1], A.get(), A.strides()[1],
            t.get(), workspace.get(), lwork, info.get()));
        // matmul(Q1^H, B) -- trans<T>() picks T vs C for real/complex types
        CUSOLVER_CHECK(mqr_solve_func<T>()(
            solverDnHandle(), CUBLAS_SIDE_LEFT, trans<T>(), M, K, N, A.get(),
            A.strides()[1], t.get(), B.get(), B.strides()[1], workspace.get(),
            lwork, info.get()));
        // tri_solve(R1, Bt) with the leading N x N block of R
        A.resetDims(dim4(N, N));
        B.resetDims(dim4(N, K));
        trsm(A, B, AF_MAT_NONE, true, true, false);
    }
    return B;
}
template<typename T>
Array<T> triangleSolve(const Array<T> &A, const Array<T> &b,
                       const af_mat_prop options) {
    // Direct triangular solve: overwrite a copy of b with the solution of
    // A*X = b, where `options` marks A as upper/lower triangular and
    // optionally unit-diagonal.
    Array<T> result = copyArray<T>(b);
    const bool isUpper  = (options & AF_MAT_UPPER) != 0;
    const bool unitDiag = (options & AF_MAT_DIAG_UNIT) != 0;
    trsm(A, result,
         AF_MAT_NONE,  // no transpose of A
         isUpper,
         true,  // A multiplies from the left
         unitDiag);
    return result;
}
template<typename T>
Array<T> solve(const Array<T> &a, const Array<T> &b,
               const af_mat_prop options) {
    // Dispatch to the right solver: direct triangular solve when the caller
    // declared `a` triangular, LU for square systems, QR least-squares for
    // everything else.
    const bool isTriangular = (options & (AF_MAT_UPPER | AF_MAT_LOWER)) != 0;
    if (isTriangular) { return triangleSolve<T>(a, b, options); }
    const bool isSquare = a.dims()[0] == a.dims()[1];
    return isSquare ? generalSolve<T>(a, b) : leastSquares<T>(a, b);
}
// Explicit template instantiations for the element types this backend
// exports (real and complex, single and double precision).
#define INSTANTIATE_SOLVE(T)                                                 \
    template Array<T> solve<T>(const Array<T> &a, const Array<T> &b,         \
                               const af_mat_prop options);                   \
    template Array<T> solveLU<T>(const Array<T> &A, const Array<int> &pivot, \
                                 const Array<T> &b,                          \
                                 const af_mat_prop options);
INSTANTIATE_SOLVE(float)
INSTANTIATE_SOLVE(cfloat)
INSTANTIATE_SOLVE(double)
INSTANTIATE_SOLVE(cdouble)
} // namespace cuda
|
10cc572be264c55b3d2cd84df47fe6dc1b134140.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* CUDA Kernel Device code
*
*/
// Element-wise vector addition: C[idx] = A[idx] + B[idx].
// One thread per element; threads of the last block that fall past
// numElements return without touching memory.
extern "C" __global__ void
boundCalc(const float *A, const float *B, float *C, int numElements)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= numElements) {
        return;  // out-of-range tail thread
    }
    C[idx] = A[idx] + B[idx];
}
| 10cc572be264c55b3d2cd84df47fe6dc1b134140.cu | /**
* CUDA Kernel Device code
*
*/
// Element-wise vector addition: C[i] = A[i] + B[i], one thread per element.
extern "C" __global__ void
boundCalc(const float *A, const float *B, float *C, int numElements)
{
    // Flat global thread index; the grid may overshoot numElements,
    // hence the bounds guard below.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements)
    {
        C[i] = A[i] + B[i];
    }
}
|
f7691492c164ccd568d21a09ce4f525c6f6eaf43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "transform.cuh"
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/operators.cuh>
#include <util/generic/cast.h>
#include <contrib/libs/nvidia/cub/cub/block/block_radix_sort.cuh>
namespace NKernel {
// In-place element-wise addition x[i] += y[i]; grid-stride loop, reads via
// the read-only cache (__ldg), writes via the project WriteThrough helper.
template <typename T>
__global__ void AddVectorImpl(T *x, const T *y, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        const T y0 = __ldg(y + i);
        const T x0 = __ldg(x + i);
        const T r0 = y0 + x0;
        WriteThrough(x + i, r0);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
// Host launcher: grid is capped at the device block-count limit; the
// grid-stride loop in the kernel covers any remainder.
template <typename T>
void AddVector(T *x, const T *y, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    AddVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
// Scalar-broadcast variant: x[i] += y.
template <typename T>
__global__ void AddVectorImpl(T *x, const T y, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        const T x0 = __ldg(x + i);
        const T r0 = y + x0;
        WriteThrough(x + i, r0);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void AddVector(T *x, const T y, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    AddVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
// In-place subtraction x[i] -= y[i] (vector) ...
template <typename T>
__global__ void SubtractVectorImpl(T *x, const T *y, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        const T y0 = __ldg(y + i);
        const T x0 = __ldg(x + i);
        const T r0 = x0 - y0;
        WriteThrough(x + i, r0);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
// ... and x[i] -= y (broadcast scalar).
template <typename T>
__global__ void SubtractVectorImpl(T *x, const T y, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        const T x0 = __ldg(x + i);
        const T r0 = x0 - y;
        WriteThrough(x + i, r0);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void SubtractVector(T *x, const T *y, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    SubtractVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
template <typename T>
void SubtractVector(T *x, const T y, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    SubtractVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
// In-place element-wise multiplication x[i] *= y[i].
template <typename T>
__global__ void MultiplyVectorImpl(T *x, const T *y, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        const T y0 = __ldg(y + i);
        const T x0 = __ldg(x + i);
        const T r0 = y0 * x0;
        WriteThrough(x + i, r0);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void MultiplyVector(T *x, const T *y, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    MultiplyVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
// Scalar-broadcast variant: x[i] *= c.
template <typename T>
__global__ void MultiplyVectorImpl(T *x, const T c, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        T x0 = __ldg(x + i);
        T r0 = x0 * c;
        WriteThrough(x + i, r0);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void MultiplyVector(T *x, const T c, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    MultiplyVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, c, size);
}
// In-place division x[i] /= y[i]; ZeroAwareDivide encapsulates the
// zero-divisor policy selected by skipZeroes.
template <typename T>
__global__ void DivideVectorImpl(T *x, const T *y, bool skipZeroes, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        T x0 = x[i];
        T y0 = y[i];
        T r0 = ZeroAwareDivide(x0, y0, skipZeroes);
        x[i] = r0;
        i += (ui64)gridDim.x * blockDim.x;
    }
}
// Scalar-broadcast variant: x[i] /= y.
template <typename T>
__global__ void DivideVectorImpl(T *x, const T y, bool skipZeroes, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        T x0 = x[i];
        T r0 = ZeroAwareDivide(x0, y, skipZeroes);
        x[i] = r0;
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void DivideVector(T *x, const T *y, ui64 size, bool skipZeroes, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    DivideVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, skipZeroes, size);
}
template <typename T>
void DivideVector(T *x, const T y, ui64 size, bool skipZeroes, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    DivideVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, skipZeroes, size);
}
// In-place exponentiation x[i] = exp(x[i]); uses the fast-math __expf
// intrinsic (reduced precision; only instantiated for float below).
template <typename T>
__global__ void ExpVectorImpl(T *x, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        T val = __ldg(x + i);
        x[i] = __expf(val);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void ExpVector(T *x, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()));
    ExpVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, size);
}
// Gather with optional multi-column layout: dst[i] = src[map[i]] for each
// of columnCount columns, where columns are spaced by the *ColumnAlignSize
// strides in their respective buffers.
template <typename T, typename Index>
__global__ void GatherImpl(T *dst, const T *src, const Index *map, Index size,
                           int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize) {
    Index i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        Index m = __ldg(map + i);
        for (int column = 0; column < columnCount; ++column) {
            WriteThrough(dst + i + column * dstColumnAlignSize, StreamLoad(src + m + column * srcColumnAlignSize));
        }
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T, typename Index>
void Gather(T *dst, const T *src, const Index* map, ui64 size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize, TCudaStream stream) {
    const ui64 blockSize = 256;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()));
    if (numBlocks) {
        GatherImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, columnCount, dstColumnAlignSize, srcColumnAlignSize);
    }
}
// Gather variant that masks each map entry before use:
// dst[i] = src[map[i] & mask] (the high bits of map may carry flags).
template <typename T, typename Index>
__global__ void GatherWithMaskImpl(T *dst, const T *src, const Index *map, Index size, Index mask) {
    Index i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        Index m = StreamLoad(map + i) & mask;
        WriteThrough(dst + i, StreamLoad(src + m));
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T, typename Index>
void GatherWithMask(T *dst, const T *src, const Index* map, ui64 size, Index mask, TCudaStream stream) {
    const ui64 blockSize = 256;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()));
    if (numBlocks) {
        GatherWithMaskImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, mask);
    }
}
// Scatter (inverse of Gather): dst[map[i]] = src[i], per column.
template <typename T, typename Index>
__global__ void ScatterImpl(T* dst, const T* src, const Index* map, Index size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnALignSize) {
    Index i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        Index m = __ldg(map + i);
        for (int column = 0; column < columnCount; ++column) {
            WriteThrough(dst + m + dstColumnAlignSize * column, StreamLoad(src + i + srcColumnALignSize * column));
        }
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T, typename Index>
void Scatter(T *dst, const T *src, const Index* map, ui64 size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize, TCudaStream stream) {
    const ui32 blockSize = 256;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()));
    if (numBlocks) {
        ScatterImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, columnCount, dstColumnAlignSize, srcColumnAlignSize);
    }
}
// Masked scatter: dst[map[i] & mask] = src[i].
template <typename T, typename Index>
__global__ void ScatterWithMaskImpl(T* dst, const T* src, const Index* map, Index size, Index mask) {
    Index i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        Index m = StreamLoad(map + i) & mask;
        WriteThrough(dst + m, StreamLoad(src + i));
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T, typename Index>
void ScatterWithMask(T *dst, const T *src, const Index* map, ui64 size, Index mask, TCudaStream stream) {
    const ui32 blockSize = 256;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()));
    if (numBlocks) {
        ScatterWithMaskImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, mask);
    }
}
// In-place reversal: threads swap the symmetric pair (i, size-i-1); only
// the first half of the range is iterated.
template <typename T>
__global__ void ReverseImpl(T *data, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    ui64 half = size / 2;
    while (i < half) {
        T a = data[i];
        T b = data[size - i - 1];
        data[i] = b;
        data[size - i - 1] = a;
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void Reverse(T* data, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 256;
    // Grid is sized for half the elements, since each thread swaps a pair.
    const ui32 numBlocks = SafeIntegerCast<ui32>(min(((size + 1) / 2 + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()));
    ReverseImpl<T> << < numBlocks, blockSize, 0, stream >> > (data, size);
}
// Explicit instantiations of the binary element-wise ops for all exported
// element types.
#define BIN_OP_VECTOR_TEMPL(Type) \
    template void AddVector<Type>(Type *x, const Type *y, ui64 size, TCudaStream stream);\
    template void AddVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream);\
    template void SubtractVector<Type>(Type *x, const Type *y, ui64 size, TCudaStream stream);\
    template void SubtractVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream); \
    template void MultiplyVector<Type>(Type *x, const Type* y, ui64 size, TCudaStream stream);\
    template void MultiplyVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream);\
    template void DivideVector<Type>(Type *x, const Type* y, ui64 size, bool skipZeroes, TCudaStream stream);\
    template void DivideVector<Type>(Type *x, Type y, ui64 size, bool skipZeroes, TCudaStream stream);\

BIN_OP_VECTOR_TEMPL(int)
BIN_OP_VECTOR_TEMPL(float)
BIN_OP_VECTOR_TEMPL(ui32)
BIN_OP_VECTOR_TEMPL(double)
BIN_OP_VECTOR_TEMPL(ui8)
BIN_OP_VECTOR_TEMPL(uint2)
BIN_OP_VECTOR_TEMPL(ui16)
#define FUNC_VECTOR_TEMPL(Type) \
    template void ExpVector<Type>(Type *x, ui64 size, TCudaStream stream);\

FUNC_VECTOR_TEMPL(float)
// Gather/scatter instantiations (ui32 indices only).
#define GATHER_SCATTER_TEMPL(Type, IndexType) \
    template void Gather<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, int columntCount, ui64, ui64, TCudaStream stream); \
    template void Scatter<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, int, ui64, ui64, TCudaStream stream); \
    template void GatherWithMask<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, IndexType mask, TCudaStream stream); \
    template void ScatterWithMask<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, IndexType mask, TCudaStream stream);
GATHER_SCATTER_TEMPL(int, ui32)
GATHER_SCATTER_TEMPL(ui8, ui32)
GATHER_SCATTER_TEMPL(uint2, ui32)
GATHER_SCATTER_TEMPL(ui32, ui32)
GATHER_SCATTER_TEMPL(float, ui32)
GATHER_SCATTER_TEMPL(bool, ui32)
#define REVERSE_VECTOR_TEMPL(Type) \
    template void Reverse<Type>(Type *x, ui64 size, TCudaStream stream);
REVERSE_VECTOR_TEMPL(char)
REVERSE_VECTOR_TEMPL(float)
REVERSE_VECTOR_TEMPL(unsigned char)
REVERSE_VECTOR_TEMPL(short)
REVERSE_VECTOR_TEMPL(ui16)
REVERSE_VECTOR_TEMPL(int)
REVERSE_VECTOR_TEMPL(ui32)
// PowVector (in-place): despite the name, each element is the EXPONENT --
// x[i] = base ^ x[i].
template <typename T>
__global__ void PowVectorImpl(T* const x, const T base, const ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        x[i] = pow(base, x[i]);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void PowVector(T* const x, const ui64 size, const T base, const TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(Min(
        (size + blockSize - 1) / blockSize,
        (ui64)TArchProps::MaxBlockCount()));
    hipLaunchKernelGGL(( PowVectorImpl<T>), dim3(numBlocks), dim3(blockSize), 0, stream, x, base, size);
}
#define Y_CATBOOST_CUDA_F_IMPL(T) \
    template void PowVector<T>(T* x, ui64 size, T base, TCudaStream stream);
Y_MAP_ARGS(
    Y_CATBOOST_CUDA_F_IMPL,
    float);
#undef Y_CATBOOST_CUDA_F_IMPL
// PowVector (out-of-place): y[i] = base ^ x[i], input left untouched.
template <typename T>
__global__ void PowVectorImpl(const T* const x, const T base, const ui64 size, T* y) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        y[i] = pow(base, x[i]);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void PowVector(const T* x, const ui64 size, const T base, T* y, const TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(Min(
        (size + blockSize - 1) / blockSize,
        (ui64)TArchProps::MaxBlockCount()));
    hipLaunchKernelGGL(( PowVectorImpl<T>), dim3(numBlocks), dim3(blockSize), 0, stream, x, base, size, y);
}
#define Y_CATBOOST_CUDA_F_IMPL(T) \
    template void PowVector<T>(const T* x, ui64 size, T base, T* y, TCudaStream stream);
Y_MAP_ARGS(
    Y_CATBOOST_CUDA_F_IMPL,
    float);
#undef Y_CATBOOST_CUDA_F_IMPL
}
| f7691492c164ccd568d21a09ce4f525c6f6eaf43.cu | #include "transform.cuh"
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/operators.cuh>
#include <util/generic/cast.h>
#include <contrib/libs/nvidia/cub/cub/block/block_radix_sort.cuh>
namespace NKernel {
// In-place element-wise addition x[i] += y[i]; grid-stride loop with
// read-only-cache loads (__ldg) and WriteThrough stores.
template <typename T>
__global__ void AddVectorImpl(T *x, const T *y, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        const T y0 = __ldg(y + i);
        const T x0 = __ldg(x + i);
        const T r0 = y0 + x0;
        WriteThrough(x + i, r0);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
// Host launcher: grid capped at the device block-count limit; the
// grid-stride loop covers any remainder.
template <typename T>
void AddVector(T *x, const T *y, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    AddVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
// Scalar-broadcast variant: x[i] += y.
template <typename T>
__global__ void AddVectorImpl(T *x, const T y, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        const T x0 = __ldg(x + i);
        const T r0 = y + x0;
        WriteThrough(x + i, r0);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void AddVector(T *x, const T y, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    AddVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
// In-place subtraction: x[i] -= y[i] (vector) and x[i] -= y (scalar).
template <typename T>
__global__ void SubtractVectorImpl(T *x, const T *y, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        const T y0 = __ldg(y + i);
        const T x0 = __ldg(x + i);
        const T r0 = x0 - y0;
        WriteThrough(x + i, r0);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
__global__ void SubtractVectorImpl(T *x, const T y, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        const T x0 = __ldg(x + i);
        const T r0 = x0 - y;
        WriteThrough(x + i, r0);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void SubtractVector(T *x, const T *y, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    SubtractVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
template <typename T>
void SubtractVector(T *x, const T y, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    SubtractVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
// In-place multiplication: x[i] *= y[i] (vector) and x[i] *= c (scalar).
template <typename T>
__global__ void MultiplyVectorImpl(T *x, const T *y, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        const T y0 = __ldg(y + i);
        const T x0 = __ldg(x + i);
        const T r0 = y0 * x0;
        WriteThrough(x + i, r0);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void MultiplyVector(T *x, const T *y, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    MultiplyVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, size);
}
template <typename T>
__global__ void MultiplyVectorImpl(T *x, const T c, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        T x0 = __ldg(x + i);
        T r0 = x0 * c;
        WriteThrough(x + i, r0);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void MultiplyVector(T *x, const T c, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    MultiplyVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, c, size);
}
// In-place division; ZeroAwareDivide applies the zero-divisor policy
// selected by skipZeroes.
template <typename T>
__global__ void DivideVectorImpl(T *x, const T *y, bool skipZeroes, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        T x0 = x[i];
        T y0 = y[i];
        T r0 = ZeroAwareDivide(x0, y0, skipZeroes);
        x[i] = r0;
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
__global__ void DivideVectorImpl(T *x, const T y, bool skipZeroes, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        T x0 = x[i];
        T r0 = ZeroAwareDivide(x0, y, skipZeroes);
        x[i] = r0;
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void DivideVector(T *x, const T *y, ui64 size, bool skipZeroes, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    DivideVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, skipZeroes, size);
}
template <typename T>
void DivideVector(T *x, const T y, ui64 size, bool skipZeroes, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize,
                                                     (ui64)TArchProps::MaxBlockCount()));
    DivideVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, y, skipZeroes, size);
}
// In-place x[i] = exp(x[i]) via the fast-math __expf intrinsic (reduced
// precision; only instantiated for float).
template <typename T>
__global__ void ExpVectorImpl(T *x, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        T val = __ldg(x + i);
        x[i] = __expf(val);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void ExpVector(T *x, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()));
    ExpVectorImpl<T> << < numBlocks, blockSize, 0, stream >> > (x, size);
}
// Multi-column gather: dst[i] = src[map[i]] for each column, with per-buffer
// column strides (*ColumnAlignSize).
template <typename T, typename Index>
__global__ void GatherImpl(T *dst, const T *src, const Index *map, Index size,
                           int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize) {
    Index i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        Index m = __ldg(map + i);
        for (int column = 0; column < columnCount; ++column) {
            WriteThrough(dst + i + column * dstColumnAlignSize, StreamLoad(src + m + column * srcColumnAlignSize));
        }
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T, typename Index>
void Gather(T *dst, const T *src, const Index* map, ui64 size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize, TCudaStream stream) {
    const ui64 blockSize = 256;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()));
    if (numBlocks) {
        GatherImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, columnCount, dstColumnAlignSize, srcColumnAlignSize);
    }
}
// Masked gather: dst[i] = src[map[i] & mask] (high map bits may hold flags).
template <typename T, typename Index>
__global__ void GatherWithMaskImpl(T *dst, const T *src, const Index *map, Index size, Index mask) {
    Index i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        Index m = StreamLoad(map + i) & mask;
        WriteThrough(dst + i, StreamLoad(src + m));
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T, typename Index>
void GatherWithMask(T *dst, const T *src, const Index* map, ui64 size, Index mask, TCudaStream stream) {
    const ui64 blockSize = 256;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()));
    if (numBlocks) {
        GatherWithMaskImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, mask);
    }
}
// Multi-column scatter (inverse of gather): dst[map[i]] = src[i].
template <typename T, typename Index>
__global__ void ScatterImpl(T* dst, const T* src, const Index* map, Index size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnALignSize) {
    Index i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        Index m = __ldg(map + i);
        for (int column = 0; column < columnCount; ++column) {
            WriteThrough(dst + m + dstColumnAlignSize * column, StreamLoad(src + i + srcColumnALignSize * column));
        }
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T, typename Index>
void Scatter(T *dst, const T *src, const Index* map, ui64 size, int columnCount, ui64 dstColumnAlignSize, ui64 srcColumnAlignSize, TCudaStream stream) {
    const ui32 blockSize = 256;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()));
    if (numBlocks) {
        ScatterImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, columnCount, dstColumnAlignSize, srcColumnAlignSize);
    }
}
// Masked scatter: dst[map[i] & mask] = src[i].
template <typename T, typename Index>
__global__ void ScatterWithMaskImpl(T* dst, const T* src, const Index* map, Index size, Index mask) {
    Index i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        Index m = StreamLoad(map + i) & mask;
        WriteThrough(dst + m, StreamLoad(src + i));
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T, typename Index>
void ScatterWithMask(T *dst, const T *src, const Index* map, ui64 size, Index mask, TCudaStream stream) {
    const ui32 blockSize = 256;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min((size + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()));
    if (numBlocks) {
        ScatterWithMaskImpl<T, Index> << < numBlocks, blockSize, 0, stream >> > (dst, src, map, (Index)size, mask);
    }
}
// In-place reversal: each thread swaps the symmetric pair (i, size-i-1);
// the launcher sizes the grid for half the elements.
template <typename T>
__global__ void ReverseImpl(T *data, ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    ui64 half = size / 2;
    while (i < half) {
        T a = data[i];
        T b = data[size - i - 1];
        data[i] = b;
        data[size - i - 1] = a;
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void Reverse(T* data, ui64 size, TCudaStream stream) {
    const ui32 blockSize = 256;
    const ui32 numBlocks = SafeIntegerCast<ui32>(min(((size + 1) / 2 + blockSize - 1) / blockSize, (ui64)TArchProps::MaxBlockCount()));
    ReverseImpl<T> << < numBlocks, blockSize, 0, stream >> > (data, size);
}
// Explicit instantiations of the binary element-wise ops for all exported
// element types.
#define BIN_OP_VECTOR_TEMPL(Type) \
    template void AddVector<Type>(Type *x, const Type *y, ui64 size, TCudaStream stream);\
    template void AddVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream);\
    template void SubtractVector<Type>(Type *x, const Type *y, ui64 size, TCudaStream stream);\
    template void SubtractVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream); \
    template void MultiplyVector<Type>(Type *x, const Type* y, ui64 size, TCudaStream stream);\
    template void MultiplyVector<Type>(Type *x, Type y, ui64 size, TCudaStream stream);\
    template void DivideVector<Type>(Type *x, const Type* y, ui64 size, bool skipZeroes, TCudaStream stream);\
    template void DivideVector<Type>(Type *x, Type y, ui64 size, bool skipZeroes, TCudaStream stream);\

BIN_OP_VECTOR_TEMPL(int)
BIN_OP_VECTOR_TEMPL(float)
BIN_OP_VECTOR_TEMPL(ui32)
BIN_OP_VECTOR_TEMPL(double)
BIN_OP_VECTOR_TEMPL(ui8)
BIN_OP_VECTOR_TEMPL(uint2)
BIN_OP_VECTOR_TEMPL(ui16)
#define FUNC_VECTOR_TEMPL(Type) \
    template void ExpVector<Type>(Type *x, ui64 size, TCudaStream stream);\

FUNC_VECTOR_TEMPL(float)
// Gather/scatter instantiations (ui32 indices only).
#define GATHER_SCATTER_TEMPL(Type, IndexType) \
    template void Gather<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, int columntCount, ui64, ui64, TCudaStream stream); \
    template void Scatter<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, int, ui64, ui64, TCudaStream stream); \
    template void GatherWithMask<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, IndexType mask, TCudaStream stream); \
    template void ScatterWithMask<Type, IndexType>(Type *dst, const Type *src, const IndexType* map, ui64 size, IndexType mask, TCudaStream stream);
GATHER_SCATTER_TEMPL(int, ui32)
GATHER_SCATTER_TEMPL(ui8, ui32)
GATHER_SCATTER_TEMPL(uint2, ui32)
GATHER_SCATTER_TEMPL(ui32, ui32)
GATHER_SCATTER_TEMPL(float, ui32)
GATHER_SCATTER_TEMPL(bool, ui32)
#define REVERSE_VECTOR_TEMPL(Type) \
    template void Reverse<Type>(Type *x, ui64 size, TCudaStream stream);
REVERSE_VECTOR_TEMPL(char)
REVERSE_VECTOR_TEMPL(float)
REVERSE_VECTOR_TEMPL(unsigned char)
REVERSE_VECTOR_TEMPL(short)
REVERSE_VECTOR_TEMPL(ui16)
REVERSE_VECTOR_TEMPL(int)
REVERSE_VECTOR_TEMPL(ui32)
// PowVector (in-place): despite the name, each element is the EXPONENT --
// x[i] = base ^ x[i].
template <typename T>
__global__ void PowVectorImpl(T* const x, const T base, const ui64 size) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        x[i] = pow(base, x[i]);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void PowVector(T* const x, const ui64 size, const T base, const TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(Min(
        (size + blockSize - 1) / blockSize,
        (ui64)TArchProps::MaxBlockCount()));
    PowVectorImpl<T><<<numBlocks, blockSize, 0, stream>>>(x, base, size);
}
#define Y_CATBOOST_CUDA_F_IMPL(T) \
    template void PowVector<T>(T* x, ui64 size, T base, TCudaStream stream);
Y_MAP_ARGS(
    Y_CATBOOST_CUDA_F_IMPL,
    float);
#undef Y_CATBOOST_CUDA_F_IMPL
// PowVector (out-of-place): y[i] = base ^ x[i], input left untouched.
template <typename T>
__global__ void PowVectorImpl(const T* const x, const T base, const ui64 size, T* y) {
    ui64 i = (ui64)blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        y[i] = pow(base, x[i]);
        i += (ui64)gridDim.x * blockDim.x;
    }
}
template <typename T>
void PowVector(const T* x, const ui64 size, const T base, T* y, const TCudaStream stream) {
    const ui32 blockSize = 512;
    const ui32 numBlocks = SafeIntegerCast<ui32>(Min(
        (size + blockSize - 1) / blockSize,
        (ui64)TArchProps::MaxBlockCount()));
    PowVectorImpl<T><<<numBlocks, blockSize, 0, stream>>>(x, base, size, y);
}
#define Y_CATBOOST_CUDA_F_IMPL(T) \
    template void PowVector<T>(const T* x, ui64 size, T base, T* y, TCudaStream stream);
Y_MAP_ARGS(
    Y_CATBOOST_CUDA_F_IMPL,
    float);
#undef Y_CATBOOST_CUDA_F_IMPL
}
|
1d1e13788c1aea331c5ac5c937d30a74b4e84017.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// code based on
// https://github.com/ramakarl/fluids3/blob/master/fluids/prefix_sum.cu
// TODO: use template argument to avoid repetition for different data types
// number of shared memory banks is 32 after compute capability 3.5
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#ifdef ZERO_BANK_CONFLICTS
// Shared-memory padding offset that removes bank conflicts entirely.
// FIX: each shift term must be parenthesized -- `>>` binds looser than `+`,
// so the unparenthesized original parsed as
// `index >> (LOG_NUM_BANKS + index) >> (2 * LOG_NUM_BANKS)`.
#define CONFLICT_FREE_OFFSET(index) \
  (((index) >> LOG_NUM_BANKS) + ((index) >> (2 * LOG_NUM_BANKS)))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
#include "prefix_sum.h"
#include <ATen/ATen.h>
#include <string>
// Loads one block-sized chunk (two elements per thread) from global memory
// into bank-conflict-padded shared memory.  The computed indices (ai/bi),
// global addresses (mem_ai/mem_bi) and bank offsets are returned by
// reference for reuse by the matching store.  isNP2 is a compile-time flag
// for a non-power-of-two tail block: slots beyond n are zero-padded.
// NOTE(review): the first element g_idata[mem_ai] is read unguarded even in
// the isNP2 case -- assumes mem_ai < n for every thread; confirm at call sites.
template <bool isNP2>
__device__ void loadSharedChunkFromMemInt(int *s_data, const int *g_idata,
                                          int n, int baseIndex, int &ai,
                                          int &bi, int &mem_ai, int &mem_bi,
                                          int &bankOffsetA, int &bankOffsetB) {
  int thid = threadIdx.x;
  mem_ai = baseIndex + threadIdx.x; // global address of this thread's first element
  mem_bi = mem_ai + blockDim.x;     // global address of its second element
  ai = thid;
  bi = thid + blockDim.x;
  bankOffsetA =
      CONFLICT_FREE_OFFSET(ai); // compute spacing to avoid bank conflicts
  bankOffsetB = CONFLICT_FREE_OFFSET(bi);
  s_data[ai + bankOffsetA] =
      g_idata[mem_ai]; // Cache the computational window in shared memory pad
                       // values beyond n with zeros
  if (isNP2) { // compile-time decision
    s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
  } else {
    s_data[bi + bankOffsetB] = g_idata[mem_bi];
  }
}
// Writes the scanned chunk from shared memory back to global memory using the
// indices and bank offsets produced by loadSharedChunkFromMemInt.  The leading
// __syncthreads() ensures the down-sweep has finished before any store.
// isNP2: the tail block guards its second store against reading past n.
template <bool isNP2>
__device__ void storeSharedChunkToMemInt(int *g_odata, const int *s_data, int n,
                                         int ai, int bi, int mem_ai, int mem_bi,
                                         int bankOffsetA, int bankOffsetB) {
  __syncthreads();
  g_odata[mem_ai] = s_data[ai + bankOffsetA]; // write results to global memory
  if (isNP2) { // compile-time decision
    if (bi < n)
      g_odata[mem_bi] = s_data[bi + bankOffsetB];
  } else {
    g_odata[mem_bi] = s_data[bi + bankOffsetB];
  }
}
// Thread 0 zeroes the last (padded) element of the shared-memory tree -- the
// block total -- so the down-sweep yields an *exclusive* scan.  When storeSum
// is set, the total is first saved to g_blockSums[blockIndex] to feed the
// next level of the recursive scan.
template <bool storeSum>
__device__ void clearLastElementInt(int *s_data, int *g_blockSums,
                                    int blockIndex) {
  if (threadIdx.x == 0) {
    int index = (blockDim.x << 1) - 1;  // last of the 2*blockDim.x elements
    index += CONFLICT_FREE_OFFSET(index);
    if (storeSum) { // compile-time decision
      // write this block's total sum to the corresponding index in the
      // blockSums array
      g_blockSums[blockIndex] = s_data[index];
    }
    s_data[index] = 0; // zero the last element in the scan so it will propagate
                       // back to the front
  }
}
// Up-sweep (reduce) phase of the Blelloch scan over the 2*blockDim.x values
// held in shared memory.  Pairs of partial sums are combined in place up the
// tree, doubling the stride each level.  Returns the final stride value,
// which the down-sweep uses as its starting stride (2*blockDim.x when
// blockDim.x is a power of two -- presumably guaranteed by the host driver;
// confirm).
__device__ unsigned int buildSumInt(int *s_data) {
  unsigned int thid = threadIdx.x;
  unsigned int stride = 1;
  // build the sum in place up the tree
  for (int d = blockDim.x; d > 0; d >>= 1) {
    __syncthreads();  // level barrier: all sums of the previous level done
    if (thid < d) {
      int i = __mul24(__mul24(2, stride), thid);
      int ai = i + stride - 1;
      int bi = ai + stride;
      ai += CONFLICT_FREE_OFFSET(ai);  // bank-conflict padding
      bi += CONFLICT_FREE_OFFSET(bi);
      s_data[bi] += s_data[ai];
    }
    stride *= 2;
  }
  return stride;
}
// Down-sweep phase of the Blelloch scan: starting from the stride produced by
// buildSumInt (with the root already zeroed by clearLastElementInt), walks
// back down the tree swapping/accumulating partial sums so the shared array
// ends up holding the exclusive prefix scan.
__device__ void scanRootToLeavesInt(int *s_data, unsigned int stride) {
  unsigned int thid = threadIdx.x;
  // traverse down the tree building the scan in place
  for (int d = 1; d <= blockDim.x; d *= 2) {
    stride >>= 1;
    __syncthreads();  // level barrier before touching the next tree level
    if (thid < d) {
      int i = __mul24(__mul24(2, stride), thid);
      int ai = i + stride - 1;
      int bi = ai + stride;
      ai += CONFLICT_FREE_OFFSET(ai);  // bank-conflict padding
      bi += CONFLICT_FREE_OFFSET(bi);
      // classic down-sweep step: left child gets parent, right child gets
      // parent + old left value
      int t = s_data[ai];
      s_data[ai] = s_data[bi];
      s_data[bi] += t;
    }
  }
}
// Scans one block's chunk held in shared memory: up-sweep, record/zero the
// block total, then down-sweep.  blockIndex == 0 means "use blockIdx.x" as
// the destination slot in blockSums.
template <bool storeSum>
__device__ void prescanBlockInt(int *data, int blockIndex, int *blockSums) {
  int stride = buildSumInt(data); // build the sum in place up the tree
  clearLastElementInt<storeSum>(data, blockSums,
                                (blockIndex == 0) ? blockIdx.x : blockIndex);
  scanRootToLeavesInt(data, stride); // traverse down tree to build the scan
}
// Adds each block's scanned block-sum ("uniform") to every element of the
// corresponding output region, completing the multi-level scan.  One thread
// updates two elements spaced blockDim.x apart.
__global__ void uniformAddInt(int *g_data, int *uniforms, int n,
                              int blockOffset, int baseIndex) {
  __shared__ int uni;  // the block's uniform, broadcast via shared memory
  if (threadIdx.x == 0)
    uni = uniforms[blockIdx.x + blockOffset];
  unsigned int address =
      __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
  __syncthreads();  // make `uni` visible to all threads
  // note two adds per thread
  g_data[address] += uni;
  // NOTE(review): the (threadIdx.x + blockDim.x < n) guard only zeroes the
  // addend; the store to g_data[address + blockDim.x] still executes, so the
  // buffer must extend to a full 2*blockDim.x multiple -- confirm allocation.
  g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
// Work-efficient exclusive scan of one block's 2*blockDim.x-element chunk.
// storeSum: also writes the block total to g_blockSums (multi-level scans).
// isNP2:    the block holds fewer than 2*blockDim.x valid elements (tail).
// Requires dynamic shared memory for the chunk plus bank-conflict padding.
template <bool storeSum, bool isNP2>
__global__ void prescanInt(int *g_odata, const int *g_idata, int *g_blockSums,
                           int n, int blockIndex, int baseIndex) {
  int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
  extern __shared__ int s_dataInt[];
  loadSharedChunkFromMemInt<isNP2>(
      s_dataInt, g_idata, n,
      (baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)) : baseIndex, ai,
      bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
  prescanBlockInt<storeSum>(s_dataInt, blockIndex, g_blockSums);
  storeSharedChunkToMemInt<isNP2>(g_odata, s_dataInt, n, ai, bi, mem_ai, mem_bi,
                                  bankOffsetA, bankOffsetB);
}
// True iff n is a positive power of two.  FIX: the bare bit trick
// (n & (n - 1)) == 0 wrongly classified 0 as a power of two; guard n > 0
// first.  Callers pass element/block counts, which are >= 1 in normal use.
inline bool isPowerOfTwo(int n) { return n > 0 && ((n & (n - 1)) == 0); }
// Largest power of two <= n for positive n (e.g. 5 -> 4, 8 -> 8).
// frexp decomposes n as f * 2^e with f in [0.5, 1), hence 2^(e-1) <= n < 2^e.
// Float precision is sufficient here: callers only pass values <= 2*BLOCK_SIZE.
inline int floorPow2(int n) {
  int exponent = 0;
  (void)frexp(static_cast<float>(n), &exponent);
  return 1 << (exponent - 1);
}
#define BLOCK_SIZE 256
int **g_scanBlockSumsInt = 0;
unsigned int g_numEltsAllocated = 0;
unsigned int g_numLevelsAllocated = 0;
// Converts a runtime-API status to a bool, printing the error string on
// failure.  Returns true on hipSuccess, false otherwise.  `msg` is unused
// except by the commented-out success logging.
bool cudaCheck(hipError_t status, const std::string &msg) {
  if (status == hipSuccess) {
    // app_printf ( "%s. OK.\n", msg );
    return true;
  }
  printf("CUDA ERROR: %s\n", hipGetErrorString(status));
  return false;
}
// Allocates the per-level block-sum scratch arrays needed by the recursive
// scan of up to maxNumElements values.  The first loop counts how many
// recursion levels need scratch (levels with more than one block); the second
// allocates one device array per such level.  Must be paired with
// deallocBlockSumsInt(); the assert guards against double allocation.
void preallocBlockSumsInt(unsigned int maxNumElements) {
  assert(g_numEltsAllocated == 0); // shouldn't be called
  g_numEltsAllocated = maxNumElements;
  unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
  unsigned int numElts = maxNumElements;
  int level = 0;
  // pass 1: count levels (each block scans 2*blockSize elements)
  do {
    unsigned int numBlocks =
        max(1, (int)ceil((float)numElts / (2.f * blockSize)));
    if (numBlocks > 1)
      level++;
    numElts = numBlocks;
  } while (numElts > 1);
  g_scanBlockSumsInt = (int **)malloc(level * sizeof(int *));
  g_numLevelsAllocated = level;
  numElts = maxNumElements;
  level = 0;
  // pass 2: allocate one block-sum array per level
  do {
    unsigned int numBlocks =
        max(1, (int)ceil((float)numElts / (2.f * blockSize)));
    if (numBlocks > 1)
      cudaCheck(hipMalloc((void **)&g_scanBlockSumsInt[level++],
                          numBlocks * sizeof(int)),
                "Malloc prescanBlockSumsInt g_scanBlockSumsInt");
    numElts = numBlocks;
  } while (numElts > 1);
}
// Frees every per-level block-sum array allocated by preallocBlockSumsInt and
// resets the module-level bookkeeping so prealloc can be called again.
void deallocBlockSumsInt() {
  if (g_scanBlockSumsInt != 0x0) {
    for (unsigned int i = 0; i < g_numLevelsAllocated; i++)
      cudaCheck(hipFree(g_scanBlockSumsInt[i]),
                "Malloc deallocBlockSumsInt g_scanBlockSumsInt");
    free((void **)g_scanBlockSumsInt);
  }
  g_scanBlockSumsInt = 0;
  g_numEltsAllocated = 0;
  g_numLevelsAllocated = 0;
}
// Recursive host driver for the large-array exclusive scan: scans each
// block's chunk on the GPU (storing per-block totals), recursively scans the
// totals, then adds the scanned totals back with uniformAddInt.  A
// non-power-of-two tail block is handled by a separate single-block launch.
// Requires preallocBlockSumsInt(numElements) to have been called for level 0.
void prescanArrayRecursiveInt(int *outArray, const int *inArray,
                              int numElements, int level) {
  unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
  unsigned int numBlocks =
      max(1, (int)ceil((float)numElements / (2.f * blockSize)));
  unsigned int numThreads;
  if (numBlocks > 1)
    numThreads = blockSize;
  else if (isPowerOfTwo(numElements))
    numThreads = numElements / 2;
  else
    numThreads = floorPow2(numElements);
  unsigned int numEltsPerBlock = numThreads * 2; // each thread handles 2 elements
  // if this is a non-power-of-2 array, the last block will be non-full
  // compute the smallest power of 2 able to compute its scan.
  unsigned int numEltsLastBlock =
      numElements - (numBlocks - 1) * numEltsPerBlock;
  unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
  unsigned int np2LastBlock = 0;
  unsigned int sharedMemLastBlock = 0;
  if (numEltsLastBlock != numEltsPerBlock) {
    np2LastBlock = 1;
    if (!isPowerOfTwo(numEltsLastBlock))
      numThreadsLastBlock = floorPow2(numEltsLastBlock);
    // shared memory: chunk plus bank-conflict padding
    unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
    sharedMemLastBlock = sizeof(float) * (2 * numThreadsLastBlock + extraSpace);
  }
  // padding space is used to avoid shared memory bank conflicts
  unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
  unsigned int sharedMemSize = sizeof(float) * (numEltsPerBlock + extraSpace);
#ifdef DEBUG
  if (numBlocks > 1)
    assert(g_numEltsAllocated >= numElements);
#endif
  // setup execution parameters
  // if NP2, we process the last block separately
  dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
  dim3 threads(numThreads, 1, 1);
  // execute the scan
  if (numBlocks > 1) {
    // scan all full blocks, recording each block's total sum
    hipLaunchKernelGGL(( prescanInt<true, false>), dim3(grid), dim3(threads), sharedMemSize, 0,
        outArray, inArray, g_scanBlockSumsInt[level], numThreads * 2, 0, 0);
    if (np2LastBlock) {
      // scan the non-full tail block on its own
      hipLaunchKernelGGL(( prescanInt<true, true>), dim3(1), dim3(numThreadsLastBlock), sharedMemLastBlock, 0,
          outArray, inArray, g_scanBlockSumsInt[level], numEltsLastBlock,
          numBlocks - 1, numElements - numEltsLastBlock);
    }
    // After scanning all the sub-blocks, we are mostly done. But now we
    // need to take all of the last values of the sub-blocks and scan those.
    // This will give us a new value that must be added to each block to
    // get the final results.
    // recursive (CPU) call
    prescanArrayRecursiveInt(g_scanBlockSumsInt[level],
                             g_scanBlockSumsInt[level], numBlocks, level + 1);
    hipLaunchKernelGGL(( uniformAddInt), dim3(grid), dim3(threads), 0, 0, outArray, g_scanBlockSumsInt[level],
        numElements - numEltsLastBlock, 0, 0);
    if (np2LastBlock) {
      hipLaunchKernelGGL(( uniformAddInt), dim3(1), dim3(numThreadsLastBlock), 0, 0,
          outArray, g_scanBlockSumsInt[level], numEltsLastBlock, numBlocks - 1,
          numElements - numEltsLastBlock);
    }
  } else if (isPowerOfTwo(numElements)) {
    // single full block: no block sums needed
    hipLaunchKernelGGL(( prescanInt<false, false>), dim3(grid), dim3(threads), sharedMemSize, 0,
        outArray, inArray, 0, numThreads * 2, 0, 0);
  } else {
    // single non-power-of-two block
    hipLaunchKernelGGL(( prescanInt<false, true>), dim3(grid), dim3(threads), sharedMemSize, 0,
        outArray, inArray, 0, numElements, 0, 0);
  }
}
// params should be located on cpu
// For each batch element n, computes the exclusive prefix sum of grid_cnt[n]
// over its first params[n][GRID_3D_TOTAL] cells and returns the result as
// grid_off (remaining cells stay 0).
// NOTE(review): the block-sum scratch is allocated and freed on every loop
// iteration (hipMalloc per batch element -- a known cost), and
// grid_off.contiguous() returns a temporary; writing through its data_ptr is
// safe only because at::full already yields a contiguous tensor -- confirm if
// the construction ever changes.
at::Tensor PrefixSumCUDA(const at::Tensor grid_cnt, const at::Tensor params) {
  int N = grid_cnt.size(0); // batch size
  int G = grid_cnt.size(1); // grid cells per batch element
  auto params_a = params.accessor<float, 2>(); // CPU accessor; params must be on CPU
  // at::Tensor grid_off = at::full({N, G}, -1, grid_cnt.options());
  at::Tensor grid_off = at::full({N, G}, 0, grid_cnt.options());
  for (int n = 0; n < N; ++n) {
    // std::cout << "prefixsum iter " << n << std::endl;
    int num_grids = params_a[n][GRID_3D_TOTAL]; // float -> int truncation
    // std::cout << num_grids << std::endl;
    preallocBlockSumsInt(num_grids);
    prescanArrayRecursiveInt(grid_off.contiguous().data_ptr<int>() + n * G,
                             grid_cnt.contiguous().data_ptr<int>() + n * G,
                             num_grids, 0);
    deallocBlockSumsInt();
  }
  return grid_off;
} | 1d1e13788c1aea331c5ac5c937d30a74b4e84017.cu | // code based on
// https://github.com/ramakarl/fluids3/blob/master/fluids/prefix_sum.cu
// TODO: use template argument to avoid repetition for different data types
// number of shared memory banks is 32 after compute capability 3.5
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#ifdef ZERO_BANK_CONFLICTS
// Shared-memory padding offset that removes bank conflicts entirely.
// FIX: each shift term must be parenthesized -- `>>` binds looser than `+`,
// so the unparenthesized original parsed as
// `index >> (LOG_NUM_BANKS + index) >> (2 * LOG_NUM_BANKS)`.
#define CONFLICT_FREE_OFFSET(index) \
  (((index) >> LOG_NUM_BANKS) + ((index) >> (2 * LOG_NUM_BANKS)))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
#include "prefix_sum.h"
#include <ATen/ATen.h>
#include <string>
template <bool isNP2>
__device__ void loadSharedChunkFromMemInt(int *s_data, const int *g_idata,
int n, int baseIndex, int &ai,
int &bi, int &mem_ai, int &mem_bi,
int &bankOffsetA, int &bankOffsetB) {
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
ai = thid;
bi = thid + blockDim.x;
bankOffsetA =
CONFLICT_FREE_OFFSET(ai); // compute spacing to avoid bank conflicts
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
s_data[ai + bankOffsetA] =
g_idata[mem_ai]; // Cache the computational window in shared memory pad
// values beyond n with zeros
if (isNP2) { // compile-time decision
s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
} else {
s_data[bi + bankOffsetB] = g_idata[mem_bi];
}
}
template <bool isNP2>
__device__ void storeSharedChunkToMemInt(int *g_odata, const int *s_data, int n,
int ai, int bi, int mem_ai, int mem_bi,
int bankOffsetA, int bankOffsetB) {
__syncthreads();
g_odata[mem_ai] = s_data[ai + bankOffsetA]; // write results to global memory
if (isNP2) { // compile-time decision
if (bi < n)
g_odata[mem_bi] = s_data[bi + bankOffsetB];
} else {
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
template <bool storeSum>
__device__ void clearLastElementInt(int *s_data, int *g_blockSums,
int blockIndex) {
if (threadIdx.x == 0) {
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) { // compile-time decision
// write this block's total sum to the corresponding index in the
// blockSums array
g_blockSums[blockIndex] = s_data[index];
}
s_data[index] = 0; // zero the last element in the scan so it will propagate
// back to the front
}
}
__device__ unsigned int buildSumInt(int *s_data) {
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
__device__ void scanRootToLeavesInt(int *s_data, unsigned int stride) {
unsigned int thid = threadIdx.x;
// traverse down the tree building the scan in place
for (int d = 1; d <= blockDim.x; d *= 2) {
stride >>= 1;
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
template <bool storeSum>
__device__ void prescanBlockInt(int *data, int blockIndex, int *blockSums) {
int stride = buildSumInt(data); // build the sum in place up the tree
clearLastElementInt<storeSum>(data, blockSums,
(blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeavesInt(data, stride); // traverse down tree to build the scan
}
// Adds each block's scanned block-sum ("uniform") to every element of the
// corresponding output region, completing the multi-level scan.  One thread
// updates two elements spaced blockDim.x apart.
__global__ void uniformAddInt(int *g_data, int *uniforms, int n,
                              int blockOffset, int baseIndex) {
  __shared__ int uni;  // the block's uniform, broadcast via shared memory
  if (threadIdx.x == 0)
    uni = uniforms[blockIdx.x + blockOffset];
  unsigned int address =
      __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
  __syncthreads();  // make `uni` visible to all threads
  // note two adds per thread
  g_data[address] += uni;
  // NOTE(review): the (threadIdx.x + blockDim.x < n) guard only zeroes the
  // addend; the store to g_data[address + blockDim.x] still executes, so the
  // buffer must extend to a full 2*blockDim.x multiple -- confirm allocation.
  g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
template <bool storeSum, bool isNP2>
__global__ void prescanInt(int *g_odata, const int *g_idata, int *g_blockSums,
int n, int blockIndex, int baseIndex) {
int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
extern __shared__ int s_dataInt[];
loadSharedChunkFromMemInt<isNP2>(
s_dataInt, g_idata, n,
(baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)) : baseIndex, ai,
bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
prescanBlockInt<storeSum>(s_dataInt, blockIndex, g_blockSums);
storeSharedChunkToMemInt<isNP2>(g_odata, s_dataInt, n, ai, bi, mem_ai, mem_bi,
bankOffsetA, bankOffsetB);
}
// True iff n is a positive power of two.  FIX: the bare bit trick
// (n & (n - 1)) == 0 wrongly classified 0 as a power of two; guard n > 0
// first.  Callers pass element/block counts, which are >= 1 in normal use.
inline bool isPowerOfTwo(int n) { return n > 0 && ((n & (n - 1)) == 0); }
// Largest power of two <= n for positive n (e.g. 5 -> 4, 8 -> 8).
// frexp decomposes n as f * 2^e with f in [0.5, 1), hence 2^(e-1) <= n < 2^e.
// Float precision is sufficient here: callers only pass values <= 2*BLOCK_SIZE.
inline int floorPow2(int n) {
  int exponent = 0;
  (void)frexp(static_cast<float>(n), &exponent);
  return 1 << (exponent - 1);
}
#define BLOCK_SIZE 256
int **g_scanBlockSumsInt = 0;
unsigned int g_numEltsAllocated = 0;
unsigned int g_numLevelsAllocated = 0;
// Converts a runtime-API status to a bool, printing the error string on
// failure.  Returns true on cudaSuccess, false otherwise.  `msg` is unused
// except by the commented-out success logging.
bool cudaCheck(cudaError_t status, const std::string &msg) {
  if (status == cudaSuccess) {
    // app_printf ( "%s. OK.\n", msg );
    return true;
  }
  printf("CUDA ERROR: %s\n", cudaGetErrorString(status));
  return false;
}
void preallocBlockSumsInt(unsigned int maxNumElements) {
assert(g_numEltsAllocated == 0); // shouldn't be called
g_numEltsAllocated = maxNumElements;
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numElts = maxNumElements;
int level = 0;
do {
unsigned int numBlocks =
max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1)
level++;
numElts = numBlocks;
} while (numElts > 1);
g_scanBlockSumsInt = (int **)malloc(level * sizeof(int *));
g_numLevelsAllocated = level;
numElts = maxNumElements;
level = 0;
do {
unsigned int numBlocks =
max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1)
cudaCheck(cudaMalloc((void **)&g_scanBlockSumsInt[level++],
numBlocks * sizeof(int)),
"Malloc prescanBlockSumsInt g_scanBlockSumsInt");
numElts = numBlocks;
} while (numElts > 1);
}
void deallocBlockSumsInt() {
if (g_scanBlockSumsInt != 0x0) {
for (unsigned int i = 0; i < g_numLevelsAllocated; i++)
cudaCheck(cudaFree(g_scanBlockSumsInt[i]),
"Malloc deallocBlockSumsInt g_scanBlockSumsInt");
free((void **)g_scanBlockSumsInt);
}
g_scanBlockSumsInt = 0;
g_numEltsAllocated = 0;
g_numLevelsAllocated = 0;
}
void prescanArrayRecursiveInt(int *outArray, const int *inArray,
int numElements, int level) {
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numBlocks =
max(1, (int)ceil((float)numElements / (2.f * blockSize)));
unsigned int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = floorPow2(numElements);
unsigned int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
unsigned int numEltsLastBlock =
numElements - (numBlocks - 1) * numEltsPerBlock;
unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
unsigned int np2LastBlock = 0;
unsigned int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if (!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(float) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
unsigned int sharedMemSize = sizeof(float) * (numEltsPerBlock + extraSpace);
#ifdef DEBUG
if (numBlocks > 1)
assert(g_numEltsAllocated >= numElements);
#endif
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
// execute the scan
if (numBlocks > 1) {
prescanInt<true, false><<<grid, threads, sharedMemSize>>>(
outArray, inArray, g_scanBlockSumsInt[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
prescanInt<true, true><<<1, numThreadsLastBlock, sharedMemLastBlock>>>(
outArray, inArray, g_scanBlockSumsInt[level], numEltsLastBlock,
numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursiveInt(g_scanBlockSumsInt[level],
g_scanBlockSumsInt[level], numBlocks, level + 1);
uniformAddInt<<<grid, threads>>>(outArray, g_scanBlockSumsInt[level],
numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
uniformAddInt<<<1, numThreadsLastBlock>>>(
outArray, g_scanBlockSumsInt[level], numEltsLastBlock, numBlocks - 1,
numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
prescanInt<false, false><<<grid, threads, sharedMemSize>>>(
outArray, inArray, 0, numThreads * 2, 0, 0);
} else {
prescanInt<false, true><<<grid, threads, sharedMemSize>>>(
outArray, inArray, 0, numElements, 0, 0);
}
}
// params should be located on cpu
// For each batch element n, computes the exclusive prefix sum of grid_cnt[n]
// over its first params[n][GRID_3D_TOTAL] cells and returns the result as
// grid_off (remaining cells stay 0).
// NOTE(review): the block-sum scratch is allocated and freed on every loop
// iteration (cudaMalloc per batch element -- a known cost), and
// grid_off.contiguous() returns a temporary; writing through its data_ptr is
// safe only because at::full already yields a contiguous tensor -- confirm if
// the construction ever changes.
at::Tensor PrefixSumCUDA(const at::Tensor grid_cnt, const at::Tensor params) {
  int N = grid_cnt.size(0); // batch size
  int G = grid_cnt.size(1); // grid cells per batch element
  auto params_a = params.accessor<float, 2>(); // CPU accessor; params must be on CPU
  // at::Tensor grid_off = at::full({N, G}, -1, grid_cnt.options());
  at::Tensor grid_off = at::full({N, G}, 0, grid_cnt.options());
  for (int n = 0; n < N; ++n) {
    // std::cout << "prefixsum iter " << n << std::endl;
    int num_grids = params_a[n][GRID_3D_TOTAL]; // float -> int truncation
    // std::cout << num_grids << std::endl;
    preallocBlockSumsInt(num_grids);
    prescanArrayRecursiveInt(grid_off.contiguous().data_ptr<int>() + n * G,
                             grid_cnt.contiguous().data_ptr<int>() + n * G,
                             num_grids, 0);
    deallocBlockSumsInt();
  }
  return grid_off;
} |
888bfdd327101871866ee97098f062ea11ebdfbf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "overlapper_triggered.hpp"
#include <fstream>
#include <cstdlib>
#include <hipcub/hipcub.hpp>
#include <thrust/execution_policy.h>
#include <claraparabricks/genomeworks/utils/cudautils.hpp>
#ifndef NDEBUG // only needed to check if input is sorted in assert
#include <algorithm>
#include <thrust/host_vector.h>
#endif
namespace claraparabricks
{
namespace genomeworks
{
namespace cudamapper
{
// Equality used by the run-length encode to decide whether two consecutive
// anchors belong to the same chain: same query/target read pair AND the two
// anchors lie within 150 bases of each other on both query and target.
// NOTE(review): the 150-base window and score threshold are hard-coded
// chaining heuristics inherited from the triggered-overlap scheme.
__host__ __device__ bool operator==(const Anchor& lhs,
                                    const Anchor& rhs)
{
    auto score_threshold = 1;
    // Very simple scoring function to quantify quality of overlaps.
    auto score = 1;
    if ((rhs.query_position_in_read_ - lhs.query_position_in_read_) < 150 and abs(int(rhs.target_position_in_read_) - int(lhs.target_position_in_read_)) < 150)
        score = 2;
    return ((lhs.query_read_id_ == rhs.query_read_id_) &&
            (lhs.target_read_id_ == rhs.target_read_id_) &&
            score > score_threshold);
}
// Minimal key used by the reduce-by-key fusion step: a chain is identified by
// its first anchor (compared with the dedicated operator== below).
struct cuOverlapKey
{
    const Anchor* anchor; // first anchor of the chain this key represents
};
// TransformInputIterator functor: maps an overlap index (into the filtered
// chain list) to a cuOverlapKey pointing at that chain's starting anchor.
struct cuOverlapKey_transform
{
    const Anchor* d_anchors;      // all anchors (device memory)
    const int32_t* d_chain_start; // per-chain index of the first anchor
    cuOverlapKey_transform(const Anchor* anchors, const int32_t* chain_start)
        : d_anchors(anchors)
        , d_chain_start(chain_start)
    {
    }
    // idx indexes d_chain_start; the key holds a pointer to the chain's
    // first anchor.
    __host__ __device__ __forceinline__ cuOverlapKey
    operator()(const int32_t& idx) const
    {
        auto anchor_idx = d_chain_start[idx];
        cuOverlapKey key;
        key.anchor = &d_anchors[anchor_idx];
        return key;
    }
};
// Two chains belong to the same fused overlap when they pair the same
// query/target reads and the difference between their query- and
// target-position offsets (a diagonal-distance proxy) is under 300 bases.
__host__ __device__ bool operator==(const cuOverlapKey& key0,
                                    const cuOverlapKey& key1)
{
    const Anchor* a = key0.anchor;
    const Anchor* b = key1.anchor;
    int distance_difference = abs(abs(int(a->query_position_in_read_) - int(b->query_position_in_read_)) -
                                  abs(int(a->target_position_in_read_) - int(b->target_position_in_read_)));
    bool equal = (a->target_read_id_ == b->target_read_id_) &&
                 (a->query_read_id_ == b->query_read_id_) &&
                 distance_difference < 300;
    return equal;
}
// Minimal value type carried through the reduce-by-key fusion: the anchor
// index range [overlap_start, overlap_end) of a (possibly fused) chain plus
// its residue (anchor) count.
struct cuOverlapArgs
{
    int32_t overlap_end;   // one past the last anchor index of the chain
    int32_t num_residues;  // number of anchors in the chain
    int32_t overlap_start; // first anchor index of the chain
};
// TransformInputIterator functor: maps an overlap index to its cuOverlapArgs
// (anchor start index, end index, residue count) using the per-chain start
// and length arrays produced by run-length encoding and the exclusive scan.
struct cuOverlapArgs_transform
{
    const int32_t* d_chain_start;  // per-chain first-anchor index
    const int32_t* d_chain_length; // per-chain anchor count
    cuOverlapArgs_transform(const int32_t* chain_start, const int32_t* chain_length)
        : d_chain_start(chain_start)
        , d_chain_length(chain_length)
    {
    }
    __host__ __device__ __forceinline__ cuOverlapArgs
    operator()(const int32_t& idx) const
    {
        cuOverlapArgs overlap;
        auto overlap_start = d_chain_start[idx];
        auto overlap_length = d_chain_length[idx];
        overlap.overlap_end = overlap_start + overlap_length;
        overlap.num_residues = overlap_length;
        overlap.overlap_start = overlap_start;
        return overlap;
    }
};
// Binary reduction functor that merges two chains belonging to the same fused
// overlap: residue counts add up, and the merged span runs from the earliest
// start to the latest end of the two inputs.
struct FuseOverlapOp
{
    __host__ __device__ cuOverlapArgs operator()(const cuOverlapArgs& a,
                                                 const cuOverlapArgs& b) const
    {
        cuOverlapArgs merged;
        merged.num_residues = a.num_residues + b.num_residues;
        merged.overlap_start =
            (b.overlap_start < a.overlap_start) ? b.overlap_start : a.overlap_start;
        merged.overlap_end =
            (b.overlap_end > a.overlap_end) ? b.overlap_end : a.overlap_end;
        return merged;
    }
};
// Predicate applied to fused overlaps; keeps an overlap only if it
//   * contains at least min_residues anchors,
//   * averages fewer than min_bases_per_residue bases per anchor
//     (NOTE(review): integer division -- the ratio is truncated first),
//   * spans at least min_overlap_len bases on both query and target,
//   * is not a read overlapping itself, and
//   * both the query span and the target span cover more than
//     min_overlap_fraction of the longer of the two spans.
struct FilterOverlapOp
{
    size_t min_residues;          // minimum anchor count
    size_t min_overlap_len;       // minimum span length (query and target)
    size_t min_bases_per_residue; // maximum average bases per anchor
    float min_overlap_fraction;   // minimum query/target span ratio
    __host__ __device__ __forceinline__ FilterOverlapOp(size_t min_residues,
                                                        size_t min_overlap_len,
                                                        size_t min_bases_per_residue,
                                                        float min_overlap_fraction)
        : min_residues(min_residues)
        , min_overlap_len(min_overlap_len)
        , min_bases_per_residue(min_bases_per_residue)
        , min_overlap_fraction(min_overlap_fraction)
    {
    }
    __host__ __device__ __forceinline__ bool operator()(const Overlap& overlap) const
    {
        const auto target_overlap_length = overlap.target_end_position_in_read_ - overlap.target_start_position_in_read_;
        const auto query_overlap_length = overlap.query_end_position_in_read_ - overlap.query_start_position_in_read_;
        const auto overlap_length = max(target_overlap_length, query_overlap_length);
        return ((overlap.num_residues_ >= min_residues) &&
                ((overlap_length / overlap.num_residues_) < min_bases_per_residue) &&
                (query_overlap_length >= min_overlap_len) &&
                (target_overlap_length >= min_overlap_len) &&
                (overlap.query_read_id_ != overlap.target_read_id_) &&
                ((static_cast<float>(target_overlap_length) / static_cast<float>(overlap_length)) > min_overlap_fraction) &&
                ((static_cast<float>(query_overlap_length) / static_cast<float>(overlap_length)) > min_overlap_fraction));
    }
};
// Functor that materializes a full Overlap record from a fused chain's
// cuOverlapArgs: coordinates come from the chain's first and last anchors,
// and a reversed target span is normalized to Forward/Reverse strand form.
struct CreateOverlap
{
    const Anchor* d_anchors; // all anchors (device memory)
    __host__ __device__ __forceinline__ CreateOverlap(const Anchor* anchors_ptr)
        : d_anchors(anchors_ptr)
    {
    }
    __host__ __device__ __forceinline__ Overlap
    operator()(cuOverlapArgs overlap)
    {
        // boundary anchors of the fused chain
        Anchor overlap_start_anchor = d_anchors[overlap.overlap_start];
        Anchor overlap_end_anchor = d_anchors[overlap.overlap_end - 1];
        Overlap new_overlap;
        new_overlap.query_read_id_ = overlap_end_anchor.query_read_id_;
        new_overlap.target_read_id_ = overlap_end_anchor.target_read_id_;
        new_overlap.num_residues_ = overlap.num_residues;
        new_overlap.target_end_position_in_read_ =
            overlap_end_anchor.target_position_in_read_;
        new_overlap.target_start_position_in_read_ =
            overlap_start_anchor.target_position_in_read_;
        new_overlap.query_end_position_in_read_ =
            overlap_end_anchor.query_position_in_read_;
        new_overlap.query_start_position_in_read_ =
            overlap_start_anchor.query_position_in_read_;
        new_overlap.overlap_complete = true;
        // If the target start position is greater than the target end position
        // We can safely assume that the query and target are template and
        // complement reads. TODO: Incorporate sketchelement direction value when
        // this is implemented
        if (new_overlap.target_start_position_in_read_ >
            new_overlap.target_end_position_in_read_)
        {
            new_overlap.relative_strand = RelativeStrand::Reverse;
            // swap so start <= end always holds in the emitted record
            auto tmp = new_overlap.target_end_position_in_read_;
            new_overlap.target_end_position_in_read_ =
                new_overlap.target_start_position_in_read_;
            new_overlap.target_start_position_in_read_ = tmp;
        }
        else
        {
            new_overlap.relative_strand = RelativeStrand::Forward;
        }
        return new_overlap;
    };
};
// Stores the device allocator and the stream on which all of this
// overlapper's work (kernels, CUB calls, thrust calls, copies) is enqueued.
OverlapperTriggered::OverlapperTriggered(DefaultDeviceAllocator allocator,
                                         const hipStream_t cuda_stream)
    : _allocator(allocator)
    , _cuda_stream(cuda_stream)
{
}
// Produces filtered, fused overlaps from a sorted anchor array, entirely on
// the GPU:
//   1. run-length encode anchors into chains (custom Anchor operator==),
//   2. exclusive-scan the chain lengths to get each chain's start index,
//   3. keep chains with >= tail_length_for_chain anchors as candidates,
//   4. reduce-by-key neighbouring chains into fused overlaps,
//   5. materialize Overlap records, filter them, and copy them to the host.
// Precondition: d_anchors is sorted by (query id, target id, query pos,
// target pos) -- verified by the assert in debug builds.  Blocks on
// _cuda_stream before returning, so `fused_overlaps` is ready for the caller.
void OverlapperTriggered::get_overlaps(std::vector<Overlap>& fused_overlaps,
                                       const device_buffer<Anchor>& d_anchors,
                                       int64_t min_residues,
                                       int64_t min_overlap_len,
                                       int64_t min_bases_per_residue,
                                       float min_overlap_fraction)
{
    GW_NVTX_RANGE(profiler, "OverlapperTriggered::get_overlaps");
    const auto tail_length_for_chain = 3; // minimum anchors for a chain to become an overlap
    auto n_anchors = d_anchors.size();
#ifndef NDEBUG
    // check if anchors are sorted properly
    // TODO: Copying data to host and doing the check there as using thrust::is_sorted
    // leads to a compilaiton error. It is probably a bug in device_buffer implementation
    thrust::host_vector<Anchor> h_anchors(d_anchors.size());
    cudautils::device_copy_n(d_anchors.data(), d_anchors.size(), h_anchors.data()); // D2H
    auto comp_anchors = [](const Anchor& i, const Anchor& j) { return (i.query_read_id_ < j.query_read_id_) ||
                                                                      ((i.query_read_id_ == j.query_read_id_) &&
                                                                       (i.target_read_id_ < j.target_read_id_)) ||
                                                                      ((i.query_read_id_ == j.query_read_id_) &&
                                                                       (i.target_read_id_ == j.target_read_id_) &&
                                                                       (i.query_position_in_read_ < j.query_position_in_read_)) ||
                                                                      ((i.query_read_id_ == j.query_read_id_) &&
                                                                       (i.target_read_id_ == j.target_read_id_) &&
                                                                       (i.query_position_in_read_ == j.query_position_in_read_) &&
                                                                       (i.target_position_in_read_ < j.target_position_in_read_)); };
    assert(std::is_sorted(std::begin(h_anchors),
                          std::end(h_anchors),
                          comp_anchors));
#endif
    // temporary workspace buffer on device
    device_buffer<char> d_temp_buf(_allocator, _cuda_stream);
    // Do run length encode to compute the chains
    // note - identifies the start and end anchor of the chain without moving the anchors
    // >>>>>>>>>
    // d_start_anchor[i] contains the starting anchor of chain i
    device_buffer<Anchor> d_start_anchor(n_anchors, _allocator, _cuda_stream);
    // d_chain_length[i] contains the length of chain i
    device_buffer<int32_t> d_chain_length(n_anchors, _allocator, _cuda_stream);
    // total number of chains found
    device_buffer<int32_t> d_nchains(1, _allocator, _cuda_stream);
    //The equality of two anchors has been overriden, such that they are equal (members of the same chain) if their QID,TID are equal and they fall within a fixed distance of one another
    void* d_temp_storage = nullptr;
    size_t temp_storage_bytes = 0;
    // calculate storage requirement for run length encoding
    hipcub::DeviceRunLengthEncode::Encode(
        d_temp_storage, temp_storage_bytes, d_anchors.data(), d_start_anchor.data(),
        d_chain_length.data(), d_nchains.data(), n_anchors, _cuda_stream);
    // allocate temporary storage
    d_temp_buf.clear_and_resize(temp_storage_bytes);
    d_temp_storage = d_temp_buf.data();
    // run encoding
    hipcub::DeviceRunLengthEncode::Encode(
        d_temp_storage, temp_storage_bytes, d_anchors.data(), d_start_anchor.data(),
        d_chain_length.data(), d_nchains.data(), n_anchors, _cuda_stream);
    // <<<<<<<<<<
    // memcpy D2H (synchronizes on _cuda_stream)
    auto n_chains = cudautils::get_value_from_device(d_nchains.data(), _cuda_stream); //We now know the number of chains we are working with.
    // use prefix sum to calculate the starting index position of all the chains
    // >>>>>>>>>>>>
    // for a chain i, d_chain_start[i] contains the index of starting anchor from d_anchors array
    device_buffer<int32_t> d_chain_start(n_chains, _allocator, _cuda_stream);
    d_temp_storage = nullptr;
    temp_storage_bytes = 0;
    hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
                                     d_chain_length.data(), d_chain_start.data(),
                                     n_chains, _cuda_stream);
    // allocate temporary storage
    d_temp_buf.clear_and_resize(temp_storage_bytes);
    d_temp_storage = d_temp_buf.data();
    hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
                                     d_chain_length.data(), d_chain_start.data(),
                                     n_chains, _cuda_stream);
    // <<<<<<<<<<<<
    // calculate overlaps where overlap is a chain with length > tail_length_for_chain
    // >>>>>>>>>>>>
    auto thrust_exec_policy = thrust::hip::par(_allocator).on(_cuda_stream);
    // d_overlaps[j] contains index to d_chain_length/d_chain_start where
    // d_chain_length[d_overlaps[j]] and d_chain_start[d_overlaps[j]] corresponds
    // to length and index to starting anchor of the chain-d_overlaps[j] (also referred as overlap j)
    device_buffer<int32_t> d_overlaps(n_chains, _allocator, _cuda_stream);
    auto indices_end =
        thrust::copy_if(thrust_exec_policy, thrust::make_counting_iterator<int32_t>(0),
                        thrust::make_counting_iterator<int32_t>(n_chains),
                        d_chain_length.data(), d_overlaps.data(),
                        [=] __host__ __device__(const int32_t& len) -> bool {
                            return (len >= tail_length_for_chain);
                        });
    auto n_overlaps = indices_end - d_overlaps.data();
    // <<<<<<<<<<<<<
    // >>>>>>>>>>>>
    // fuse overlaps using reduce by key operations
    // key is a minimal data structure that is required to compare the overlaps
    cuOverlapKey_transform key_op(d_anchors.data(),
                                  d_chain_start.data());
    hipcub::TransformInputIterator<cuOverlapKey, cuOverlapKey_transform, int32_t*>
        d_keys_in(d_overlaps.data(),
                  key_op);
    // value is a minimal data structure that represents a overlap
    cuOverlapArgs_transform value_op(d_chain_start.data(),
                                     d_chain_length.data());
    hipcub::TransformInputIterator<cuOverlapArgs, cuOverlapArgs_transform, int32_t*>
        d_values_in(d_overlaps.data(),
                    value_op);
    device_buffer<cuOverlapKey> d_fusedoverlap_keys(n_overlaps, _allocator, _cuda_stream);
    device_buffer<cuOverlapArgs> d_fusedoverlaps_args(n_overlaps, _allocator, _cuda_stream);
    device_buffer<int32_t> d_nfused_overlaps(1, _allocator, _cuda_stream);
    FuseOverlapOp reduction_op;
    d_temp_storage = nullptr;
    temp_storage_bytes = 0;
    hipcub::DeviceReduce::ReduceByKey(d_temp_storage,
                                      temp_storage_bytes,
                                      d_keys_in,
                                      d_fusedoverlap_keys.data(), d_values_in,
                                      d_fusedoverlaps_args.data(), d_nfused_overlaps.data(),
                                      reduction_op,
                                      n_overlaps,
                                      _cuda_stream);
    // allocate temporary storage
    d_temp_buf.clear_and_resize(temp_storage_bytes);
    d_temp_storage = d_temp_buf.data();
    hipcub::DeviceReduce::ReduceByKey(d_temp_storage,
                                      temp_storage_bytes,
                                      d_keys_in,
                                      d_fusedoverlap_keys.data(), //Write out the unique keys here
                                      d_values_in,
                                      d_fusedoverlaps_args.data(), //Write out the values here
                                      d_nfused_overlaps.data(),
                                      reduction_op,
                                      n_overlaps,
                                      _cuda_stream);
    // memcpyD2H
    auto n_fused_overlap = cudautils::get_value_from_device(d_nfused_overlaps.data(), _cuda_stream);
    // construct overlap from the overlap args
    CreateOverlap fuse_op(d_anchors.data());
    device_buffer<Overlap> d_fused_overlaps(n_fused_overlap, _allocator, _cuda_stream); //Overlaps written here
    thrust::transform(thrust_exec_policy, d_fusedoverlaps_args.data(),
                      d_fusedoverlaps_args.data() + n_fused_overlap,
                      d_fused_overlaps.data(), fuse_op);
    // drop overlaps failing the length/residue/fraction thresholds
    device_buffer<Overlap> d_filtered_overlaps(n_fused_overlap, _allocator, _cuda_stream);
    FilterOverlapOp filterOp(min_residues, min_overlap_len, min_bases_per_residue, min_overlap_fraction);
    auto filtered_overlaps_end =
        thrust::copy_if(thrust_exec_policy,
                        d_fused_overlaps.data(), d_fused_overlaps.data() + n_fused_overlap,
                        d_filtered_overlaps.data(),
                        filterOp);
    auto n_filtered_overlaps = filtered_overlaps_end - d_filtered_overlaps.data();
    // memcpyD2H - move fused and filtered overlaps to host
    fused_overlaps.resize(n_filtered_overlaps);
    cudautils::device_copy_n(d_filtered_overlaps.data(), n_filtered_overlaps, fused_overlaps.data(), _cuda_stream);
    // This is not completely necessary, but if removed one has to make sure that the next step
    // uses the same stream or that sync is done in caller
    GW_CU_CHECK_ERR(hipStreamSynchronize(_cuda_stream));
}
} // namespace cudamapper
} // namespace genomeworks
} // namespace claraparabricks
| 888bfdd327101871866ee97098f062ea11ebdfbf.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "overlapper_triggered.hpp"
#include <fstream>
#include <cstdlib>
#include <cub/cub.cuh>
#include <thrust/execution_policy.h>
#include <claraparabricks/genomeworks/utils/cudautils.hpp>
#ifndef NDEBUG // only needed to check if input is sorted in assert
#include <algorithm>
#include <thrust/host_vector.h>
#endif
namespace claraparabricks
{
namespace genomeworks
{
namespace cudamapper
{
// Equality for anchors: two anchors compare equal (i.e. belong to the same
// chain for the run-length encoding step) when they pair the same query and
// target reads and lie within a fixed distance of each other on both reads.
__host__ __device__ bool operator==(const Anchor& lhs,
                                    const Anchor& rhs)
{
    const bool same_reads = (lhs.query_read_id_ == rhs.query_read_id_) &&
                            (lhs.target_read_id_ == rhs.target_read_id_);
    // note: the query-position difference is taken without casting, keeping the
    // original unsigned-arithmetic semantics of the comparison
    const bool query_close  = (rhs.query_position_in_read_ - lhs.query_position_in_read_) < 150;
    const bool target_close = abs(int(rhs.target_position_in_read_) - int(lhs.target_position_in_read_)) < 150;
    return same_reads && query_close && target_close;
}
// Minimal key used to compare overlaps during the reduce-by-key fusion step:
// an overlap is identified by the first anchor of the chain it was built from.
struct cuOverlapKey
{
    const Anchor* anchor; // first anchor of the overlap's chain
};
// Transform functor: maps an overlap index (into d_chain_start) to its
// comparison key, i.e. a pointer to the first anchor of that chain.
struct cuOverlapKey_transform
{
    const Anchor* d_anchors;      // device array of all anchors
    const int32_t* d_chain_start; // d_chain_start[i] = index of chain i's first anchor

    cuOverlapKey_transform(const Anchor* anchors, const int32_t* chain_start)
        : d_anchors(anchors)
        , d_chain_start(chain_start)
    {
    }

    __host__ __device__ __forceinline__ cuOverlapKey
    operator()(const int32_t& idx) const
    {
        cuOverlapKey key;
        key.anchor = d_anchors + d_chain_start[idx];
        return key;
    }
};
// Two overlap keys are equal (and thus fused by the reduce-by-key) when their
// starting anchors pair the same reads and their diagonal offsets on the
// query/target differ by less than a fixed distance.
__host__ __device__ bool operator==(const cuOverlapKey& key0,
                                    const cuOverlapKey& key1)
{
    const Anchor* lhs = key0.anchor;
    const Anchor* rhs = key1.anchor;
    if (lhs->query_read_id_ != rhs->query_read_id_ ||
        lhs->target_read_id_ != rhs->target_read_id_)
    {
        return false;
    }
    const int query_span  = abs(int(lhs->query_position_in_read_) - int(rhs->query_position_in_read_));
    const int target_span = abs(int(lhs->target_position_in_read_) - int(rhs->target_position_in_read_));
    return abs(query_span - target_span) < 300;
}
// Minimal representation of an overlap while it is being fused:
// [overlap_start, overlap_end) indexes into the anchor array, and
// num_residues counts the anchors supporting the overlap.
struct cuOverlapArgs
{
    int32_t overlap_end;   // one past the index of the overlap's last anchor
    int32_t num_residues;  // number of anchors (residues) supporting the overlap
    int32_t overlap_start; // index of the overlap's first anchor
};
// Transform functor: maps an overlap index to the cuOverlapArgs value that is
// fed into the reduce-by-key (anchor index range plus residue count).
struct cuOverlapArgs_transform
{
    const int32_t* d_chain_start;  // d_chain_start[i] = index of chain i's first anchor
    const int32_t* d_chain_length; // d_chain_length[i] = number of anchors in chain i

    cuOverlapArgs_transform(const int32_t* chain_start, const int32_t* chain_length)
        : d_chain_start(chain_start)
        , d_chain_length(chain_length)
    {
    }

    __host__ __device__ __forceinline__ cuOverlapArgs
    operator()(const int32_t& idx) const
    {
        const int32_t first  = d_chain_start[idx];
        const int32_t length = d_chain_length[idx];
        cuOverlapArgs overlap;
        overlap.overlap_start = first;
        overlap.overlap_end   = first + length;
        overlap.num_residues  = length;
        return overlap;
    }
};
// Reduction operator that merges two partial overlaps sharing the same key:
// residue counts add up and the fused anchor range covers both inputs.
struct FuseOverlapOp
{
    __host__ __device__ cuOverlapArgs operator()(const cuOverlapArgs& a,
                                                 const cuOverlapArgs& b) const
    {
        cuOverlapArgs fused;
        fused.num_residues  = a.num_residues + b.num_residues;
        fused.overlap_start = (a.overlap_start < b.overlap_start) ? a.overlap_start : b.overlap_start;
        fused.overlap_end   = (b.overlap_end < a.overlap_end) ? a.overlap_end : b.overlap_end;
        return fused;
    }
};
// Predicate used with thrust::copy_if to keep only overlaps that satisfy the
// user-supplied quality thresholds.
struct FilterOverlapOp
{
    size_t min_residues;          // minimum number of supporting anchors
    size_t min_overlap_len;       // minimum overlap length on both query and target
    size_t min_bases_per_residue; // upper bound on overlap_length / num_residues_
    float min_overlap_fraction;   // minimum (side length) / (longer side length) ratio

    __host__ __device__ __forceinline__ FilterOverlapOp(size_t min_residues,
                                                        size_t min_overlap_len,
                                                        size_t min_bases_per_residue,
                                                        float min_overlap_fraction)
        : min_residues(min_residues)
        , min_overlap_len(min_overlap_len)
        , min_bases_per_residue(min_bases_per_residue)
        , min_overlap_fraction(min_overlap_fraction)
    {
    }

    // Returns true when the overlap passes every filter; self-overlaps
    // (query_read_id_ == target_read_id_) are always rejected.
    __host__ __device__ __forceinline__ bool operator()(const Overlap& overlap) const
    {
        const auto target_overlap_length = overlap.target_end_position_in_read_ - overlap.target_start_position_in_read_;
        const auto query_overlap_length = overlap.query_end_position_in_read_ - overlap.query_start_position_in_read_;
        // the fraction checks below compare each side against the longer one
        const auto overlap_length = max(target_overlap_length, query_overlap_length);
        return ((overlap.num_residues_ >= min_residues) &&
                ((overlap_length / overlap.num_residues_) < min_bases_per_residue) &&
                (query_overlap_length >= min_overlap_len) &&
                (target_overlap_length >= min_overlap_len) &&
                (overlap.query_read_id_ != overlap.target_read_id_) &&
                ((static_cast<float>(target_overlap_length) / static_cast<float>(overlap_length)) > min_overlap_fraction) &&
                ((static_cast<float>(query_overlap_length) / static_cast<float>(overlap_length)) > min_overlap_fraction));
    }
};
// Functor that materializes an Overlap struct from fused overlap args
// (anchor index range + residue count), reading the first and last anchors of
// the range to obtain the read ids and the start/end positions.
struct CreateOverlap
{
    const Anchor* d_anchors; // device anchor array the overlap's indices refer to

    __host__ __device__ __forceinline__ CreateOverlap(const Anchor* anchors_ptr)
        : d_anchors(anchors_ptr)
    {
    }

    __host__ __device__ __forceinline__ Overlap
    operator()(cuOverlapArgs overlap)
    {
        Anchor overlap_start_anchor = d_anchors[overlap.overlap_start];
        // overlap_end is one past the last anchor of the overlap
        Anchor overlap_end_anchor = d_anchors[overlap.overlap_end - 1];
        Overlap new_overlap;
        new_overlap.query_read_id_ = overlap_end_anchor.query_read_id_;
        new_overlap.target_read_id_ = overlap_end_anchor.target_read_id_;
        new_overlap.num_residues_ = overlap.num_residues;
        new_overlap.target_end_position_in_read_ =
            overlap_end_anchor.target_position_in_read_;
        new_overlap.target_start_position_in_read_ =
            overlap_start_anchor.target_position_in_read_;
        new_overlap.query_end_position_in_read_ =
            overlap_end_anchor.query_position_in_read_;
        new_overlap.query_start_position_in_read_ =
            overlap_start_anchor.query_position_in_read_;
        new_overlap.overlap_complete = true;
        // If the target start position is greater than the target end position
        // We can safely assume that the query and target are template and
        // complement reads. TODO: Incorporate sketchelement direction value when
        // this is implemented
        if (new_overlap.target_start_position_in_read_ >
            new_overlap.target_end_position_in_read_)
        {
            new_overlap.relative_strand = RelativeStrand::Reverse;
            // swap so that start <= end also holds on the reverse strand
            auto tmp = new_overlap.target_end_position_in_read_;
            new_overlap.target_end_position_in_read_ =
                new_overlap.target_start_position_in_read_;
            new_overlap.target_start_position_in_read_ = tmp;
        }
        else
        {
            new_overlap.relative_strand = RelativeStrand::Forward;
        }
        return new_overlap;
    };
};
// Stores the device allocator and CUDA stream used for all device work
// performed later by get_overlaps().
OverlapperTriggered::OverlapperTriggered(DefaultDeviceAllocator allocator,
                                         const cudaStream_t cuda_stream)
    : _allocator(allocator)
    , _cuda_stream(cuda_stream)
{
}
// Builds fused and filtered overlaps from a sorted device array of anchors.
// Pipeline (all work enqueued on _cuda_stream):
//   1. run-length-encode the anchor array into chains, using the Anchor
//      operator== defined above as the equality relation;
//   2. exclusive-scan the chain lengths to get each chain's start index;
//   3. keep chains of length >= tail_length_for_chain as overlap candidates;
//   4. fuse neighbouring candidates with a reduce-by-key (cuOverlapKey
//      equality decides which candidates merge);
//   5. materialize Overlap structs, filter them with FilterOverlapOp, and
//      copy the survivors into the host vector fused_overlaps.
// Blocks until the stream has finished before returning.
void OverlapperTriggered::get_overlaps(std::vector<Overlap>& fused_overlaps,
                                       const device_buffer<Anchor>& d_anchors,
                                       int64_t min_residues,
                                       int64_t min_overlap_len,
                                       int64_t min_bases_per_residue,
                                       float min_overlap_fraction)
{
    GW_NVTX_RANGE(profiler, "OverlapperTriggered::get_overlaps");
    // minimum chain length for a chain to be considered an overlap candidate
    const auto tail_length_for_chain = 3;
    auto n_anchors = d_anchors.size();
#ifndef NDEBUG
    // check if anchors are sorted properly
    // TODO: Copying data to host and doing the check there as using thrust::is_sorted
    // leads to a compilation error. It is probably a bug in device_buffer implementation
    thrust::host_vector<Anchor> h_anchors(d_anchors.size());
    cudautils::device_copy_n(d_anchors.data(), d_anchors.size(), h_anchors.data()); // D2H
    // lexicographic order: query id, target id, query position, target position
    auto comp_anchors = [](const Anchor& i, const Anchor& j) { return (i.query_read_id_ < j.query_read_id_) ||
                                                                      ((i.query_read_id_ == j.query_read_id_) &&
                                                                       (i.target_read_id_ < j.target_read_id_)) ||
                                                                      ((i.query_read_id_ == j.query_read_id_) &&
                                                                       (i.target_read_id_ == j.target_read_id_) &&
                                                                       (i.query_position_in_read_ < j.query_position_in_read_)) ||
                                                                      ((i.query_read_id_ == j.query_read_id_) &&
                                                                       (i.target_read_id_ == j.target_read_id_) &&
                                                                       (i.query_position_in_read_ == j.query_position_in_read_) &&
                                                                       (i.target_position_in_read_ < j.target_position_in_read_)); };
    assert(std::is_sorted(std::begin(h_anchors),
                          std::end(h_anchors),
                          comp_anchors));
#endif
    // temporary workspace buffer on device, reused for all CUB calls below
    device_buffer<char> d_temp_buf(_allocator, _cuda_stream);
    // Do run length encode to compute the chains
    // note - identifies the start and end anchor of the chain without moving the anchors
    // >>>>>>>>>
    // d_start_anchor[i] contains the starting anchor of chain i
    device_buffer<Anchor> d_start_anchor(n_anchors, _allocator, _cuda_stream);
    // d_chain_length[i] contains the length of chain i
    device_buffer<int32_t> d_chain_length(n_anchors, _allocator, _cuda_stream);
    // total number of chains found
    device_buffer<int32_t> d_nchains(1, _allocator, _cuda_stream);
    //The equality of two anchors has been overriden, such that they are equal (members of the same chain) if their QID,TID are equal and they fall within a fixed distance of one another
    void* d_temp_storage = nullptr;
    size_t temp_storage_bytes = 0;
    // calculate storage requirement for run length encoding (first CUB call
    // with null storage only sizes the workspace)
    cub::DeviceRunLengthEncode::Encode(
        d_temp_storage, temp_storage_bytes, d_anchors.data(), d_start_anchor.data(),
        d_chain_length.data(), d_nchains.data(), n_anchors, _cuda_stream);
    // allocate temporary storage
    d_temp_buf.clear_and_resize(temp_storage_bytes);
    d_temp_storage = d_temp_buf.data();
    // run encoding
    cub::DeviceRunLengthEncode::Encode(
        d_temp_storage, temp_storage_bytes, d_anchors.data(), d_start_anchor.data(),
        d_chain_length.data(), d_nchains.data(), n_anchors, _cuda_stream);
    // <<<<<<<<<<
    // memcpy D2H
    auto n_chains = cudautils::get_value_from_device(d_nchains.data(), _cuda_stream); //We now know the number of chains we are working with.
    // use prefix sum to calculate the starting index position of all the chains
    // >>>>>>>>>>>>
    // for a chain i, d_chain_start[i] contains the index of starting anchor from d_anchors array
    device_buffer<int32_t> d_chain_start(n_chains, _allocator, _cuda_stream);
    d_temp_storage = nullptr;
    temp_storage_bytes = 0;
    cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
                                  d_chain_length.data(), d_chain_start.data(),
                                  n_chains, _cuda_stream);
    // allocate temporary storage
    d_temp_buf.clear_and_resize(temp_storage_bytes);
    d_temp_storage = d_temp_buf.data();
    cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
                                  d_chain_length.data(), d_chain_start.data(),
                                  n_chains, _cuda_stream);
    // <<<<<<<<<<<<
    // calculate overlaps where overlap is a chain with length > tail_length_for_chain
    // >>>>>>>>>>>>
    auto thrust_exec_policy = thrust::cuda::par(_allocator).on(_cuda_stream);
    // d_overlaps[j] contains index to d_chain_length/d_chain_start where
    // d_chain_length[d_overlaps[j]] and d_chain_start[d_overlaps[j]] corresponds
    // to length and index to starting anchor of the chain-d_overlaps[j] (also referred as overlap j)
    device_buffer<int32_t> d_overlaps(n_chains, _allocator, _cuda_stream);
    auto indices_end =
        thrust::copy_if(thrust_exec_policy, thrust::make_counting_iterator<int32_t>(0),
                        thrust::make_counting_iterator<int32_t>(n_chains),
                        d_chain_length.data(), d_overlaps.data(),
                        [=] __host__ __device__(const int32_t& len) -> bool {
                            return (len >= tail_length_for_chain);
                        });
    auto n_overlaps = indices_end - d_overlaps.data();
    // <<<<<<<<<<<<<
    // >>>>>>>>>>>>
    // fuse overlaps using reduce by key operations
    // key is a minimal data structure that is required to compare the overlaps
    cuOverlapKey_transform key_op(d_anchors.data(),
                                  d_chain_start.data());
    cub::TransformInputIterator<cuOverlapKey, cuOverlapKey_transform, int32_t*>
        d_keys_in(d_overlaps.data(),
                  key_op);
    // value is a minimal data structure that represents a overlap
    cuOverlapArgs_transform value_op(d_chain_start.data(),
                                     d_chain_length.data());
    cub::TransformInputIterator<cuOverlapArgs, cuOverlapArgs_transform, int32_t*>
        d_values_in(d_overlaps.data(),
                    value_op);
    device_buffer<cuOverlapKey> d_fusedoverlap_keys(n_overlaps, _allocator, _cuda_stream);
    device_buffer<cuOverlapArgs> d_fusedoverlaps_args(n_overlaps, _allocator, _cuda_stream);
    device_buffer<int32_t> d_nfused_overlaps(1, _allocator, _cuda_stream);
    FuseOverlapOp reduction_op;
    d_temp_storage = nullptr;
    temp_storage_bytes = 0;
    // size the temporary storage needed by the reduce-by-key
    cub::DeviceReduce::ReduceByKey(d_temp_storage,
                                   temp_storage_bytes,
                                   d_keys_in,
                                   d_fusedoverlap_keys.data(), d_values_in,
                                   d_fusedoverlaps_args.data(), d_nfused_overlaps.data(),
                                   reduction_op,
                                   n_overlaps,
                                   _cuda_stream);
    // allocate temporary storage
    d_temp_buf.clear_and_resize(temp_storage_bytes);
    d_temp_storage = d_temp_buf.data();
    cub::DeviceReduce::ReduceByKey(d_temp_storage,
                                   temp_storage_bytes,
                                   d_keys_in,
                                   d_fusedoverlap_keys.data(), //Write out the unique keys here
                                   d_values_in,
                                   d_fusedoverlaps_args.data(), //Write out the values here
                                   d_nfused_overlaps.data(),
                                   reduction_op,
                                   n_overlaps,
                                   _cuda_stream);
    // memcpyD2H
    auto n_fused_overlap = cudautils::get_value_from_device(d_nfused_overlaps.data(), _cuda_stream);
    // construct overlap from the overlap args
    CreateOverlap fuse_op(d_anchors.data());
    device_buffer<Overlap> d_fused_overlaps(n_fused_overlap, _allocator, _cuda_stream); //Overlaps written here
    thrust::transform(thrust_exec_policy, d_fusedoverlaps_args.data(),
                      d_fusedoverlaps_args.data() + n_fused_overlap,
                      d_fused_overlaps.data(), fuse_op);
    // drop overlaps that do not satisfy the minimum quality criteria
    device_buffer<Overlap> d_filtered_overlaps(n_fused_overlap, _allocator, _cuda_stream);
    FilterOverlapOp filterOp(min_residues, min_overlap_len, min_bases_per_residue, min_overlap_fraction);
    auto filtered_overlaps_end =
        thrust::copy_if(thrust_exec_policy,
                        d_fused_overlaps.data(), d_fused_overlaps.data() + n_fused_overlap,
                        d_filtered_overlaps.data(),
                        filterOp);
    auto n_filtered_overlaps = filtered_overlaps_end - d_filtered_overlaps.data();
    // memcpyD2H - move fused and filtered overlaps to host
    fused_overlaps.resize(n_filtered_overlaps);
    cudautils::device_copy_n(d_filtered_overlaps.data(), n_filtered_overlaps, fused_overlaps.data(), _cuda_stream);
    // This is not completely necessary, but if removed one has to make sure that the next step
    // uses the same stream or that sync is done in caller
    GW_CU_CHECK_ERR(cudaStreamSynchronize(_cuda_stream));
}
} // namespace cudamapper
} // namespace genomeworks
} // namespace claraparabricks
|
b9bea7fad6f9b8ef004632bd44cee8f961f7cfdd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <stdint.h> //uint32_tint4
#include <stdlib.h> //
#include <hip/hip_runtime.h>
// symbolic constants for the hologram width and height
#define width 1024
#define heigth 1024
#define pixel width*heigth
// host-side image buffers
float lumi_intensity[pixel]; // accumulated light intensity per pixel
unsigned char img[pixel]; // binarized 8-bit pixel data written to the bmp
/*--------------------bmp file header--------------------*/
#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER{ // BITMAPFILEHEADER sits at the head of the file; 14 bytes
    unsigned short bfType; // "BM" marks the file as bmp format
    uint32_t bfSize; // total size of the file in bytes
    unsigned short bfReserved1; // bfReserved1 and 2 are reserved and always 0
    unsigned short bfReserved2;
    uint32_t bf0ffBits; // byte offset from the start of the file to the pixel data
}BITMAPFILEHEADER;
#pragma pack(pop)
typedef struct tagBITMAPINFOHEADER{ // BITMAPINFOHEADER describes the bmp image; 40 bytes
    uint32_t biSize; // size of this header (40)
    uint32_t biWidth; // image width in pixels
    uint32_t biHeight; // image height in pixels
    unsigned short biPlanes; // always 1
    unsigned short biBitCount; // bits per pixel; 8 here
    uint32_t biCompression; // compression type; 0 for uncompressed bmp
    uint32_t biSizeImage; // image data size; basically 0 when biCompression=0
    uint32_t biXPelsPerMeter; // biXPelsPerMeter and biYPelsPerMeter are normally 0
    uint32_t biYPelsPerMeter;
    uint32_t biCirUsed; // 0
    uint32_t biCirImportant; // 0
}BITMAPINFOHEADER;
// one color-palette entry (blue/green/red plus a reserved byte)
typedef struct tagRGBQUAD{
    unsigned char rgbBlue;
    unsigned char rgbGreen;
    unsigned char rgbRed;
    unsigned char rgbReserved;
}RGBQUAD;
/*--------------------------------------------------*/
/*----------------------------------------*/
// Fresnel-approximation kernel: one thread per hologram pixel (j,i).
// Accumulates cos(wave_num*((j-x)^2+(i-y)^2)*z) over all object points and
// writes the total intensity to lumi_intensity_d[i*width+j].
// Fixes: (1) accumulate into a zero-initialized register instead of
// read-modify-writing device memory that hipMalloc leaves uninitialized;
// (2) guard threads that fall outside the image in case the grid overshoots.
__global__ void fresnel_gpu(int *x_d, int *y_d, float *z_d, float *lumi_intensity_d){
    int j = blockDim.x*blockIdx.x+threadIdx.x; // replaces the loop over width
    int i = blockDim.y*blockIdx.y+threadIdx.y; // replaces the loop over heigth
    if (j >= width || i >= heigth)
        return;
    int adr = i*width+j;
    float wave_len = 0.633F;        // light wavelength
    float wave_num = M_PI/wave_len; // half of the wavenumber 2*pi/lambda
    float sum = 0.0F;
    // NOTE(review): the point count is hard-coded to 284 (matches cube284.3d);
    // the count read from the file on the host is not passed in.
    for (int k = 0; k < 284; k++) {
        float xx = ((float)j-x_d[k])*((float)j-x_d[k]);
        float yy = ((float)i-y_d[k])*((float)i-y_d[k]);
        sum += __cosf(wave_num*(xx+yy)*z_d[k]);
    }
    lumi_intensity_d[adr] = sum;
}
/*--------------------------------------------------*/
/*--------------------main--------------------*/
// Host entry point: reads the point cloud "cube284.3d", computes the hologram
// light intensity on the GPU, binarizes it around the midpoint of its range,
// and writes the result as an 8-bit grayscale BMP ("fresnel-gpu.bmp").
int main(){
    BITMAPFILEHEADER bmpFh;
    BITMAPINFOHEADER bmpIh;
    RGBQUAD rgbQ[256];
    // host-side variables
    int i;
    int points; // number of object points
    float min = 0.0F, max = 0.0F, mid; // intensity range; used for binarization
    FILE *fp;
    // load the 3D point-cloud file
    fp = fopen("cube284.3d","rb"); // open in binary mode
    if (!fp) {
        printf("3D file not found!\n");
        exit(1);
    }
    fread(&points, sizeof(int), 1, fp); // first int in the file is the point count
    printf("the number of points is %d\n", points);
    // arrays holding the object points (runtime-sized arrays; size known only after the read)
    int x[points];
    int y[points];
    float z[points];
    int x_buf, y_buf, z_buf; // temporaries for one point record
    // read each point and map it into hologram-plane coordinates
    for (i=0; i<points; i++) {
        fread(&x_buf, sizeof(int), 1, fp);
        fread(&y_buf, sizeof(int), 1, fp);
        fread(&z_buf, sizeof(int), 1, fp);
        x[i] = x_buf*40+width*0.5; // scale by 40 to spread the points and recenter
        y[i] = y_buf*40+heigth*0.5;
        z[i] = 1.0F/(((float)z_buf)*40+10000.0F); // store reciprocal depth
    }
    fclose(fp);
    /*--------------------CGH computation on the GPU--------------------*/
    int *x_d, *y_d;
    float *z_d;
    float *lumi_intensity_d;
    dim3 block(32,32,1); // threads per block
    dim3 grid(ceil(width/block.x),ceil(heigth/block.y),1); // blocks per grid (integer division; exact since 1024 % 32 == 0)
    // dim3 grid((width+block.x-1)/block.x,(heigth+block.y-1)/block.y,1);
    // device allocations
    hipMalloc((void**)&x_d, points*sizeof(int));
    hipMalloc((void**)&y_d, points*sizeof(int));
    hipMalloc((void**)&z_d, points*sizeof(float));
    hipMalloc((void**)&lumi_intensity_d, pixel*sizeof(float));
    // copy inputs host -> device
    hipMemcpy(x_d, x, points*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(y_d, y, points*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(z_d, z, points*sizeof(float), hipMemcpyHostToDevice);
    // launch the kernel
    hipLaunchKernelGGL(( fresnel_gpu), dim3(grid), dim3(block) , 0, 0, x_d, y_d, z_d, lumi_intensity_d);
    // copy the result device -> host
    hipMemcpy(lumi_intensity, lumi_intensity_d, pixel*sizeof(float), hipMemcpyDeviceToHost);
    // release device memory
    hipFree(x_d);
    hipFree(y_d);
    hipFree(z_d);
    hipFree(lumi_intensity_d);
    /*--------------------------------------------------*/
    // find the minimum and maximum intensity
    for (i=0; i<pixel; i++) {
        if (min>lumi_intensity[i]) {
            min = lumi_intensity[i];
        }
        if (max<lumi_intensity[i]) {
            max = lumi_intensity[i];
        }
    }
    mid = (min+max)/2; // threshold = midpoint of the range
    printf("min=%lf, max=%lf, mid=%lf\n", min, max, mid);
    // binarize each pixel against the threshold
    for (i=0; i<pixel; i++) {
        if (lumi_intensity[i]<mid) {
            img[i] = 0;
        }
        else{
            img[i] = 255;
        }
    }
    /*--------------------BMP output--------------------*/
    // BITMAPFILEHEADER
    bmpFh.bfType = 19778; //'B'=0x42,'M'=0x4d,'BM'=0x4d42=19778
    bmpFh.bfSize = 14+40+1024+(pixel); // 1024 = palette size: 256 entries of 4 bytes
    bmpFh.bfReserved1 = 0;
    bmpFh.bfReserved2 = 0;
    bmpFh.bf0ffBits = 14+40+1024;
    // BITMAPINFOHEADER
    bmpIh.biSize = 40;
    bmpIh.biWidth = width;
    bmpIh.biHeight = heigth;
    bmpIh.biPlanes = 1;
    bmpIh.biBitCount = 8;
    bmpIh.biCompression = 0;
    bmpIh.biSizeImage = 0;
    bmpIh.biXPelsPerMeter = 0;
    bmpIh.biYPelsPerMeter = 0;
    bmpIh.biCirUsed = 0;
    bmpIh.biCirImportant = 0;
    // grayscale palette (RGBQUAD): entry i maps pixel value i to gray level i
    for (i=0; i<256; i++) {
        rgbQ[i].rgbBlue = i;
        rgbQ[i].rgbGreen = i;
        rgbQ[i].rgbRed = i;
        rgbQ[i].rgbReserved = 0;
    }
    /*--------------------------------------------------*/
    fp = fopen("fresnel-gpu.bmp","wb"); // open the output file for binary write
    fwrite(&bmpFh, sizeof(bmpFh), 1, fp); // write address, element size, count, file pointer
    fwrite(&bmpIh, sizeof(bmpIh), 1, fp); // fields could also be written one at a time, e.g. (&bmpFh.bfType, sizeof(bmpFh.bfType), 1, fp)
    fwrite(&rgbQ[0], sizeof(rgbQ[0]), 256, fp);
    fwrite(img, sizeof(unsigned char), pixel, fp); // pixel data
    printf("'fresnel-gpu.bmp' was saved.\n\n");
    fclose(fp);
    return 0;
}
| b9bea7fad6f9b8ef004632bd44cee8f961f7cfdd.cu | #include <stdio.h>
#include <math.h>
#include <stdint.h> //uint32_tは符号なしintで4バイトに指定
#include <stdlib.h> //記憶域管理を使うため
#include <cuda.h>
// symbolic constants for the hologram width and height
#define width 1024
#define heigth 1024
#define pixel width*heigth
// host-side image buffers
float lumi_intensity[pixel]; // accumulated light intensity per pixel
unsigned char img[pixel]; // binarized 8-bit pixel data written to the bmp
/*--------------------bmp structures--------------------*/
#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER{ // BITMAPFILEHEADER sits at the head of the file; 14 bytes
    unsigned short bfType; // "BM" marks the file as bmp format
    uint32_t bfSize; // total size of the file in bytes
    unsigned short bfReserved1; // bfReserved1 and 2 are reserved and always 0
    unsigned short bfReserved2;
    uint32_t bf0ffBits; // byte offset from the start of the file to the pixel data
}BITMAPFILEHEADER;
#pragma pack(pop)
typedef struct tagBITMAPINFOHEADER{ // BITMAPINFOHEADER describes the bmp image; 40 bytes
    uint32_t biSize; // size of this header (40)
    uint32_t biWidth; // image width in pixels
    uint32_t biHeight; // image height in pixels
    unsigned short biPlanes; // always 1
    unsigned short biBitCount; // bits per pixel; 8 here
    uint32_t biCompression; // compression type; 0 for uncompressed bmp
    uint32_t biSizeImage; // image data size; basically 0 when biCompression=0
    uint32_t biXPelsPerMeter; // biXPelsPerMeter and biYPelsPerMeter are normally 0
    uint32_t biYPelsPerMeter;
    uint32_t biCirUsed; // 0
    uint32_t biCirImportant; // 0
}BITMAPINFOHEADER;
// one color-palette entry (blue/green/red plus a reserved byte)
typedef struct tagRGBQUAD{
    unsigned char rgbBlue;
    unsigned char rgbGreen;
    unsigned char rgbRed;
    unsigned char rgbReserved;
}RGBQUAD;
/*--------------------------------------------------*/
/*--------------------フレネル近似のカーネル関数--------------------*/
// Fresnel-approximation kernel: one thread per hologram pixel (j,i).
// Accumulates cos(wave_num*((j-x)^2+(i-y)^2)*z) over all object points and
// writes the total intensity to lumi_intensity_d[i*width+j].
// Fixes: (1) accumulate into a zero-initialized register instead of
// read-modify-writing device memory that cudaMalloc leaves uninitialized;
// (2) guard threads that fall outside the image in case the grid overshoots.
__global__ void fresnel_gpu(int *x_d, int *y_d, float *z_d, float *lumi_intensity_d){
    int j = blockDim.x*blockIdx.x+threadIdx.x; // replaces the loop over width
    int i = blockDim.y*blockIdx.y+threadIdx.y; // replaces the loop over heigth
    if (j >= width || i >= heigth)
        return;
    int adr = i*width+j;
    float wave_len = 0.633F;        // light wavelength
    float wave_num = M_PI/wave_len; // half of the wavenumber 2*pi/lambda
    float sum = 0.0F;
    // NOTE(review): the point count is hard-coded to 284 (matches cube284.3d);
    // the count read from the file on the host is not passed in.
    for (int k = 0; k < 284; k++) {
        float xx = ((float)j-x_d[k])*((float)j-x_d[k]);
        float yy = ((float)i-y_d[k])*((float)i-y_d[k]);
        sum += __cosf(wave_num*(xx+yy)*z_d[k]);
    }
    lumi_intensity_d[adr] = sum;
}
/*--------------------------------------------------*/
/*--------------------main関数--------------------*/
// Host entry point: reads the point cloud "cube284.3d", computes the hologram
// light intensity on the GPU, binarizes it around the midpoint of its range,
// and writes the result as an 8-bit grayscale BMP ("fresnel-gpu.bmp").
int main(){
    BITMAPFILEHEADER bmpFh;
    BITMAPINFOHEADER bmpIh;
    RGBQUAD rgbQ[256];
    // host-side variables
    int i;
    int points; // number of object points
    float min = 0.0F, max = 0.0F, mid; // intensity range; used for binarization
    FILE *fp;
    // load the 3D point-cloud file
    fp = fopen("cube284.3d","rb"); // open in binary mode
    if (!fp) {
        printf("3D file not found!\n");
        exit(1);
    }
    fread(&points, sizeof(int), 1, fp); // first int in the file is the point count
    printf("the number of points is %d\n", points);
    // arrays holding the object points (runtime-sized arrays; size known only after the read)
    int x[points];
    int y[points];
    float z[points];
    int x_buf, y_buf, z_buf; // temporaries for one point record
    // read each point and map it into hologram-plane coordinates
    for (i=0; i<points; i++) {
        fread(&x_buf, sizeof(int), 1, fp);
        fread(&y_buf, sizeof(int), 1, fp);
        fread(&z_buf, sizeof(int), 1, fp);
        x[i] = x_buf*40+width*0.5; // scale by 40 to spread the points and recenter
        y[i] = y_buf*40+heigth*0.5;
        z[i] = 1.0F/(((float)z_buf)*40+10000.0F); // store reciprocal depth
    }
    fclose(fp);
    /*--------------------CGH computation on the GPU--------------------*/
    int *x_d, *y_d;
    float *z_d;
    float *lumi_intensity_d;
    dim3 block(32,32,1); // threads per block
    dim3 grid(ceil(width/block.x),ceil(heigth/block.y),1); // blocks per grid (integer division; exact since 1024 % 32 == 0)
    // dim3 grid((width+block.x-1)/block.x,(heigth+block.y-1)/block.y,1);
    // device allocations
    cudaMalloc((void**)&x_d, points*sizeof(int));
    cudaMalloc((void**)&y_d, points*sizeof(int));
    cudaMalloc((void**)&z_d, points*sizeof(float));
    cudaMalloc((void**)&lumi_intensity_d, pixel*sizeof(float));
    // copy inputs host -> device
    cudaMemcpy(x_d, x, points*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(y_d, y, points*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(z_d, z, points*sizeof(float), cudaMemcpyHostToDevice);
    // launch the kernel
    fresnel_gpu<<< grid, block >>>(x_d, y_d, z_d, lumi_intensity_d);
    // copy the result device -> host
    cudaMemcpy(lumi_intensity, lumi_intensity_d, pixel*sizeof(float), cudaMemcpyDeviceToHost);
    // release device memory
    cudaFree(x_d);
    cudaFree(y_d);
    cudaFree(z_d);
    cudaFree(lumi_intensity_d);
    /*--------------------------------------------------*/
    // find the minimum and maximum intensity
    for (i=0; i<pixel; i++) {
        if (min>lumi_intensity[i]) {
            min = lumi_intensity[i];
        }
        if (max<lumi_intensity[i]) {
            max = lumi_intensity[i];
        }
    }
    mid = (min+max)/2; // threshold = midpoint of the range
    printf("min=%lf, max=%lf, mid=%lf\n", min, max, mid);
    // binarize each pixel against the threshold
    for (i=0; i<pixel; i++) {
        if (lumi_intensity[i]<mid) {
            img[i] = 0;
        }
        else{
            img[i] = 255;
        }
    }
    /*--------------------BMP output--------------------*/
    // BITMAPFILEHEADER
    bmpFh.bfType = 19778; //'B'=0x42,'M'=0x4d,'BM'=0x4d42=19778
    bmpFh.bfSize = 14+40+1024+(pixel); // 1024 = palette size: 256 entries of 4 bytes
    bmpFh.bfReserved1 = 0;
    bmpFh.bfReserved2 = 0;
    bmpFh.bf0ffBits = 14+40+1024;
    // BITMAPINFOHEADER
    bmpIh.biSize = 40;
    bmpIh.biWidth = width;
    bmpIh.biHeight = heigth;
    bmpIh.biPlanes = 1;
    bmpIh.biBitCount = 8;
    bmpIh.biCompression = 0;
    bmpIh.biSizeImage = 0;
    bmpIh.biXPelsPerMeter = 0;
    bmpIh.biYPelsPerMeter = 0;
    bmpIh.biCirUsed = 0;
    bmpIh.biCirImportant = 0;
    // grayscale palette (RGBQUAD): entry i maps pixel value i to gray level i
    for (i=0; i<256; i++) {
        rgbQ[i].rgbBlue = i;
        rgbQ[i].rgbGreen = i;
        rgbQ[i].rgbRed = i;
        rgbQ[i].rgbReserved = 0;
    }
    /*--------------------------------------------------*/
    fp = fopen("fresnel-gpu.bmp","wb"); // open the output file for binary write
    fwrite(&bmpFh, sizeof(bmpFh), 1, fp); // write address, element size, count, file pointer
    fwrite(&bmpIh, sizeof(bmpIh), 1, fp); // fields could also be written one at a time, e.g. (&bmpFh.bfType, sizeof(bmpFh.bfType), 1, fp)
    fwrite(&rgbQ[0], sizeof(rgbQ[0]), 256, fp);
    fwrite(img, sizeof(unsigned char), pixel, fp); // pixel data
    printf("'fresnel-gpu.bmp' was saved.\n\n");
    fclose(fp);
    return 0;
}
|
ce0856d1f98aa7c09482c962f0324a764ca653ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include "CopySurface.h"
#include <device_launch_parameters.h>
// The CUDA kernel. This sample simply copies the input surface.
// The CUDA kernel. This sample simply copies the input surface.
// One thread per texel; assumes the surface channel occupies sizeof(T) bytes.
template<class T>
__global__ void copySurface(hipSurfaceObject_t input, hipSurfaceObject_t output, unsigned int width, unsigned int height)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height) // guard: the grid may overshoot the surface
    {
        T data;
        surf2Dread(&data, input, sizeof(T) * x, y); // x is a byte offset, y a row index
        surf2Dwrite(data, output, sizeof(T) * x, y);
    }
}
// A wrapper function that launches the kernel, dispatching to the float or
// int instantiation based on the surface's channel format kind.
// Fix: the hipify-generated "elsehipLaunchKernelGGL(..." fused the `else`
// keyword with the launch macro into one undefined identifier; whitespace is
// required between them for the code to compile.
void launchCopySurface(hipSurfaceObject_t input, hipSurfaceObject_t output, unsigned int width, unsigned int height, unsigned int format)
{
    dim3 dimBlock(16, 16);
    // ceil-divide so partially covered tiles at the right/bottom edges still get a block
    dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y);
    if (format == hipChannelFormatKindFloat)
        hipLaunchKernelGGL((copySurface<float>), dim3(dimGrid), dim3(dimBlock), 0, 0, input, output, width, height);
    else
        hipLaunchKernelGGL((copySurface<int>), dim3(dimGrid), dim3(dimBlock), 0, 0, input, output, width, height);
}
| ce0856d1f98aa7c09482c962f0324a764ca653ec.cu | /***************************************************************************
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include "CopySurface.h"
#include <device_launch_parameters.h>
// The CUDA kernel. This sample simply copies the input surface.
// The CUDA kernel. This sample simply copies the input surface.
// One thread per texel; assumes the surface channel occupies sizeof(T) bytes.
template<class T>
__global__ void copySurface(cudaSurfaceObject_t input, cudaSurfaceObject_t output, unsigned int width, unsigned int height)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height) // guard: the grid may overshoot the surface
    {
        T data;
        surf2Dread(&data, input, sizeof(T) * x, y); // x is a byte offset, y a row index
        surf2Dwrite(data, output, sizeof(T) * x, y);
    }
}
// A wrapper function that launches the kernel, dispatching to the float or
// int instantiation based on the surface's channel format kind.
void launchCopySurface(cudaSurfaceObject_t input, cudaSurfaceObject_t output, unsigned int width, unsigned int height, unsigned int format)
{
    const dim3 block(16, 16);
    // ceil-divide so partially covered tiles at the right/bottom edges still get a block
    const dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
    if (format == cudaChannelFormatKindFloat)
    {
        copySurface<float><<<grid, block>>>(input, output, width, height);
    }
    else
    {
        copySurface<int><<<grid, block>>>(input, output, width, height);
    }
}
|
4cb065eefd3acab45f955f8407e0cb6a819c8281.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "RectangleReceiverRectGridRayTracing.cuh"
#include "Grid/rectGridDDA.cuh"
#include "Receiver/rectangleReceiverIntersection.cuh"
// One thread per (micro-heliostat, sample ray) pair: trace a single sun ray
// and, if it is neither shadowed nor blocked, splat its contribution onto
// the rectangular receiver. Flat 1D launch sized by the host wrapper;
// excess tail threads exit immediately.
__global__ void map_raytracing(SunrayArgument sunrayArgument, RectangleReceiver rectangleReceiver,
                               RectGrid rectGrid, HeliostatArgument heliostatArgument,
                               float3 *d_heliostat_vertexes, float factor) {
    long long myId = global_func::getThreadId();
    // Tail guard: grid is rounded up to whole blocks.
    if (myId >= heliostatArgument.numberOfMicroHeliostats * sunrayArgument.numberOfLightsPerGroup)
        return;
    // Step 1: whether the incident light is shadowed by other heliostats
    // Pick a pre-sampled light direction from the shared sample pool; the
    // per-heliostat group offset selects which pool entries this ray uses.
    int address = (heliostatArgument.d_microHelio_groups[myId / sunrayArgument.numberOfLightsPerGroup] +
                   myId % sunrayArgument.numberOfLightsPerGroup) % sunrayArgument.pool_size;
    float3 dir = global_func::local2world(sunrayArgument.d_samplelights[address], -sunrayArgument.sunray_direction);
    float3 origin = heliostatArgument.d_microHelio_origins[myId / sunrayArgument.numberOfLightsPerGroup];
    if (rectGridDDA::collision(origin, dir, rectGrid, d_heliostat_vertexes, heliostatArgument)) {
        return;    // incident ray hits another heliostat (shadowing) — no contribution
    }
    // Step 2: whether the reflect light is shadowed by other heliostats
    float3 normal = heliostatArgument.d_microHelio_normals[myId / sunrayArgument.numberOfLightsPerGroup];
    // Perturb the mirror normal with a pooled perturbation sample
    // (presumably a surface-error model — TODO confirm).
    address = (heliostatArgument.d_microHelio_groups[(myId / sunrayArgument.numberOfLightsPerGroup + 1) %
                                                     sunrayArgument.pool_size] +
               myId % sunrayArgument.numberOfLightsPerGroup) % sunrayArgument.pool_size;
    normal = global_func::local2world(sunrayArgument.d_perturbations[address], normal);
    normal = normalize(normal);
    dir = normalize(reflect(-dir, normal));
    if (rectGridDDA::collision(origin, dir, rectGrid, d_heliostat_vertexes, heliostatArgument)) {
        return;    // reflected ray hits another heliostat (blocking) — no contribution
    }
    // Step 3: intersect with receiver
    rectangleReceiverIntersect::receiver_drawing(rectangleReceiver, origin, dir, normal, factor);
}
// Host-side driver: sizes a flat 1D launch (512 threads/block) so there is
// one thread per (micro-heliostat, ray) pair, enqueues the ray-tracing
// kernel, then blocks until completion and checks for errors.
void RectangleReceiverRectGridRayTracing(SunrayArgument &sunrayArgument, RectangleReceiver *rectangleReceiver,
                                         RectGrid *rectGrid, HeliostatArgument &heliostatArgument,
                                         float3 *d_subHeliostat_vertexes, float factor) {
    int nThreads = 512;
    dim3 nBlocks;
    // setThreadsBlocks derives the grid dimensions for the total thread count.
    global_func::setThreadsBlocks(nBlocks, nThreads,
                                  heliostatArgument.numberOfMicroHeliostats * sunrayArgument.numberOfLightsPerGroup,
                                  true);
    // Receiver/grid structs are dereferenced here and passed by value to the kernel.
    map_raytracing << < nBlocks, nThreads >> >
    (sunrayArgument, *rectangleReceiver, *rectGrid, heliostatArgument, d_subHeliostat_vertexes, factor);
    hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());
} | 4cb065eefd3acab45f955f8407e0cb6a819c8281.cu | #include "RectangleReceiverRectGridRayTracing.cuh"
#include "Grid/rectGridDDA.cuh"
#include "Receiver/rectangleReceiverIntersection.cuh"
// One thread per (micro-heliostat, sample ray) pair: trace a single sun ray
// and, if it is neither shadowed nor blocked, splat its contribution onto
// the rectangular receiver. Flat 1D launch sized by the host wrapper;
// excess tail threads exit immediately.
__global__ void map_raytracing(SunrayArgument sunrayArgument, RectangleReceiver rectangleReceiver,
                               RectGrid rectGrid, HeliostatArgument heliostatArgument,
                               float3 *d_heliostat_vertexes, float factor) {
    long long myId = global_func::getThreadId();
    // Tail guard: grid is rounded up to whole blocks.
    if (myId >= heliostatArgument.numberOfMicroHeliostats * sunrayArgument.numberOfLightsPerGroup)
        return;
    // Step 1: whether the incident light is shadowed by other heliostats
    // Pick a pre-sampled light direction from the shared sample pool; the
    // per-heliostat group offset selects which pool entries this ray uses.
    int address = (heliostatArgument.d_microHelio_groups[myId / sunrayArgument.numberOfLightsPerGroup] +
                   myId % sunrayArgument.numberOfLightsPerGroup) % sunrayArgument.pool_size;
    float3 dir = global_func::local2world(sunrayArgument.d_samplelights[address], -sunrayArgument.sunray_direction);
    float3 origin = heliostatArgument.d_microHelio_origins[myId / sunrayArgument.numberOfLightsPerGroup];
    if (rectGridDDA::collision(origin, dir, rectGrid, d_heliostat_vertexes, heliostatArgument)) {
        return;    // incident ray hits another heliostat (shadowing) — no contribution
    }
    // Step 2: whether the reflect light is shadowed by other heliostats
    float3 normal = heliostatArgument.d_microHelio_normals[myId / sunrayArgument.numberOfLightsPerGroup];
    // Perturb the mirror normal with a pooled perturbation sample
    // (presumably a surface-error model — TODO confirm).
    address = (heliostatArgument.d_microHelio_groups[(myId / sunrayArgument.numberOfLightsPerGroup + 1) %
                                                     sunrayArgument.pool_size] +
               myId % sunrayArgument.numberOfLightsPerGroup) % sunrayArgument.pool_size;
    normal = global_func::local2world(sunrayArgument.d_perturbations[address], normal);
    normal = normalize(normal);
    dir = normalize(reflect(-dir, normal));
    if (rectGridDDA::collision(origin, dir, rectGrid, d_heliostat_vertexes, heliostatArgument)) {
        return;    // reflected ray hits another heliostat (blocking) — no contribution
    }
    // Step 3: intersect with receiver
    rectangleReceiverIntersect::receiver_drawing(rectangleReceiver, origin, dir, normal, factor);
}
// Host-side driver: sizes a flat 1D launch (512 threads/block) so there is
// one thread per (micro-heliostat, ray) pair, enqueues the ray-tracing
// kernel, then blocks until completion and checks for errors.
void RectangleReceiverRectGridRayTracing(SunrayArgument &sunrayArgument, RectangleReceiver *rectangleReceiver,
                                         RectGrid *rectGrid, HeliostatArgument &heliostatArgument,
                                         float3 *d_subHeliostat_vertexes, float factor) {
    int nThreads = 512;
    dim3 nBlocks;
    // setThreadsBlocks derives the grid dimensions for the total thread count.
    global_func::setThreadsBlocks(nBlocks, nThreads,
                                  heliostatArgument.numberOfMicroHeliostats * sunrayArgument.numberOfLightsPerGroup,
                                  true);
    // Receiver/grid structs are dereferenced here and passed by value to the kernel.
    map_raytracing << < nBlocks, nThreads >> >
    (sunrayArgument, *rectangleReceiver, *rectGrid, heliostatArgument, d_subHeliostat_vertexes, factor);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
}
9b494897b94d12b6871c22fa0affc986aa34a2df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zjacobisetup.cu, normal z -> d, Wed Jan 2 14:18:53 2019
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
// One thread per matrix row. For every stacked RHS vector, divide the RHS
// entry by this row's diagonal value and seed the iterate with the result:
//     c(:,i) = b(:,i) ./ d,   x(:,i) = c(:,i)
// Vectors are stored back to back, so entry (row, i) lives at row + i*num_rows.
__global__ void
dvjacobisetup_gpu( int num_rows,
                   int num_vecs,
                   double *b,
                   double *d,
                   double *c,
                   double *x)
{
    const int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= num_rows)
        return;                          // tail threads past the last row
    for (int vec = 0; vec < num_vecs; ++vec) {
        const int idx = row + vec * num_rows;
        c[idx] = b[idx] / d[row];
        x[idx] = c[idx];
    }
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_d_matrix
RHS b
@param[in]
d magma_d_matrix
vector with diagonal entries
@param[out]
c magma_d_matrix*
c = D^(-1) * b
@param[out]
x magma_d_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
// Computes c = D^{-1} b on the GPU and seeds the iterate x = c for all
// stacked RHS vectors; the number of vectors is inferred from b's length.
extern "C" magma_int_t
magma_djacobisetup_vector_gpu(
    magma_int_t num_rows,
    magma_d_matrix b,
    magma_d_matrix d,
    magma_d_matrix c,
    magma_d_matrix *x,
    magma_queue_t queue )
{
    dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
    // b stacks num_vecs vectors of length num_rows back to back.
    int num_vecs = b.num_rows / num_rows;
    magma_int_t threads = BLOCK_SIZE;
    // NOTE(review): b/d/c pass device pointers (.dval) while x passes
    // x->val — verify this should not be x->dval like the sibling routines.
    hipLaunchKernelGGL(( dvjacobisetup_gpu), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
        num_rows, num_vecs, b.dval, d.dval, c.dval, x->val );
    return MAGMA_SUCCESS;
}
// One thread per matrix row: scale this row's entry of every stacked RHS
// vector by the corresponding diagonal value,  c(:,i) = d .* b(:,i).
// Entry (row, i) lives at offset row + i*num_rows.
__global__ void
djacobidiagscal_kernel( int num_rows,
                        int num_vecs,
                        double *b,
                        double *d,
                        double *c)
{
    const int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= num_rows)
        return;                          // tail threads past the last row
    for (int vec = 0; vec < num_vecs; ++vec) {
        const int idx = row + vec * num_rows;
        c[idx] = b[idx] * d[row];
    }
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_d_matrix
RHS b
@param[in]
d magma_d_matrix
vector with diagonal entries
@param[out]
c magma_d_matrix*
c = D^(-1) * b
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
// Scales every stacked RHS vector in b row-wise by the diagonal vector d,
// writing the result into c (c = D * b), on the GPU stream of `queue`.
extern "C" magma_int_t
magma_djacobi_diagscal(
    magma_int_t num_rows,
    magma_d_matrix d,
    magma_d_matrix b,
    magma_d_matrix *c,
    magma_queue_t queue )
{
    dim3 grid( magma_ceildiv( num_rows, 512 ));
    // Total entries divided by the row count gives the number of stacked vectors.
    int num_vecs = b.num_rows*b.num_cols/num_rows;
    magma_int_t threads = 512;
    // NOTE(review): b/d pass device pointers (.dval) while c passes c->val —
    // verify this should not be c->dval like the other routines in this file.
    hipLaunchKernelGGL(( djacobidiagscal_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), num_rows, num_vecs, b.dval, d.dval, c->val );
    return MAGMA_SUCCESS;
}
// One thread per row: Jacobi update  x(:,i) += (b(:,i) - t(:,i)) .* d  for
// every stacked RHS vector i, where t is expected to already hold A*x
// (computed separately by the caller before this kernel runs).
__global__ void
djacobiupdate_kernel(  int num_rows,
                       int num_cols,
                       double *t,
                       double *b,
                       double *d,
                       double *x)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if(row < num_rows ){
        for( int i=0; i<num_cols; i++)
            x[row+i*num_rows] += (b[row+i*num_rows]-t[row+i*num_rows]) * d[row];
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-t)
where d is the diagonal of the system matrix A and t=Ax.
Arguments
---------
@param[in]
t magma_d_matrix
t = A*x
@param[in]
b magma_d_matrix
RHS b
@param[in]
d magma_d_matrix
vector with diagonal entries
@param[out]
x magma_d_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
// Applies one Jacobi correction x += d .* (b - t) on the GPU, where t must
// already contain A*x (see the documentation comment above). Asynchronous:
// returns after enqueueing the kernel on `queue`.
extern "C" magma_int_t
magma_djacobiupdate(
    magma_d_matrix t,
    magma_d_matrix b,
    magma_d_matrix d,
    magma_d_matrix *x,
    magma_queue_t queue )
{
    dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    hipLaunchKernelGGL(( djacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
    return MAGMA_SUCCESS;
}
// SpMV-fused Jacobi sweep over a CSR matrix, one thread per row:
//     x(row,i) += ( b(row,i) - A(row,:)*x(:,i) ) * d(row)
// for every stacked RHS vector i. x is read and written in the same pass,
// so concurrent rows see a mix of old and new values — this asynchronous
// ("merged") update is intentional (see magma_djacobispmvupdate).
// The t workspace parameter is unused here; it is kept so the launch
// signature matches the separate spmv+update code path.
// Fix: the dot-product accumulator is now reset for every RHS column.
// Previously `dot` was declared once before the column loop, so for
// num_cols > 1 the partial sums of earlier columns leaked into later ones
// (single-RHS calls were unaffected).
__global__ void
djacobispmvupdate_kernel(
    int num_rows,
    int num_cols,
    double * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    double *t,
    double *b,
    double *d,
    double *x )
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int j;
    if(row<num_rows){
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( int i=0; i<num_cols; i++){
            double dot = MAGMA_D_ZERO;   // restart the row dot product for column i
            for( j=start; j<end; j++){
                dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
            }
            x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
        }
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_d_matrix
system matrix
@param[in]
t magma_d_matrix
workspace
@param[in]
b magma_d_matrix
RHS b
@param[in]
d magma_d_matrix
vector with diagonal entries
@param[out]
x magma_d_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
// Runs maxiter fused SpMV+Jacobi sweeps on the stream of `queue`. Each
// sweep updates x in place ("chaotic"/asynchronous Jacobi — no barrier
// between rows within a sweep); successive sweeps are ordered only by
// stream semantics. Returns after enqueueing, without synchronizing.
extern "C" magma_int_t
magma_djacobispmvupdate(
    magma_int_t maxiter,
    magma_d_matrix A,
    magma_d_matrix t,
    magma_d_matrix b,
    magma_d_matrix d,
    magma_d_matrix *x,
    magma_queue_t queue )
{
    // local variables
    //double c_zero = MAGMA_D_ZERO;
    //double c_one = MAGMA_D_ONE;
    dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    for( magma_int_t i=0; i<maxiter; i++ ) {
        // distinct routines imply synchronization
        // magma_d_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
        //hipLaunchKernelGGL(( djacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
        // merged in one implies asynchronous update
        hipLaunchKernelGGL(( djacobispmvupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
            t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
    }
    return MAGMA_SUCCESS;
}
// Backward-ordered variant of djacobispmvupdate_kernel: thread blocks map
// to rows in reverse (row = num_rows-1 - tid), performing the same fused
// asynchronous update
//     x(row,i) += ( b(row,i) - A(row,:)*x(:,i) ) * d(row)
// per stacked RHS vector i. The t workspace parameter is unused; kept for
// signature parity with the forward kernel.
// Fix: the dot-product accumulator is now reset for every RHS column.
// Previously `dot` was declared once before the column loop, so for
// num_cols > 1 earlier columns' partial sums leaked into later ones
// (single-RHS calls were unaffected).
__global__ void
djacobispmvupdate_bw_kernel(
    int num_rows,
    int num_cols,
    double * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    double *t,
    double *b,
    double *d,
    double *x )
{
    int row_tmp = blockDim.x * blockIdx.x + threadIdx.x;
    int row = num_rows-1 - row_tmp;      // process rows in reverse order
    int j;
    if( row>-1 ){
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( int i=0; i<num_cols; i++){
            double dot = MAGMA_D_ZERO;   // restart the row dot product for column i
            for( j=start; j<end; j++){
                dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
            }
            x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
        }
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel processes the thread blocks in reversed order.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_d_matrix
system matrix
@param[in]
t magma_d_matrix
workspace
@param[in]
b magma_d_matrix
RHS b
@param[in]
d magma_d_matrix
vector with diagonal entries
@param[out]
x magma_d_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
// Same as magma_djacobispmvupdate but the kernel walks the rows in reverse
// order (backward sweep). Enqueues maxiter asynchronous sweeps on `queue`
// and returns without synchronizing.
extern "C" magma_int_t
magma_djacobispmvupdate_bw(
    magma_int_t maxiter,
    magma_d_matrix A,
    magma_d_matrix t,
    magma_d_matrix b,
    magma_d_matrix d,
    magma_d_matrix *x,
    magma_queue_t queue )
{
    // local variables
    //double c_zero = MAGMA_D_ZERO;
    //double c_one = MAGMA_D_ONE;
    dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    for( magma_int_t i=0; i<maxiter; i++ ) {
        // distinct routines imply synchronization
        // magma_d_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
        //hipLaunchKernelGGL(( djacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
        // merged in one implies asynchronous update
        hipLaunchKernelGGL(( djacobispmvupdate_bw_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
            t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
    }
    return MAGMA_SUCCESS;
}
// SpMV-fused Jacobi update restricted to selected rows:
//     x(row,i) += ( b(row,i) - A(row,:)*x(:,i) ) * d(row)
// for row = indices[idx], one thread per entry of `indices`. Indices may
// repeat to emulate overlapping domains; repeated rows then race by design
// (asynchronous update). The t and y parameters are unused; they are kept
// to preserve the launch signature used by the host wrapper.
// Fixes: (1) removed a leftover debug `printf(" ")` that every thread
// executed, serializing warps and flooding stdout; (2) the dot-product
// accumulator is now reset per RHS column — previously it was declared
// once before the column loop, leaking partial sums into later columns
// when num_cols > 1 (single-RHS calls were unaffected).
__global__ void
djacobispmvupdateselect_kernel(
    int num_rows,
    int num_cols,
    int num_updates,
    magma_index_t * indices,
    double * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    double *t,
    double *b,
    double *d,
    double *x,
    double *y )
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int j;
    if( idx<num_updates){
        int row = indices[ idx ];
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( int i=0; i<num_cols; i++){
            double dot = MAGMA_D_ZERO;   // restart the row dot product for column i
            for( j=start; j<end; j++){
                dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
            }
            x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row];
        }
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel allows for overlapping domains: the indices-array contains
the locations that are updated. Locations may be repeated to simulate
overlapping domains.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
num_updates magma_int_t
number of updates - length of the indices array
@param[in]
indices magma_index_t*
indices, which entries of x to update
@param[in]
A magma_d_matrix
system matrix
@param[in]
t magma_d_matrix
workspace
@param[in]
b magma_d_matrix
RHS b
@param[in]
d magma_d_matrix
vector with diagonal entries
@param[in]
tmp magma_d_matrix
workspace
@param[out]
x magma_d_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
// Runs maxiter fused SpMV+Jacobi sweeps restricted to the rows listed in
// `indices` (overlapping domains allowed — see kernel). Enqueues on
// `queue` and returns without synchronizing.
extern "C" magma_int_t
magma_djacobispmvupdateselect(
    magma_int_t maxiter,
    magma_int_t num_updates,
    magma_index_t *indices,
    magma_d_matrix A,
    magma_d_matrix t,
    magma_d_matrix b,
    magma_d_matrix d,
    magma_d_matrix tmp,
    magma_d_matrix *x,
    magma_queue_t queue )
{
    // local variables
    //double c_zero = MAGMA_D_ZERO
    //double c_one = MAGMA_D_ONE;
    //magma_d_matrix swp;
    dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    // NOTE(review): this printf of the launch configuration looks like a
    // debugging leftover — verify whether it should be removed.
    printf("num updates:%d %d %d\n", int(num_updates), int(threads), int(grid.x) );
    for( magma_int_t i=0; i<maxiter; i++ ) {
        hipLaunchKernelGGL(( djacobispmvupdateselect_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
            t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval );
        //swp.dval = x->dval;
        //x->dval = tmp.dval;
        //tmp.dval = swp.dval;
    }
    return MAGMA_SUCCESS;
}
// Per-entry contraction statistics for fault-tolerant Jacobi:
//     z[idx] = | x^{k-1}[idx] - x^k[idx] |
//     c[idx] = | x^{k-2}[idx] - x^{k-1}[idx] | / | x^{k-1}[idx] - x^k[idx] |
// (imaginary parts set to 0 via MAGMA_D_MAKE).
// NOTE(review): the division is unguarded — when an entry has converged
// (x^{k-1} == x^k) c becomes inf/nan; verify downstream consumers tolerate
// this (dftjacobiupdatecheck_kernel clamps its own denominator to 1e-15,
// but this kernel does not).
__global__ void
dftjacobicontractions_kernel(
    int num_rows,
    double * xkm2val,
    double * xkm1val,
    double * xkval,
    double * zval,
    double * cval )
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if( idx<num_rows ){
        zval[idx] = MAGMA_D_MAKE( MAGMA_D_ABS( xkm1val[idx] - xkval[idx] ), 0.0);
        cval[ idx ] = MAGMA_D_MAKE(
            MAGMA_D_ABS( xkm2val[idx] - xkm1val[idx] )
            / MAGMA_D_ABS( xkm1val[idx] - xkval[idx] )
            ,0.0 );
    }
}
/**
Purpose
-------
Computes the contraction coefficients c_i:
c_i = z_i^{k-1} / z_i^{k}
= | x_i^{k-1} - x_i^{k-2} | / | x_i^{k} - x_i^{k-1} |
Arguments
---------
@param[in]
xkm2 magma_d_matrix
vector x^{k-2}
@param[in]
xkm1 magma_d_matrix
vector x^{k-2}
@param[in]
xk magma_d_matrix
vector x^{k-2}
@param[out]
z magma_d_matrix*
ratio
@param[out]
c magma_d_matrix*
contraction coefficients
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
// Computes the per-entry contraction coefficients c and residual-difference
// vector z from three consecutive Jacobi iterates (see kernel above).
// Enqueues one kernel on `queue` and returns without synchronizing.
extern "C" magma_int_t
magma_dftjacobicontractions(
    magma_d_matrix xkm2,
    magma_d_matrix xkm1,
    magma_d_matrix xk,
    magma_d_matrix *z,
    magma_d_matrix *c,
    magma_queue_t queue )
{
    dim3 grid( magma_ceildiv( xk.num_rows, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    hipLaunchKernelGGL(( dftjacobicontractions_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
        xkm2.num_rows, xkm2.dval, xkm1.dval, xk.dval, z->dval, c->dval );
    return MAGMA_SUCCESS;
}
// Per-entry update check for fault-tolerant Jacobi (ScaLA'15 condition):
// accept xnew[idx] when the observed contraction ratio is close to the
// predicted coefficient c[idx] (or a false-positive escape fires),
// otherwise roll the entry back to xold[idx].
// flag_t[idx]:  1 while the entry is held back by the threshold condition.
// flag_fp[idx]: iterations since the last false-positive reset; it scales
//               the acceptance window vkv = 2^min(flag_fp,100).
__global__ void
dftjacobiupdatecheck_kernel(
    int num_rows,
    double delta,
    double * xold,
    double * xnew,
    double * zprev,
    double * cval,
    magma_int_t *flag_t,
    magma_int_t *flag_fp )
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if( idx<num_rows ){
        double t1 = delta * MAGMA_D_ABS(cval[idx]);
        // vkv = 2^min(flag_fp, 100), computed by repeated doubling.
        double vkv = 1.0;
        for( magma_int_t i=0; i<min( flag_fp[idx], 100 ); i++){
            vkv = vkv*2;
        }
        double xold_l = xold[idx];
        double xnew_l = xnew[idx];
        // Clamp the step magnitude away from zero to avoid division by zero below.
        double znew = MAGMA_D_MAKE(
            max( MAGMA_D_ABS( xold_l - xnew_l), 1e-15), 0.0 );
        double znr = zprev[idx] / znew;
        double t2 = MAGMA_D_ABS( znr - cval[idx] );
        //% evaluate fp-cond
        magma_int_t fpcond = 0;
        if( MAGMA_D_ABS(znr)>vkv ){
            fpcond = 1;
        }
        // % combine t-cond and fp-cond + flag_t == 1
        magma_int_t cond = 0;
        if( t2<t1 || (flag_t[idx]>0 && fpcond > 0 ) ){
            cond = 1;
        }
        flag_fp[idx] = flag_fp[idx]+1;
        if( fpcond>0 ){
            flag_fp[idx] = 0;
        }
        if( cond > 0 ){
            // Accept: commit the new value and remember its step size.
            flag_t[idx] = 0;
            zprev[idx] = znew;
            xold[idx] = xnew_l;
        } else {
            // Reject: roll the entry back and mark it as held.
            flag_t[idx] = 1;
            xnew[idx] = xold_l;
        }
    }
}
/**
Purpose
-------
Checks the Jacobi updates accorting to the condition in the ScaLA'15 paper.
Arguments
---------
@param[in]
delta double
threshold
@param[in,out]
xold magma_d_matrix*
vector xold
@param[in,out]
xnew magma_d_matrix*
vector xnew
@param[in,out]
zprev magma_d_matrix*
vector z = | x_k-1 - x_k |
@param[in]
c magma_d_matrix
contraction coefficients
@param[in,out]
flag_t magma_int_t
threshold condition
@param[in,out]
flag_fp magma_int_t
false positive condition
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
// Applies the ScaLA'15 per-entry accept/rollback check to the latest Jacobi
// iterate (see kernel above). flag_t and flag_fp must point to device
// memory — they are dereferenced inside the kernel. Enqueues on `queue`
// and returns without synchronizing.
extern "C" magma_int_t
magma_dftjacobiupdatecheck(
    double delta,
    magma_d_matrix *xold,
    magma_d_matrix *xnew,
    magma_d_matrix *zprev,
    magma_d_matrix c,
    magma_int_t *flag_t,
    magma_int_t *flag_fp,
    magma_queue_t queue )
{
    dim3 grid( magma_ceildiv( xnew->num_rows, BLOCK_SIZE ));
    magma_int_t threads = BLOCK_SIZE;
    hipLaunchKernelGGL(( dftjacobiupdatecheck_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
        xold->num_rows, delta, xold->dval, xnew->dval, zprev->dval, c.dval,
        flag_t, flag_fp );
    return MAGMA_SUCCESS;
}
| 9b494897b94d12b6871c22fa0affc986aa34a2df.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zjacobisetup.cu, normal z -> d, Wed Jan 2 14:18:53 2019
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
__global__ void
dvjacobisetup_gpu( int num_rows,
int num_vecs,
double *b,
double *d,
double *c,
double *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ ){
c[row+i*num_rows] = b[row+i*num_rows] / d[row];
x[row+i*num_rows] = c[row+i*num_rows];
}
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_d_matrix
RHS b
@param[in]
d magma_d_matrix
vector with diagonal entries
@param[out]
c magma_d_matrix*
c = D^(-1) * b
@param[out]
x magma_d_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_djacobisetup_vector_gpu(
magma_int_t num_rows,
magma_d_matrix b,
magma_d_matrix d,
magma_d_matrix c,
magma_d_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
int num_vecs = b.num_rows / num_rows;
magma_int_t threads = BLOCK_SIZE;
dvjacobisetup_gpu<<< grid, threads, 0, queue->cuda_stream()>>>
( num_rows, num_vecs, b.dval, d.dval, c.dval, x->val );
return MAGMA_SUCCESS;
}
__global__ void
djacobidiagscal_kernel( int num_rows,
int num_vecs,
double *b,
double *d,
double *c)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++)
c[row+i*num_rows] = b[row+i*num_rows] * d[row];
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_d_matrix
RHS b
@param[in]
d magma_d_matrix
vector with diagonal entries
@param[out]
c magma_d_matrix*
c = D^(-1) * b
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_djacobi_diagscal(
magma_int_t num_rows,
magma_d_matrix d,
magma_d_matrix b,
magma_d_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, 512 ));
int num_vecs = b.num_rows*b.num_cols/num_rows;
magma_int_t threads = 512;
djacobidiagscal_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( num_rows, num_vecs, b.dval, d.dval, c->val );
return MAGMA_SUCCESS;
}
__global__ void
djacobiupdate_kernel( int num_rows,
int num_cols,
double *t,
double *b,
double *d,
double *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_cols; i++)
x[row+i*num_rows] += (b[row+i*num_rows]-t[row+i*num_rows]) * d[row];
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-t)
where d is the diagonal of the system matrix A and t=Ax.
Arguments
---------
@param[in]
t magma_d_matrix
t = A*x
@param[in]
b magma_d_matrix
RHS b
@param[in]
d magma_d_matrix
vector with diagonal entries
@param[out]
x magma_d_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_djacobiupdate(
magma_d_matrix t,
magma_d_matrix b,
magma_d_matrix d,
magma_d_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
djacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
return MAGMA_SUCCESS;
}
// SpMV-fused Jacobi sweep over a CSR matrix, one thread per row:
//     x(row,i) += ( b(row,i) - A(row,:)*x(:,i) ) * d(row)
// for every stacked RHS vector i. x is read and written in the same pass,
// so concurrent rows see a mix of old and new values — this asynchronous
// ("merged") update is intentional (see magma_djacobispmvupdate).
// The t workspace parameter is unused here; it is kept so the launch
// signature matches the separate spmv+update code path.
// Fix: the dot-product accumulator is now reset for every RHS column.
// Previously `dot` was declared once before the column loop, so for
// num_cols > 1 the partial sums of earlier columns leaked into later ones
// (single-RHS calls were unaffected).
__global__ void
djacobispmvupdate_kernel(
    int num_rows,
    int num_cols,
    double * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    double *t,
    double *b,
    double *d,
    double *x )
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int j;
    if(row<num_rows){
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( int i=0; i<num_cols; i++){
            double dot = MAGMA_D_ZERO;   // restart the row dot product for column i
            for( j=start; j<end; j++){
                dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
            }
            x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
        }
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_d_matrix
system matrix
@param[in]
t magma_d_matrix
workspace
@param[in]
b magma_d_matrix
RHS b
@param[in]
d magma_d_matrix
vector with diagonal entries
@param[out]
x magma_d_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_djacobispmvupdate(
magma_int_t maxiter,
magma_d_matrix A,
magma_d_matrix t,
magma_d_matrix b,
magma_d_matrix d,
magma_d_matrix *x,
magma_queue_t queue )
{
// local variables
//double c_zero = MAGMA_D_ZERO;
//double c_one = MAGMA_D_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_d_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
// djacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
djacobispmvupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
// Backward-ordered variant of djacobispmvupdate_kernel: thread blocks map
// to rows in reverse (row = num_rows-1 - tid), performing the same fused
// asynchronous update
//     x(row,i) += ( b(row,i) - A(row,:)*x(:,i) ) * d(row)
// per stacked RHS vector i. The t workspace parameter is unused; kept for
// signature parity with the forward kernel.
// Fix: the dot-product accumulator is now reset for every RHS column.
// Previously `dot` was declared once before the column loop, so for
// num_cols > 1 earlier columns' partial sums leaked into later ones
// (single-RHS calls were unaffected).
__global__ void
djacobispmvupdate_bw_kernel(
    int num_rows,
    int num_cols,
    double * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    double *t,
    double *b,
    double *d,
    double *x )
{
    int row_tmp = blockDim.x * blockIdx.x + threadIdx.x;
    int row = num_rows-1 - row_tmp;      // process rows in reverse order
    int j;
    if( row>-1 ){
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( int i=0; i<num_cols; i++){
            double dot = MAGMA_D_ZERO;   // restart the row dot product for column i
            for( j=start; j<end; j++){
                dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
            }
            x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
        }
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel processes the thread blocks in reversed order.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_d_matrix
system matrix
@param[in]
t magma_d_matrix
workspace
@param[in]
b magma_d_matrix
RHS b
@param[in]
d magma_d_matrix
vector with diagonal entries
@param[out]
x magma_d_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_djacobispmvupdate_bw(
magma_int_t maxiter,
magma_d_matrix A,
magma_d_matrix t,
magma_d_matrix b,
magma_d_matrix d,
magma_d_matrix *x,
magma_queue_t queue )
{
// local variables
//double c_zero = MAGMA_D_ZERO;
//double c_one = MAGMA_D_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_d_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
// djacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
djacobispmvupdate_bw_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
// SpMV-fused Jacobi update restricted to selected rows:
//     x(row,i) += ( b(row,i) - A(row,:)*x(:,i) ) * d(row)
// for row = indices[idx], one thread per entry of `indices`. Indices may
// repeat to emulate overlapping domains; repeated rows then race by design
// (asynchronous update). The t and y parameters are unused; they are kept
// to preserve the launch signature used by the host wrapper.
// Fixes: (1) removed a leftover debug `printf(" ")` that every thread
// executed, serializing warps and flooding stdout; (2) the dot-product
// accumulator is now reset per RHS column — previously it was declared
// once before the column loop, leaking partial sums into later columns
// when num_cols > 1 (single-RHS calls were unaffected).
__global__ void
djacobispmvupdateselect_kernel(
    int num_rows,
    int num_cols,
    int num_updates,
    magma_index_t * indices,
    double * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    double *t,
    double *b,
    double *d,
    double *x,
    double *y )
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int j;
    if( idx<num_updates){
        int row = indices[ idx ];
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( int i=0; i<num_cols; i++){
            double dot = MAGMA_D_ZERO;   // restart the row dot product for column i
            for( j=start; j<end; j++){
                dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
            }
            x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row];
        }
    }
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel allows for overlapping domains: the indices-array contains
the locations that are updated. Locations may be repeated to simulate
overlapping domains.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
num_updates magma_int_t
number of updates - length of the indices array
@param[in]
indices magma_index_t*
indices, which entries of x to update
@param[in]
A magma_d_matrix
system matrix
@param[in]
t magma_d_matrix
workspace
@param[in]
b magma_d_matrix
RHS b
@param[in]
d magma_d_matrix
vector with diagonal entries
@param[in]
tmp magma_d_matrix
workspace
@param[out]
x magma_d_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_djacobispmvupdateselect(
magma_int_t maxiter,
magma_int_t num_updates,
magma_index_t *indices,
magma_d_matrix A,
magma_d_matrix t,
magma_d_matrix b,
magma_d_matrix d,
magma_d_matrix tmp,
magma_d_matrix *x,
magma_queue_t queue )
{
// local variables
//double c_zero = MAGMA_D_ZERO
//double c_one = MAGMA_D_ONE;
//magma_d_matrix swp;
dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
printf("num updates:%d %d %d\n", int(num_updates), int(threads), int(grid.x) );
for( magma_int_t i=0; i<maxiter; i++ ) {
djacobispmvupdateselect_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval );
//swp.dval = x->dval;
//x->dval = tmp.dval;
//tmp.dval = swp.dval;
}
return MAGMA_SUCCESS;
}
__global__ void
dftjacobicontractions_kernel(
int num_rows,
double * xkm2val,
double * xkm1val,
double * xkval,
double * zval,
double * cval )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx<num_rows ){
zval[idx] = MAGMA_D_MAKE( MAGMA_D_ABS( xkm1val[idx] - xkval[idx] ), 0.0);
cval[ idx ] = MAGMA_D_MAKE(
MAGMA_D_ABS( xkm2val[idx] - xkm1val[idx] )
/ MAGMA_D_ABS( xkm1val[idx] - xkval[idx] )
,0.0 );
}
}
/**
Purpose
-------
Computes the contraction coefficients c_i:
c_i = z_i^{k-1} / z_i^{k}
= | x_i^{k-1} - x_i^{k-2} | / | x_i^{k} - x_i^{k-1} |
Arguments
---------
@param[in]
xkm2 magma_d_matrix
vector x^{k-2}
@param[in]
xkm1 magma_d_matrix
vector x^{k-2}
@param[in]
xk magma_d_matrix
vector x^{k-2}
@param[out]
z magma_d_matrix*
ratio
@param[out]
c magma_d_matrix*
contraction coefficients
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_dftjacobicontractions(
magma_d_matrix xkm2,
magma_d_matrix xkm1,
magma_d_matrix xk,
magma_d_matrix *z,
magma_d_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( xk.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
dftjacobicontractions_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( xkm2.num_rows, xkm2.dval, xkm1.dval, xk.dval, z->dval, c->dval );
return MAGMA_SUCCESS;
}
__global__ void
dftjacobiupdatecheck_kernel(
    int num_rows,
    double delta,
    double * xold,
    double * xnew,
    double * zprev,
    double * cval,
    magma_int_t *flag_t,
    magma_int_t *flag_fp )
{
    // One thread per vector component.
    const int row = blockDim.x * blockIdx.x + threadIdx.x;
    if ( row >= num_rows )
        return;

    // Threshold derived from this component's contraction coefficient.
    const double thresh = delta * MAGMA_D_ABS( cval[row] );

    // pow2 = 2^min(flag_fp[row], 100): bound on the ratio used by the
    // false-positive check, growing with the step counter in flag_fp.
    double pow2 = 1.0;
    for ( magma_int_t k = 0; k < min( flag_fp[row], 100 ); k++ ) {
        pow2 = pow2 * 2;
    }

    const double x_prev = xold[row];
    const double x_curr = xnew[row];
    // Update magnitude, clamped away from zero to keep the ratio finite.
    const double z_curr = MAGMA_D_MAKE(
                    max( MAGMA_D_ABS( x_prev - x_curr ), 1e-15 ), 0.0 );
    const double z_ratio = zprev[row] / z_curr;
    const double deviation = MAGMA_D_ABS( z_ratio - cval[row] );

    // False-positive condition: the ratio exceeded the 2^k bound.
    const magma_int_t fpcond = ( MAGMA_D_ABS( z_ratio ) > pow2 ) ? 1 : 0;
    // Accept when the deviation is below the threshold, or when a
    // previously rejected component (flag_t set) triggers fp-cond.
    const magma_int_t accept =
        ( deviation < thresh || ( flag_t[row] > 0 && fpcond > 0 ) ) ? 1 : 0;

    // Count steps since the last false positive; reset when one occurs.
    flag_fp[row] = flag_fp[row] + 1;
    if ( fpcond > 0 ) {
        flag_fp[row] = 0;
    }

    if ( accept > 0 ) {
        // Keep the new iterate and remember its update magnitude.
        flag_t[row] = 0;
        zprev[row]  = z_curr;
        xold[row]   = x_curr;
    } else {
        // Reject: roll xnew back to the previous value and mark the row.
        flag_t[row] = 1;
        xnew[row]   = x_prev;
    }
}
/**
Purpose
-------
Checks the Jacobi updates according to the condition in the ScaLA'15 paper.
Arguments
---------
@param[in]
delta double
threshold
@param[in,out]
xold magma_d_matrix*
vector xold
@param[in,out]
xnew magma_d_matrix*
vector xnew
@param[in,out]
zprev magma_d_matrix*
vector z = | x_k-1 - x_k |
@param[in]
c magma_d_matrix
contraction coefficients
@param[in,out]
flag_t      magma_int_t*
threshold condition
@param[in,out]
flag_fp     magma_int_t*
false positive condition
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_dftjacobiupdatecheck(
    double delta,
    magma_d_matrix *xold,
    magma_d_matrix *xnew,
    magma_d_matrix *zprev,
    magma_d_matrix c,
    magma_int_t *flag_t,
    magma_int_t *flag_fp,
    magma_queue_t queue )
{
    // One thread per vector component, BLOCK_SIZE threads per block.
    magma_int_t threads = BLOCK_SIZE;
    dim3 blocks( magma_ceildiv( xnew->num_rows, BLOCK_SIZE ));
    // The kernel accepts/rejects each component update in place.
    dftjacobiupdatecheck_kernel<<< blocks, threads, 0, queue->cuda_stream()>>>
        ( xold->num_rows, delta, xold->dval, xnew->dval, zprev->dval, c.dval,
          flag_t, flag_fp );
    return MAGMA_SUCCESS;
}
|
153c488936ffd779f3790f0a749eaaa987c5735a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ***********************************************************************
//
// Rundemanen: CUDA C++ parallel program for community detection
// Md Naim (naim.md@gmail.com), Fredrik Manne (Fredrik.Manne@uib.no)
// University of Bergen
//
// ***********************************************************************
//
// Copyright (2016) University of Bergen
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// ************************************************************************
#include "graphGPU.h"
#include"thrust/extrema.h"
#include"thrust/reduce.h"
#include"thrust/execution_policy.h"
#include"thrust/fill.h"
#include"thrust/functional.h"
#ifdef RUNONGPU
__device__
#endif
// Warp-cooperative copy of `segment_len` ints from `src` into `dest`:
// lane `laneId` handles elements laneId, laneId+WARP_SIZE, ...
void copy_from_global_to_shared_(int laneId, int segment_len, volatile int* dest,
        int* src, unsigned int WARP_SIZE) {
    int i = laneId;
    while (i < segment_len) {
        dest[i] = src[i];
        i += WARP_SIZE;
    }
}
#ifdef RUNONGPU
__device__
#endif
// Marks (or unmarks) the colors of a vertex's neighbors in the availability
// array. Warp lanes stride over the neighbor list.
// @neighbors: adjacency list of the current vertex (nr_neighors entries)
// @colors:    current (possibly partial) coloring; -1 means uncolored
// @available: per-color availability flags, indexed by color
// @flag:      value written for each neighboring color (false = taken)
void modify_available_array(int laneId, int nr_neighors, unsigned int* neighbors,
        int* colors, bool* available, bool flag, unsigned int WARP_SIZE) {

    for (int i = laneId; i < nr_neighors; i = i + WARP_SIZE) {
        // BUGFIX: was neighbors[laneId], which re-read the same neighbor on
        // every iteration and never visited neighbors beyond the first
        // WARP_SIZE entries.
        int nbr = neighbors[i];
        int nbrColor = colors[nbr]; // read current color,may be atomic read

        if (nbrColor != -1)
            available[nbrColor] = flag;
    }
}
#ifdef RUNONGPU
__device__
#endif
// Returns the smallest color in [0, maxDegree] still marked available,
// scanned cooperatively by the lanes of one warp; every lane ends up with
// the warp-wide minimum. Sentinel 2*maxDegree+1 means "none available".
int pickMinimumColor(int laneId, int maxDegree, bool* available, unsigned int WARP_SIZE) {

    int minColor = 2 * maxDegree + 1;
    // Each lane scans its strided slice; the first hit is that slice's minimum.
    for (int c = laneId; c <= maxDegree; c = c + WARP_SIZE) {
        // pick the first available color
        if (available[c]) {
            minColor = c;
            break;
        }
    }
    // Warp-wide minimum via butterfly (XOR) reduction.
    // BUGFIX: the shuffled candidate was held in a float, silently converting
    // the int color; keep it as int.
    // NOTE(review): __shfl_xor is the legacy mask-less intrinsic (removed for
    // CUDA on Volta+) -- consider __shfl_xor_sync(0xffffffff, ...).
    for (int i = WARP_SIZE / 2; i >= 1; i = i / 2) {
        int tempColor = __shfl_xor(minColor, i, WARP_SIZE);
        if (tempColor < minColor) {
            minColor = tempColor;
        }
    }
    return minColor;
}
/*
*@available, local array for each warp
*@colors, contains current coloring
*/
// Greedy coloring kernel: each warp sequentially colors its chunk of
// CHUNK_PER_WARP consecutive vertices. The chunk's row offsets are staged in
// dynamic shared memory; each warp also owns a (maxDegree+1)-slot slice of
// the global `available` scratch array used as per-color flags.
// NOTE(review): there is no inter-warp synchronization while reading
// neighbors' colors, so concurrently colored neighbors may observe stale
// values -- confirm this is the intended (speculative) behavior.
__global__ void coloringKernel(int* indices, unsigned int* links, int* colors,
        bool* available, int numVertices, int maxDegree, unsigned int WARP_SIZE) {

    unsigned int wid = threadIdx.x / WARP_SIZE;    // warp id within the block
    unsigned int laneId = threadIdx.x % WARP_SIZE; // id in the warp

    // Dynamic shared memory, partitioned into one segment per warp:
    extern __shared__ int blockMemory[];
    int* myMemory = blockMemory + (CHUNK_PER_WARP + 1 + CHUNK_PER_WARP) * wid;
    //(CHUNK_PER_WARP+1) indices
    // last (CHUNK_PER_WARP)  *************** NEED TO DECIDE******************
    volatile int* warpMemory = myMemory;
    // local warp id
    if (!laneId) {
        //printf(" @tid: %d \n", (wid * WARP_SIZE + laneId));
    }
    // Global warp ID
    wid = blockIdx.x * (NR_THREAD_PER_BLOCK / WARP_SIZE) + wid;

    // Number of vertices this warp owns (partial chunk at the tail).
    int num_vertex_to_process = numVertices - wid*CHUNK_PER_WARP;
    if ((wid + 1) * CHUNK_PER_WARP <= numVertices) {
        num_vertex_to_process = CHUNK_PER_WARP;
    }
    if (num_vertex_to_process > 0) {
        // copy indices from global memory to shared memory
        // (num_vertex_to_process + 1 offsets delimit the chunk's adjacency lists)
        copy_from_global_to_shared_(laneId, num_vertex_to_process + 1,
                warpMemory, &indices[wid * CHUNK_PER_WARP], WARP_SIZE);
    }

    // This warp's slice of `available`; initially every color is free.
    int global_ptr = wid * (maxDegree + 1);
    for (int c = laneId; c <= maxDegree; c = c + WARP_SIZE) {
        available[global_ptr + c] = true;
    }

    //process each vertex sequentially
    for (int vid_index_in_warp = 0; vid_index_in_warp < num_vertex_to_process;
            vid_index_in_warp++) {

        int node = wid * CHUNK_PER_WARP + vid_index_in_warp;
        //if (!wid && !laneId)
        //    printf("For node %d: \n ", node);
        unsigned int start_of_neighbors = warpMemory[vid_index_in_warp];
        unsigned int end_of_neighbors = warpMemory[vid_index_in_warp + 1];

        // make neighboring colors unavailable
        modify_available_array(laneId, (end_of_neighbors - start_of_neighbors),
                &links[start_of_neighbors], colors, &available[global_ptr],
                false, WARP_SIZE);

        // Debug trace, warp 0 / lane 0 only.
        if (!laneId && !wid) {
            printf("v=%d:", node);
            for (int k = 0; k <= maxDegree; k++) {
                printf("%d ", available[global_ptr + k]);
            }
            printf("\n");
        }

        // Warp-cooperatively pick the smallest still-available color;
        // lane 0 publishes it.
        int minColor = pickMinimumColor(laneId, maxDegree, &available[global_ptr], WARP_SIZE);
        if (!laneId)
            colors[node] = minColor;
        if (!laneId && !wid) {
            printf("Picking %d\n", minColor);
        }

        //reset available array
        modify_available_array(laneId, (end_of_neighbors - start_of_neighbors),
                &links[start_of_neighbors], colors, &available[global_ptr], true, WARP_SIZE);
        if (!laneId && !wid) {
            for (int k = 0; k <= maxDegree; k++) {
                printf("%d ", available[global_ptr + k]);
            }
            printf("\n");
        }
    }
}
// Greedy graph coloring driver: sizes the launch so each warp colors
// CHUNK_PER_WARP consecutive vertices, allocates the per-warp `available`
// scratch (MAX_DEGREE+1 flags per warp), initializes colors to -1 and runs
// coloringKernel.
// @WARP_SIZE: threads per warp, used to derive warps-per-block.
void GraphGPU::greedyColoring(unsigned int WARP_SIZE) {

    colors.resize(nb_nodes);

    // Each block hosts NR_THREAD_PER_BLOCK / WARP_SIZE warps, each covering
    // CHUNK_PER_WARP vertices; round the block count up.
    int load_per_blk = CHUNK_PER_WARP * (NR_THREAD_PER_BLOCK / WARP_SIZE);
    int nr_of_block = (nb_nodes + load_per_blk - 1) / load_per_blk;
    // Per warp: (CHUNK_PER_WARP + 1) row offsets plus CHUNK_PER_WARP scratch
    // ints, matching the layout expected by coloringKernel.
    int size_of_shared_memory = (2 * CHUNK_PER_WARP + 1)*(NR_THREAD_PER_BLOCK / WARP_SIZE) * sizeof (int);

    //print flag
    int hostPrint = 1;

    //determine max degree: degree_per_node[i] = indices[i+1] - indices[i]
    thrust::device_vector<int> degree_per_node(nb_nodes);
    thrust::transform(thrust::device, indices.begin() + 1, indices.end(), indices.begin(),
            degree_per_node.begin(), thrust::minus<int>());
    if (hostPrint) {
        std::cout << "Degree per Node:" << std::endl;
        //print_vector(degree_per_node);
    }
    int MAX_DEGREE = *thrust::max_element(degree_per_node.begin(), degree_per_node.end());
    if (hostPrint) {
        std::cout << "Max_Degree: " << MAX_DEGREE << std::endl;
    }

    // available array to be used locally by each warp
    thrust::device_vector<bool> available;
    available.resize((MAX_DEGREE + 1) * nr_of_block * (NR_THREAD_PER_BLOCK / WARP_SIZE));

    // Initialize colors; -1 marks "not yet colored".
    thrust::fill_n(thrust::device, colors.begin(), colors.size(), -1);
    if (hostPrint) {
        std::cout << std::endl << "Color:" << std::endl;
        thrust::copy(colors.begin(), colors.end(), std::ostream_iterator<int>(std::cout, " "));
        std::cout << std::endl;
    }

    coloringKernel << < nr_of_block, NR_THREAD_PER_BLOCK, size_of_shared_memory >>>
            (thrust::raw_pointer_cast(indices.data()), thrust::raw_pointer_cast(links.data()),
            thrust::raw_pointer_cast(colors.data()), thrust::raw_pointer_cast(available.data()),
            nb_nodes, MAX_DEGREE, WARP_SIZE);

    // FIX(robustness): check for launch and execution errors instead of
    // silently reading back a possibly stale coloring.
    hipError_t launchErr = hipGetLastError();
    if (launchErr != hipSuccess) {
        std::cout << "coloringKernel launch failed: "
                  << hipGetErrorString(launchErr) << std::endl;
    }
    hipError_t syncErr = hipDeviceSynchronize();
    if (syncErr != hipSuccess) {
        std::cout << "coloringKernel execution failed: "
                  << hipGetErrorString(syncErr) << std::endl;
    }

    if (hostPrint) {
        std::cout << std::endl << "Color:" << std::endl;
        thrust::copy(colors.begin(), colors.end(), std::ostream_iterator<int>(std::cout, " "));
        std::cout << std::endl;
    }
}
| 153c488936ffd779f3790f0a749eaaa987c5735a.cu | // ***********************************************************************
//
// Rundemanen: CUDA C++ parallel program for community detection
// Md Naim (naim.md@gmail.com), Fredrik Manne (Fredrik.Manne@uib.no)
// University of Bergen
//
// ***********************************************************************
//
// Copyright (2016) University of Bergen
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// ************************************************************************
#include "graphGPU.h"
#include"thrust/extrema.h"
#include"thrust/reduce.h"
#include"thrust/execution_policy.h"
#include"thrust/fill.h"
#include"thrust/functional.h"
#ifdef RUNONGPU
__device__
#endif
// Warp-cooperative copy of `segment_len` ints from `src` into `dest`:
// lane `laneId` handles elements laneId, laneId+WARP_SIZE, ...
void copy_from_global_to_shared_(int laneId, int segment_len, volatile int* dest,
        int* src, unsigned int WARP_SIZE) {
    int i = laneId;
    while (i < segment_len) {
        dest[i] = src[i];
        i += WARP_SIZE;
    }
}
#ifdef RUNONGPU
__device__
#endif
// Marks (or unmarks) the colors of a vertex's neighbors in the availability
// array. Warp lanes stride over the neighbor list.
// @neighbors: adjacency list of the current vertex (nr_neighors entries)
// @colors:    current (possibly partial) coloring; -1 means uncolored
// @available: per-color availability flags, indexed by color
// @flag:      value written for each neighboring color (false = taken)
void modify_available_array(int laneId, int nr_neighors, unsigned int* neighbors,
        int* colors, bool* available, bool flag, unsigned int WARP_SIZE) {

    for (int i = laneId; i < nr_neighors; i = i + WARP_SIZE) {
        // BUGFIX: was neighbors[laneId], which re-read the same neighbor on
        // every iteration and never visited neighbors beyond the first
        // WARP_SIZE entries.
        int nbr = neighbors[i];
        int nbrColor = colors[nbr]; // read current color,may be atomic read

        if (nbrColor != -1)
            available[nbrColor] = flag;
    }
}
#ifdef RUNONGPU
__device__
#endif
// Returns the smallest color in [0, maxDegree] still marked available,
// scanned cooperatively by the lanes of one warp; every lane ends up with
// the warp-wide minimum. Sentinel 2*maxDegree+1 means "none available".
int pickMinimumColor(int laneId, int maxDegree, bool* available, unsigned int WARP_SIZE) {

    int minColor = 2 * maxDegree + 1;
    // Each lane scans its strided slice; the first hit is that slice's minimum.
    for (int c = laneId; c <= maxDegree; c = c + WARP_SIZE) {
        // pick the first available color
        if (available[c]) {
            minColor = c;
            break;
        }
    }
    // Warp-wide minimum via butterfly (XOR) reduction.
    // BUGFIX: the shuffled candidate was held in a float, silently converting
    // the int color; keep it as int.
    // NOTE(review): __shfl_xor is the legacy mask-less intrinsic (removed for
    // CUDA on Volta+) -- consider __shfl_xor_sync(0xffffffff, ...).
    for (int i = WARP_SIZE / 2; i >= 1; i = i / 2) {
        int tempColor = __shfl_xor(minColor, i, WARP_SIZE);
        if (tempColor < minColor) {
            minColor = tempColor;
        }
    }
    return minColor;
}
/*
*@available, local array for each warp
*@colors, contains current coloring
*/
// Greedy coloring kernel: each warp sequentially colors its chunk of
// CHUNK_PER_WARP consecutive vertices. The chunk's row offsets are staged in
// dynamic shared memory; each warp also owns a (maxDegree+1)-slot slice of
// the global `available` scratch array used as per-color flags.
// NOTE(review): there is no inter-warp synchronization while reading
// neighbors' colors, so concurrently colored neighbors may observe stale
// values -- confirm this is the intended (speculative) behavior.
__global__ void coloringKernel(int* indices, unsigned int* links, int* colors,
        bool* available, int numVertices, int maxDegree, unsigned int WARP_SIZE) {

    unsigned int wid = threadIdx.x / WARP_SIZE;    // warp id within the block
    unsigned int laneId = threadIdx.x % WARP_SIZE; // id in the warp

    // Dynamic shared memory, partitioned into one segment per warp:
    extern __shared__ int blockMemory[];
    int* myMemory = blockMemory + (CHUNK_PER_WARP + 1 + CHUNK_PER_WARP) * wid;
    //(CHUNK_PER_WARP+1) indices
    // last (CHUNK_PER_WARP)  *************** NEED TO DECIDE******************
    volatile int* warpMemory = myMemory;
    // local warp id
    if (!laneId) {
        //printf(" @tid: %d \n", (wid * WARP_SIZE + laneId));
    }
    // Global warp ID
    wid = blockIdx.x * (NR_THREAD_PER_BLOCK / WARP_SIZE) + wid;

    // Number of vertices this warp owns (partial chunk at the tail).
    int num_vertex_to_process = numVertices - wid*CHUNK_PER_WARP;
    if ((wid + 1) * CHUNK_PER_WARP <= numVertices) {
        num_vertex_to_process = CHUNK_PER_WARP;
    }
    if (num_vertex_to_process > 0) {
        // copy indices from global memory to shared memory
        // (num_vertex_to_process + 1 offsets delimit the chunk's adjacency lists)
        copy_from_global_to_shared_(laneId, num_vertex_to_process + 1,
                warpMemory, &indices[wid * CHUNK_PER_WARP], WARP_SIZE);
    }

    // This warp's slice of `available`; initially every color is free.
    int global_ptr = wid * (maxDegree + 1);
    for (int c = laneId; c <= maxDegree; c = c + WARP_SIZE) {
        available[global_ptr + c] = true;
    }

    //process each vertex sequentially
    for (int vid_index_in_warp = 0; vid_index_in_warp < num_vertex_to_process;
            vid_index_in_warp++) {

        int node = wid * CHUNK_PER_WARP + vid_index_in_warp;
        //if (!wid && !laneId)
        //    printf("For node %d: \n ", node);
        unsigned int start_of_neighbors = warpMemory[vid_index_in_warp];
        unsigned int end_of_neighbors = warpMemory[vid_index_in_warp + 1];

        // make neighboring colors unavailable
        modify_available_array(laneId, (end_of_neighbors - start_of_neighbors),
                &links[start_of_neighbors], colors, &available[global_ptr],
                false, WARP_SIZE);

        // Debug trace, warp 0 / lane 0 only.
        if (!laneId && !wid) {
            printf("v=%d:", node);
            for (int k = 0; k <= maxDegree; k++) {
                printf("%d ", available[global_ptr + k]);
            }
            printf("\n");
        }

        // Warp-cooperatively pick the smallest still-available color;
        // lane 0 publishes it.
        int minColor = pickMinimumColor(laneId, maxDegree, &available[global_ptr], WARP_SIZE);
        if (!laneId)
            colors[node] = minColor;
        if (!laneId && !wid) {
            printf("Picking %d\n", minColor);
        }

        //reset available array
        modify_available_array(laneId, (end_of_neighbors - start_of_neighbors),
                &links[start_of_neighbors], colors, &available[global_ptr], true, WARP_SIZE);
        if (!laneId && !wid) {
            for (int k = 0; k <= maxDegree; k++) {
                printf("%d ", available[global_ptr + k]);
            }
            printf("\n");
        }
    }
}
// Greedy graph coloring driver: sizes the launch so each warp colors
// CHUNK_PER_WARP consecutive vertices, allocates the per-warp `available`
// scratch (MAX_DEGREE+1 flags per warp), initializes colors to -1 and runs
// coloringKernel.
// @WARP_SIZE: threads per warp, used to derive warps-per-block.
void GraphGPU::greedyColoring(unsigned int WARP_SIZE) {

    colors.resize(nb_nodes);

    // Each block hosts NR_THREAD_PER_BLOCK / WARP_SIZE warps, each covering
    // CHUNK_PER_WARP vertices; round the block count up.
    int load_per_blk = CHUNK_PER_WARP * (NR_THREAD_PER_BLOCK / WARP_SIZE);
    int nr_of_block = (nb_nodes + load_per_blk - 1) / load_per_blk;
    // Per warp: (CHUNK_PER_WARP + 1) row offsets plus CHUNK_PER_WARP scratch
    // ints, matching the layout expected by coloringKernel.
    int size_of_shared_memory = (2 * CHUNK_PER_WARP + 1)*(NR_THREAD_PER_BLOCK / WARP_SIZE) * sizeof (int);

    //print flag
    int hostPrint = 1;

    //determine max degree: degree_per_node[i] = indices[i+1] - indices[i]
    thrust::device_vector<int> degree_per_node(nb_nodes);
    thrust::transform(thrust::device, indices.begin() + 1, indices.end(), indices.begin(),
            degree_per_node.begin(), thrust::minus<int>());
    if (hostPrint) {
        std::cout << "Degree per Node:" << std::endl;
        //print_vector(degree_per_node);
    }
    int MAX_DEGREE = *thrust::max_element(degree_per_node.begin(), degree_per_node.end());
    if (hostPrint) {
        std::cout << "Max_Degree: " << MAX_DEGREE << std::endl;
    }

    // available array to be used locally by each warp
    thrust::device_vector<bool> available;
    available.resize((MAX_DEGREE + 1) * nr_of_block * (NR_THREAD_PER_BLOCK / WARP_SIZE));

    // Initialize colors; -1 marks "not yet colored".
    thrust::fill_n(thrust::device, colors.begin(), colors.size(), -1);
    if (hostPrint) {
        std::cout << std::endl << "Color:" << std::endl;
        thrust::copy(colors.begin(), colors.end(), std::ostream_iterator<int>(std::cout, " "));
        std::cout << std::endl;
    }

    coloringKernel << < nr_of_block, NR_THREAD_PER_BLOCK, size_of_shared_memory >>>
            (thrust::raw_pointer_cast(indices.data()), thrust::raw_pointer_cast(links.data()),
            thrust::raw_pointer_cast(colors.data()), thrust::raw_pointer_cast(available.data()),
            nb_nodes, MAX_DEGREE, WARP_SIZE);

    // FIX(robustness): check for launch and execution errors instead of
    // silently reading back a possibly stale coloring.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess) {
        std::cout << "coloringKernel launch failed: "
                  << cudaGetErrorString(launchErr) << std::endl;
    }
    cudaError_t syncErr = cudaDeviceSynchronize();
    if (syncErr != cudaSuccess) {
        std::cout << "coloringKernel execution failed: "
                  << cudaGetErrorString(syncErr) << std::endl;
    }

    if (hostPrint) {
        std::cout << std::endl << "Color:" << std::endl;
        thrust::copy(colors.begin(), colors.end(), std::ostream_iterator<int>(std::cout, " "));
        std::cout << std::endl;
    }
}
|
961bb848d2b985a8d779881c9b916f76d589b8fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_preproc_kernel [12][2];
static int dims_preproc_kernel_h [12][2] = {0};
//user function
__device__
// Per-point preprocessing for the implicit solve: writes the RHS term and the
// tridiagonal coefficients for the x, y and z sweeps (identical per direction).
// Boundary points (relative to the nx/ny/nz global extents) get an identity
// equation with zero RHS.
void preproc_kernel_gpu(const ACC<double> &u,
  ACC<double> &du,
  ACC<double> &ax,
  ACC<double> &bx,
  ACC<double> &cx,
  ACC<double> &ay,
  ACC<double> &by,
  ACC<double> &cy,
  ACC<double> &az,
  ACC<double> &bz,
  ACC<double> &cz,
  int *idx){
  double d_val, a_val, b_val, c_val;
  const int i = idx[0];
  const int j = idx[1];
  const int k = idx[2];
  if (i == 0 || i == nx-1 || j == 0 || j == ny-1 || k == 0 || k == nz-1) {
    // Boundary point: identity row, zero update.
    d_val = 0.0f;
    a_val = 0.0f;
    b_val = 1.0f;
    c_val = 0.0f;
  } else {
    // Interior point: 7-point stencil of u scaled by lambda.
    d_val = lambda*( u(-1,0,0) + u(1,0,0)
                   + u(0,-1,0) + u(0,1,0)
                   + u(0,0,-1) + u(0,0,1)
                   - 6.0f*u(0,0,0));
    a_val = -0.5f * lambda;
    b_val = 1.0f + lambda;
    c_val = -0.5f * lambda;
  }
  du(0,0,0) = d_val;
  ax(0,0,0) = a_val;
  bx(0,0,0) = b_val;
  cx(0,0,0) = c_val;
  ay(0,0,0) = a_val;
  by(0,0,0) = b_val;
  cy(0,0,0) = c_val;
  az(0,0,0) = a_val;
  bz(0,0,0) = b_val;
  cz(0,0,0) = c_val;
}
// Device wrapper (auto-generated by ops.py): computes each thread's 3D grid
// position, offsets every dataset pointer to that element using the
// constant-memory dimension table, wraps the pointers in ACC accessors and
// calls the user function for in-range threads.
__global__ void ops_preproc_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
double* __restrict arg8,
double* __restrict arg9,
double* __restrict arg10,
int arg_idx0, int arg_idx1, int arg_idx2,
int size0,
int size1,
int size2 ){

  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;

  // Global (range-origin-relative) index passed to the user function.
  int arg_idx[3];
  arg_idx[0] = arg_idx0+idx_x;
  arg_idx[1] = arg_idx1+idx_y;
  arg_idx[2] = arg_idx2+idx_z;
  // Advance each dataset pointer to this thread's element
  // (row-major: x + y*xdim + z*xdim*ydim, dims from constant memory).
  arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[0][0] + idx_z * 1*1 * dims_preproc_kernel[0][0] * dims_preproc_kernel[0][1];
  arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[1][0] + idx_z * 1*1 * dims_preproc_kernel[1][0] * dims_preproc_kernel[1][1];
  arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[2][0] + idx_z * 1*1 * dims_preproc_kernel[2][0] * dims_preproc_kernel[2][1];
  arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[3][0] + idx_z * 1*1 * dims_preproc_kernel[3][0] * dims_preproc_kernel[3][1];
  arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[4][0] + idx_z * 1*1 * dims_preproc_kernel[4][0] * dims_preproc_kernel[4][1];
  arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[5][0] + idx_z * 1*1 * dims_preproc_kernel[5][0] * dims_preproc_kernel[5][1];
  arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[6][0] + idx_z * 1*1 * dims_preproc_kernel[6][0] * dims_preproc_kernel[6][1];
  arg7 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[7][0] + idx_z * 1*1 * dims_preproc_kernel[7][0] * dims_preproc_kernel[7][1];
  arg8 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[8][0] + idx_z * 1*1 * dims_preproc_kernel[8][0] * dims_preproc_kernel[8][1];
  arg9 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[9][0] + idx_z * 1*1 * dims_preproc_kernel[9][0] * dims_preproc_kernel[9][1];
  arg10 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[10][0] + idx_z * 1*1 * dims_preproc_kernel[10][0] * dims_preproc_kernel[10][1];

  // Guard against the partial tile at the end of each dimension.
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    const ACC<double> argp0(dims_preproc_kernel[0][0], dims_preproc_kernel[0][1], arg0);
    ACC<double> argp1(dims_preproc_kernel[1][0], dims_preproc_kernel[1][1], arg1);
    ACC<double> argp2(dims_preproc_kernel[2][0], dims_preproc_kernel[2][1], arg2);
    ACC<double> argp3(dims_preproc_kernel[3][0], dims_preproc_kernel[3][1], arg3);
    ACC<double> argp4(dims_preproc_kernel[4][0], dims_preproc_kernel[4][1], arg4);
    ACC<double> argp5(dims_preproc_kernel[5][0], dims_preproc_kernel[5][1], arg5);
    ACC<double> argp6(dims_preproc_kernel[6][0], dims_preproc_kernel[6][1], arg6);
    ACC<double> argp7(dims_preproc_kernel[7][0], dims_preproc_kernel[7][1], arg7);
    ACC<double> argp8(dims_preproc_kernel[8][0], dims_preproc_kernel[8][1], arg8);
    ACC<double> argp9(dims_preproc_kernel[9][0], dims_preproc_kernel[9][1], arg9);
    ACC<double> argp10(dims_preproc_kernel[10][0], dims_preproc_kernel[10][1], arg10);
    preproc_kernel_gpu(argp0, argp1, argp2, argp3,
                  argp4, argp5, argp6, argp7, argp8,
                  argp9, argp10, arg_idx);
  }
}
// host stub function
#ifndef OPS_LAZY
// Host stub (auto-generated, eager path): computes the local iteration range,
// refreshes the constant-memory dimension table when dataset sizes changed,
// derives per-dataset base pointers for the range origin, performs halo
// exchanges and launches ops_preproc_kernel.
void ops_par_loop_preproc_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
 ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
 ops_arg arg9, ops_arg arg10, ops_arg arg11) {
#else
// Lazy path: same body, arguments unpacked from the queued descriptor.
void ops_par_loop_preproc_kernel_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  #if OPS_MPI
  ops_block block = desc->block;
  #endif
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
  ops_arg arg6 = desc->args[6];
  ops_arg arg7 = desc->args[7];
  ops_arg arg8 = desc->args[8];
  ops_arg arg9 = desc->args[9];
  ops_arg arg10 = desc->args[10];
  ops_arg arg11 = desc->args[11];
  #endif

  //Timing
  double t1,t2,c1,c2;

  ops_arg args[12] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11};


  #if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args,12,range,1)) return;
  #endif

  if (OPS_diags > 1) {
    ops_timing_realloc(1,"preproc_kernel");
    OPS_kernels[1].count++;
    ops_timers_core(&c1,&t1);
  }

  //compute locally allocated range for the sub-block
  int start[3];
  int end[3];
  #if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  #endif //OPS_MPI

  int arg_idx[3];
  #ifdef OPS_MPI
  if (compute_ranges(args, 12,block, range, start, end, arg_idx) < 0) return;
  #else //OPS_MPI
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
    arg_idx[n] = start[n];
  }
  #endif

  // Current x/y extents of each dataset.
  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0];
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0];
  int ydim3 = args[3].dat->size[1];
  int xdim4 = args[4].dat->size[0];
  int ydim4 = args[4].dat->size[1];
  int xdim5 = args[5].dat->size[0];
  int ydim5 = args[5].dat->size[1];
  int xdim6 = args[6].dat->size[0];
  int ydim6 = args[6].dat->size[1];
  int xdim7 = args[7].dat->size[0];
  int ydim7 = args[7].dat->size[1];
  int xdim8 = args[8].dat->size[0];
  int ydim8 = args[8].dat->size[1];
  int xdim9 = args[9].dat->size[0];
  int ydim9 = args[9].dat->size[1];
  int xdim10 = args[10].dat->size[0];
  int ydim10 = args[10].dat->size[1];

  // Re-upload the dimension table to constant memory only when some
  // dataset's extents changed since the last launch.
  if (xdim0 != dims_preproc_kernel_h[0][0] || ydim0 != dims_preproc_kernel_h[0][1] || xdim1 != dims_preproc_kernel_h[1][0] || ydim1 != dims_preproc_kernel_h[1][1] || xdim2 != dims_preproc_kernel_h[2][0] || ydim2 != dims_preproc_kernel_h[2][1] || xdim3 != dims_preproc_kernel_h[3][0] || ydim3 != dims_preproc_kernel_h[3][1] || xdim4 != dims_preproc_kernel_h[4][0] || ydim4 != dims_preproc_kernel_h[4][1] || xdim5 != dims_preproc_kernel_h[5][0] || ydim5 != dims_preproc_kernel_h[5][1] || xdim6 != dims_preproc_kernel_h[6][0] || ydim6 != dims_preproc_kernel_h[6][1] || xdim7 != dims_preproc_kernel_h[7][0] || ydim7 != dims_preproc_kernel_h[7][1] || xdim8 != dims_preproc_kernel_h[8][0] || ydim8 != dims_preproc_kernel_h[8][1] || xdim9 != dims_preproc_kernel_h[9][0] || ydim9 != dims_preproc_kernel_h[9][1] || xdim10 != dims_preproc_kernel_h[10][0] || ydim10 != dims_preproc_kernel_h[10][1]) {
    dims_preproc_kernel_h[0][0] = xdim0;
    dims_preproc_kernel_h[0][1] = ydim0;
    dims_preproc_kernel_h[1][0] = xdim1;
    dims_preproc_kernel_h[1][1] = ydim1;
    dims_preproc_kernel_h[2][0] = xdim2;
    dims_preproc_kernel_h[2][1] = ydim2;
    dims_preproc_kernel_h[3][0] = xdim3;
    dims_preproc_kernel_h[3][1] = ydim3;
    dims_preproc_kernel_h[4][0] = xdim4;
    dims_preproc_kernel_h[4][1] = ydim4;
    dims_preproc_kernel_h[5][0] = xdim5;
    dims_preproc_kernel_h[5][1] = ydim5;
    dims_preproc_kernel_h[6][0] = xdim6;
    dims_preproc_kernel_h[6][1] = ydim6;
    dims_preproc_kernel_h[7][0] = xdim7;
    dims_preproc_kernel_h[7][1] = ydim7;
    dims_preproc_kernel_h[8][0] = xdim8;
    dims_preproc_kernel_h[8][1] = ydim8;
    dims_preproc_kernel_h[9][0] = xdim9;
    dims_preproc_kernel_h[9][1] = ydim9;
    dims_preproc_kernel_h[10][0] = xdim10;
    dims_preproc_kernel_h[10][1] = ydim10;
    cutilSafeCall(hipMemcpyToSymbol( dims_preproc_kernel, dims_preproc_kernel_h, sizeof(dims_preproc_kernel)));
  }


  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);

  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);

  // Per-element byte size (type size for SoA layout, full element otherwise).
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
  int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
  int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
  int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
  int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
  int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);

  char *p_a[12];

  //set up initial pointers: byte offset of the range origin (start[0..2])
  //within each dataset, accumulated one dimension at a time.
  int base0 = args[0].dat->base_offset +
           dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    (start[1] * args[0].stencil->stride[1]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    args[0].dat->size[1] *
    (start[2] * args[0].stencil->stride[2]);
  p_a[0] = (char *)args[0].data_d + base0;

  int base1 = args[1].dat->base_offset +
           dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    (start[1] * args[1].stencil->stride[1]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    args[1].dat->size[1] *
    (start[2] * args[1].stencil->stride[2]);
  p_a[1] = (char *)args[1].data_d + base1;

  int base2 = args[2].dat->base_offset +
           dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2+ dat2 *
    args[2].dat->size[0] *
    (start[1] * args[2].stencil->stride[1]);
  base2 = base2+ dat2 *
    args[2].dat->size[0] *
    args[2].dat->size[1] *
    (start[2] * args[2].stencil->stride[2]);
  p_a[2] = (char *)args[2].data_d + base2;

  int base3 = args[3].dat->base_offset +
           dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3+ dat3 *
    args[3].dat->size[0] *
    (start[1] * args[3].stencil->stride[1]);
  base3 = base3+ dat3 *
    args[3].dat->size[0] *
    args[3].dat->size[1] *
    (start[2] * args[3].stencil->stride[2]);
  p_a[3] = (char *)args[3].data_d + base3;

  int base4 = args[4].dat->base_offset +
           dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4+ dat4 *
    args[4].dat->size[0] *
    (start[1] * args[4].stencil->stride[1]);
  base4 = base4+ dat4 *
    args[4].dat->size[0] *
    args[4].dat->size[1] *
    (start[2] * args[4].stencil->stride[2]);
  p_a[4] = (char *)args[4].data_d + base4;

  int base5 = args[5].dat->base_offset +
           dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
  base5 = base5+ dat5 *
    args[5].dat->size[0] *
    (start[1] * args[5].stencil->stride[1]);
  base5 = base5+ dat5 *
    args[5].dat->size[0] *
    args[5].dat->size[1] *
    (start[2] * args[5].stencil->stride[2]);
  p_a[5] = (char *)args[5].data_d + base5;

  int base6 = args[6].dat->base_offset +
           dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
  base6 = base6+ dat6 *
    args[6].dat->size[0] *
    (start[1] * args[6].stencil->stride[1]);
  base6 = base6+ dat6 *
    args[6].dat->size[0] *
    args[6].dat->size[1] *
    (start[2] * args[6].stencil->stride[2]);
  p_a[6] = (char *)args[6].data_d + base6;

  int base7 = args[7].dat->base_offset +
           dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
  base7 = base7+ dat7 *
    args[7].dat->size[0] *
    (start[1] * args[7].stencil->stride[1]);
  base7 = base7+ dat7 *
    args[7].dat->size[0] *
    args[7].dat->size[1] *
    (start[2] * args[7].stencil->stride[2]);
  p_a[7] = (char *)args[7].data_d + base7;

  int base8 = args[8].dat->base_offset +
           dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
  base8 = base8+ dat8 *
    args[8].dat->size[0] *
    (start[1] * args[8].stencil->stride[1]);
  base8 = base8+ dat8 *
    args[8].dat->size[0] *
    args[8].dat->size[1] *
    (start[2] * args[8].stencil->stride[2]);
  p_a[8] = (char *)args[8].data_d + base8;

  int base9 = args[9].dat->base_offset +
           dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
  base9 = base9+ dat9 *
    args[9].dat->size[0] *
    (start[1] * args[9].stencil->stride[1]);
  base9 = base9+ dat9 *
    args[9].dat->size[0] *
    args[9].dat->size[1] *
    (start[2] * args[9].stencil->stride[2]);
  p_a[9] = (char *)args[9].data_d + base9;

  int base10 = args[10].dat->base_offset +
           dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
  base10 = base10+ dat10 *
    args[10].dat->size[0] *
    (start[1] * args[10].stencil->stride[1]);
  base10 = base10+ dat10 *
    args[10].dat->size[0] *
    args[10].dat->size[1] *
    (start[2] * args[10].stencil->stride[2]);
  p_a[10] = (char *)args[10].data_d + base10;


  #ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 12);
  ops_halo_exchanges(args,12,range);
  #endif

  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[1].mpi_time += t2-t1;
  }


  //call kernel wrapper function, passing in pointers to data
  if (x_size > 0 && y_size > 0 && z_size > 0)
    hipLaunchKernelGGL(( ops_preproc_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
         (double *)p_a[2], (double *)p_a[3],
         (double *)p_a[4], (double *)p_a[5],
         (double *)p_a[6], (double *)p_a[7],
         (double *)p_a[8], (double *)p_a[9],
         (double *)p_a[10], arg_idx[0], arg_idx[1], arg_idx[2],x_size, y_size, z_size);

  cutilSafeCall(hipGetLastError());

  if (OPS_diags>1) {
    // Synchronize only for diagnostics so kernel time can be attributed.
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[1].time += t1-t2;
  }

  #ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 12);
  ops_set_halo_dirtybit3(&args[1],range);
  ops_set_halo_dirtybit3(&args[2],range);
  ops_set_halo_dirtybit3(&args[3],range);
  ops_set_halo_dirtybit3(&args[4],range);
  ops_set_halo_dirtybit3(&args[5],range);
  ops_set_halo_dirtybit3(&args[6],range);
  ops_set_halo_dirtybit3(&args[7],range);
  ops_set_halo_dirtybit3(&args[8],range);
  ops_set_halo_dirtybit3(&args[9],range);
  ops_set_halo_dirtybit3(&args[10],range);
  #endif

  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[1].mpi_time += t2-t1;
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg6);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg7);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg8);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg9);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg10);
  }
}
#ifdef OPS_LAZY
// Lazy-evaluation front end: packages the loop arguments into a descriptor
// and enqueues it instead of executing immediately.
void ops_par_loop_preproc_kernel(char const *name, ops_block block, int dim, int* range,
  ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 1;
  // djb2-style hash over kernel index, iteration range and dataset indices,
  // identifying identical loop instances.
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 1;
  for ( int i=0; i<6; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 12;
  desc->args = (ops_arg*)malloc(12*sizeof(ops_arg));
  ops_arg all_args[12] = {arg0, arg1, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, arg9, arg10, arg11};
  for ( int i=0; i<12; i++ ){
    desc->args[i] = all_args[i];
    // The last argument is the global-index arg and carries no dataset.
    if (i < 11)
      desc->hash = ((desc->hash << 5) + desc->hash) + all_args[i].dat->index;
  }
  desc->function = ops_par_loop_preproc_kernel_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(1,"preproc_kernel");
  }
  ops_enqueue_kernel(desc);
}
#endif
| 961bb848d2b985a8d779881c9b916f76d589b8fe.cu | //
// auto-generated by ops.py
//
__constant__ int dims_preproc_kernel [12][2];
static int dims_preproc_kernel_h [12][2] = {0};
//user function
__device__
void preproc_kernel_gpu(const ACC<double> &u,
ACC<double> &du,
ACC<double> &ax,
ACC<double> &bx,
ACC<double> &cx,
ACC<double> &ay,
ACC<double> &by,
ACC<double> &cy,
ACC<double> &az,
ACC<double> &bz,
ACC<double> &cz,
int *idx){
double a, b, c, d;
if(idx[0]==0 || idx[0]==nx-1 || idx[1]==0 || idx[1]==ny-1 || idx[2]==0 || idx[2]==nz-1) {
d = 0.0f;
a = 0.0f;
b = 1.0f;
c = 0.0f;
} else {
d = lambda*( u(-1,0,0) + u(1,0,0)
+ u(0,-1,0) + u(0,1,0)
+ u(0,0,-1) + u(0,0,1)
- 6.0f*u(0,0,0));
a = -0.5f * lambda;
b = 1.0f + lambda;
c = -0.5f * lambda;
}
du(0,0,0) = d;
ax(0,0,0) = a;
bx(0,0,0) = b;
cx(0,0,0) = c;
ay(0,0,0) = a;
by(0,0,0) = b;
cy(0,0,0) = c;
az(0,0,0) = a;
bz(0,0,0) = b;
cz(0,0,0) = c;
}
__global__ void ops_preproc_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
double* __restrict arg8,
double* __restrict arg9,
double* __restrict arg10,
int arg_idx0, int arg_idx1, int arg_idx2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int arg_idx[3];
arg_idx[0] = arg_idx0+idx_x;
arg_idx[1] = arg_idx1+idx_y;
arg_idx[2] = arg_idx2+idx_z;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[0][0] + idx_z * 1*1 * dims_preproc_kernel[0][0] * dims_preproc_kernel[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[1][0] + idx_z * 1*1 * dims_preproc_kernel[1][0] * dims_preproc_kernel[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[2][0] + idx_z * 1*1 * dims_preproc_kernel[2][0] * dims_preproc_kernel[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[3][0] + idx_z * 1*1 * dims_preproc_kernel[3][0] * dims_preproc_kernel[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[4][0] + idx_z * 1*1 * dims_preproc_kernel[4][0] * dims_preproc_kernel[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[5][0] + idx_z * 1*1 * dims_preproc_kernel[5][0] * dims_preproc_kernel[5][1];
arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[6][0] + idx_z * 1*1 * dims_preproc_kernel[6][0] * dims_preproc_kernel[6][1];
arg7 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[7][0] + idx_z * 1*1 * dims_preproc_kernel[7][0] * dims_preproc_kernel[7][1];
arg8 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[8][0] + idx_z * 1*1 * dims_preproc_kernel[8][0] * dims_preproc_kernel[8][1];
arg9 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[9][0] + idx_z * 1*1 * dims_preproc_kernel[9][0] * dims_preproc_kernel[9][1];
arg10 += idx_x * 1*1 + idx_y * 1*1 * dims_preproc_kernel[10][0] + idx_z * 1*1 * dims_preproc_kernel[10][0] * dims_preproc_kernel[10][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
const ACC<double> argp0(dims_preproc_kernel[0][0], dims_preproc_kernel[0][1], arg0);
ACC<double> argp1(dims_preproc_kernel[1][0], dims_preproc_kernel[1][1], arg1);
ACC<double> argp2(dims_preproc_kernel[2][0], dims_preproc_kernel[2][1], arg2);
ACC<double> argp3(dims_preproc_kernel[3][0], dims_preproc_kernel[3][1], arg3);
ACC<double> argp4(dims_preproc_kernel[4][0], dims_preproc_kernel[4][1], arg4);
ACC<double> argp5(dims_preproc_kernel[5][0], dims_preproc_kernel[5][1], arg5);
ACC<double> argp6(dims_preproc_kernel[6][0], dims_preproc_kernel[6][1], arg6);
ACC<double> argp7(dims_preproc_kernel[7][0], dims_preproc_kernel[7][1], arg7);
ACC<double> argp8(dims_preproc_kernel[8][0], dims_preproc_kernel[8][1], arg8);
ACC<double> argp9(dims_preproc_kernel[9][0], dims_preproc_kernel[9][1], arg9);
ACC<double> argp10(dims_preproc_kernel[10][0], dims_preproc_kernel[10][1], arg10);
preproc_kernel_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6, argp7, argp8,
argp9, argp10, arg_idx);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_preproc_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
ops_arg arg9, ops_arg arg10, ops_arg arg11) {
#else
void ops_par_loop_preproc_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
ops_arg arg11 = desc->args[11];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[12] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,12,range,1)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1,"preproc_kernel");
OPS_kernels[1].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
int arg_idx[3];
#ifdef OPS_MPI
if (compute_ranges(args, 12,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
arg_idx[n] = start[n];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
int xdim7 = args[7].dat->size[0];
int ydim7 = args[7].dat->size[1];
int xdim8 = args[8].dat->size[0];
int ydim8 = args[8].dat->size[1];
int xdim9 = args[9].dat->size[0];
int ydim9 = args[9].dat->size[1];
int xdim10 = args[10].dat->size[0];
int ydim10 = args[10].dat->size[1];
if (xdim0 != dims_preproc_kernel_h[0][0] || ydim0 != dims_preproc_kernel_h[0][1] || xdim1 != dims_preproc_kernel_h[1][0] || ydim1 != dims_preproc_kernel_h[1][1] || xdim2 != dims_preproc_kernel_h[2][0] || ydim2 != dims_preproc_kernel_h[2][1] || xdim3 != dims_preproc_kernel_h[3][0] || ydim3 != dims_preproc_kernel_h[3][1] || xdim4 != dims_preproc_kernel_h[4][0] || ydim4 != dims_preproc_kernel_h[4][1] || xdim5 != dims_preproc_kernel_h[5][0] || ydim5 != dims_preproc_kernel_h[5][1] || xdim6 != dims_preproc_kernel_h[6][0] || ydim6 != dims_preproc_kernel_h[6][1] || xdim7 != dims_preproc_kernel_h[7][0] || ydim7 != dims_preproc_kernel_h[7][1] || xdim8 != dims_preproc_kernel_h[8][0] || ydim8 != dims_preproc_kernel_h[8][1] || xdim9 != dims_preproc_kernel_h[9][0] || ydim9 != dims_preproc_kernel_h[9][1] || xdim10 != dims_preproc_kernel_h[10][0] || ydim10 != dims_preproc_kernel_h[10][1]) {
dims_preproc_kernel_h[0][0] = xdim0;
dims_preproc_kernel_h[0][1] = ydim0;
dims_preproc_kernel_h[1][0] = xdim1;
dims_preproc_kernel_h[1][1] = ydim1;
dims_preproc_kernel_h[2][0] = xdim2;
dims_preproc_kernel_h[2][1] = ydim2;
dims_preproc_kernel_h[3][0] = xdim3;
dims_preproc_kernel_h[3][1] = ydim3;
dims_preproc_kernel_h[4][0] = xdim4;
dims_preproc_kernel_h[4][1] = ydim4;
dims_preproc_kernel_h[5][0] = xdim5;
dims_preproc_kernel_h[5][1] = ydim5;
dims_preproc_kernel_h[6][0] = xdim6;
dims_preproc_kernel_h[6][1] = ydim6;
dims_preproc_kernel_h[7][0] = xdim7;
dims_preproc_kernel_h[7][1] = ydim7;
dims_preproc_kernel_h[8][0] = xdim8;
dims_preproc_kernel_h[8][1] = ydim8;
dims_preproc_kernel_h[9][0] = xdim9;
dims_preproc_kernel_h[9][1] = ydim9;
dims_preproc_kernel_h[10][0] = xdim10;
dims_preproc_kernel_h[10][1] = ydim10;
cutilSafeCall(cudaMemcpyToSymbol( dims_preproc_kernel, dims_preproc_kernel_h, sizeof(dims_preproc_kernel)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size);
int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size);
int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size);
int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size);
char *p_a[12];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
int base7 = args[7].dat->base_offset +
dat7 * 1 * (start[0] * args[7].stencil->stride[0]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
(start[1] * args[7].stencil->stride[1]);
base7 = base7+ dat7 *
args[7].dat->size[0] *
args[7].dat->size[1] *
(start[2] * args[7].stencil->stride[2]);
p_a[7] = (char *)args[7].data_d + base7;
int base8 = args[8].dat->base_offset +
dat8 * 1 * (start[0] * args[8].stencil->stride[0]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
(start[1] * args[8].stencil->stride[1]);
base8 = base8+ dat8 *
args[8].dat->size[0] *
args[8].dat->size[1] *
(start[2] * args[8].stencil->stride[2]);
p_a[8] = (char *)args[8].data_d + base8;
int base9 = args[9].dat->base_offset +
dat9 * 1 * (start[0] * args[9].stencil->stride[0]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
(start[1] * args[9].stencil->stride[1]);
base9 = base9+ dat9 *
args[9].dat->size[0] *
args[9].dat->size[1] *
(start[2] * args[9].stencil->stride[2]);
p_a[9] = (char *)args[9].data_d + base9;
int base10 = args[10].dat->base_offset +
dat10 * 1 * (start[0] * args[10].stencil->stride[0]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
(start[1] * args[10].stencil->stride[1]);
base10 = base10+ dat10 *
args[10].dat->size[0] *
args[10].dat->size[1] *
(start[2] * args[10].stencil->stride[2]);
p_a[10] = (char *)args[10].data_d + base10;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 12);
ops_halo_exchanges(args,12,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_preproc_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7],
(double *)p_a[8], (double *)p_a[9],
(double *)p_a[10], arg_idx[0], arg_idx[1], arg_idx[2],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[1].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 12);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
ops_set_halo_dirtybit3(&args[7],range);
ops_set_halo_dirtybit3(&args[8],range);
ops_set_halo_dirtybit3(&args[9],range);
ops_set_halo_dirtybit3(&args[10],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg7);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg8);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg9);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg10);
}
}
#ifdef OPS_LAZY
void ops_par_loop_preproc_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 1;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 1;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 12;
desc->args = (ops_arg*)malloc(12*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index;
desc->args[8] = arg8;
desc->hash = ((desc->hash << 5) + desc->hash) + arg8.dat->index;
desc->args[9] = arg9;
desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index;
desc->args[10] = arg10;
desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index;
desc->args[11] = arg11;
desc->function = ops_par_loop_preproc_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(1,"preproc_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
fe8cbcb0818bda46e8903c7920123177c44f7f12.hip | // !!! This is a file automatically generated by hipify!!!
#include <hipcub/hipcub.hpp>
#include <hip/hip_runtime.h>
#include "Utilities.cuh"
#include <iostream>
#define WARPSIZE 32
#define BLOCKSIZE 256
const int N = 1024;
/*************************/
/* WARP REDUCTION KERNEL */
/*************************/
__global__ void sum(const float * __restrict__ indata, float * __restrict__ outdata) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int warp_id = threadIdx.x / WARPSIZE;
// --- Specialize WarpReduce for type float.
typedef hipcub::WarpReduce<float, WARPSIZE> WarpReduce;
// --- Allocate WarpReduce shared memory for (N / WARPSIZE) warps
__shared__ typename WarpReduce::TempStorage temp_storage[BLOCKSIZE / WARPSIZE];
float result;
if(tid < N) result = WarpReduce(temp_storage[warp_id]).Sum(indata[tid]);
if(tid % WARPSIZE == 0) outdata[tid / WARPSIZE] = result;
}
/********/
/* MAIN */
/********/
int main() {
// --- Allocate host side space for
float *h_data = (float *)malloc(N * sizeof(float));
float *h_result = (float *)malloc((N / WARPSIZE) * sizeof(float));
float *d_data; gpuErrchk(hipMalloc(&d_data, N * sizeof(float)));
float *d_result; gpuErrchk(hipMalloc(&d_result, (N / WARPSIZE) * sizeof(float)));
for (int i = 0; i < N; i++) h_data[i] = (float)i;
gpuErrchk(hipMemcpy(d_data, h_data, N * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( sum), dim3(iDivUp(N, BLOCKSIZE)), dim3(BLOCKSIZE), 0, 0, d_data, d_result);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipMemcpy(h_result, d_result, (N / WARPSIZE) * sizeof(float), hipMemcpyDeviceToHost));
std::cout << "output: ";
for(int i = 0; i < (N / WARPSIZE); i++) std::cout << h_result[i] << " ";
std::cout << std::endl;
gpuErrchk(hipFree(d_data));
gpuErrchk(hipFree(d_result));
return 0;
}
| fe8cbcb0818bda46e8903c7920123177c44f7f12.cu | #include <cub/cub.cuh>
#include <cuda.h>
#include "Utilities.cuh"
#include <iostream>
#define WARPSIZE 32
#define BLOCKSIZE 256
const int N = 1024;
/*************************/
/* WARP REDUCTION KERNEL */
/*************************/
__global__ void sum(const float * __restrict__ indata, float * __restrict__ outdata) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int warp_id = threadIdx.x / WARPSIZE;
// --- Specialize WarpReduce for type float.
typedef cub::WarpReduce<float, WARPSIZE> WarpReduce;
// --- Allocate WarpReduce shared memory for (N / WARPSIZE) warps
__shared__ typename WarpReduce::TempStorage temp_storage[BLOCKSIZE / WARPSIZE];
float result;
if(tid < N) result = WarpReduce(temp_storage[warp_id]).Sum(indata[tid]);
if(tid % WARPSIZE == 0) outdata[tid / WARPSIZE] = result;
}
/********/
/* MAIN */
/********/
int main() {
// --- Allocate host side space for
float *h_data = (float *)malloc(N * sizeof(float));
float *h_result = (float *)malloc((N / WARPSIZE) * sizeof(float));
float *d_data; gpuErrchk(cudaMalloc(&d_data, N * sizeof(float)));
float *d_result; gpuErrchk(cudaMalloc(&d_result, (N / WARPSIZE) * sizeof(float)));
for (int i = 0; i < N; i++) h_data[i] = (float)i;
gpuErrchk(cudaMemcpy(d_data, h_data, N * sizeof(float), cudaMemcpyHostToDevice));
sum<<<iDivUp(N, BLOCKSIZE), BLOCKSIZE>>>(d_data, d_result);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(h_result, d_result, (N / WARPSIZE) * sizeof(float), cudaMemcpyDeviceToHost));
std::cout << "output: ";
for(int i = 0; i < (N / WARPSIZE); i++) std::cout << h_result[i] << " ";
std::cout << std::endl;
gpuErrchk(cudaFree(d_data));
gpuErrchk(cudaFree(d_result));
return 0;
}
|
62231e6e98d08416acb6b91090b63a5fa47ea137.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
hipLaunchKernelGGL(( add), dim3(1), dim3(256), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
/*
C:\tools
nvprof add_cuda1.exe
==29600== NVPROF is profiling process 29600, command: add_cuda1.exe
Max error: 0
==29600== Profiling application: add_cuda1.exe
==29600== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 100.00% 2.1688ms 1 2.1688ms 2.1688ms 2.1688ms add(int, float*, float*)
API calls: 75.32% 217.13ms 2 108.57ms 968.50us 216.17ms hipMallocManaged
18.43% 53.141ms 1 53.141ms 53.141ms 53.141ms hipDevicePrimaryCtxRelease
4.48% 12.926ms 1 12.926ms 12.926ms 12.926ms cudaLaunchKernel
0.80% 2.3111ms 2 1.1555ms 710.70us 1.6004ms hipFree
0.79% 2.2862ms 1 2.2862ms 2.2862ms 2.2862ms hipDeviceSynchronize
0.10% 275.00us 97 2.8350us 100ns 120.80us hipDeviceGetAttribute
0.05% 140.50us 1 140.50us 140.50us 140.50us hipModuleUnload
0.01% 32.100us 1 32.100us 32.100us 32.100us cuDeviceTotalMem
0.01% 14.800us 1 14.800us 14.800us 14.800us hipDeviceGetPCIBusId
0.00% 3.5000us 2 1.7500us 500ns 3.0000us hipDeviceGet
0.00% 1.5000us 3 500ns 200ns 700ns hipGetDeviceCount
0.00% 800ns 1 800ns 800ns 800ns hipDeviceGetName
0.00% 300ns 1 300ns 300ns 300ns hipDeviceGetUuid
0.00% 300ns 1 300ns 300ns 300ns cuDeviceGetLuid
==29600== Unified Memory profiling result:
Device "Quadro T1000 (0)"
Count Avg Size Min Size Max Size Total Size Total Time Name
258 31.751KB 4.0000KB 48.000KB 8.000000MB 9.051700ms Host To Device
385 31.916KB 16.000KB 32.000KB 12.00000MB 77.45940ms Device To Host
*/ | 62231e6e98d08416acb6b91090b63a5fa47ea137.cu | #include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
add<<<1, 256>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
/*
C:\tools
λ nvprof add_cuda1.exe
==29600== NVPROF is profiling process 29600, command: add_cuda1.exe
Max error: 0
==29600== Profiling application: add_cuda1.exe
==29600== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 100.00% 2.1688ms 1 2.1688ms 2.1688ms 2.1688ms add(int, float*, float*)
API calls: 75.32% 217.13ms 2 108.57ms 968.50us 216.17ms cudaMallocManaged
18.43% 53.141ms 1 53.141ms 53.141ms 53.141ms cuDevicePrimaryCtxRelease
4.48% 12.926ms 1 12.926ms 12.926ms 12.926ms cudaLaunchKernel
0.80% 2.3111ms 2 1.1555ms 710.70us 1.6004ms cudaFree
0.79% 2.2862ms 1 2.2862ms 2.2862ms 2.2862ms cudaDeviceSynchronize
0.10% 275.00us 97 2.8350us 100ns 120.80us cuDeviceGetAttribute
0.05% 140.50us 1 140.50us 140.50us 140.50us cuModuleUnload
0.01% 32.100us 1 32.100us 32.100us 32.100us cuDeviceTotalMem
0.01% 14.800us 1 14.800us 14.800us 14.800us cuDeviceGetPCIBusId
0.00% 3.5000us 2 1.7500us 500ns 3.0000us cuDeviceGet
0.00% 1.5000us 3 500ns 200ns 700ns cuDeviceGetCount
0.00% 800ns 1 800ns 800ns 800ns cuDeviceGetName
0.00% 300ns 1 300ns 300ns 300ns cuDeviceGetUuid
0.00% 300ns 1 300ns 300ns 300ns cuDeviceGetLuid
==29600== Unified Memory profiling result:
Device "Quadro T1000 (0)"
Count Avg Size Min Size Max Size Total Size Total Time Name
258 31.751KB 4.0000KB 48.000KB 8.000000MB 9.051700ms Host To Device
385 31.916KB 16.000KB 32.000KB 12.00000MB 77.45940ms Device To Host
*/ |
c80764b4a1fc806af96528bf000d4c4fcb145c1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/communicator/multi_process_data_parallel_communicator.hpp>
#include <algorithm>
#include <cstdlib>
#include <memory>
#include "mpi.h"
#include <stdint.h>
#include <unistd.h>
namespace nbla {
using std::make_shared;
template <typename T>
__global__ void kernel_divide_inplace(const int size, const int n_devices,
T *dw) {
NBLA_CUDA_KERNEL_LOOP(i, size) { dw[i] /= n_devices; }
}
/*
* Referred from
* http://docs.nvidia.com/deeplearning/sdk/nccl-developer-guide/index.html#onedevprothrd
*/
static uint64_t get_host_hash(const char *string) {
// Based on DJB2, result = result * 33 + char
uint64_t result = 5381;
for (int c = 0; string[c] != '\0'; c++) {
result = ((result << 5) + result) + string[c];
}
return result;
}
static void get_host_name(char *hostname, int maxlen) {
gethostname(hostname, maxlen);
for (int i = 0; i < maxlen; i++) {
if (hostname[i] == '.') {
hostname[i] = '\0';
return;
}
}
}
template <typename T>
MultiProcessDataParallelCommunicatorNccl<
T>::MultiProcessDataParallelCommunicatorNccl(const Context &ctx)
: MultiProcessDataParallelCommunicator<T>(ctx) {
mpi_initialized_ = false;
}
template <typename T>
MultiProcessDataParallelCommunicatorNccl<
T>::~MultiProcessDataParallelCommunicatorNccl() {
if (this->initialized_) {
for (int i = 0; i < streams_.size(); ++i) {
NBLA_CUDA_CHECK(hipStreamDestroy(streams_[i]));
}
for (auto e : this->comms_) {
ncclCommDestroy(e.second);
}
}
if (mpi_initialized_) {
MPI_Finalize();
}
}
template <typename T>
bool MultiProcessDataParallelCommunicatorNccl<T>::mpi_initialized_;
template <typename T> void MultiProcessDataParallelCommunicatorNccl<T>::init() {
Communicator::init();
try {
// MPI init
if (!mpi_initialized_) {
int argc = 0;
char **argv = NULL;
int requiredThreadLevelSupport = MPI_THREAD_SERIALIZED;
int provided;
MPI_Init_thread(&argc, &argv, requiredThreadLevelSupport, &provided);
if (provided != requiredThreadLevelSupport) {
NBLA_ERROR(error_code::target_specific,
"MPI_Init_thread failed since provided (%d) is not equal to "
"requiredThreadLevelSupport (%d)",
provided, requiredThreadLevelSupport);
}
mpi_initialized_ = true;
}
// Create comm, set size, and rank
MPI_Comm_size(MPI_COMM_WORLD, &this->size_);
MPI_Comm_rank(MPI_COMM_WORLD, &this->rank_);
// Set local rank and device id
uint64_t host_hashs[this->size_];
char hostname[1024];
get_host_name(hostname, 1024);
host_hashs[this->rank_] = get_host_hash(hostname);
MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, host_hashs,
sizeof(uint64_t), MPI_BYTE, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
int local_rank = 0;
for (int i = 0; i < this->size_; ++i) {
if (i == this->rank_) {
break;
}
if (host_hashs[i] == host_hashs[this->rank_]) {
local_rank++;
}
}
this->device_id_ = local_rank;
this->local_rank_ = local_rank;
this->ctx_.device_id = std::to_string(local_rank);
// Exchange comm_id among processes
ncclUniqueId comm_id;
if (this->rank_ == 0) {
ncclGetUniqueId(&comm_id);
}
MPI_Bcast(&comm_id, sizeof(comm_id), MPI_BYTE, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
// NCCL Init
cuda_set_device(device_id_);
ncclComm_t comm;
ncclResult_t ret =
ncclCommInitRank(&comm, this->size_, comm_id, this->rank_);
if (ret != ncclSuccess) {
NBLA_ERROR(error_code::target_specific, "ncclCommInitRank failed.");
}
// Create streams
for (int i = 0; i < streams_.size(); ++i) {
hipStream_t stream;
NBLA_CUDA_CHECK(hipStreamCreate(&stream));
streams_[i] = stream;
}
// Create world group
this->comms_["world"] = comm;
vector<int> ranks(this->size_);
std::iota(ranks.begin(), ranks.end(), 0);
this->groups_["world"] = ranks;
this->initialized_ = true;
} catch (...) {
NBLA_ERROR(error_code::unclassified, "Communicator init failed.");
}
}
// Creates a named sub-communicator over the given world ranks.
// Must be called collectively by ALL processes (MPI_Comm_create is
// collective on MPI_COMM_WORLD). Processes outside `ranks` register the
// group name and return without creating an NCCL communicator.
// Returns the group name.
template <typename T>
string MultiProcessDataParallelCommunicatorNccl<T>::new_group(
    pair<string, vector<int>> name_ranks_pair) {
  string group_name = name_ranks_pair.first;
  vector<int> ranks = name_ranks_pair.second;
  // Checks
  if (this->groups_.find(group_name) !=
      this->groups_.end()) { // group name already exists.
    NBLA_ERROR(error_code::value, "group_name = %s already exists",
               group_name.c_str());
  }
  int max = *std::max_element(ranks.begin(), ranks.end());
  // Bug fix: the message previously read "size ()" and dropped the %d for
  // the this->size_ argument passed below.
  NBLA_CHECK(max < this->size_, error_code::value,
             "Max value of the specified ranks should be less than the size "
             "(%d) of the communicator.",
             this->size_);
  int min = *std::min_element(ranks.begin(), ranks.end());
  NBLA_CHECK(min >= 0, error_code::value,
             "Min value of the specified ranks is greater than or equal to 0.");
  // Create new group
  MPI_Group world_group;
  MPI_Comm_group(MPI_COMM_WORLD, &world_group);
  MPI_Group new_group;
  MPI_Group_incl(world_group, ranks.size(), ranks.data(), &new_group);
  // Create mpi communicator
  MPI_Comm mpi_comm;
  MPI_Comm_create(MPI_COMM_WORLD, new_group,
                  &mpi_comm); // have to call in all processes
  // The group handles are no longer needed once the communicator exists;
  // free them to avoid leaking MPI objects (the original leaked both).
  MPI_Group_free(&new_group);
  MPI_Group_free(&world_group);
  // Add group name in all ranks
  this->groups_[group_name] = ranks;
  // Leave if self is not in ranks (mpi_comm is MPI_COMM_NULL here).
  auto result = std::find(ranks.begin(), ranks.end(), this->rank_);
  if (result == ranks.end()) { // self is not found in ranks.
    return group_name;
  }
  // Create nccl unique id and bcast it. Rank 0 of mpi_comm is the process
  // at ranks[0] (MPI_Group_incl preserves the given order), i.e. the one
  // that generated comm_id.
  ncclUniqueId comm_id;
  if (this->rank_ == ranks[0]) {
    ncclGetUniqueId(&comm_id);
  }
  int rank;
  MPI_Comm_rank(mpi_comm, &rank);
  MPI_Bcast(&comm_id, sizeof(comm_id), MPI_BYTE, 0, mpi_comm);
  MPI_Barrier(mpi_comm);
  MPI_Comm_free(&mpi_comm);
  // NCCL Comm Init
  cuda_set_device(device_id_);
  ncclComm_t comm;
  ncclResult_t ret = ncclCommInitRank(&comm, ranks.size(), comm_id, rank);
  if (ret != ncclSuccess) {
    NBLA_ERROR(error_code::target_specific, "ncclCommInitRank failed with %d",
               ret);
  }
  this->comms_[group_name] = comm;
  return group_name;
}
// Packs every array in `ndarray_list` into one newly allocated flat device
// buffer via async device-to-device copies issued on round-robin streams.
// No synchronization is performed here; the copies are still in flight when
// this function returns.
template <typename T>
shared_ptr<NdArray>
MultiProcessDataParallelCommunicatorNccl<T>::copy_inside_device(
    const vector<NdArrayPtr> &ndarray_list) {
  // preparation
  Size_t total_params = 0;
  // The flat buffer holds the concatenation of all inputs, in list order.
  for (auto ndarray : ndarray_list) {
    auto n_param = ndarray->size();
    total_params += n_param;
  }
  dtypes dtype = get_dtype<T>();
  NdArrayPtr large_ndarray = make_shared<NdArray>(Shape_t{total_params});
  T *buff = large_ndarray->cast(dtype, this->ctx_)->pointer<T>();
  Size_t type_size = sizeof(T);
  int k = 0;
  // copy inside device
  for (auto ndarray : ndarray_list) {
    const T *dw = ndarray->cast(dtype, this->ctx_)->const_pointer<T>();
    auto n_param = ndarray->size();
    int stream_id = k % num_streams_;
    hipMemcpyAsync(buff, dw, type_size * n_param, hipMemcpyDeviceToDevice,
                   streams_[stream_id]);
    buff += n_param;
    k++;
  }
  return large_ndarray;
}
// Scatters the flat `large_ndarray` buffer back into the individual arrays
// of `ndarray_list` (inverse of copy_inside_device), using async
// device-to-device copies on round-robin streams. No synchronization here.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::copy_back_inside_device(
    const vector<NdArrayPtr> &ndarray_list, NdArrayPtr large_ndarray) {
  dtypes dtype = get_dtype<T>();
  T *buff = large_ndarray->cast(dtype, this->ctx_)->pointer<T>();
  Size_t type_size = sizeof(T);
  int k = 0;
  for (auto ndarray : ndarray_list) {
    T *dw = ndarray->cast(dtype, this->ctx_)->pointer<T>();
    auto n_param = ndarray->size();
    int stream_id = k % num_streams_;
    hipMemcpyAsync(dw, buff, type_size * n_param, hipMemcpyDeviceToDevice,
                   streams_[stream_id]);
    buff += n_param;
    k++;
  }
}
// Reduces every array in `ndarray_list` onto rank `dst` within `group`.
// In-place mode issues one reduce per array on round-robin streams;
// out-of-place mode packs all arrays into one large buffer first.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::reduce(
    const vector<NdArrayPtr> &ndarray_list, int dst, bool division,
    bool inplace, const string &group) {
  if (!this->find_self(group)) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  // TODO: currently nnabla uses default stream for computation.
  // The following logic relies on that, so if nnabla uses another stream for
  // computation,
  // we have to issue null kernel to the default stream at the beginning of this
  // method
  // and at the end of this method for using the implicit synchronization
  // technique for
  // main thread not to wait for a result of a kernel call.
  // TODO: the usage of multi streams is round-robin fashion, it may not be
  // optimal.
  if (inplace) { // in-place
    int k = 0;
    for (auto ndarray : ndarray_list) { // ndarray loop
      int stream_id = k % num_streams_;
      // Bug fix: forward `group`; the original call dropped it and thus
      // reduced on the default group even when a sub-group was requested.
      reduce(ndarray, streams_[stream_id], dst, division, inplace, group);
      k++;
    }
  } else { // out-of-place. use a large array.
    NdArrayPtr large_ndarray = copy_inside_device(ndarray_list);
    reduce(large_ndarray, nullptr, dst, division, inplace, group);
    copy_back_inside_device(ndarray_list, large_ndarray);
  }
  // no need to call null kernel since nnabla uses default stream currently.
}
// Single-array overload: validates group membership, then delegates to the
// stream-aware reduce using the default (null) stream.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::reduce(NdArrayPtr ndarray,
                                                         int dst, bool division,
                                                         bool inplace,
                                                         const string &group) {
  const bool self_in_group = this->find_self(group);
  if (!self_in_group) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  reduce(ndarray, nullptr, dst, division, inplace, group);
}
// Stream-aware single-array reduce (sum) to rank `dst` within `group`;
// when `division` is set the destination buffer is additionally divided by
// the communicator size to produce an average.
// NOTE(review): dw0/dw1 come from the same NdArray and typically alias,
// relying on NCCL's in-place support — confirm pointers cannot differ here.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::reduce(NdArrayPtr ndarray,
                                                         hipStream_t stream,
                                                         int dst, bool division,
                                                         bool inplace,
                                                         const string &group) {
  auto n_param = ndarray->size();
  dtypes dtype = get_dtype<T>();
  const T *dw0 = ndarray->get(dtype, this->ctx_)->const_pointer<T>();
  T *dw1 = ndarray->cast(dtype, this->ctx_)->pointer<T>();
  ncclResult_t ret = ncclReduce(dw0, dw1, n_param,
                                ncclFloat, // TODO: address ncclFloat
                                ncclSum, dst, comms_[group], stream);
  if (ret != ncclSuccess) {
    NBLA_ERROR(error_code::target_specific, "ncclReduce fails with %d.", ret);
  }
  if (division) {
    // Division kernel is launched on every rank, although only `dst`
    // receives the reduced result from ncclReduce.
    NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, stream, n_param,
                                      this->size_, dw1);
    // TODO: strange because of implicit synchronization without inplace and
    // with division does not occur.
    // copy(streams) -> all_reduce(default stream) ->
    // -> division(default stream) -> copy_back(streams) -> xxx(default stream)
    // Even if launching null kernel, no sync. Thus, call stream synchronize.
    if (!inplace) {
      hipStreamSynchronize(stream);
    }
  }
}
// Legacy all-reduce over the registered gradients of all parameters on the
// "world" group. In-place mode all-reduces each parameter's gradient buffer
// directly; out-of-place mode packs all gradients into one temporary device
// buffer, all-reduces it once on the default stream, then scatters back.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::allreduce(bool division,
                                                            bool inplace) {
  // TODO: Delete this function when appropriate
  // TODO: currently nnabla uses default stream for computation.
  // The following logic relies on that, so if nnabla uses another stream for
  // computation,
  // we have to issue null kernel to the default stream at the beginning of this
  // method
  // and at the end of this method for using the implicit synchronization
  // technique for
  // main thread not to wait for a result of a kernel call.
  // TODO: the usage of multi streams is round-robin fashion, it may not be
  // optimal.
  // Once sync to prevent the hang where the memcpy occurs during the allreduce.
  this->sync_all_params();
  if (inplace) { // in-place
    Context ctx = this->contexts_[0];
    auto func_named_param = this->device_func_named_param_[0];
    int k = 0;
    for (auto elm : func_named_param) { // function-loop
      VariablePtr vp = elm.second;
      auto n_param = vp->size();
      const T *dw0 = vp->get_grad_pointer<T>(ctx);
      T *dw1 = vp->cast_grad_and_get_pointer<T>(ctx);
      int stream_id = k % num_streams_;
      // AllReduce
      ncclResult_t ret =
          ncclAllReduce(dw0, dw1, n_param, ncclFloat, // TODO: address ncclFloat
                        ncclSum, comms_["world"], streams_[stream_id]);
      if (ret != ncclSuccess) {
        NBLA_ERROR(error_code::target_specific, "ncclAllReduce fails with %d.",
                   ret);
      }
      // Divide (turn the sum into an average), on the same stream.
      if (division) {
        NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace,
                                          streams_[stream_id], n_param,
                                          this->size_, dw1);
      }
      k++;
    }
  } else { // out-of-place. use a large array.
    Context ctx = this->contexts_[0];
    shared_ptr<CudaCachedArray> arr_buff = // TODO: address 16 bits also here?
        make_shared<CudaCachedArray>(this->total_params_, get_dtype<T>(), ctx);
    T *buff = arr_buff->pointer<T>();
    T *buff_start = buff;
    auto func_named_param = this->device_func_named_param_[0];
    Size_t type_size = sizeof(T);
    int k = 0;
    // 1. copy inside device (async, round-robin over streams)
    for (auto elm : func_named_param) {
      VariablePtr vp = elm.second;
      const T *dw = vp->get_grad_pointer<T>(ctx);
      auto n_param = vp->size();
      int stream_id = k % num_streams_;
      hipMemcpyAsync(buff, dw, type_size * n_param, hipMemcpyDeviceToDevice,
                     streams_[stream_id]);
      buff += n_param;
      k++;
    }
    // 2. all reduce (in-place on the packed buffer)
    ncclResult_t ret =
        ncclAllReduce(buff_start, buff_start, this->total_params_,
                      ncclFloat, // TODO: address ncclFloat
                      ncclSum, comms_["world"], 0); // use default stream
    if (ret != ncclSuccess) {
      NBLA_ERROR(error_code::target_specific, "ncclAllReduce fails with %d.",
                 ret);
    }
    // 3. divide
    if (division) {
      // use default stream
      NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, 0,
                                        this->total_params_, this->size_,
                                        buff_start);
    }
    // 4. copy back inside device
    buff = buff_start;
    k = 0;
    for (auto elm : func_named_param) {
      VariablePtr vp = elm.second;
      T *dw = vp->cast_grad_and_get_pointer<T>(ctx);
      auto n_param = vp->size();
      int stream_id = k % num_streams_;
      hipMemcpyAsync(dw, buff, type_size * n_param, hipMemcpyDeviceToDevice,
                     streams_[stream_id]);
      buff += n_param;
      k++;
    }
  }
  // no need to call null kernel since nnabla uses default stream currently.
}
// All-reduces (sum) every array in `ndarray_list` within `group`.
// In-place mode issues one all-reduce per array on round-robin streams;
// out-of-place mode packs all arrays into one large buffer first.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::all_reduce(
    const vector<NdArrayPtr> &ndarray_list, bool division, bool inplace,
    const string &group) {
  if (!this->find_self(group)) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  // TODO: currently nnabla uses default stream for computation.
  // The following logic relies on that, so if nnabla uses another stream for
  // computation,
  // we have to issue null kernel to the default stream at the beginning of this
  // method
  // and at the end of this method for using the implicit synchronization
  // technique for
  // main thread not to wait for a result of a kernel call.
  // TODO: the usage of multi streams is round-robin fashion, it may not be
  // optimal.
  if (inplace) { // in-place
    int k = 0;
    // (Removed an unused local `dtype`; the delegated overload fetches it.)
    for (auto ndarray : ndarray_list) { // ndarray loop
      int stream_id = k % num_streams_;
      all_reduce(ndarray, streams_[stream_id], division, inplace, group);
      k++;
    }
  } else { // out-of-place. use a large array.
    NdArrayPtr large_ndarray = copy_inside_device(ndarray_list);
    all_reduce(large_ndarray, nullptr, division, inplace, group);
    copy_back_inside_device(ndarray_list, large_ndarray);
  }
  // no need to call null kernel since nnabla uses default stream currently.
}
// Single-array overload: validates group membership, then delegates to the
// stream-aware all_reduce using the default (null) stream.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::all_reduce(
    NdArrayPtr ndarray, bool division, bool inplace, const string &group) {
  const bool self_in_group = this->find_self(group);
  if (!self_in_group) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  all_reduce(ndarray, nullptr, division, inplace, group);
}
// Stream-aware single-array all-reduce (sum) within `group`; when
// `division` is set the result is divided by the communicator size.
// NOTE(review): dw0/dw1 come from the same NdArray and typically alias,
// relying on NCCL's in-place support — confirm pointers cannot differ here.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::all_reduce(
    NdArrayPtr ndarray, hipStream_t stream, bool division, bool inplace,
    const string &group) {
  auto n_param = ndarray->size();
  dtypes dtype = get_dtype<T>();
  const T *dw0 = ndarray->get(dtype, this->ctx_)->const_pointer<T>();
  T *dw1 = ndarray->cast(dtype, this->ctx_)->pointer<T>();
  ncclResult_t ret = ncclAllReduce(dw0, dw1, n_param,
                                   ncclFloat, // TODO: address ncclFloat
                                   ncclSum, comms_[group], stream);
  if (ret != ncclSuccess) {
    NBLA_ERROR(error_code::target_specific, "ncclAllReduce fails with %d.",
               ret);
  }
  if (division) {
    NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, stream, n_param,
                                      this->size_, dw1);
    // TODO: strange because of implicit synchronization without inplace and
    // with division does not occur.
    // copy(streams) -> all_reduce(default stream) ->
    // -> division(default stream) -> copy_back(streams) -> xxx(default stream)
    // Even if launching null kernel, no sync. Thus, call stream synchronize.
    if (!inplace) {
      hipStreamSynchronize(stream);
    }
  }
}
// Packs `ndarray_list` into one flat device buffer, then reduce-scatters it
// within `group` so this rank receives its `ndarray->size()`-element chunk
// (summed across ranks) into `ndarray`.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::reduce_scatter(
    const vector<NdArrayPtr> &ndarray_list, NdArrayPtr ndarray, bool division,
    const string &group) {
  if (!this->find_self(group)) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  // TODO: currently nnabla uses default stream for computation.
  // The following logic relies on that, so if nnabla uses another stream for
  // computation,
  // we have to issue null kernel to the default stream at the beginning of this
  // method
  // and at the end of this method for using the implicit synchronization
  // technique for
  // main thread not to wait for a result of a kernel call.
  NdArrayPtr large_ndarray = copy_inside_device(ndarray_list);
  dtypes dtype = get_dtype<T>();
  const T *sendbuff = large_ndarray->get(dtype, this->ctx_)->const_pointer<T>();
  T *recvbuff = ndarray->cast(dtype, this->ctx_)->pointer<T>();
  Size_t recvcount = ndarray->size();
  ncclResult_t ret =
      ncclReduceScatter(sendbuff, recvbuff, recvcount,
                        ncclFloat, // TODO: address ncclFloat
                        ncclSum, comms_[group], 0); // use default stream
  if (ret != ncclSuccess) {
    // Bug fix: report the operation that actually failed (the original
    // message was mislabeled "ncclBcast").
    NBLA_ERROR(error_code::target_specific, "ncclReduceScatter fails with %d.",
               ret);
  }
  // divide (turn the sum into an average)
  if (division) {
    // use default stream
    NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, 0, recvcount,
                                      this->size_, recvbuff);
  }
  // no need to call null kernel since nnabla uses default stream currently.
}
// Broadcasts every array in `ndarray_list` from rank `src` within `group`.
// In-place mode issues one bcast per array on round-robin streams;
// out-of-place mode packs all arrays into one large buffer first.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::bcast(
    const vector<NdArrayPtr> &ndarray_list, int src, bool inplace,
    const string &group) {
  if (!this->find_self(group)) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  // TODO: currently nnabla uses default stream for computation.
  // The following logic relies on that, so if nnabla uses another stream for
  // computation,
  // we have to issue null kernel to the default stream at the beginning of this
  // method
  // and at the end of this method for using the implicit synchronization
  // technique for
  // main thread not to wait for a result of a kernel call.
  // TODO: the usage of multi streams is round-robin fashion, it may not be
  // optimal.
  if (inplace) { // in-place
    int k = 0;
    // (Removed an unused local `dtype`; the delegated overload fetches it.)
    for (auto ndarray : ndarray_list) { // ndarray loop
      int stream_id = k % num_streams_;
      bcast(ndarray, streams_[stream_id], src, inplace, group);
      k++;
    }
  } else { // out-of-place. use a large array.
    NdArrayPtr large_ndarray = copy_inside_device(ndarray_list);
    bcast(large_ndarray, nullptr, src, inplace, group);
    copy_back_inside_device(ndarray_list, large_ndarray);
  }
  // no need to call null kernel since nnabla uses default stream currently.
}
// Single-array overload: validates group membership, then delegates to the
// stream-aware bcast using the default (null) stream.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::bcast(NdArrayPtr ndarray,
                                                        int src, bool inplace,
                                                        const string &group) {
  const bool self_in_group = this->find_self(group);
  if (!self_in_group) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  bcast(ndarray, nullptr, src, inplace, group);
}
// Stream-aware single-array broadcast from rank `src` within `group`.
// The array buffer is both send (on src) and receive (elsewhere) buffer.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::bcast(NdArrayPtr ndarray,
                                                        hipStream_t stream,
                                                        int src, bool inplace,
                                                        const string &group) {
  auto n_param = ndarray->size();
  dtypes dtype = get_dtype<T>();
  T *dw0 = ndarray->cast(dtype, this->ctx_)->pointer<T>();
  ncclResult_t ret =
      ncclBcast(dw0, n_param, ncclFloat, src, comms_[group], stream);
  if (ret != ncclSuccess) {
    NBLA_ERROR(error_code::target_specific, "ncclBcast fails with %d.", ret);
  }
}
// Gathers `ndarray` from every rank in `group` into `ndarray_list`.
// NOTE(review): assumes each entry of ndarray_list has exactly
// ndarray->size() elements so that rank i's contribution lands in
// ndarray_list[i] — this is not checked here; confirm at callers.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::all_gather(
    NdArrayPtr ndarray, const vector<NdArrayPtr> &ndarray_list,
    const string &group) {
  if (!this->find_self(group)) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  // TODO: currently nnabla uses default stream for computation.
  // The following logic relies on that, so if nnabla uses another stream for
  // computation,
  // we have to issue null kernel to the default stream at the beginning of this
  // method
  // and at the end of this method for using the implicit synchronization
  // technique for
  // main thread not to wait for a result of a kernel call.
  // TODO: the usage of multi streams is round-robin fashion, it may not be
  // optimal.
  // copy_inside_device is used here to allocate/pack the flat receive
  // buffer; its packed contents are overwritten by ncclAllGather below.
  NdArrayPtr large_ndarray = copy_inside_device(ndarray_list);
  dtypes dtype = get_dtype<T>();
  const T *sendbuff = ndarray->get(dtype, this->ctx_)->const_pointer<T>();
  T *recvbuff = large_ndarray->cast(dtype, this->ctx_)->pointer<T>();
  Size_t sendcount = ndarray->size();
  ncclResult_t ret = ncclAllGather(sendbuff, recvbuff, sendcount,
                                   ncclFloat, // TODO: address ncclFloat
                                   comms_[group], 0); // use default stream
  if (ret != ncclSuccess) {
    NBLA_ERROR(error_code::target_specific, "ncclAllGather fails with %d.",
               ret);
  }
  copy_back_inside_device(ndarray_list, large_ndarray);
  // no need to call null kernel since nnabla uses default stream currently.
}
// ---- Unimplemented async variants: each unconditionally raises
// not_implemented. ----
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::reduce_async(bool division) {
  NBLA_ERROR(error_code::not_implemented,
             "CUDA GPU reduce_async is not implemented.")
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::allreduce_async(
    bool division, bool inplace) {
  NBLA_ERROR(error_code::not_implemented,
             "CUDA GPU allreduce_async is not implemented.")
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::reducescatter_async(
    bool division) {
  NBLA_ERROR(error_code::not_implemented,
             "CUDA GPU reducescatter_async is not implemented.")
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::bcast_async() {
  NBLA_ERROR(error_code::not_implemented,
             "CUDA GPU bcast_async is not implemented.")
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::allgather_async() {
  NBLA_ERROR(error_code::not_implemented,
             "CUDA GPU allgather_async is not implemented.")
}
// Always raises: the concrete array-class list is not provided here.
template <typename T>
vector<string>
MultiProcessDataParallelCommunicatorNccl<T>::allowed_array_classes() {
  NBLA_ERROR(error_code::not_implemented,
             "Derived class of MultiProcessDataParallelCommunicatorNccl must "
             "implement allowed_array_classes().")
}
// Blocks the host until all work on this communicator's device finishes.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<
    T>::wait_by_device_synchronization() {
  cuda_device_synchronize(device_id_);
}
// Blocks the host until all work queued on the communicator's own streams
// finishes (does not wait on the default stream).
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<
    T>::wait_by_streams_synchronization() {
  for (int i = 0; i < streams_.size(); ++i) {
    NBLA_CUDA_CHECK(hipStreamSynchronize(streams_[i]));
  }
}
// Touches the gradient array of every registered parameter so that any
// pending casts/transfers are issued before a collective starts, and warns
// on mismatched array classes.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::sync_all_params() {
  auto func_named_param = this->device_func_named_param_[0];
  Context ctx = this->contexts_[0];
  // (Removed an unused local `size` that was never read.)
  for (auto elm : func_named_param) { // function-loop
    VariablePtr vp = elm.second;
    // If the arrays are different, output the warning.
    this->check_array_class(ctx, vp);
    // Sync: getting the device pointer forces the gradient to be up to date.
    vp->get_grad_pointer<T>(ctx);
  }
}
template class MultiProcessDataParallelCommunicatorNccl<float>;
}
| c80764b4a1fc806af96528bf000d4c4fcb145c1c.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/communicator/multi_process_data_parallel_communicator.hpp>
#include <algorithm>
#include <cstdlib>
#include <memory>
#include "mpi.h"
#include <stdint.h>
#include <unistd.h>
namespace nbla {
using std::make_shared;
// Element-wise in-place division of `dw` by `n_devices`; used after a
// summed collective to turn the sum into an average. Launched via
// NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM with `size` elements.
template <typename T>
__global__ void kernel_divide_inplace(const int size, const int n_devices,
                                      T *dw) {
  NBLA_CUDA_KERNEL_LOOP(i, size) { dw[i] /= n_devices; }
}
/*
 * Referred from
 * http://docs.nvidia.com/deeplearning/sdk/nccl-developer-guide/index.html#onedevprothrd
 */
// DJB2 string hash (h = h * 33 + c, seeded with 5381); used to compare
// host names across MPI ranks without exchanging the strings themselves.
static uint64_t get_host_hash(const char *string) {
  uint64_t hash = 5381;
  for (const char *p = string; *p != '\0'; ++p) {
    hash = hash * 33 + *p; // identical to ((hash << 5) + hash) + *p
  }
  return hash;
}
// Fetches this machine's host name into `hostname` (capacity `maxlen`) and
// truncates it at the first '.' so hosts compare by their short name.
static void get_host_name(char *hostname, int maxlen) {
  gethostname(hostname, maxlen);
  for (int i = 0; i < maxlen; ++i) {
    if (hostname[i] != '.') {
      continue;
    }
    hostname[i] = '\0';
    break;
  }
}
// Constructor: defers all MPI/NCCL setup to init(); only resets the
// MPI-initialized flag.
template <typename T>
MultiProcessDataParallelCommunicatorNccl<
    T>::MultiProcessDataParallelCommunicatorNccl(const Context &ctx)
    : MultiProcessDataParallelCommunicator<T>(ctx) {
  mpi_initialized_ = false;
}
// Destructor: tears down the per-communicator CUDA streams and NCCL
// communicators (only if init() completed), then finalizes MPI if this
// class initialized it.
template <typename T>
MultiProcessDataParallelCommunicatorNccl<
    T>::~MultiProcessDataParallelCommunicatorNccl() {
  if (this->initialized_) {
    for (int i = 0; i < streams_.size(); ++i) {
      NBLA_CUDA_CHECK(cudaStreamDestroy(streams_[i]));
    }
    for (auto e : this->comms_) {
      ncclCommDestroy(e.second);
    }
  }
  if (mpi_initialized_) {
    MPI_Finalize();
  }
}
// Definition of the static per-instantiation flag recording whether
// MPI_Init_thread has been called.
template <typename T>
bool MultiProcessDataParallelCommunicatorNccl<T>::mpi_initialized_;
// Collective initialization; must be called by every MPI process.
// Initializes MPI (thread-serialized), derives the local rank / device id
// from per-host hostname hashes, exchanges the NCCL unique id over MPI,
// creates the "world" NCCL communicator and the communicator's CUDA streams.
template <typename T> void MultiProcessDataParallelCommunicatorNccl<T>::init() {
  Communicator::init();
  try {
    // MPI init
    if (!mpi_initialized_) {
      int argc = 0;
      char **argv = NULL;
      int requiredThreadLevelSupport = MPI_THREAD_SERIALIZED;
      int provided;
      MPI_Init_thread(&argc, &argv, requiredThreadLevelSupport, &provided);
      if (provided != requiredThreadLevelSupport) {
        NBLA_ERROR(error_code::target_specific,
                   "MPI_Init_thread failed since provided (%d) is not equal to "
                   "requiredThreadLevelSupport (%d)",
                   provided, requiredThreadLevelSupport);
      }
      mpi_initialized_ = true;
    }
    // Create comm, set size, and rank
    MPI_Comm_size(MPI_COMM_WORLD, &this->size_);
    MPI_Comm_rank(MPI_COMM_WORLD, &this->rank_);
    // Set local rank and device id
    // NOTE(review): runtime-sized stack array (VLA) is a compiler
    // extension, not standard C++.
    uint64_t host_hashs[this->size_];
    char hostname[1024];
    get_host_name(hostname, 1024);
    host_hashs[this->rank_] = get_host_hash(hostname);
    // Gather every rank's hostname hash so each rank can count how many
    // lower-numbered ranks share its host.
    MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, host_hashs,
                  sizeof(uint64_t), MPI_BYTE, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    int local_rank = 0;
    for (int i = 0; i < this->size_; ++i) {
      if (i == this->rank_) {
        break;
      }
      if (host_hashs[i] == host_hashs[this->rank_]) {
        local_rank++;
      }
    }
    // The local rank is used directly as the CUDA device id.
    this->device_id_ = local_rank;
    this->local_rank_ = local_rank;
    this->ctx_.device_id = std::to_string(local_rank);
    // Exchange comm_id among processes (generated on world rank 0).
    ncclUniqueId comm_id;
    if (this->rank_ == 0) {
      ncclGetUniqueId(&comm_id);
    }
    MPI_Bcast(&comm_id, sizeof(comm_id), MPI_BYTE, 0, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    // NCCL Init
    cuda_set_device(device_id_);
    ncclComm_t comm;
    ncclResult_t ret =
        ncclCommInitRank(&comm, this->size_, comm_id, this->rank_);
    if (ret != ncclSuccess) {
      NBLA_ERROR(error_code::target_specific, "ncclCommInitRank failed.");
    }
    // Create streams
    for (int i = 0; i < streams_.size(); ++i) {
      cudaStream_t stream;
      NBLA_CUDA_CHECK(cudaStreamCreate(&stream));
      streams_[i] = stream;
    }
    // Create world group
    this->comms_["world"] = comm;
    vector<int> ranks(this->size_);
    std::iota(ranks.begin(), ranks.end(), 0);
    this->groups_["world"] = ranks;
    this->initialized_ = true;
  } catch (...) {
    NBLA_ERROR(error_code::unclassified, "Communicator init failed.");
  }
}
// Creates a named sub-communicator over the given world ranks.
// Must be called collectively by ALL processes (MPI_Comm_create is
// collective on MPI_COMM_WORLD). Processes outside `ranks` register the
// group name and return without creating an NCCL communicator.
// Returns the group name.
template <typename T>
string MultiProcessDataParallelCommunicatorNccl<T>::new_group(
    pair<string, vector<int>> name_ranks_pair) {
  string group_name = name_ranks_pair.first;
  vector<int> ranks = name_ranks_pair.second;
  // Checks
  if (this->groups_.find(group_name) !=
      this->groups_.end()) { // group name already exists.
    NBLA_ERROR(error_code::value, "group_name = %s already exists",
               group_name.c_str());
  }
  int max = *std::max_element(ranks.begin(), ranks.end());
  // Bug fix: the message previously read "size ()" and dropped the %d for
  // the this->size_ argument passed below.
  NBLA_CHECK(max < this->size_, error_code::value,
             "Max value of the specified ranks should be less than the size "
             "(%d) of the communicator.",
             this->size_);
  int min = *std::min_element(ranks.begin(), ranks.end());
  NBLA_CHECK(min >= 0, error_code::value,
             "Min value of the specified ranks is greater than or equal to 0.");
  // Create new group
  MPI_Group world_group;
  MPI_Comm_group(MPI_COMM_WORLD, &world_group);
  MPI_Group new_group;
  MPI_Group_incl(world_group, ranks.size(), ranks.data(), &new_group);
  // Create mpi communicator
  MPI_Comm mpi_comm;
  MPI_Comm_create(MPI_COMM_WORLD, new_group,
                  &mpi_comm); // have to call in all processes
  // The group handles are no longer needed once the communicator exists;
  // free them to avoid leaking MPI objects (the original leaked both).
  MPI_Group_free(&new_group);
  MPI_Group_free(&world_group);
  // Add group name in all ranks
  this->groups_[group_name] = ranks;
  // Leave if self is not in ranks (mpi_comm is MPI_COMM_NULL here).
  auto result = std::find(ranks.begin(), ranks.end(), this->rank_);
  if (result == ranks.end()) { // self is not found in ranks.
    return group_name;
  }
  // Create nccl unique id and bcast it. Rank 0 of mpi_comm is the process
  // at ranks[0] (MPI_Group_incl preserves the given order), i.e. the one
  // that generated comm_id.
  ncclUniqueId comm_id;
  if (this->rank_ == ranks[0]) {
    ncclGetUniqueId(&comm_id);
  }
  int rank;
  MPI_Comm_rank(mpi_comm, &rank);
  MPI_Bcast(&comm_id, sizeof(comm_id), MPI_BYTE, 0, mpi_comm);
  MPI_Barrier(mpi_comm);
  MPI_Comm_free(&mpi_comm);
  // NCCL Comm Init
  cuda_set_device(device_id_);
  ncclComm_t comm;
  ncclResult_t ret = ncclCommInitRank(&comm, ranks.size(), comm_id, rank);
  if (ret != ncclSuccess) {
    NBLA_ERROR(error_code::target_specific, "ncclCommInitRank failed with %d",
               ret);
  }
  this->comms_[group_name] = comm;
  return group_name;
}
// Packs every array in `ndarray_list` into one newly allocated flat device
// buffer via async device-to-device copies issued on round-robin streams.
// No synchronization is performed here; the copies are still in flight when
// this function returns.
template <typename T>
shared_ptr<NdArray>
MultiProcessDataParallelCommunicatorNccl<T>::copy_inside_device(
    const vector<NdArrayPtr> &ndarray_list) {
  // preparation
  Size_t total_params = 0;
  // The flat buffer holds the concatenation of all inputs, in list order.
  for (auto ndarray : ndarray_list) {
    auto n_param = ndarray->size();
    total_params += n_param;
  }
  dtypes dtype = get_dtype<T>();
  NdArrayPtr large_ndarray = make_shared<NdArray>(Shape_t{total_params});
  T *buff = large_ndarray->cast(dtype, this->ctx_)->pointer<T>();
  Size_t type_size = sizeof(T);
  int k = 0;
  // copy inside device
  for (auto ndarray : ndarray_list) {
    const T *dw = ndarray->cast(dtype, this->ctx_)->const_pointer<T>();
    auto n_param = ndarray->size();
    int stream_id = k % num_streams_;
    cudaMemcpyAsync(buff, dw, type_size * n_param, cudaMemcpyDeviceToDevice,
                    streams_[stream_id]);
    buff += n_param;
    k++;
  }
  return large_ndarray;
}
// Scatters the flat `large_ndarray` buffer back into the individual arrays
// of `ndarray_list` (inverse of copy_inside_device), using async
// device-to-device copies on round-robin streams. No synchronization here.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::copy_back_inside_device(
    const vector<NdArrayPtr> &ndarray_list, NdArrayPtr large_ndarray) {
  dtypes dtype = get_dtype<T>();
  T *buff = large_ndarray->cast(dtype, this->ctx_)->pointer<T>();
  Size_t type_size = sizeof(T);
  int k = 0;
  for (auto ndarray : ndarray_list) {
    T *dw = ndarray->cast(dtype, this->ctx_)->pointer<T>();
    auto n_param = ndarray->size();
    int stream_id = k % num_streams_;
    cudaMemcpyAsync(dw, buff, type_size * n_param, cudaMemcpyDeviceToDevice,
                    streams_[stream_id]);
    buff += n_param;
    k++;
  }
}
// Reduces every array in `ndarray_list` onto rank `dst` within `group`.
// In-place mode issues one reduce per array on round-robin streams;
// out-of-place mode packs all arrays into one large buffer first.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::reduce(
    const vector<NdArrayPtr> &ndarray_list, int dst, bool division,
    bool inplace, const string &group) {
  if (!this->find_self(group)) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  // TODO: currently nnabla uses default stream for computation.
  // The following logic relies on that, so if nnabla uses another stream for
  // computation,
  // we have to issue null kernel to the default stream at the beginning of this
  // method
  // and at the end of this method for using the implicit synchronization
  // technique for
  // main thread not to wait for a result of a kernel call.
  // TODO: the usage of multi streams is round-robin fashion, it may not be
  // optimal.
  if (inplace) { // in-place
    int k = 0;
    for (auto ndarray : ndarray_list) { // ndarray loop
      int stream_id = k % num_streams_;
      // Bug fix: forward `group`; the original call dropped it and thus
      // reduced on the default group even when a sub-group was requested.
      reduce(ndarray, streams_[stream_id], dst, division, inplace, group);
      k++;
    }
  } else { // out-of-place. use a large array.
    NdArrayPtr large_ndarray = copy_inside_device(ndarray_list);
    reduce(large_ndarray, nullptr, dst, division, inplace, group);
    copy_back_inside_device(ndarray_list, large_ndarray);
  }
  // no need to call null kernel since nnabla uses default stream currently.
}
// Single-array overload: validates group membership, then delegates to the
// stream-aware reduce using the default (null) stream.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::reduce(NdArrayPtr ndarray,
                                                         int dst, bool division,
                                                         bool inplace,
                                                         const string &group) {
  const bool self_in_group = this->find_self(group);
  if (!self_in_group) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  reduce(ndarray, nullptr, dst, division, inplace, group);
}
// Stream-aware single-array reduce (sum) to rank `dst` within `group`;
// when `division` is set the destination buffer is additionally divided by
// the communicator size to produce an average.
// NOTE(review): dw0/dw1 come from the same NdArray and typically alias,
// relying on NCCL's in-place support — confirm pointers cannot differ here.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::reduce(NdArrayPtr ndarray,
                                                         cudaStream_t stream,
                                                         int dst, bool division,
                                                         bool inplace,
                                                         const string &group) {
  auto n_param = ndarray->size();
  dtypes dtype = get_dtype<T>();
  const T *dw0 = ndarray->get(dtype, this->ctx_)->const_pointer<T>();
  T *dw1 = ndarray->cast(dtype, this->ctx_)->pointer<T>();
  ncclResult_t ret = ncclReduce(dw0, dw1, n_param,
                                ncclFloat, // TODO: address ncclFloat
                                ncclSum, dst, comms_[group], stream);
  if (ret != ncclSuccess) {
    NBLA_ERROR(error_code::target_specific, "ncclReduce fails with %d.", ret);
  }
  if (division) {
    // Division kernel is launched on every rank, although only `dst`
    // receives the reduced result from ncclReduce.
    NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, stream, n_param,
                                      this->size_, dw1);
    // TODO: strange because of implicit synchronization without inplace and
    // with division does not occur.
    // copy(streams) -> all_reduce(default stream) ->
    // -> division(default stream) -> copy_back(streams) -> xxx(default stream)
    // Even if launching null kernel, no sync. Thus, call stream synchronize.
    if (!inplace) {
      cudaStreamSynchronize(stream);
    }
  }
}
// Legacy all-reduce over the registered gradients of all parameters on the
// "world" group. In-place mode all-reduces each parameter's gradient buffer
// directly; out-of-place mode packs all gradients into one temporary device
// buffer, all-reduces it once on the default stream, then scatters back.
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::allreduce(bool division,
                                                            bool inplace) {
  // TODO: Delete this function when appropriate
  // TODO: currently nnabla uses default stream for computation.
  // The following logic relies on that, so if nnabla uses another stream for
  // computation,
  // we have to issue null kernel to the default stream at the beginning of this
  // method
  // and at the end of this method for using the implicit synchronization
  // technique for
  // main thread not to wait for a result of a kernel call.
  // TODO: the usage of multi streams is round-robin fashion, it may not be
  // optimal.
  // Once sync to prevent the hang where the memcpy occurs during the allreduce.
  this->sync_all_params();
  if (inplace) { // in-place
    Context ctx = this->contexts_[0];
    auto func_named_param = this->device_func_named_param_[0];
    int k = 0;
    for (auto elm : func_named_param) { // function-loop
      VariablePtr vp = elm.second;
      auto n_param = vp->size();
      const T *dw0 = vp->get_grad_pointer<T>(ctx);
      T *dw1 = vp->cast_grad_and_get_pointer<T>(ctx);
      int stream_id = k % num_streams_;
      // AllReduce
      ncclResult_t ret =
          ncclAllReduce(dw0, dw1, n_param, ncclFloat, // TODO: address ncclFloat
                        ncclSum, comms_["world"], streams_[stream_id]);
      if (ret != ncclSuccess) {
        NBLA_ERROR(error_code::target_specific, "ncclAllReduce fails with %d.",
                   ret);
      }
      // Divide (turn the sum into an average), on the same stream.
      if (division) {
        NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace,
                                          streams_[stream_id], n_param,
                                          this->size_, dw1);
      }
      k++;
    }
  } else { // out-of-place. use a large array.
    Context ctx = this->contexts_[0];
    shared_ptr<CudaCachedArray> arr_buff = // TODO: address 16 bits also here?
        make_shared<CudaCachedArray>(this->total_params_, get_dtype<T>(), ctx);
    T *buff = arr_buff->pointer<T>();
    T *buff_start = buff;
    auto func_named_param = this->device_func_named_param_[0];
    Size_t type_size = sizeof(T);
    int k = 0;
    // 1. copy inside device (async, round-robin over streams)
    for (auto elm : func_named_param) {
      VariablePtr vp = elm.second;
      const T *dw = vp->get_grad_pointer<T>(ctx);
      auto n_param = vp->size();
      int stream_id = k % num_streams_;
      cudaMemcpyAsync(buff, dw, type_size * n_param, cudaMemcpyDeviceToDevice,
                      streams_[stream_id]);
      buff += n_param;
      k++;
    }
    // 2. all reduce (in-place on the packed buffer)
    ncclResult_t ret =
        ncclAllReduce(buff_start, buff_start, this->total_params_,
                      ncclFloat, // TODO: address ncclFloat
                      ncclSum, comms_["world"], 0); // use default stream
    if (ret != ncclSuccess) {
      NBLA_ERROR(error_code::target_specific, "ncclAllReduce fails with %d.",
                 ret);
    }
    // 3. divide
    if (division) {
      // use default stream
      NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, 0,
                                        this->total_params_, this->size_,
                                        buff_start);
    }
    // 4. copy back inside device
    buff = buff_start;
    k = 0;
    for (auto elm : func_named_param) {
      VariablePtr vp = elm.second;
      T *dw = vp->cast_grad_and_get_pointer<T>(ctx);
      auto n_param = vp->size();
      int stream_id = k % num_streams_;
      cudaMemcpyAsync(dw, buff, type_size * n_param, cudaMemcpyDeviceToDevice,
                      streams_[stream_id]);
      buff += n_param;
      k++;
    }
  }
  // no need to call null kernel since nnabla uses default stream currently.
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::all_reduce(
    const vector<NdArrayPtr> &ndarray_list, bool division, bool inplace,
    const string &group) {
  // All-reduce every array in `ndarray_list` across the ranks of `group`.
  // inplace=true: each array is reduced in its own buffer, work spread over
  //   the communicator's streams in round-robin fashion.
  // inplace=false: arrays are packed into one large device buffer, reduced
  //   with a single call on the default stream, then unpacked.
  // Fix: removed unused local `dtypes dtype = get_dtype<T>();`.
  if (!this->find_self(group)) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  // TODO: currently nnabla uses default stream for computation.
  // The following logic relies on that, so if nnabla uses another stream for
  // computation, we have to issue null kernel to the default stream at the
  // beginning and at the end of this method for using the implicit
  // synchronization technique for main thread not to wait for a result of a
  // kernel call.
  // TODO: the usage of multi streams is round-robin fashion, it may not be
  // optimal.
  if (inplace) { // in-place
    int k = 0;
    for (auto ndarray : ndarray_list) { // ndarray loop
      // Round-robin assignment of arrays to streams.
      int stream_id = k % num_streams_;
      all_reduce(ndarray, streams_[stream_id], division, inplace, group);
      k++;
    }
  } else { // out-of-place. use a large array.
    NdArrayPtr large_ndarray = copy_inside_device(ndarray_list);
    all_reduce(large_ndarray, nullptr, division, inplace, group);
    copy_back_inside_device(ndarray_list, large_ndarray);
  }
  // no need to call null kernel since nnabla uses default stream currently.
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::all_reduce(
    NdArrayPtr ndarray, bool division, bool inplace, const string &group) {
  // Convenience overload: validates group membership, then delegates to the
  // stream-aware overload on the default stream (nullptr).
  const bool self_in_group = this->find_self(group);
  if (!self_in_group) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  this->all_reduce(ndarray, nullptr, division, inplace, group);
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::all_reduce(
    NdArrayPtr ndarray, cudaStream_t stream, bool division, bool inplace,
    const string &group) {
  // Core all-reduce of a single array over `group` on `stream`, optionally
  // dividing the result by the communicator size (mean instead of sum).
  auto n_param = ndarray->size();
  dtypes dtype = get_dtype<T>();
  // Source pointer is fetched before the destination cast; NCCL supports an
  // in-place all-reduce when send and recv buffers alias.
  const T *dw0 = ndarray->get(dtype, this->ctx_)->const_pointer<T>();
  T *dw1 = ndarray->cast(dtype, this->ctx_)->pointer<T>();
  ncclResult_t ret = ncclAllReduce(dw0, dw1, n_param,
                                   ncclFloat, // TODO: address ncclFloat
                                   ncclSum, comms_[group], stream);
  if (ret != ncclSuccess) {
    NBLA_ERROR(error_code::target_specific, "ncclAllReduce fails with %d.",
               ret);
  }
  if (division) {
    // Divide each element by the communicator size in-place.
    NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, stream, n_param,
                                      this->size_, dw1);
    // TODO: strange because of implicit synchronization without inplace and
    // with division does not occur.
    // copy(streams) -> all_reduce(default stream) ->
    // -> division(default stream) -> copy_back(streams) -> xxx(default stream)
    // Even if launching null kernel, no sync. Thus, call stream synchronize.
    if (!inplace) {
      cudaStreamSynchronize(stream);
    }
  }
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::reduce_scatter(
    const vector<NdArrayPtr> &ndarray_list, NdArrayPtr ndarray, bool division,
    const string &group) {
  // Packs `ndarray_list` into one device buffer, reduce-scatters it over
  // `group` into `ndarray`, and optionally divides by communicator size.
  // Fix: the error message previously said "ncclBcast" although the failing
  // call is ncclReduceScatter.
  if (!this->find_self(group)) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  // TODO: currently nnabla uses default stream for computation.
  // The following logic relies on that, so if nnabla uses another stream for
  // computation, we have to issue null kernel to the default stream at the
  // beginning and at the end of this method for using the implicit
  // synchronization technique for main thread not to wait for a result of a
  // kernel call.
  NdArrayPtr large_ndarray = copy_inside_device(ndarray_list);
  dtypes dtype = get_dtype<T>();
  const T *sendbuff = large_ndarray->get(dtype, this->ctx_)->const_pointer<T>();
  T *recvbuff = ndarray->cast(dtype, this->ctx_)->pointer<T>();
  Size_t recvcount = ndarray->size();
  ncclResult_t ret =
      ncclReduceScatter(sendbuff, recvbuff, recvcount,
                        ncclFloat, // TODO: address ncclFloat
                        ncclSum, comms_[group], 0); // use default stream
  if (ret != ncclSuccess) {
    NBLA_ERROR(error_code::target_specific, "ncclReduceScatter fails with %d.",
               ret);
  }
  // divide
  if (division) {
    // use default stream
    NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, 0, recvcount,
                                      this->size_, recvbuff);
  }
  // no need to call null kernel since nnabla uses default stream currently.
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::bcast(
    const vector<NdArrayPtr> &ndarray_list, int src, bool inplace,
    const string &group) {
  // Broadcasts every array in `ndarray_list` from rank `src` to all ranks of
  // `group`. inplace=true broadcasts each array on a round-robin stream;
  // inplace=false packs the arrays into one large buffer first.
  // Fix: removed unused local `dtypes dtype = get_dtype<T>();`.
  if (!this->find_self(group)) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  // TODO: currently nnabla uses default stream for computation.
  // The following logic relies on that, so if nnabla uses another stream for
  // computation, we have to issue null kernel to the default stream at the
  // beginning and at the end of this method for using the implicit
  // synchronization technique for main thread not to wait for a result of a
  // kernel call.
  // TODO: the usage of multi streams is round-robin fashion, it may not be
  // optimal.
  if (inplace) { // in-place
    int k = 0;
    for (auto ndarray : ndarray_list) { // ndarray loop
      int stream_id = k % num_streams_;
      bcast(ndarray, streams_[stream_id], src, inplace, group);
      k++;
    }
  } else { // out-of-place. use a large array.
    NdArrayPtr large_ndarray = copy_inside_device(ndarray_list);
    bcast(large_ndarray, nullptr, src, inplace, group);
    copy_back_inside_device(ndarray_list, large_ndarray);
  }
  // no need to call null kernel since nnabla uses default stream currently.
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::bcast(NdArrayPtr ndarray,
                                                        int src, bool inplace,
                                                        const string &group) {
  // Convenience overload: validates group membership, then delegates to the
  // stream-aware overload on the default stream (nullptr).
  const bool self_in_group = this->find_self(group);
  if (!self_in_group) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  this->bcast(ndarray, nullptr, src, inplace, group);
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::bcast(NdArrayPtr ndarray,
                                                        cudaStream_t stream,
                                                        int src, bool inplace,
                                                        const string &group) {
  // Core broadcast of a single array from rank `src` over `group` on `stream`.
  // Note: no membership check here; callers are expected to validate.
  auto n_param = ndarray->size();
  dtypes dtype = get_dtype<T>();
  // cast() gives a mutable pointer; ncclBcast reads on src and writes on the
  // other ranks using the same buffer.
  T *dw0 = ndarray->cast(dtype, this->ctx_)->pointer<T>();
  ncclResult_t ret =
      ncclBcast(dw0, n_param, ncclFloat, src, comms_[group], stream);
  if (ret != ncclSuccess) {
    NBLA_ERROR(error_code::target_specific, "ncclBcast fails with %d.", ret);
  }
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::all_gather(
    NdArrayPtr ndarray, const vector<NdArrayPtr> &ndarray_list,
    const string &group) {
  // Gathers `ndarray` from every rank of `group` into `ndarray_list`
  // (one entry per rank), going through a single large device buffer.
  if (!this->find_self(group)) {
    NBLA_ERROR(error_code::value, "self (rank=%d) is not included in %s.",
               this->rank_, group.c_str());
  }
  // TODO: currently nnabla uses default stream for computation.
  // The following logic relies on that, so if nnabla uses another stream for
  // computation,
  // we have to issue null kernel to the default stream at the beginning of this
  // method
  // and at the end of this method for using the implicit synchronization
  // technique for
  // main thread not to wait for a result of a kernel call.
  // TODO: the usage of multi streams is round-robin fashion, it may not be
  // optimal.
  // large_ndarray is sized to hold the concatenation of ndarray_list; NCCL
  // writes each rank's contribution contiguously into it.
  NdArrayPtr large_ndarray = copy_inside_device(ndarray_list);
  dtypes dtype = get_dtype<T>();
  const T *sendbuff = ndarray->get(dtype, this->ctx_)->const_pointer<T>();
  T *recvbuff = large_ndarray->cast(dtype, this->ctx_)->pointer<T>();
  Size_t sendcount = ndarray->size();
  ncclResult_t ret = ncclAllGather(sendbuff, recvbuff, sendcount,
                                   ncclFloat, // TODO: address ncclFloat
                                   comms_[group], 0); // use default stream
  if (ret != ncclSuccess) {
    NBLA_ERROR(error_code::target_specific, "ncclAllGather fails with %d.",
               ret);
  }
  // Unpack the gathered buffer back into the individual arrays.
  copy_back_inside_device(ndarray_list, large_ndarray);
  // no need to call null kernel since nnabla uses default stream currently.
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::reduce_async(bool division) {
  // Asynchronous reduce is not supported by this communicator; always raises.
  NBLA_ERROR(error_code::not_implemented,
             "CUDA GPU reduce_async is not implemented.")
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::allreduce_async(
    bool division, bool inplace) {
  // Asynchronous all-reduce is not supported by this communicator; always raises.
  NBLA_ERROR(error_code::not_implemented,
             "CUDA GPU allreduce_async is not implemented.")
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::reducescatter_async(
    bool division) {
  // Asynchronous reduce-scatter is not supported by this communicator; always raises.
  NBLA_ERROR(error_code::not_implemented,
             "CUDA GPU reducescatter_async is not implemented.")
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::bcast_async() {
  // Asynchronous broadcast is not supported by this communicator; always raises.
  NBLA_ERROR(error_code::not_implemented,
             "CUDA GPU bcast_async is not implemented.")
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::allgather_async() {
  // Asynchronous all-gather is not supported by this communicator; always raises.
  NBLA_ERROR(error_code::not_implemented,
             "CUDA GPU allgather_async is not implemented.")
}
template <typename T>
vector<string>
MultiProcessDataParallelCommunicatorNccl<T>::allowed_array_classes() {
  // Always raises: a derived class is expected to override this.
  // NOTE(review): there is no return statement after NBLA_ERROR — presumably
  // the macro throws so the end of the function is unreachable; confirm the
  // macro's semantics, otherwise this is undefined behavior.
  NBLA_ERROR(error_code::not_implemented,
             "Derived class of MultiProcessDataParallelCommunicatorNccl must "
             "implement allowed_array_classes().")
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<
    T>::wait_by_device_synchronization() {
  // Block the host until all outstanding work on this communicator's device
  // has completed (device-wide synchronization).
  cuda_device_synchronize(device_id_);
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<
    T>::wait_by_streams_synchronization() {
  // Block the host until every stream owned by this communicator has drained.
  // Range-for replaces the index loop, avoiding the signed/unsigned
  // comparison between `int i` and `streams_.size()`.
  for (auto &stream : streams_) {
    NBLA_CUDA_CHECK(cudaStreamSynchronize(stream));
  }
}
template <typename T>
void MultiProcessDataParallelCommunicatorNccl<T>::sync_all_params() {
  // Touches every parameter's gradient on the communicator's context so that
  // the data is synchronized to (resident on) that device, warning if an
  // array class differs from the expected one.
  // Fix: removed unused local `auto size = func_named_param.size();`.
  auto func_named_param = this->device_func_named_param_[0];
  Context ctx = this->contexts_[0];
  for (auto elm : func_named_param) { // function-loop
    VariablePtr vp = elm.second;
    // If the arrays are different, output the warning.
    this->check_array_class(ctx, vp);
    // Sync: fetching the pointer forces the gradient onto `ctx`.
    vp->get_grad_pointer<T>(ctx);
  }
}
template class MultiProcessDataParallelCommunicatorNccl<float>;
}
|
279561b9989297e92918460f77eb16fefb922668.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <readers.h>
#include <multiply.h>
#include <fstream>
#include <ios>
#include <iostream>
#include <map>
#include <iterator>
#include <algorithm>
#include <amgx_types/util.h>
#include <amgx_types/io.h>
namespace amgx
{
// Reads a single value of type T from the text stream.
// Real specializations read one token; complex specializations read two
// whitespace-separated reals interpreted as (real, imaginary).
template <typename T>
void LoadValueFromStream(std::ifstream &fin, T &val);
template <>
void LoadValueFromStream(std::ifstream &fin, float &val)
{
    fin >> val;
}
template <>
void LoadValueFromStream(std::ifstream &fin, double &val)
{
    fin >> val;
}
template <>
void LoadValueFromStream(std::ifstream &fin, hipComplex &val)
{
    float x, y;
    fin >> x >> y;
    val = make_cuComplex(x, y);
}
template <>
void LoadValueFromStream(std::ifstream &fin, hipDoubleComplex &val)
{
    double x, y;
    fin >> x >> y;
    val = make_cuDoubleComplex(x, y);
}
// Reads `rows_total` rows (each of `block_size` scalar entries) from `fin`
// into `vec`. When read_all is true every row is stored at its global index;
// otherwise only rows present in GlobalToLocalRowMap are stored, at their
// mapped local index (values for other rows are read and discarded).
// Returns false if the stream is not readable; raises if the number of
// stored entries does not match vec.size().
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
bool LoadVector(std::ifstream &fin, bool read_all, int rows_total, int block_size, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &vec, const std::map<const int, int> &GlobalToLocalRowMap = std::map<const int, int>())
{
    std::map<const int, int>::const_iterator gtl_it;
    //std::vector<double> block_vals(block_size);
    typedef typename VecPrecisionMap<t_vecPrec>::Type value_type;
    std::vector<value_type> block_vals(block_size);
    //for each entry
    // idx counts how many scalar entries were actually stored into vec.
    int idx = 0;
    if (fin)
    {
        for (int i = 0; i < rows_total; i++)
        {
            //read entry (always read a full block, even if it is discarded)
            for (int k = 0; k < block_size; k++)
            {
                LoadValueFromStream(fin, block_vals[k]);
            }
            //fin >> block_vals[k];
            if (read_all)
                for (int k = 0; k < block_size; k++)
                {
                    vec[i * block_size + k] = block_vals[k];
                    idx++;
                }
            else
            {
                // Partitioned read: keep only rows owned by this rank.
                gtl_it = GlobalToLocalRowMap.find(i);
                if (gtl_it != GlobalToLocalRowMap.end())
                {
                    for (int k = 0; k < block_size; k++)
                    {
                        vec[gtl_it->second * block_size + k] = block_vals[k];
                        idx++;
                    }
                }
            }
        }
        // NOTE(review): int vs size_t comparison; fine while sizes fit in int.
        if (idx != vec.size())
        {
            FatalError("Matrix Market reader rows mismatch", AMGX_ERR_IO);
        }
    }
    else
    {
        return false;
    }
    return true;
}
// Distrubuted version
// Consumes and discards `num_values` whitespace-separated numeric tokens
// from the stream, advancing the read position past them.
void skip_vals(std::ifstream &fin, int num_values)
{
    double discard;
    for (int i = 0; i < num_values; ++i)
    {
        fin >> discard;
    }
}
// Returns the small value (1e-6 in the matrix precision) used to boost zero
// diagonal entries; complex variants carry a zero imaginary part.
template <typename T>
T getBoostValue();
template <>
float getBoostValue()
{
    return 1.e-6f;
}
template <>
double getBoostValue()
{
    return 1.e-6;
}
template <>
hipComplex getBoostValue()
{
    return make_cuComplex(1e-6f, 0.f);
}
template <>
hipDoubleComplex getBoostValue()
{
    return make_cuDoubleComplex(1e-6, 0.);
}
// Maps a real vector precision to its complex counterpart
// (double -> double-complex, float -> complex); identity for all others.
template<AMGX_VecPrecision prec>
struct vecRealToComplexPrec
{
    static const AMGX_VecPrecision CPrec = prec;
};
template <>
struct vecRealToComplexPrec<AMGX_vecDouble>
{
    static const AMGX_VecPrecision CPrec = AMGX_vecDoubleComplex;
};
template <>
struct vecRealToComplexPrec<AMGX_vecFloat>
{
    static const AMGX_VecPrecision CPrec = AMGX_vecComplex;
};
// Maps a real matrix precision to its complex counterpart
// (double -> double-complex, float -> complex); identity for all others.
template<AMGX_MatPrecision prec>
struct matRealToComplexPrec
{
    static const AMGX_MatPrecision CPrec = prec;
};
template <>
struct matRealToComplexPrec<AMGX_matDouble>
{
    static const AMGX_MatPrecision CPrec = AMGX_matDoubleComplex;
};
template <>
struct matRealToComplexPrec<AMGX_matFloat>
{
    static const AMGX_MatPrecision CPrec = AMGX_matComplex;
};
// Reads a complex-valued system and converts it to an equivalent real
// formulation (ERF). The bool parameter is the "target is already complex"
// flag; conversion only makes sense when the target types are real.
template <class TReal, class TComplex, class PartVec, bool init_flag>
struct ReadAndConvert;
// Specialization for complex target types: requesting a complex-to-ERF
// conversion here is a configuration error, so it always raises.
template <class TReal, class TComplex, class PartVec>
struct ReadAndConvert<TReal, TComplex, PartVec, true>
{
    static void readAndConvert(std::ifstream &fin, const char *fname, int conversion_type
                               , Matrix<TReal> &A
                               , Vector<TReal> &b
                               , Vector<TReal> &x
                               , unsigned int props
                               , const PartVec &rank_rows)
    {
        // Fix: corrected typo "Converversion" in the diagnostic message.
        FatalError("Conversion from complex matrix to ERF, but one of the complex modes is specified", AMGX_ERR_IO);
    }
};
// Specialization for real target types: reads a complex-valued Matrix Market
// system and converts it to an equivalent real formulation (ERF).
// conversion_type selects the formulation:
//   1..4     : scalar system of 2x size using the K1..K4 formulations;
//   221..224 : same-size system with 2x2 (or doubled) blocks, each block
//              converted with the K1..K4 formulation.
template <class TReal, class TComplex, class PartVec>
struct ReadAndConvert<TReal, TComplex, PartVec, false>
{
    static void readAndConvert(std::ifstream &fin, const char *fname, int conversion_type
                               , Matrix<TReal> &A
                               , Vector<TReal> &b
                               , Vector<TReal> &x
                               , unsigned int props
                               , const PartVec &rank_rows)
    {
        AMG_Config tcfg;
        Matrix<TComplex> Ac;
        Vector<TComplex> xc, bc;
        typedef typename TReal::MatPrec RValueTypeA;
        typedef typename TReal::VecPrec RValueTypeB;
        typedef typename TComplex::MatPrec CValueTypeA;
        typedef typename TComplex::VecPrec CValueTypeB;
        printf("ERF conversion: reading complex valued system\n");
        fflush(stdout);
        // First read the system into complex-typed containers.
        ReadMatrixMarket<TComplex>::readMatrixMarket(fin, fname, Ac, bc, xc, tcfg);
        // modes = 1..4 - convert to the scalar system of 2x size using K1..K4 formulation
        if (conversion_type > 0 && conversion_type < 5)
        {
            // fill CSR values, common for all modes
            int cnrows = Ac.get_num_rows();
            int cnnz = Ac.get_num_nz();
            int nrows = cnrows * 2;
            int nnz = Ac.get_num_nz() * 4;
            A.addProps(CSR);
            A.resize(nrows, nrows, nnz);
            // set row offsets: first cnrows rows hold the top half of the
            // 2x2 block structure, the next cnrows rows the bottom half.
            for (int i = 0; i < cnrows; i++)
            {
                A.row_offsets[i] = Ac.row_offsets[i] * 2;
                A.row_offsets[i + cnrows] = Ac.row_offsets[i] * 2 + cnnz * 2;
            }
            A.row_offsets[nrows] = nnz;
            // set col indices: each complex column c yields columns c and
            // c + nrows/2 in the real system.
            for (int r = 0; r < nrows ; r++)
            {
                int *Ac_col_ptr = Ac.col_indices.raw() + Ac.row_offsets[r % cnrows];
                int row_nnz = A.row_offsets[r + 1] - A.row_offsets[r];
                for (int c = 0; c < (row_nnz / 2); c++)
                {
                    A.col_indices[A.row_offsets[r] + c] = Ac_col_ptr[c];
                    A.col_indices[A.row_offsets[r] + c + row_nnz / 2] = Ac_col_ptr[c] + nrows / 2;
                }
            }
            // set values according to the chosen K1..K4 layout of
            // real/imaginary parts.
            for (int r = 0; r < cnrows; r++)
            {
                CValueTypeA *Ac_values = Ac.values.raw() + Ac.row_offsets[r];
                int row_nnz = Ac.row_offsets[r + 1] - Ac.row_offsets[r];
                for (int c = 0; c < row_nnz; c++)
                {
                    switch (conversion_type)
                    {
                        case 1:
                            A.values[A.row_offsets[r] + c] = types::get_re(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz] = -types::get_im(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + 2 * cnnz] = types::get_im(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz + 2 * cnnz] = types::get_re(Ac_values[c]);
                            break;

                        case 2:
                            A.values[A.row_offsets[r] + c] = types::get_re(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz] = types::get_im(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + 2 * cnnz] = types::get_im(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz + 2 * cnnz] = -types::get_re(Ac_values[c]);
                            break;

                        case 3:
                            A.values[A.row_offsets[r] + c] = types::get_im(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz] = types::get_re(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + 2 * cnnz] = types::get_re(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz + 2 * cnnz] = -types::get_im(Ac_values[c]);
                            break;

                        case 4:
                            A.values[A.row_offsets[r] + c] = types::get_im(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz] = -types::get_re(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + 2 * cnnz] = types::get_re(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz + 2 * cnnz] = types::get_im(Ac_values[c]);
                            break;
                    }
                }
            }
            // set b: top half and bottom half hold re/im (or im/re) parts
            // depending on the formulation.
            b.set_block_dimx(1);
            b.set_block_dimy(1);
            b.resize(nrows);
            for (int r = 0; r < cnrows; r++)
            {
                switch (conversion_type)
                {
                    case 1:
                    case 2:
                        b[r] = types::get_re(bc[r]);
                        b[r + cnrows] = types::get_im(bc[r]);
                        break;

                    case 3:
                    case 4:
                        b[r] = types::get_im(bc[r]);
                        b[r + cnrows] = types::get_re(bc[r]);
                        break;
                }
            }
            //set x if needed (only when an initial guess was present)
            x.set_block_dimx(1);
            x.set_block_dimy(1);
            if (xc.size() > 0)
            {
                // set b
                x.resize(nrows);
                for (int r = 0; r < cnrows; r++)
                {
                    switch (conversion_type)
                    {
                        case 1:
                        case 3:
                            x[r] = types::get_re(xc[r]);
                            x[r + cnrows] = types::get_im(xc[r]);
                            break;

                        case 2:
                        case 4:
                            x[r] = types::get_re(xc[r]);
                            x[r + cnrows] = -types::get_im(xc[r]);
                            break;
                    }
                }
            }
            A.computeDiagonal();
            std::stringstream info;
            info << "Converted complex matrix " << cnrows << "x" << cnrows << " with " << cnnz << " nonzeros to the ERF - using K" << conversion_type << " formulation." << std::endl;
            std::cout << info.str();
        }
        // modes 221..224 - convert to the system of the same size but with 2x2 blocks,
        // where each block converted from original Aij value using K1..K4 formulation
        // this switch is for original blocksize of 1
        else if (conversion_type > 220 && conversion_type < 225 && Ac.get_block_dimy()*Ac.get_block_dimx() == 1)
        {
            // fill CSR values, common for all modes
            int nrows = Ac.get_num_rows();
            int nnz = Ac.get_num_nz();
            A.addProps(Ac.hasProps(DIAG) ? CSR | DIAG : CSR);
            A.resize(nrows, nrows, nnz, 2 * Ac.get_block_dimx(), 2 * Ac.get_block_dimy(), 1);
            // Sparsity pattern is unchanged; only values are expanded to 2x2 blocks.
            thrust::copy(Ac.row_offsets.begin(), Ac.row_offsets.end(), A.row_offsets.begin());
            thrust::copy(Ac.col_indices.begin(), Ac.col_indices.end(), A.col_indices.begin());
            for (int i = 0; i < nnz; i++)
            {
                switch (conversion_type)
                {
                    case 221:
                        A.values[4 * i    ] = types::get_re(Ac.values[i]);
                        A.values[4 * i + 1] = -types::get_im(Ac.values[i]);
                        A.values[4 * i + 2] = types::get_im(Ac.values[i]);
                        A.values[4 * i + 3] = types::get_re(Ac.values[i]);
                        break;

                    case 222:
                        A.values[4 * i    ] = types::get_re(Ac.values[i]);
                        A.values[4 * i + 1] = types::get_im(Ac.values[i]);
                        A.values[4 * i + 2] = types::get_im(Ac.values[i]);
                        A.values[4 * i + 3] = -types::get_re(Ac.values[i]);
                        break;

                    case 223:
                        A.values[4 * i    ] = types::get_im(Ac.values[i]);
                        A.values[4 * i + 1] = types::get_re(Ac.values[i]);
                        A.values[4 * i + 2] = types::get_re(Ac.values[i]);
                        A.values[4 * i + 3] = -types::get_im(Ac.values[i]);
                        break;

                    case 224:
                        A.values[4 * i    ] = types::get_im(Ac.values[i]);
                        A.values[4 * i + 1] = -types::get_re(Ac.values[i]);
                        A.values[4 * i + 2] = types::get_re(Ac.values[i]);
                        A.values[4 * i + 3] = types::get_im(Ac.values[i]);
                        break;
                }
            }
            A.computeDiagonal();
            b.resize(nrows * 2);
            b.set_block_dimx(1);
            b.set_block_dimy(2);
            for (int r = 0; r < nrows; r++)
            {
                switch (conversion_type)
                {
                    case 221:
                    case 222:
                        b[2 * r    ] = types::get_re(bc[r]);
                        b[2 * r + 1] = types::get_im(bc[r]);
                        break;

                    case 223:
                    case 224:
                        b[2 * r    ] = types::get_im(bc[r]);
                        b[2 * r + 1] = types::get_re(bc[r]);
                        break;
                }
            }
            //set x if needed
            if (xc.size() > 0)
            {
                // set b
                x.resize(nrows * 2);
                x.set_block_dimx(1);
                x.set_block_dimy(2);
                for (int r = 0; r < nrows; r++)
                {
                    switch (conversion_type)
                    {
                        case 221:
                        case 223:
                            x[2 * r    ] = types::get_re(xc[r]);
                            x[2 * r + 1] = types::get_im(xc[r]);
                            break;

                        case 222:
                        case 224:
                            x[2 * r    ] = types::get_re(xc[r]);
                            x[2 * r + 1] = -types::get_im(xc[r]);
                            break;
                    }
                }
            }
            std::stringstream info;
            info << "Converted complex matrix " << nrows << "x" << nrows << " with " << nnz << " nonzeros to the (2x2) block-ERF - using K" << conversion_type - 220 << " formulation." << std::endl;
            std::cout << info.str();
        }
        // modes 221..224 - convert to the system of the same size but with 2x2 blocks,
        // where each block converted from original Aij value using K1..K4 formulation
        // this switch is for original blocksize of 1
        // (NOTE(review): this branch actually handles block sizes > 1;
        //  the comment above appears copied from the previous branch.)
        else if (conversion_type > 220 && conversion_type < 225)
        {
            // fill CSR values, common for all modes
            int nrows = Ac.get_num_rows();
            int nnz = Ac.get_num_nz();
            A.addProps(Ac.hasProps(DIAG) ? (CSR | DIAG) : CSR);
            int bdimx = 2 * Ac.get_block_dimx();
            int bdimy = 2 * Ac.get_block_dimy();
            A.resize(nrows, nrows, nnz, bdimx, bdimy, 1);
            thrust::copy(Ac.row_offsets.begin(), Ac.row_offsets.end(), A.row_offsets.begin());
            thrust::copy(Ac.col_indices.begin(), Ac.col_indices.end(), A.col_indices.begin());
            thrust::fill(A.values.begin(), A.values.end(), amgx::types::util<RValueTypeA>::get_zero());
            std::cout << "Input block system " << Ac.get_block_dimx() << "x" << Ac.get_block_dimy() << " will be converted to system with blocks " << bdimx << "x" << bdimy << std::endl;
            std::cout << "Converting values...\n";
            // iterate through blocks
            for (int i = 0; i < nnz; i++)
            {
                int block_offsetc = Ac.get_block_dimx() * Ac.get_block_dimy() * i;
                int block_offset = bdimx * bdimy * i;
                // iterate through values in the blocks
                for (int j = 0; j < Ac.get_block_dimx()*Ac.get_block_dimy(); j++)
                {
                    int cx = j / Ac.get_block_dimy();
                    int cy = j % Ac.get_block_dimy();
                    // interleaved blocks
                    int val_offset = block_offset + cx * bdimx + cy;
                    // in-place blocks
                    //int val_offset = block_offset + 2 * cx * bdimx + 2 * cy;
                    switch (conversion_type)
                    {
                        case 221:
                            // interleaved blocks
                            A.values[val_offset                                                  ] = types::get_re(Ac.values[block_offsetc + j]);
                            A.values[val_offset + Ac.get_block_dimx()                            ] = -types::get_im(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 2 * Ac.get_block_size()                        ] = types::get_im(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 2 * Ac.get_block_size() + Ac.get_block_dimx()  ] = types::get_re(Ac.values[block_offsetc + j]);
                            // in-place blocks
                            //A.values[val_offset             ] = types::get_re(Ac.values[block_offsetc + j]);
                            //A.values[val_offset + 1         ] = -types::get_im(Ac.values[block_offsetc + j]);
                            //A.values[val_offset + bdimx     ] = types::get_im(Ac.values[block_offsetc + j]);
                            //A.values[val_offset + 1 + bdimx ] = types::get_re(Ac.values[block_offsetc + j]);
                            break;

                        case 222:
                            A.values[val_offset            ] = types::get_re(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 1        ] = types::get_im(Ac.values[block_offsetc + j]);
                            A.values[val_offset + bdimx] = types::get_im(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 1 + bdimx] = -types::get_re(Ac.values[block_offsetc + j]);
                            break;

                        case 223:
                            A.values[val_offset            ] = types::get_im(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 1        ] = types::get_re(Ac.values[block_offsetc + j]);
                            A.values[val_offset + bdimx] = types::get_re(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 1 + bdimx] = -types::get_im(Ac.values[block_offsetc + j]);
                            break;

                        case 224:
                            A.values[val_offset            ] = types::get_im(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 1        ] = -types::get_re(Ac.values[block_offsetc + j]);
                            A.values[val_offset + bdimx] = types::get_re(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 1 + bdimx] = types::get_im(Ac.values[block_offsetc + j]);
                            break;
                    }
                }
            }
            std::cout << "Compute diagonal\n";
            A.computeDiagonal();
            // if external diagonal - convert those values too
            if (A.hasProps(DIAG))
            {
                std::cout << "Convert diagonal (warning!)\n";
                // NOTE(review): all four cases below apply the same (K1-style)
                // layout, unlike the off-diagonal conversion above which
                // differs per mode — confirm whether this is intentional.
                for (int i = 0; i < Ac.get_num_rows(); i++)
                {
                    int block_offsetc = Ac.diag[i] * Ac.get_block_dimx() * Ac.get_block_dimy();
                    int block_offset = A.diag[i] * bdimx * bdimy;
                    for (int j = 0; j < Ac.get_block_dimx()*Ac.get_block_dimy(); j++)
                    {
                        int val_offset = block_offset + (j / bdimx) * 2 * bdimx + (j % bdimx) * 2;
                        switch (conversion_type)
                        {
                            case 221:
                                A.values[val_offset            ] = types::get_re(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1        ] = -types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + bdimx] = types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1 + bdimx] = types::get_re(Ac.values[block_offsetc + j]);
                                break;

                            case 222:
                                A.values[val_offset            ] = types::get_re(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1        ] = -types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + bdimx] = types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1 + bdimx] = types::get_re(Ac.values[block_offsetc + j]);
                                break;

                            case 223:
                                A.values[val_offset            ] = types::get_re(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1        ] = -types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + bdimx] = types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1 + bdimx] = types::get_re(Ac.values[block_offsetc + j]);
                                break;

                            case 224:
                                A.values[val_offset            ] = types::get_re(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1        ] = -types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + bdimx] = types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1 + bdimx] = types::get_re(Ac.values[block_offsetc + j]);
                                break;
                        }
                    }
                }
            }
            std::cout << "Convert rhs\n";
            b.resize(nrows * bdimy);
            b.set_block_dimx(1);
            b.set_block_dimy(bdimy);
            // interleaved blocks
            for (int r = 0; r < nrows; r++)
            {
                for (int j = 0; j < Ac.get_block_dimy(); j++)
                {
                    switch (conversion_type)
                    {
                        case 221:
                        case 222:
                            b[r * bdimy + j                      ] = types::get_re(bc[r * Ac.get_block_dimy() + j]);
                            b[r * bdimy + j + Ac.get_block_dimy()] = types::get_im(bc[r * Ac.get_block_dimy() + j]);
                            break;

                        case 223:
                        case 224:
                            b[r * bdimy + j                      ] = types::get_im(bc[r * Ac.get_block_dimy() + j]);
                            b[r * bdimy + j + Ac.get_block_dimy()] = types::get_re(bc[r * Ac.get_block_dimy() + j]);
                            break;
                    }
                }
            }
            std::cout << "Convert soln\n";
            //set x if needed
            if (xc.size() > 0)
            {
                x.resize(nrows * bdimx);
                x.set_block_dimx(1);
                x.set_block_dimy(bdimy);
                // interleaved blocks
                for (int r = 0; r < nrows; r++)
                {
                    for (int j = 0; j < Ac.get_block_dimx(); j++)
                    {
                        switch (conversion_type)
                        {
                            case 221:
                            case 223:
                                x[r * bdimx + j                      ] = types::get_re(xc[r * Ac.get_block_dimx() + j]);
                                x[r * bdimx + j + Ac.get_block_dimx()] = types::get_im(xc[r * Ac.get_block_dimx() + j]);
                                break;

                            case 222:
                            case 224:
                                x[r * bdimx + j                      ] = types::get_re(xc[r * Ac.get_block_dimx() + j]);
                                x[r * bdimx + j + Ac.get_block_dimx()] = -types::get_im(xc[r * Ac.get_block_dimx() + j]);
                                break;
                        }
                    }
                }
            }
            std::stringstream info;
            info << "Converted complex matrix " << nrows << "x" << nrows << " with " << nnz << " nonzeros to the (2x2) block-ERF - using K" << conversion_type - 220 << " formulation." << std::endl;
            std::cout << info.str();
        }
    }
};
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
bool ReadMatrixMarket<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::readMatrixMarket(std::ifstream &fin, const char *fname, Matrix_h &A
, Vector_h &b
, Vector_h &x
, const AMG_Config &cfg
, unsigned int props
, const IVector_h &rank_rows // row indices for given rank
)
{
fin.seekg(std::ios::beg);
typedef typename Matrix_h::index_type IndexType;
typedef typename Matrix_h::value_type ValueTypeA;// change later back to load in high precision!
typedef typename TConfig_h::VecPrec ValueTypeB;
std::string warning;
int complex_conversion = cfg.AMG_Config::template getParameter<IndexType>("complex_conversion", "default");
// if we are in the real-valued mode and complex conversion is specified and we are reading actual matrix
if (complex_conversion != 0 && !types::util<ValueTypeA>::is_complex && !types::util<ValueTypeB>::is_complex && !io_config::hasProps(io_config::SIZE, props))
{
// read system as complex valued system of same precision and convert it to our matrices
typedef typename TConfig_h::template setVecPrec<vecRealToComplexPrec<TConfig_h::vecPrec>::CPrec>::Type TConfig_h_cvec;
typedef typename TConfig_h_cvec::template setMatPrec<matRealToComplexPrec<TConfig_h::matPrec>::CPrec>::Type TComplex_h;
ReadAndConvert<TConfig_h, TComplex_h, IVector_h, types::util<ValueTypeA>::is_complex>::readAndConvert(fin, fname, complex_conversion, A, b, x, props, rank_rows);
return true;
}
//skip comments and read amgx relevant parameters
std::list<std::string> nvConfig;
std::list<std::string> mmConfig;
// Workaround section to convert external diagonal into internal
// in CLASSICAL
bool isClassical = false;
std::string solver_scope, solver_value;
std::string precond_scope, precond_value;
AlgorithmType algorithm_s, algorithm_p;
Resources *resources = A.getResources();
if (resources != NULL)
{
resources->getResourcesConfig()->getParameter<std::string>("solver", solver_value, "default", solver_scope);
algorithm_s = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", solver_scope);
resources->getResourcesConfig()->getParameter<std::string>("preconditioner", precond_value, solver_scope, precond_scope);
algorithm_p = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", precond_scope);
if (algorithm_s == CLASSICAL && algorithm_p == CLASSICAL)
{
isClassical = true;
}
}
// End of CLASSICAL workaround
bool has_zero_diagonal_element = false;
bool check_zero_diagonal = false;
const bool boost_zero_diagonal = false;
ValueTypeA boostValue = getBoostValue<ValueTypeA>();
if (boost_zero_diagonal) { check_zero_diagonal = true; }
while (fin.peek() == '%')
{
std::string nvString;
int fpos = fin.tellg(); // store current position
getline(fin, nvString);
std::transform(nvString.begin(), nvString.end(), nvString.begin(), ::tolower);
std::istringstream nvString_s(nvString);
std::string nvFormat;
nvString_s >> nvFormat;
if (nvFormat.size() > 2)
{
if ((nvFormat.substr(2, nvFormat.size()) == "nvamg") ||
(nvFormat.substr(2, nvFormat.size()) == "amgx"))
{
std::copy(std::istream_iterator<std::string>(nvString_s), std::istream_iterator<std::string>(),
std::back_inserter<std::list<std::string> >(nvConfig));
}
if (nvFormat.substr(2, nvFormat.size()) == "matrixmarket")
{
std::copy(std::istream_iterator<std::string>(nvString_s), std::istream_iterator<std::string>(),
std::back_inserter<std::list<std::string> >(mmConfig));
}
}
fin.seekg(fpos, std::ios_base::beg);
fin.ignore(INT_MAX, '\n');
}
// process MatrixMarket config string
bool symmetric = false;
bool skew_symmetric = false;
bool hermitian = false;
if (mmConfig.size() > 0)
{
for (std::list<std::string>::const_iterator it = mmConfig.begin(); it != mmConfig.end(); ++it)
{
if (*it == "symmetric") {symmetric = true; continue;}
if (*it == "complex")
{
if (!types::util<ValueTypeA>::is_complex && complex_conversion == 0)
{
FatalError("Trying to load file with complex matrix to real valued matrix structure", AMGX_ERR_IO);
}
continue;
}
if (*it == "real")
{
if (!types::util<ValueTypeA>::is_real)
{
FatalError("Trying to load file with real matrix to complex valued matrix structure", AMGX_ERR_IO);
}
}
if (*it == "pattern") {FatalError("'pattern' is not supported in %%MatrixMarket format string", AMGX_ERR_IO);}
if (*it == "skew-symmetric") {symmetric = true; skew_symmetric = true; continue;}
//if (*it == "skew-symmetric") {FatalError("'skew-symmetric' is not supported in %%MatrixMarket format string", AMGX_ERR_IO);}
if (*it == "hermitian") {hermitian = true; continue;}
}
}
// process amgx config string
int block_dimx = 1, block_dimy = 1, index_base = 1;
bool diag_prop = false, rhs = false, soln = false, mtx = false, sorted = false;
std::list<int> block_sizes;
if (nvConfig.size() > 0)
{
for (std::list<std::string>::const_iterator it = nvConfig.begin(); it != nvConfig.end(); ++it)
{
if (*it == "diagonal") {diag_prop = true; continue;}
if (*it == "rhs") {rhs = true; continue;}
if (*it == "solution") {soln = true; continue;}
if (*it == "sorted") {sorted = true; continue;}
if (*it == "base0") {index_base = 0; continue;}
if (isdigit((*it)[0])) { int bsize; std::istringstream(*it) >> bsize; block_sizes.push_back(bsize); continue;};
}
}
// CLASSICAL fix
if (sorted && isClassical && diag_prop) { sorted = false; }
// Currently not implemented sorted symmetric matrices
if (sorted && symmetric || sorted && hermitian) { sorted = false; }
if (std::find(mmConfig.begin(), mmConfig.end(), "matrix") != mmConfig.end()) { mtx = true; }
if (block_sizes.size() == 2)
{
block_dimy = block_sizes.back();
block_dimx = block_sizes.front();
}
else if (block_sizes.size() == 1)
{
block_dimy = block_dimx = block_sizes.back();
}
int fpos = fin.tellg(); // store current position
int rows, cols, entries;
//read rows cols entries
fin >> rows >> cols >> entries;
if (rows % block_dimx != 0 || cols % block_dimy != 0 || entries % (block_dimx * block_dimy) != 0)
{
FatalError("Matrix dimensions do not match with block sizes", AMGX_ERR_IO);
}
rows /= block_dimx;
cols /= block_dimy;
entries /= (block_dimx * block_dimy);
if (io_config::hasProps(io_config::SIZE, props))
{
if (complex_conversion != 0 && block_dimy * block_dimx != 1)
{
FatalError("Complex conversion is supported only for non-coupled matrices with blocks of 1x1", AMGX_ERR_IO);
}
if (complex_conversion == 0)
{
A.set_num_rows(rows);
A.set_num_cols(cols);
A.set_block_dimy(block_dimy);
A.set_block_dimx(block_dimx);
}
else if (complex_conversion > 0 && complex_conversion < 5)
{
// general ERF
A.set_num_rows(rows * 2);
A.set_num_cols(cols * 2);
A.set_block_dimy(block_dimy);
A.set_block_dimx(block_dimx);
}
else if (complex_conversion > 220 && complex_conversion < 225)
{
// 2x2 block ERF
A.set_num_rows(rows);
A.set_num_cols(cols);
A.set_block_dimy(block_dimy * 2); // complex 1x1 only supported, which converts to 2x2 real blocks
A.set_block_dimx(block_dimx * 2);
}
else
{
FatalError("Unsupported complex_conversion mode", AMGX_ERR_IO);
}
int num_entries = 0;
if (symmetric || hermitian)
{
int i, j;
int idiag = 0;
ValueTypeA v;
for (int e = 0; e < entries * (block_dimx * block_dimy); e++)
{
fin >> i >> j;
LoadValueFromStream(fin, v);
// skip explicit zeroes, only block_size=1 is supported
if (block_dimx == 1 && block_dimy == 1 && types::util<ValueTypeA>::is_zero(v))
{
continue;
}
if (i == j)
{
idiag++;
}
}
num_entries = 2 * entries - idiag / (block_dimx * block_dimy);
}
else
{
if (isClassical && diag_prop)
{
num_entries = entries + rows;
}
else
{
num_entries = entries;
}
}
if (complex_conversion == 0)
{
A.set_num_nz(num_entries);
}
else if (complex_conversion > 0 && complex_conversion < 5)
{
// general ERF
A.set_num_nz(num_entries * 4);
}
else if (complex_conversion > 220 && complex_conversion < 225)
{
// 2x2 block ERF
A.set_num_nz(num_entries);
}
else
{
FatalError("Unsupported complex_conversion mode", AMGX_ERR_IO);
}
return true;
}
warning = "Reading data...\n";
if (isClassical && diag_prop)
{
warning += "Warning: external diagonal will be converted into internal for CLASSICAL path\n";
}
amgx_output(warning.c_str(), warning.length());
// check for consistent input
if (io_config::hasProps(io_config::MTX, props))
{
if (!mtx)
{
FatalError("Expecting 'matrix' keyword in %%MatrixMarket format string", AMGX_ERR_IO);
}
}
else
{
if (mtx)
{
skip_vals(fin, 3 * entries * (block_dimy * block_dimx));
if (diag_prop)
{
skip_vals(fin, rows * block_dimy * block_dimx);
}
}
}
bool read_all = (rank_rows.size() == 0) ? true : false;
const IVector_h &partRowVec = rank_rows;
int n_rows_part = (read_all) ? rows : partRowVec.size();
std::map<const int, int> GlobalToLocalRowMap; // should try unordered_map
std::map<const int, int>::const_iterator gtl_i;
std::map<const int, int>::const_iterator gtl_j;
// Generate inverse map for faster searching during the read
if (!read_all)
for (int i = 0; i < n_rows_part; i++)
{
GlobalToLocalRowMap.insert(std::pair<const int, int>(partRowVec[i], i));
}
if (io_config::hasProps(io_config::MTX, props))
{
int ival = 0, idiag = 0;
int block_size = block_dimy * block_dimx;
typedef std::map<const int, std::vector<ValueTypeA> > ColValuesMap_t;
typedef std::pair<const int, std::vector<ValueTypeA> > ColValuesPair_t;
typedef std::vector<ValueTypeA> ValuesVector_t;
typedef std::vector<int> ColVector_t;
std::vector<ColValuesMap_t> input;
std::vector<int> nnz_per_row;
//typename Matrix_h::MVector input_sorted_v;
//IVector_h input_sorted_c;
ValuesVector_t input_sorted_v;
ColVector_t input_sorted_c;
std::vector<int> trackDiag;
if (check_zero_diagonal)
{
trackDiag.resize(n_rows_part, 0);
}
if (sorted)
{
nnz_per_row.resize(n_rows_part, 0);
if (read_all)
{
input_sorted_v.resize(entries * block_size);
input_sorted_c.resize(entries);
}
}
else
{
input.resize(n_rows_part);
}
typename Matrix_h::MVector diag(n_rows_part * block_size, types::util<ValueTypeA>::get_zero());
std::vector<ValueTypeA> block_vals(block_size);
//for each entry
int i, j, ii, jj, i_old = -1;
bool skip = false;
bool has_ii = true, has_jj = false;
if (symmetric || hermitian) { has_jj = true; }
int explicit_zeroes = 0;
for (int e = 0; e < entries; e++)
{
for (int kx = 0; kx < block_dimx; kx++)
for (int ky = 0; ky < block_dimy; ky++)
{
//read entry
fin >> i >> j;
LoadValueFromStream(fin, block_vals[kx * block_dimy + ky]);
// check we haven't been given a 0-indexed matrix
if ((i == 0 || j == 0) && index_base == 1)
{
FatalError("Matrix Market format requires 1-based indexing. Use 'base0' AMGX format option to override.", AMGX_ERR_IO);
}
}
// skip explicit zeroes, only block_size=1 is supported
if (block_dimx == 1 && block_dimy == 1 && types::util<ValueTypeA>::is_zero(block_vals[0]))
{
explicit_zeroes++;
if (i == j)
{
idiag++;
has_zero_diagonal_element = true;
if (check_zero_diagonal)
{
trackDiag[i - index_base] = 0;
}
}
continue;
}
else
{
if (i == j)
{
if (check_zero_diagonal)
{
trackDiag[i - index_base] = 1;
}
}
}
i = (i - index_base) / block_dimx;
j = (j - index_base) / block_dimy;
if (!read_all)
if (!symmetric && !hermitian)
{
if (i != i_old) // reduce overhead of searching in GlobalToLocalRowMap
{
has_ii = false;
i_old = i;
gtl_i = GlobalToLocalRowMap.find(i);
if (gtl_i == GlobalToLocalRowMap.end())
{
skip = true;
continue;
}
else
{
has_ii = true;
skip = false;
ii = gtl_i->second;
}
}
else if (skip)
{
continue;
}
}
else
{
ii = i;
jj = j;
if (!read_all)
{
gtl_i = GlobalToLocalRowMap.find(i);
gtl_j = GlobalToLocalRowMap.find(j);
has_ii = has_jj = false;
if (gtl_i != GlobalToLocalRowMap.end()) { has_ii = true; }
if (gtl_j != GlobalToLocalRowMap.end()) { has_jj = true; }
if (!has_ii && !has_jj)
{
continue;
}
else
{
if (has_ii)
{
ii = gtl_i->second;
}
if (has_jj)
{
jj = gtl_j->second;
}
}
}
}
else
{
ii = i;
if (symmetric || hermitian)
{
jj = j;
}
}
if (sorted)
{
nnz_per_row[ii]++;
if (!read_all)
{
input_sorted_v.insert(input_sorted_v.end(), block_vals.begin(), block_vals.end());
input_sorted_c.push_back(j);
}
else
{
std::copy(block_vals.begin(), block_vals.end(), &input_sorted_v[ival * block_size]);
input_sorted_c[ival] = j;
}
ival++;
}
else
{
if (has_ii)
{
ival++;
input[ii].insert(ColValuesPair_t(j, block_vals));
}
if (has_jj)
{
ival++;
if ((skew_symmetric || hermitian) && i != j)
for (int k = 0; k < block_dimx * block_dimy; k++)
{
if (skew_symmetric)
{
block_vals[k] = types::util<ValueTypeA>::invert(block_vals[k]);
}
else if (hermitian)
{
block_vals[k] = types::util<ValueTypeA>::conjugate(block_vals[k]);
}
}
input[jj].insert(ColValuesPair_t(i, block_vals));
}
}
if (i == j)
{
idiag++;
std::copy(block_vals.begin(), block_vals.end(), &diag[ii * block_size]);
}
} // end of entries loop
int diagIdx = 0;
if (check_zero_diagonal)
{
for (int i = 0; i < rows; i++)
{
if (trackDiag[i] == 0)
{
trackDiag[diagIdx] = i;
diagIdx++;
}
}
}
else
{
diagIdx = idiag;
}
if (has_zero_diagonal_element && block_dimx == 1 && block_dimy == 1)
{
if (check_zero_diagonal)
{
printf("Warning! Input matrix has zeroes on diagonal: %d %d\nZero diagonal elements are:\n", rows, diagIdx);
for (int i = 0; i < diagIdx; i++)
{
printf("%d ", trackDiag[i]);
}
printf("\n");
}
}
if (boost_zero_diagonal && has_zero_diagonal_element && block_dimx == 1 && block_dimy == 1)
{
for (int i = 0; i < diagIdx; i++)
{
block_vals[0] = boostValue;
input[ii].insert(ColValuesPair_t(trackDiag[i], block_vals));
}
}
if (!(symmetric || hermitian) && (ival + explicit_zeroes) != entries && read_all)
{
FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO);
}
IndexType n_nonzeros_part;
if (symmetric || hermitian)
{
n_nonzeros_part = ival - idiag;
}
else
{
n_nonzeros_part = ival;
}
//if (symmetric)
// printf("Matrix is symmetric. Counted %d entries and %d diag elements, corresponding to %d nonzeroes\n ", ival, idiag, n_nonzeros_part);
if (sorted && input_sorted_c.size() != n_nonzeros_part)
{
//printf("input_sorted_c.size() = %d n_nonzeros_part = %d\n", input_sorted_c.size(), n_nonzeros_part);
FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO);
}
if (sorted && input_sorted_v.size() != n_nonzeros_part * block_size)
{
//printf("input_sorted_v.size() = %d n_nonzeros_part*block_size = %d\n", input_sorted_v.size(), n_nonzeros_part*block_size);
FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO);
}
A.resize(0, 0, 0);
//A.delProps(COO);
A.addProps(CSR);
if (diag_prop && !isClassical)
{
A.addProps(DIAG);
}
else
{
A.delProps(DIAG);
}
if (diag_prop)
{
LoadVector(fin, read_all, rows, block_size, diag, GlobalToLocalRowMap);
}
if (isClassical && diag_prop)
{
n_nonzeros_part = n_nonzeros_part + n_rows_part;
for (int i = 0; i < n_rows_part; i++)
{
std::copy(&diag[i * block_size], &diag[i * block_size] + block_size, block_vals.begin());
input[i].insert(ColValuesPair_t(read_all ? i : rank_rows[i], block_vals));
}
}
A.resize(n_rows_part, cols, n_nonzeros_part, block_dimx, block_dimy);
ValueTypeA *dia_values_ptr = thrust::raw_pointer_cast(&(A.values[block_dimx * block_dimy * n_nonzeros_part]));
if (A.hasProps(CSR))
{
A.row_offsets[0] = 0;
ival = 0;
if (!sorted)
{
for (int i = 0; i < n_rows_part; i++)
{
for (auto it = input[i].begin(); it != input[i].end(); it++)
{
A.col_indices[ival] = it->first;
for (int k = 0; k < block_size; k++)
{
A.values[ival * block_size + k] = it->second[k];
}
ival++;
}
A.row_offsets[i + 1] = ival;
}
}
else
{
A.row_offsets[0] = 0;
for (int i = 0; i < n_rows_part; i++)
{
A.row_offsets[i + 1] = A.row_offsets[i] + nnz_per_row[i];
}
if (A.row_offsets[n_rows_part] != n_nonzeros_part)
{
FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO);
}
std::copy(input_sorted_c.begin(), input_sorted_c.end(), A.col_indices.begin());
std::copy(input_sorted_v.begin(), input_sorted_v.end(), A.values.begin());
}
}
else
{
FatalError("Matrix Market reader COO output is not supported", AMGX_ERR_IO);
}
if (diag_prop && !isClassical)
{
A.computeDiagonal();
}
if (A.hasProps(DIAG) && !isClassical)
for (int i = 0; i < diag.size(); i++)
{
dia_values_ptr[i] = diag[i];
}
}// End of load matrix
if (!io_config::hasProps(io_config::RHS, props))
if (rhs)
{
skip_vals(fin, rows * block_dimy);
}
if (io_config::hasProps(io_config::RHS, props))
{
b.resize(n_rows_part * block_dimy);
b.set_block_dimy(block_dimy);
b.set_block_dimx(1);
if (rhs)
{
LoadVector(fin, read_all, rows, block_dimy, b, GlobalToLocalRowMap);
}
else
{
//initialize RHS
if (io_config::hasProps(io_config::GEN_RHS, props))
{
Vector_h b0(n_rows_part * block_dimy, types::util<ValueTypeB>::get_one());
b0.set_block_dimy(block_dimy);
b0.set_block_dimx(1);
warning = "RHS vector was not found. Using RHS b=A*e where e=[1,,1]^T\n";
A.set_initialized(true);
multiply(A, b0, b);
A.set_initialized(false);
}
else
{
warning = "RHS vector was not found. Using RHS b=[1,,1]^T\n";
for (int i = 0; i < n_rows_part * block_dimy; i++)
{
b[i] = types::util<ValueTypeB>::get_one();
}
}
amgx_output(warning.c_str(), warning.length());
}
}
// try to read initial guess
if (io_config::hasProps(io_config::SOLN, props))
{
x.resize(n_rows_part * block_dimx);
x.set_block_dimy(block_dimy);
x.set_block_dimx(1);
if (soln)
{
LoadVector(fin, read_all, rows, block_dimx, x, GlobalToLocalRowMap);
}
else
{
warning = "Solution vector was not found. Setting initial solution to x=[0,,0]^T\n";
for (int i = 0; i < n_rows_part * block_dimx; i++)
{
x[i] = types::util<ValueTypeB>::get_zero();
}
}
amgx_output(warning.c_str(), warning.length());
}
if (rank_rows.size() > 0)
{
A.set_is_matrix_read_partitioned(true);
b.set_is_vector_read_partitioned(true);
if (x.size() > 0)
{
x.set_is_vector_read_partitioned(true);
}
}
warning = "";
if (has_zero_diagonal_element || skew_symmetric)
{
warning += "Warning: Matrix has at least one zero on its diagonal\n";
}
warning = +"Finished reading\n";
amgx_output(warning.c_str(), warning.length());
return true;
}
// Distributed version
// Reads a matrix system stored in the legacy "MatrixMarket V2" layout used by
// older AMGX tooling.  Unlike the standard MatrixMarket reader, the size line
// carries six fields:
//     rows cols entries block_dimx block_dimy diag_prop
// and each entry line is "i j" followed by block_dimx*block_dimy values.
//
// Parameters:
//   fin       - open input stream (rewound on entry)
//   fname     - file name (unused; kept for interface parity with other readers)
//   A, b, x   - output matrix, right-hand side, and initial guess
//   cfg       - solver configuration (unused here)
//   props     - io_config property mask; SIZE short-circuits after the header
//   rank_rows - global row indices owned by this rank; empty means read all rows
// Returns true on success; malformed input raises FatalError.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
bool ReadMatrixMarket<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::readMatrixMarketV2(std::ifstream &fin, const char *fname, Matrix_h &A
        , Vector_h &b
        , Vector_h &x
        , const AMG_Config &cfg
        , unsigned int props
        , const IVector_h &rank_rows // row indices for given rank
                                                                                                      )
{
    // NOTE(review): std::ios::beg (value 0) is passed as an absolute position,
    // i.e. this rewinds the stream -- confirm seekg(0, std::ios::beg) was meant.
    fin.seekg(std::ios::beg);
    typedef typename Matrix_h::index_type IndexType;
    typedef typename Matrix_h::value_type ValueTypeA;// change later back to load in high precision!
    typedef typename TConfig_h::VecPrec ValueTypeB;
    //skip comments
    while (fin.peek() == '%')
    {
        fin.ignore(INT_MAX, '\n');
    }
    int rows, cols, entries, block_dimx, block_dimy, diag_prop;
    //read rows cols entries
    fin >> rows >> cols >> entries >> block_dimx >> block_dimy >> diag_prop;
    // SIZE query: report the global dimensions only, without reading any data.
    if (io_config::hasProps(io_config::SIZE, props))
    {
        A.set_num_rows(rows);
        A.set_num_cols(cols);
        A.set_num_nz(entries);
        A.set_block_dimy(block_dimy);
        A.set_block_dimx(block_dimx);
        return true;
    }
    fflush(stdout);
    // An empty rank_rows list means this process reads the whole matrix.
    bool read_all = (rank_rows.size() == 0) ? true : false;
    const IVector_h &partRowVec = rank_rows;
    int n_rows_part = (read_all) ? rows : partRowVec.size();
    std::map<const int, int> GlobalToLocalRowMap; // should try unordered_map
    std::map<const int, int>::const_iterator gtl_it;
    // Generate inverse map for faster searching during the read
    if (!read_all)
        for (int i = 0; i < n_rows_part; i++)
        {
            GlobalToLocalRowMap.insert(std::pair<const int, int>(partRowVec[i], i));
        }
    // Per-row sorted map of (column -> block values); insertion keeps columns
    // ordered so the CSR fill below produces sorted column indices.
    typedef std::map<const int, std::vector<ValueTypeA> > ColValuesMap_t;
    typedef std::pair<const int, std::vector<ValueTypeA> > ColValuesPair_t;
    std::vector<ColValuesMap_t> input(n_rows_part);
    int ival = 0; // number of entries actually kept by this rank
    int block_size = block_dimy * block_dimx;
    typename Matrix_h::MVector diag(n_rows_part * block_size, types::util<ValueTypeA>::get_zero());
    std::vector<ValueTypeA> block_vals(block_size);
    //for each entry
    for (int e = 0; e < entries; e++)
    {
        int i, j;
        //read entry
        fin >> i >> j;
        // check we haven't been given a 0-indexed matrix
        if (i == 0 || j == 0)
        {
            FatalError("Matrix Market format requires 1-based indexing", AMGX_ERR_IO);
        }
        for (int k = 0; k < block_size; k++)
        {
            LoadValueFromStream(fin, block_vals[k]);
        }
        if (!read_all)
        {
            // Keep the entry only if this rank owns row i (1-based -> 0-based).
            gtl_it = GlobalToLocalRowMap.find(i - 1);
            if (gtl_it != GlobalToLocalRowMap.end())
            {
                input[gtl_it->second].insert(ColValuesPair_t(j - 1, block_vals));
                // Remember diagonal blocks separately for the DIAG storage below.
                if (i == j)
                    for (int k = 0; k < block_size; k++)
                    {
                        diag[block_size * gtl_it->second + k] = block_vals[k];
                    }
                ival++;
            }
        }
        else
        {
            input[i - 1].insert(ColValuesPair_t(j - 1, block_vals));
            if (i == j)
                for (int k = 0; k < block_size; k++)
                {
                    diag[block_size * (i - 1) + k] = block_vals[k];
                }
            ival++;
        }
    }
    if (ival != entries && read_all)
    {
        FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO);
    }
    IndexType n_nonzeros_part = ival;
    A.resize(0, 0, 0);
    //A.delProps(COO);
    A.addProps(CSR);
    if (diag_prop)
    {
        A.addProps(DIAG);
    }
    else
    {
        A.delProps(DIAG);
    }
    A.resize(n_rows_part, cols, n_nonzeros_part, block_dimx, block_dimy);
    // External diagonal (if any) is stored after the off-diagonal value blocks.
    ValueTypeA *dia_values_ptr = thrust::raw_pointer_cast(&(A.values[block_dimx * block_dimy * n_nonzeros_part]));
    if (A.hasProps(CSR))
    {
        // Flatten the per-row sorted maps into CSR arrays.
        A.row_offsets[0] = 0;
        ival = 0;
        for (int i = 0; i < n_rows_part; i++)
        {
            for (auto it = input[i].begin(); it != input[i].end(); it++)
            {
                A.col_indices[ival] = it->first;
                for (int k = 0; k < block_size; k++)
                {
                    A.values[ival * block_size + k] = it->second[k];
                }
                ival++;
            }
            A.row_offsets[i + 1] = ival;
        }
    }
    else
    {
        FatalError("Matrix Market reader COO output is not supported", AMGX_ERR_IO);
    }
    if (diag_prop)
    {
        // Read the external diagonal that follows the entries in the file.
        A.computeDiagonal();
        LoadVector(fin, read_all, rows, block_size, diag, GlobalToLocalRowMap);
    }
    if (A.hasProps(DIAG))
        for (int i = 0; i < diag.size(); i++)
        {
            dia_values_ptr[i] = diag[i];
        }
    if (io_config::hasProps(io_config::RHS, props))
    {
        b.resize(n_rows_part * block_dimy);
        b.set_block_dimy(block_dimy);
        //initialize RHS
        for (int i = 0; i < n_rows_part * block_dimy; i++)
        {
            b[i] = types::util<ValueTypeB>::get_one();
        }
        //read num rows
        fin >> rows;
        LoadVector(fin, read_all, rows / block_dimy, block_dimy, b, GlobalToLocalRowMap);
    }
    // try to read initial guess
    if (io_config::hasProps(io_config::SOLN, props))
    {
        fin >> rows;
        if (rows)
        {
            x.resize(n_rows_part * block_dimx);
            // NOTE(review): block_dimy is set from block_dimx here (the V1 reader
            // uses block_dimy) -- confirm this is intended.
            x.set_block_dimy(block_dimx);
            LoadVector(fin, read_all, rows / block_dimx, block_dimx, x, GlobalToLocalRowMap);
        }
        else
        {
            x.resize(0);
        }
    }
    // Mark outputs as partitioned when only a subset of rows was loaded.
    if (rank_rows.size() > 0)
    {
        A.set_is_matrix_read_partitioned(true);
        b.set_is_vector_read_partitioned(true);
        if (x.size() > 0)
        {
            x.set_is_vector_read_partitioned(true);
        }
    }
    return true;
}
// Copies `size` elements from `src` into `dst`, converting each element to the
// destination type via static_cast.  Generic fallback; type pairs that cannot
// be static_cast (e.g. complex down-conversion) are handled by specializations.
template <typename TSRC, typename TDST>
void val_copy(const TSRC *src, TDST *dst, int size)
{
    int k = 0;
    while (k < size)
    {
        dst[k] = static_cast<TDST>(src[k]);
        ++k;
    }
}
// Down-converts double-precision complex values to single-precision complex.
// The CUDA/HIP complex structs cannot be static_cast between each other, so
// this specialization routes through types::util<...>::to_downtype instead.
template <>
void val_copy<hipDoubleComplex, hipComplex>(const hipDoubleComplex *src, hipComplex *dst, int size)
{
    for (int i = 0; i < size; i++)
    {
        dst[i] = types::util<hipDoubleComplex>::to_downtype(src[i]);
    }
}
/* Reads a matrix system from the NVAMG binary format.
 *
 * File layout:
 *   - header string "%%NVAMGBinary\n"
 *   - 9 x uint32 flags: [is_mtx, is_rhs, is_soln, matrix_format, diag,
 *                        block_dimx, block_dimy, num_rows, num_nz]
 *   - row offsets:    (num_rows + 1) ints
 *   - column indices:  num_nz ints
 *   - values:          num_nz * block_dimx * block_dimy in UpValueTypeA
 *   - optional external diagonal, RHS and solution sections
 * Values are stored on disk in the build's "up" precision (UpValueTypeA,
 * double / double-complex) and cast down on load via val_copy().
 *
 * When rank_rows is non-empty, only the listed global rows are loaded
 * (partitioned read via fseek per row); otherwise the whole system is read.
 * Returns true on success; IO failures raise FatalError.
 */
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
bool ReadNVAMGBinary<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::read(std::ifstream &finstr, const char *fnamec
        , Matrix_h &A
        , Vector_h &b, Vector_h &x
        , const AMG_Config &cfg
        , unsigned int props
        , const IVector_h &rank_rows
                                                                                       )
{
    typedef typename Matrix_h::index_type IndexType;
    typedef typename Matrix_h::value_type ValueTypeA;
    typedef typename Vector_h::value_type ValueTypeB; // change back to matrix type later
    typedef typename types::util<ValueTypeA>::uptype UpValueTypeA;
    size_t is_read;
    std::string err;
    // The C++ stream is not used here: reopen with stdio for binary fseek/fread.
    finstr.close();
    FILE *fin = fopen(fnamec, "rb");
    if (fin == NULL)
    {
        err = "Error: couldn't open file " + std::string(fnamec);
        // Bug fix: previously only `err` was set and execution fell through to
        // fread(..., NULL), crashing on a missing file. Fail loudly instead.
        FatalError(err, AMGX_ERR_IO);
    }
    char text_header[255];
    uint32_t system_flags [9];
    is_read = fread(text_header, sizeof(char), strlen("%%NVAMGBinary\n"), fin);
    is_read = fread(system_flags, sizeof(uint32_t), 9, fin);
    //bool is_mtx = system_flags[0];
    bool is_rhs = system_flags[1];
    bool is_soln = system_flags[2];
    uint32_t matrix_format = system_flags[3];
    bool diag = system_flags[4];
    uint32_t block_dimx = system_flags[5];
    uint32_t block_dimy = system_flags[6];
    uint32_t num_rows = system_flags[7];
    uint32_t num_nz = system_flags[8];
    // SIZE query: report the global dimensions only, without reading any data.
    if (io_config::hasProps(io_config::SIZE, props))
    {
        A.set_num_rows(num_rows);
        A.set_num_cols(num_rows);
        A.set_num_nz(num_nz);
        A.set_block_dimy(block_dimy);
        A.set_block_dimx(block_dimx);
        fclose(fin);
        return true;
    }
    long int data_pos = ftell(fin); // start of the row-offset table
    // With no rank_rows list, read every row: build an identity row list.
    // Otherwise alias the caller's (non-owned) list.
    IVector_h *partRowVec_p = NULL;
    if (rank_rows.size() == 0)
    {
        partRowVec_p = new IVector_h(num_rows);
        thrust::sequence(partRowVec_p->begin(), partRowVec_p->end());
        cudaCheckError();
    }
    else
    {
        partRowVec_p = (IVector_h *) &rank_rows;
    }
    IVector_h &partRowVec = *partRowVec_p;
    int n_rows_part = partRowVec.size();
    IVector_h row_offsets_part(n_rows_part + 1);
    IVector_h row_start_glb(n_rows_part); // Store global row start positions here
    int beginEnd[2];
    int n_nonzeros_part = 0;
    // Fetch [begin, end) from the global row-offset table for each owned row
    // and build the compacted local row offsets.
    for (int i = 0; i < partRowVec.size(); i++)
    {
        if (fseek(fin, data_pos + partRowVec[i]*sizeof(int), SEEK_SET) != 0)
        {
            FatalError("fseek error", AMGX_ERR_IO);
        }
        is_read = fread(beginEnd, sizeof(int), 2, fin);
        if (is_read != 2)
        {
            err = "fread failed reading row_offsets, exiting";
            FatalError(err, AMGX_ERR_IO);
        }
        row_start_glb[i] = beginEnd[0];
        row_offsets_part[i] = n_nonzeros_part;
        n_nonzeros_part += beginEnd[1] - beginEnd[0];
    }
    row_offsets_part[n_rows_part] = n_nonzeros_part;
    A.delProps(DIAG | COLORING);
    // The file's scalar type must agree with the build's (real vs complex).
    if ((matrix_format & COMPLEX) && types::util<ValueTypeA>::is_real)
    {
        FatalError("Matrix is in complex format, but reading as real AMGX mode", AMGX_ERR_IO);
    }
    if (!(matrix_format & COMPLEX) && types::util<ValueTypeA>::is_complex)
    {
        FatalError("Matrix is in real format, but reading as complex AMGX mode", AMGX_ERR_IO);
    }
    if (diag)
    {
        A.addProps(DIAG);
    }
    if (!(matrix_format & 1))
    {
        A.addProps(CSR);
    }
    else
    {
        FatalError("COO matrix binary format is not supported for reading.", AMGX_ERR_IO);
    }
    A.resize(n_rows_part, num_rows, n_nonzeros_part, block_dimx, block_dimy);
    IndexType *row_offsets_ptr = A.row_offsets.raw();
    IndexType *column_indices_ptr = A.col_indices.raw();
    ValueTypeA *nonzero_values_ptr = A.values.raw();
    // External diagonal (if any) lives after the off-diagonal value blocks.
    ValueTypeA *dia_values_ptr = thrust::raw_pointer_cast(&(A.values[block_dimy * block_dimx * n_nonzeros_part]));
    //Transfer row_offsets to matrix
    thrust::copy(row_offsets_part.begin(), row_offsets_part.end(), A.row_offsets.begin());
    cudaCheckError();
    data_pos += (num_rows + 1) * sizeof(int); // skip past the row-offset table
    n_nonzeros_part = 0;
    int row_nnz;
    // Read the column indices of each owned row directly into the matrix.
    for (int i = 0; i < partRowVec.size(); i++)
    {
        if (fseek(fin, data_pos + sizeof(int)*row_start_glb[i], SEEK_SET) != 0)
        {
            FatalError("fseek error", AMGX_ERR_IO);
        }
        row_nnz = row_offsets_part[i + 1] - row_offsets_part[i];
        is_read = fread(column_indices_ptr + n_nonzeros_part, sizeof(int), row_nnz, fin);
        n_nonzeros_part += row_nnz;
        if (is_read != row_nnz)
        {
            err = "fread failed reading column_indices, exiting";
            FatalError(err, AMGX_ERR_IO);
        }
    }
    data_pos += num_nz * sizeof(int); // skip past the column-index table
    //temporary array for storing UpValueTypeA data
    // double storage for complex
    std::vector< UpValueTypeA > temp(n_nonzeros_part * block_dimy * block_dimx);
    n_nonzeros_part = 0;
    // Read the off-diagonal values of each owned row at on-disk precision.
    for (int i = 0; i < partRowVec.size(); i++)
    {
        if (fseek(fin, data_pos + sizeof(UpValueTypeA)*row_start_glb[i] * block_dimy * block_dimx, SEEK_SET) != 0)
        {
            FatalError("fseek error", AMGX_ERR_IO);
        }
        row_nnz = row_offsets_part[i + 1] - row_offsets_part[i];
        //read in data as UpValueTypeA
        is_read = fread(&temp[n_nonzeros_part * block_dimy * block_dimx], sizeof(UpValueTypeA), row_nnz * block_dimy * block_dimx, fin);
        n_nonzeros_part += row_nnz;
        if (is_read != row_nnz * block_dimy * block_dimx)
        {
            err = "fread failed reading off-diagonal values, exiting";
            FatalError(err, AMGX_ERR_IO);
        }
    }
    //copy with cast data to ValueTypeA
    val_copy(temp.data(), nonzero_values_ptr, n_nonzeros_part * block_dimy * block_dimx);
    data_pos += sizeof(UpValueTypeA) * num_nz * block_dimx * block_dimy;
    if (diag)
    {
        temp.resize(n_rows_part * block_dimx * block_dimy);
        //read in diagonal data at on-disk precision
        for (int i = 0; i < partRowVec.size(); i++)
        {
            if (fseek(fin, data_pos + sizeof(UpValueTypeA) * partRowVec[i] * block_dimx * block_dimy, SEEK_SET) != 0)
            {
                FatalError("fseek error", AMGX_ERR_IO);
            }
            is_read = fread(&temp[i * block_dimx * block_dimy], sizeof(UpValueTypeA), block_dimx * block_dimy, fin);
            if (is_read != block_dimx * block_dimy)
            {
                err = "fread failed reading diagonal values, exiting";
                FatalError(err, AMGX_ERR_IO);
            }
        }
        //copy with cast data to ValueTypeA
        val_copy(temp.data(), dia_values_ptr, n_rows_part * block_dimx * block_dimy);
        // Bug fix: the diagonal is stored as UpValueTypeA on disk (see the fread
        // above); advancing by sizeof(double) desynchronized data_pos for complex
        // builds and corrupted the subsequent RHS/solution reads.
        data_pos += sizeof(UpValueTypeA) * num_rows * block_dimx * block_dimy;
    }
    else // fill last values item with zeros
    {
        thrust::fill(A.values.begin() + A.get_num_nz() * block_dimy * block_dimx, A.values.end(), types::util<ValueTypeA>::get_zero());
        cudaCheckError();
    }
    //printf("Reading values\n"); fflush(stdout);
    b.resize(n_rows_part * block_dimy);
    b.set_block_dimy(block_dimy);
    b.set_block_dimx(1);
    temp.resize(n_rows_part * block_dimy);
    if (is_rhs)
    {
        for (int i = 0; i < partRowVec.size(); i++)
        {
            if (fseek(fin, data_pos + sizeof(UpValueTypeA) * partRowVec[i] * block_dimy, SEEK_SET) != 0)
            {
                FatalError("fseek error", AMGX_ERR_IO);
            }
            //read in data at on-disk precision (double / doublecomplex)
            is_read = fread(&temp[i * block_dimy], sizeof(UpValueTypeA), block_dimy, fin);
            // if the rhs exists, we must have read the whole thing
            if (is_read != block_dimy)
            {
                err = "fread failed reading rhs, exiting";
                FatalError(err, AMGX_ERR_IO);
            }
        }
        //cast data to ValueTypeB
        val_copy(temp.data(), b.raw(), n_rows_part * block_dimy);
        data_pos += sizeof(UpValueTypeA) * num_rows * block_dimy;
    }
    else
    {
        // No RHS section in the file: default to b = [1,...,1]^T.
        thrust::fill(b.begin(), b.end(), types::util<ValueTypeB>::get_one());
        cudaCheckError();
    }
    x.resize(0);
    if (is_soln)
    {
        x.resize(n_rows_part * block_dimx);
        x.set_block_dimx(1);
        x.set_block_dimy(block_dimy);
        temp.resize(n_rows_part * block_dimx);
        for (int i = 0; i < partRowVec.size(); i++)
        {
            if (fseek(fin, data_pos + sizeof(UpValueTypeA) * partRowVec[i] * block_dimx, SEEK_SET) != 0)
            {
                FatalError("fseek error", AMGX_ERR_IO);
            }
            //read in data at on-disk precision
            is_read = fread(&temp[i * block_dimx], sizeof(UpValueTypeA), block_dimx, fin);
            if (is_read != block_dimx)
            {
                // Bug fix: the message previously said "rhs" for a failed
                // solution read, misdirecting diagnosis.
                err = "fread failed reading solution, exiting";
                FatalError(err, AMGX_ERR_IO);
            }
        }
        val_copy(temp.data(), x.raw(), n_rows_part * block_dimx);
    }
    fclose(fin);
    // Mark outputs as partitioned when only a subset of rows was loaded;
    // otherwise release the identity row list allocated above.
    if (rank_rows.size() > 0)
    {
        A.set_is_matrix_read_partitioned(true);
        b.set_is_vector_read_partitioned(true);
        if (x.size() > 0)
        {
            x.set_is_vector_read_partitioned(true);
        }
    }
    else
    {
        delete partRowVec_p;
    }
    return true;
}
/****************************************
 * Explicit instantiations
 ***************************************/
// Instantiate the MatrixMarket reader for every configured real and complex build mode.
#define AMGX_CASE_LINE(CASE) template class ReadMatrixMarket<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// Instantiate the NVAMG binary reader for every configured real and complex build mode.
#define AMGX_CASE_LINE(CASE) template class ReadNVAMGBinary<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
| 279561b9989297e92918460f77eb16fefb922668.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <readers.h>
#include <multiply.h>
#include <fstream>
#include <ios>
#include <iostream>
#include <map>
#include <iterator>
#include <algorithm>
#include <amgx_types/util.h>
#include <amgx_types/io.h>
namespace amgx
{
// Reads one scalar value from the stream into `val`.  The primary template is
// only declared; the specializations below define parsing for each supported
// scalar type.
template <typename T>
void LoadValueFromStream(std::ifstream &fin, T &val);
// Real scalars: a single whitespace-separated token.
template <>
void LoadValueFromStream(std::ifstream &fin, float &val)
{
    fin >> val;
}
template <>
void LoadValueFromStream(std::ifstream &fin, double &val)
{
    fin >> val;
}
// Complex scalars: two consecutive tokens (real part, then imaginary part).
template <>
void LoadValueFromStream(std::ifstream &fin, cuComplex &val)
{
    float x, y;
    fin >> x >> y;
    val = make_cuComplex(x, y);
}
template <>
void LoadValueFromStream(std::ifstream &fin, cuDoubleComplex &val)
{
    double x, y;
    fin >> x >> y;
    val = make_cuDoubleComplex(x, y);
}
// Reads rows_total blocks of block_size values from the stream into `vec`.
// When read_all is true every row is stored; otherwise only rows present in
// GlobalToLocalRowMap are kept (values for other rows are parsed and
// discarded), with the map translating a global row index to the local slot.
// Returns false if the stream is not readable, true otherwise; raises a
// FatalError if the number of values stored does not fill `vec` exactly.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
bool LoadVector(std::ifstream &fin, bool read_all, int rows_total, int block_size, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &vec, const std::map<const int, int> &GlobalToLocalRowMap = std::map<const int, int>())
{
    std::map<const int, int>::const_iterator gtl_it;
    //std::vector<double> block_vals(block_size);
    typedef typename VecPrecisionMap<t_vecPrec>::Type value_type;
    std::vector<value_type> block_vals(block_size);
    //for each entry
    int idx = 0; // count of values actually written into vec
    if (fin)
    {
        for (int i = 0; i < rows_total; i++)
        {
            //read entry
            for (int k = 0; k < block_size; k++)
            {
                LoadValueFromStream(fin, block_vals[k]);
            }
            //fin >> block_vals[k];
            if (read_all)
                for (int k = 0; k < block_size; k++)
                {
                    vec[i * block_size + k] = block_vals[k];
                    idx++;
                }
            else
            {
                // Keep the row only if this rank owns it.
                gtl_it = GlobalToLocalRowMap.find(i);
                if (gtl_it != GlobalToLocalRowMap.end())
                {
                    for (int k = 0; k < block_size; k++)
                    {
                        vec[gtl_it->second * block_size + k] = block_vals[k];
                        idx++;
                    }
                }
            }
        }
        if (idx != vec.size())
        {
            FatalError("Matrix Market reader rows mismatch", AMGX_ERR_IO);
        }
    }
    else
    {
        return false;
    }
    return true;
}
// Distributed version
// Advances the stream past `num_values` whitespace-separated numeric tokens,
// discarding them. Used to seek over matrix/vector sections the caller
// does not need.
void skip_vals(std::ifstream &fin, int num_values)
{
    double discarded;
    int remaining = num_values;
    while (remaining-- > 0)
    {
        fin >> discarded;
    }
}
// Returns the small constant used to "boost" (replace) zero diagonal entries
// when the boost_zero_diagonal option is enabled in the MatrixMarket reader.
// The primary template is only declared; each scalar type has a specialization.
template <typename T>
T getBoostValue();
template <>
float getBoostValue()
{
    return 1.e-6f;
}
template <>
double getBoostValue()
{
    return 1.e-6;
}
// Complex boost values carry the perturbation in the real part only.
template <>
cuComplex getBoostValue()
{
    return make_cuComplex(1e-6f, 0.f);
}
template <>
cuDoubleComplex getBoostValue()
{
    return make_cuDoubleComplex(1e-6, 0.);
}
// Compile-time map from a real vector precision to its complex counterpart
// (double -> double-complex, float -> complex); any other precision maps to
// itself. Used when converting real-valued configs to complex ERF readers.
template<AMGX_VecPrecision prec>
struct vecRealToComplexPrec
{
    static const AMGX_VecPrecision CPrec = prec;
};
template <>
struct vecRealToComplexPrec<AMGX_vecDouble>
{
    static const AMGX_VecPrecision CPrec = AMGX_vecDoubleComplex;
};
template <>
struct vecRealToComplexPrec<AMGX_vecFloat>
{
    static const AMGX_VecPrecision CPrec = AMGX_vecComplex;
};
// Compile-time map from a real matrix precision to its complex counterpart
// (double -> double-complex, float -> complex); any other precision maps to
// itself. Companion of vecRealToComplexPrec above.
template<AMGX_MatPrecision prec>
struct matRealToComplexPrec
{
    static const AMGX_MatPrecision CPrec = prec;
};
template <>
struct matRealToComplexPrec<AMGX_matDouble>
{
    static const AMGX_MatPrecision CPrec = AMGX_matDoubleComplex;
};
template <>
struct matRealToComplexPrec<AMGX_matFloat>
{
    static const AMGX_MatPrecision CPrec = AMGX_matComplex;
};
// Reads a complex-valued system and converts it to an equivalent real (ERF)
// representation. The init_flag parameter selects the implementation:
// the `true` specialization below is chosen when the build mode is itself
// complex, where the conversion is meaningless, so it always fails.
// (The `false` specialization performs the actual read-and-convert.)
template <class TReal, class TComplex, class PartVec, bool init_flag>
struct ReadAndConvert;
template <class TReal, class TComplex, class PartVec>
struct ReadAndConvert<TReal, TComplex, PartVec, true>
{
    static void readAndConvert(std::ifstream &fin, const char *fname, int conversion_type
                               , Matrix<TReal> &A
                               , Vector<TReal> &b
                               , Vector<TReal> &x
                               , unsigned int props
                               , const PartVec &rank_rows)
    {
        // NOTE(review): the message misspells "Conversion"; left unchanged here
        // because it is a runtime string.
        FatalError("Converversion from complex matrix to ERF, but one of the complex modes is specified", AMGX_ERR_IO);
    }
};
// Real-target specialization: reads a complex-valued MatrixMarket system and
// converts it to a real Equivalent Real Formulation (ERF).
//
// Supported conversion_type values:
//   1..4     - scalar ERF: the system doubles in size (2*n rows), laid out as
//              two stacked blocks, using the K1..K4 formulations.
//   221..224 - block ERF: same number of rows, but each scalar entry becomes
//              a 2x2 real block built with the K1..K4 formulations.
// Other values fall through without touching A/b/x.
template <class TReal, class TComplex, class PartVec>
struct ReadAndConvert<TReal, TComplex, PartVec, false>
{
    static void readAndConvert(std::ifstream &fin, const char *fname, int conversion_type
                               , Matrix<TReal> &A
                               , Vector<TReal> &b
                               , Vector<TReal> &x
                               , unsigned int props
                               , const PartVec &rank_rows)
    {
        AMG_Config tcfg;
        // Temporary complex-valued system the file is read into before conversion.
        Matrix<TComplex> Ac;
        Vector<TComplex> xc, bc;
        typedef typename TReal::MatPrec RValueTypeA;
        typedef typename TReal::VecPrec RValueTypeB;
        typedef typename TComplex::MatPrec CValueTypeA;
        typedef typename TComplex::VecPrec CValueTypeB;
        printf("ERF conversion: reading complex valued system\n");
        fflush(stdout);
        ReadMatrixMarket<TComplex>::readMatrixMarket(fin, fname, Ac, bc, xc, tcfg);
        // modes = 1..4 - convert to the scalar system of 2x size using K1..K4 formulation
        if (conversion_type > 0 && conversion_type < 5)
        {
            // fill CSR values, common for all modes
            int cnrows = Ac.get_num_rows();
            int cnnz = Ac.get_num_nz();
            int nrows = cnrows * 2;
            int nnz = Ac.get_num_nz() * 4;
            A.addProps(CSR);
            A.resize(nrows, nrows, nnz);
            // set row offsets
            // Row i of the real system mirrors complex row i (doubled nnz);
            // row i+cnrows is the same pattern shifted into the lower half.
            for (int i = 0; i < cnrows; i++)
            {
                A.row_offsets[i] = Ac.row_offsets[i] * 2;
                A.row_offsets[i + cnrows] = Ac.row_offsets[i] * 2 + cnnz * 2;
            }
            A.row_offsets[nrows] = nnz;
            // set col indices
            // Each real row holds the complex row's columns twice: once as-is
            // (left block column) and once shifted by nrows/2 (right block column).
            for (int r = 0; r < nrows ; r++)
            {
                int *Ac_col_ptr = Ac.col_indices.raw() + Ac.row_offsets[r % cnrows];
                int row_nnz = A.row_offsets[r + 1] - A.row_offsets[r];
                for (int c = 0; c < (row_nnz / 2); c++)
                {
                    A.col_indices[A.row_offsets[r] + c] = Ac_col_ptr[c];
                    A.col_indices[A.row_offsets[r] + c + row_nnz / 2] = Ac_col_ptr[c] + nrows / 2;
                }
            }
            // set values
            // Offsets: +0 -> top-left, +row_nnz -> top-right,
            // +2*cnnz -> bottom-left, +row_nnz+2*cnnz -> bottom-right block.
            for (int r = 0; r < cnrows; r++)
            {
                CValueTypeA *Ac_values = Ac.values.raw() + Ac.row_offsets[r];
                int row_nnz = Ac.row_offsets[r + 1] - Ac.row_offsets[r];
                for (int c = 0; c < row_nnz; c++)
                {
                    switch (conversion_type)
                    {
                        case 1:
                            // K1: [re -im; im re]
                            A.values[A.row_offsets[r] + c] = types::get_re(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz] = -types::get_im(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + 2 * cnnz] = types::get_im(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz + 2 * cnnz] = types::get_re(Ac_values[c]);
                            break;

                        case 2:
                            // K2: [re im; im -re]
                            A.values[A.row_offsets[r] + c] = types::get_re(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz] = types::get_im(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + 2 * cnnz] = types::get_im(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz + 2 * cnnz] = -types::get_re(Ac_values[c]);
                            break;

                        case 3:
                            // K3: [im re; re -im]
                            A.values[A.row_offsets[r] + c] = types::get_im(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz] = types::get_re(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + 2 * cnnz] = types::get_re(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz + 2 * cnnz] = -types::get_im(Ac_values[c]);
                            break;

                        case 4:
                            // K4: [im -re; re im]
                            A.values[A.row_offsets[r] + c] = types::get_im(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz] = -types::get_re(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + 2 * cnnz] = types::get_re(Ac_values[c]);
                            A.values[A.row_offsets[r] + c + row_nnz + 2 * cnnz] = types::get_im(Ac_values[c]);
                            break;
                    }
                }
            }
            // set b
            // RHS layout matches the stacked formulation: first cnrows entries
            // take one component of bc, the next cnrows take the other.
            b.set_block_dimx(1);
            b.set_block_dimy(1);
            b.resize(nrows);
            for (int r = 0; r < cnrows; r++)
            {
                switch (conversion_type)
                {
                    case 1:
                    case 2:
                        b[r] = types::get_re(bc[r]);
                        b[r + cnrows] = types::get_im(bc[r]);
                        break;

                    case 3:
                    case 4:
                        b[r] = types::get_im(bc[r]);
                        b[r + cnrows] = types::get_re(bc[r]);
                        break;
                }
            }
            //set x if needed
            x.set_block_dimx(1);
            x.set_block_dimy(1);
            if (xc.size() > 0)
            {
                // set b
                x.resize(nrows);
                for (int r = 0; r < cnrows; r++)
                {
                    switch (conversion_type)
                    {
                        case 1:
                        case 3:
                            x[r] = types::get_re(xc[r]);
                            x[r + cnrows] = types::get_im(xc[r]);
                            break;

                        case 2:
                        case 4:
                            // K2/K4 solve for the conjugate unknown, hence -im.
                            x[r] = types::get_re(xc[r]);
                            x[r + cnrows] = -types::get_im(xc[r]);
                            break;
                    }
                }
            }
            A.computeDiagonal();
            std::stringstream info;
            info << "Converted complex matrix " << cnrows << "x" << cnrows << " with " << cnnz << " nonzeros to the ERF - using K" << conversion_type << " formulation." << std::endl;
            std::cout << info.str();
        }
        // modes 221..224 - convert to the system of the same size but with 2x2 blocks,
        // where each block converted from original Aij value using K1..K4 formulation
        // this switch is for original blocksize of 1
        else if (conversion_type > 220 && conversion_type < 225 && Ac.get_block_dimy()*Ac.get_block_dimx() == 1)
        {
            // fill CSR values, common for all modes
            int nrows = Ac.get_num_rows();
            int nnz = Ac.get_num_nz();
            A.addProps(Ac.hasProps(DIAG) ? CSR | DIAG : CSR);
            A.resize(nrows, nrows, nnz, 2 * Ac.get_block_dimx(), 2 * Ac.get_block_dimy(), 1);
            // Sparsity pattern is unchanged; only values become 2x2 blocks.
            thrust::copy(Ac.row_offsets.begin(), Ac.row_offsets.end(), A.row_offsets.begin());
            thrust::copy(Ac.col_indices.begin(), Ac.col_indices.end(), A.col_indices.begin());
            for (int i = 0; i < nnz; i++)
            {
                // Each complex scalar becomes a row-major 2x2 block at values[4*i..4*i+3].
                switch (conversion_type)
                {
                    case 221:
                        // K1 block: [re -im; im re]
                        A.values[4 * i    ] = types::get_re(Ac.values[i]);
                        A.values[4 * i + 1] = -types::get_im(Ac.values[i]);
                        A.values[4 * i + 2] = types::get_im(Ac.values[i]);
                        A.values[4 * i + 3] = types::get_re(Ac.values[i]);
                        break;

                    case 222:
                        A.values[4 * i    ] = types::get_re(Ac.values[i]);
                        A.values[4 * i + 1] = types::get_im(Ac.values[i]);
                        A.values[4 * i + 2] = types::get_im(Ac.values[i]);
                        A.values[4 * i + 3] = -types::get_re(Ac.values[i]);
                        break;

                    case 223:
                        A.values[4 * i    ] = types::get_im(Ac.values[i]);
                        A.values[4 * i + 1] = types::get_re(Ac.values[i]);
                        A.values[4 * i + 2] = types::get_re(Ac.values[i]);
                        A.values[4 * i + 3] = -types::get_im(Ac.values[i]);
                        break;

                    case 224:
                        A.values[4 * i    ] = types::get_im(Ac.values[i]);
                        A.values[4 * i + 1] = -types::get_re(Ac.values[i]);
                        A.values[4 * i + 2] = types::get_re(Ac.values[i]);
                        A.values[4 * i + 3] = types::get_im(Ac.values[i]);
                        break;
                }
            }
            A.computeDiagonal();
            // RHS becomes interleaved (re, im) or (im, re) pairs per row.
            b.resize(nrows * 2);
            b.set_block_dimx(1);
            b.set_block_dimy(2);
            for (int r = 0; r < nrows; r++)
            {
                switch (conversion_type)
                {
                    case 221:
                    case 222:
                        b[2 * r    ] = types::get_re(bc[r]);
                        b[2 * r + 1] = types::get_im(bc[r]);
                        break;

                    case 223:
                    case 224:
                        b[2 * r    ] = types::get_im(bc[r]);
                        b[2 * r + 1] = types::get_re(bc[r]);
                        break;
                }
            }
            //set x if needed
            if (xc.size() > 0)
            {
                // set b
                x.resize(nrows * 2);
                x.set_block_dimx(1);
                x.set_block_dimy(2);
                for (int r = 0; r < nrows; r++)
                {
                    switch (conversion_type)
                    {
                        case 221:
                        case 223:
                            x[2 * r    ] = types::get_re(xc[r]);
                            x[2 * r + 1] = types::get_im(xc[r]);
                            break;

                        case 222:
                        case 224:
                            x[2 * r    ] = types::get_re(xc[r]);
                            x[2 * r + 1] = -types::get_im(xc[r]);
                            break;
                    }
                }
            }
            std::stringstream info;
            info << "Converted complex matrix " << nrows << "x" << nrows << " with " << nnz << " nonzeros to the (2x2) block-ERF - using K" << conversion_type - 220 << " formulation." << std::endl;
            std::cout << info.str();
        }
        // modes 221..224 - convert to the system of the same size but with 2x2 blocks,
        // where each block converted from original Aij value using K1..K4 formulation
        // this switch is for original blocksize of 1
        // NOTE(review): despite the comment above, this branch handles the
        // *general* input block size (the 1x1 case is caught by the branch
        // before it) -- each bdimx/2 x bdimy/2 complex block expands to a
        // bdimx x bdimy real block.
        else if (conversion_type > 220 && conversion_type < 225)
        {
            // fill CSR values, common for all modes
            int nrows = Ac.get_num_rows();
            int nnz = Ac.get_num_nz();
            A.addProps(Ac.hasProps(DIAG) ? (CSR | DIAG) : CSR);
            int bdimx = 2 * Ac.get_block_dimx();
            int bdimy = 2 * Ac.get_block_dimy();
            A.resize(nrows, nrows, nnz, bdimx, bdimy, 1);
            thrust::copy(Ac.row_offsets.begin(), Ac.row_offsets.end(), A.row_offsets.begin());
            thrust::copy(Ac.col_indices.begin(), Ac.col_indices.end(), A.col_indices.begin());
            // Zero-fill first: the loops below only write the positions a
            // given formulation touches.
            thrust::fill(A.values.begin(), A.values.end(), amgx::types::util<RValueTypeA>::get_zero());
            std::cout << "Input block system " << Ac.get_block_dimx() << "x" << Ac.get_block_dimy() << " will be converted to system with blocks " << bdimx << "x" << bdimy << std::endl;
            std::cout << "Converting values...\n";
            // iterate through blocks
            for (int i = 0; i < nnz; i++)
            {
                int block_offsetc = Ac.get_block_dimx() * Ac.get_block_dimy() * i;
                int block_offset = bdimx * bdimy * i;
                // iterate through values in the blocks
                for (int j = 0; j < Ac.get_block_dimx()*Ac.get_block_dimy(); j++)
                {
                    int cx = j / Ac.get_block_dimy();
                    int cy = j % Ac.get_block_dimy();
                    // interleaved blocks
                    int val_offset = block_offset + cx * bdimx + cy;
                    // in-place blocks
                    //int val_offset = block_offset + 2 * cx * bdimx + 2 * cy;
                    // NOTE(review): case 221 uses the "interleaved" offsets
                    // (+Ac.get_block_dimx(), +2*Ac.get_block_size()) while
                    // cases 222-224 use the "in-place" offsets (+1, +bdimx).
                    // The two layouts coincide only for 1x1 input blocks --
                    // for larger blocks one of them is presumably wrong;
                    // confirm the intended layout before relying on K2..K4 here.
                    switch (conversion_type)
                    {
                        case 221:
                            // interleaved blocks
                            A.values[val_offset                                               ] = types::get_re(Ac.values[block_offsetc + j]);
                            A.values[val_offset + Ac.get_block_dimx()                          ] = -types::get_im(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 2 * Ac.get_block_size()                      ] = types::get_im(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 2 * Ac.get_block_size() + Ac.get_block_dimx()] = types::get_re(Ac.values[block_offsetc + j]);
                            // in-place blocks
                            //A.values[val_offset             ] = types::get_re(Ac.values[block_offsetc + j]);
                            //A.values[val_offset + 1         ] = -types::get_im(Ac.values[block_offsetc + j]);
                            //A.values[val_offset + bdimx     ] = types::get_im(Ac.values[block_offsetc + j]);
                            //A.values[val_offset + 1 + bdimx ] = types::get_re(Ac.values[block_offsetc + j]);
                            break;

                        case 222:
                            A.values[val_offset            ] = types::get_re(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 1        ] = types::get_im(Ac.values[block_offsetc + j]);
                            A.values[val_offset + bdimx    ] = types::get_im(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 1 + bdimx] = -types::get_re(Ac.values[block_offsetc + j]);
                            break;

                        case 223:
                            A.values[val_offset            ] = types::get_im(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 1        ] = types::get_re(Ac.values[block_offsetc + j]);
                            A.values[val_offset + bdimx    ] = types::get_re(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 1 + bdimx] = -types::get_im(Ac.values[block_offsetc + j]);
                            break;

                        case 224:
                            A.values[val_offset            ] = types::get_im(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 1        ] = -types::get_re(Ac.values[block_offsetc + j]);
                            A.values[val_offset + bdimx    ] = types::get_re(Ac.values[block_offsetc + j]);
                            A.values[val_offset + 1 + bdimx] = types::get_im(Ac.values[block_offsetc + j]);
                            break;
                    }
                }
            }
            std::cout << "Compute diagonal\n";
            A.computeDiagonal();
            // if external diagonal - convert those values too
            if (A.hasProps(DIAG))
            {
                std::cout << "Convert diagonal (warning!)\n";
                for (int i = 0; i < Ac.get_num_rows(); i++)
                {
                    int block_offsetc = Ac.diag[i] * Ac.get_block_dimx() * Ac.get_block_dimy();
                    int block_offset = A.diag[i] * bdimx * bdimy;
                    for (int j = 0; j < Ac.get_block_dimx()*Ac.get_block_dimy(); j++)
                    {
                        int val_offset = block_offset + (j / bdimx) * 2 * bdimx + (j % bdimx) * 2;
                        // NOTE(review): all four cases below are byte-identical
                        // and apply the K1 formula [re -im; im re]. For
                        // conversion_type 222-224 this does NOT match the
                        // off-diagonal conversion above -- this looks like a
                        // copy-paste bug (hence the "(warning!)" printed above);
                        // confirm the intended K2..K4 diagonal formulas before use.
                        switch (conversion_type)
                        {
                            case 221:
                                A.values[val_offset            ] = types::get_re(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1        ] = -types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + bdimx    ] = types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1 + bdimx] = types::get_re(Ac.values[block_offsetc + j]);
                                break;

                            case 222:
                                A.values[val_offset            ] = types::get_re(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1        ] = -types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + bdimx    ] = types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1 + bdimx] = types::get_re(Ac.values[block_offsetc + j]);
                                break;

                            case 223:
                                A.values[val_offset            ] = types::get_re(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1        ] = -types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + bdimx    ] = types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1 + bdimx] = types::get_re(Ac.values[block_offsetc + j]);
                                break;

                            case 224:
                                A.values[val_offset            ] = types::get_re(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1        ] = -types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + bdimx    ] = types::get_im(Ac.values[block_offsetc + j]);
                                A.values[val_offset + 1 + bdimx] = types::get_re(Ac.values[block_offsetc + j]);
                                break;
                        }
                    }
                }
            }
            std::cout << "Convert rhs\n";
            b.resize(nrows * bdimy);
            b.set_block_dimx(1);
            b.set_block_dimy(bdimy);
            // interleaved blocks
            for (int r = 0; r < nrows; r++)
            {
                for (int j = 0; j < Ac.get_block_dimy(); j++)
                {
                    switch (conversion_type)
                    {
                        case 221:
                        case 222:
                            b[r * bdimy + j                      ] = types::get_re(bc[r * Ac.get_block_dimy() + j]);
                            b[r * bdimy + j + Ac.get_block_dimy()] = types::get_im(bc[r * Ac.get_block_dimy() + j]);
                            break;

                        case 223:
                        case 224:
                            b[r * bdimy + j                      ] = types::get_im(bc[r * Ac.get_block_dimy() + j]);
                            b[r * bdimy + j + Ac.get_block_dimy()] = types::get_re(bc[r * Ac.get_block_dimy() + j]);
                            break;
                    }
                }
            }
            std::cout << "Convert soln\n";
            //set x if needed
            if (xc.size() > 0)
            {
                // NOTE(review): x is sized with bdimx while b uses bdimy --
                // consistent only for square blocks; verify for rectangular ones.
                x.resize(nrows * bdimx);
                x.set_block_dimx(1);
                x.set_block_dimy(bdimy);
                // interleaved blocks
                for (int r = 0; r < nrows; r++)
                {
                    for (int j = 0; j < Ac.get_block_dimx(); j++)
                    {
                        switch (conversion_type)
                        {
                            case 221:
                            case 223:
                                x[r * bdimx + j                      ] = types::get_re(xc[r * Ac.get_block_dimx() + j]);
                                x[r * bdimx + j + Ac.get_block_dimx()] = types::get_im(xc[r * Ac.get_block_dimx() + j]);
                                break;

                            case 222:
                            case 224:
                                x[r * bdimx + j                      ] = types::get_re(xc[r * Ac.get_block_dimx() + j]);
                                x[r * bdimx + j + Ac.get_block_dimx()] = -types::get_im(xc[r * Ac.get_block_dimx() + j]);
                                break;
                        }
                    }
                }
            }
            std::stringstream info;
            info << "Converted complex matrix " << nrows << "x" << nrows << " with " << nnz << " nonzeros to the (2x2) block-ERF - using K" << conversion_type - 220 << " formulation." << std::endl;
            std::cout << info.str();
        }
    }
};
// Read a (possibly extended) MatrixMarket file into A, b, x.
//
// Supports the standard MatrixMarket header plus AMGX extensions carried in
// "%%AMGX"/"%%NVAMG" comment lines: block sizes, external "diagonal", "rhs",
// "solution", "sorted" data, and "base0" indexing. Handles symmetric,
// skew-symmetric and hermitian matrices by mirroring entries, and can route
// through ReadAndConvert for complex->real ERF conversion when the
// "complex_conversion" config parameter is nonzero.
//
// props selects what to do: io_config::SIZE means "only report dimensions"
// (A's sizes are set and the function returns early); MTX/RHS/SOLN select
// which sections are actually loaded. rank_rows, when non-empty, restricts
// reading to the given global rows (distributed read); GlobalToLocalRowMap
// maps those global row ids to local indices.
//
// Returns true on success; fatal format errors abort via FatalError.
//
// Fix vs. original: the final status line used `warning = +"Finished..."`,
// whose unary `+` turned the intended append into an assignment that silently
// discarded the zero-diagonal warning composed just above. Now `+=`.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
bool ReadMatrixMarket<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::readMatrixMarket(std::ifstream &fin, const char *fname, Matrix_h &A
        , Vector_h &b
        , Vector_h &x
        , const AMG_Config &cfg
        , unsigned int props
        , const IVector_h &rank_rows // row indices for given rank
                                                                                                    )
{
    fin.seekg(std::ios::beg);
    typedef typename Matrix_h::index_type IndexType;
    typedef typename Matrix_h::value_type ValueTypeA;// change later back to load in high precision!
    typedef typename TConfig_h::VecPrec ValueTypeB;
    std::string warning;
    int complex_conversion = cfg.AMG_Config::template getParameter<IndexType>("complex_conversion", "default");
    // if we are in the real-valued mode and complex conversion is specified and we are reading actual matrix
    if (complex_conversion != 0 && !types::util<ValueTypeA>::is_complex && !types::util<ValueTypeB>::is_complex && !io_config::hasProps(io_config::SIZE, props))
    {
        // read system as complex valued system of same precision and convert it to our matrices
        typedef typename TConfig_h::template setVecPrec<vecRealToComplexPrec<TConfig_h::vecPrec>::CPrec>::Type TConfig_h_cvec;
        typedef typename TConfig_h_cvec::template setMatPrec<matRealToComplexPrec<TConfig_h::matPrec>::CPrec>::Type TComplex_h;
        ReadAndConvert<TConfig_h, TComplex_h, IVector_h, types::util<ValueTypeA>::is_complex>::readAndConvert(fin, fname, complex_conversion, A, b, x, props, rank_rows);
        return true;
    }
    //skip comments and read amgx relevant parameters
    std::list<std::string> nvConfig;
    std::list<std::string> mmConfig;
    // Workaround section to convert external diagonal into internal
    // in CLASSICAL
    bool isClassical = false;
    std::string solver_scope, solver_value;
    std::string precond_scope, precond_value;
    AlgorithmType algorithm_s, algorithm_p;
    Resources *resources = A.getResources();
    if (resources != NULL)
    {
        resources->getResourcesConfig()->getParameter<std::string>("solver", solver_value, "default", solver_scope);
        algorithm_s = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", solver_scope);
        resources->getResourcesConfig()->getParameter<std::string>("preconditioner", precond_value, solver_scope, precond_scope);
        algorithm_p = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", precond_scope);
        if (algorithm_s == CLASSICAL && algorithm_p == CLASSICAL)
        {
            isClassical = true;
        }
    }
    // End of CLASSICAL workaround
    bool has_zero_diagonal_element = false;
    bool check_zero_diagonal = false;
    const bool boost_zero_diagonal = false; // compile-time switch; boost path below is currently dead code
    ValueTypeA boostValue = getBoostValue<ValueTypeA>();
    if (boost_zero_diagonal) { check_zero_diagonal = true; }
    // Scan the '%' comment lines: collect tokens from "%%AMGX"/"%%NVAMG" and
    // "%%MatrixMarket" header lines, then rewind each line and skip it.
    while (fin.peek() == '%')
    {
        std::string nvString;
        int fpos = fin.tellg(); // store current position
        getline(fin, nvString);
        std::transform(nvString.begin(), nvString.end(), nvString.begin(), ::tolower);
        std::istringstream nvString_s(nvString);
        std::string nvFormat;
        nvString_s >> nvFormat;
        if (nvFormat.size() > 2)
        {
            if ((nvFormat.substr(2, nvFormat.size()) == "nvamg") ||
                    (nvFormat.substr(2, nvFormat.size()) == "amgx"))
            {
                std::copy(std::istream_iterator<std::string>(nvString_s), std::istream_iterator<std::string>(),
                          std::back_inserter<std::list<std::string> >(nvConfig));
            }
            if (nvFormat.substr(2, nvFormat.size()) == "matrixmarket")
            {
                std::copy(std::istream_iterator<std::string>(nvString_s), std::istream_iterator<std::string>(),
                          std::back_inserter<std::list<std::string> >(mmConfig));
            }
        }
        fin.seekg(fpos, std::ios_base::beg);
        fin.ignore(INT_MAX, '\n');
    }
    // process MatrixMarket config string
    bool symmetric = false;
    bool skew_symmetric = false;
    bool hermitian = false;
    if (mmConfig.size() > 0)
    {
        for (std::list<std::string>::const_iterator it = mmConfig.begin(); it != mmConfig.end(); ++it)
        {
            if (*it == "symmetric") {symmetric = true; continue;}

            if (*it == "complex")
            {
                if (!types::util<ValueTypeA>::is_complex && complex_conversion == 0)
                {
                    FatalError("Trying to load file with complex matrix to real valued matrix structure", AMGX_ERR_IO);
                }

                continue;
            }

            if (*it == "real")
            {
                if (!types::util<ValueTypeA>::is_real)
                {
                    FatalError("Trying to load file with real matrix to complex valued matrix structure", AMGX_ERR_IO);
                }
            }

            if (*it == "pattern") {FatalError("'pattern' is not supported in %%MatrixMarket format string", AMGX_ERR_IO);}

            // skew-symmetric is handled as symmetric with negated mirror entries
            if (*it == "skew-symmetric") {symmetric = true; skew_symmetric = true; continue;}

            //if (*it == "skew-symmetric") {FatalError("'skew-symmetric' is not supported in %%MatrixMarket format string", AMGX_ERR_IO);}
            if (*it == "hermitian") {hermitian = true; continue;}
        }
    }
    // process amgx config string
    int block_dimx = 1, block_dimy = 1, index_base = 1;
    bool diag_prop = false, rhs = false, soln = false, mtx = false, sorted = false;
    std::list<int> block_sizes;
    if (nvConfig.size() > 0)
    {
        for (std::list<std::string>::const_iterator it = nvConfig.begin(); it != nvConfig.end(); ++it)
        {
            if (*it == "diagonal") {diag_prop = true; continue;}

            if (*it == "rhs") {rhs = true; continue;}

            if (*it == "solution") {soln = true; continue;}

            if (*it == "sorted") {sorted = true; continue;}

            if (*it == "base0") {index_base = 0; continue;}

            // bare numbers are block dimensions (one -> square, two -> x and y)
            if (isdigit((*it)[0])) { int bsize; std::istringstream(*it) >> bsize; block_sizes.push_back(bsize); continue;};
        }
    }
    // CLASSICAL fix: the sorted fast path cannot absorb the external diagonal
    if (sorted && isClassical && diag_prop) { sorted = false; }

    // Currently not implemented sorted symmetric matrices
    if ((sorted && symmetric) || (sorted && hermitian)) { sorted = false; }

    if (std::find(mmConfig.begin(), mmConfig.end(), "matrix") != mmConfig.end()) { mtx = true; }

    if (block_sizes.size() == 2)
    {
        block_dimy = block_sizes.back();
        block_dimx = block_sizes.front();
    }
    else if (block_sizes.size() == 1)
    {
        block_dimy = block_dimx = block_sizes.back();
    }

    int fpos = fin.tellg(); // store current position
    int rows, cols, entries;
    //read rows cols entries
    fin >> rows >> cols >> entries;

    if (rows % block_dimx != 0 || cols % block_dimy != 0 || entries % (block_dimx * block_dimy) != 0)
    {
        FatalError("Matrix dimensions do not match with block sizes", AMGX_ERR_IO);
    }

    // From here on rows/cols/entries count blocks, not scalars.
    rows /= block_dimx;
    cols /= block_dimy;
    entries /= (block_dimx * block_dimy);

    // SIZE-only query: set dimensions on A (accounting for any pending ERF
    // conversion) and return without reading the data section.
    if (io_config::hasProps(io_config::SIZE, props))
    {
        if (complex_conversion != 0 && block_dimy * block_dimx != 1)
        {
            FatalError("Complex conversion is supported only for non-coupled matrices with blocks of 1x1", AMGX_ERR_IO);
        }

        if (complex_conversion == 0)
        {
            A.set_num_rows(rows);
            A.set_num_cols(cols);
            A.set_block_dimy(block_dimy);
            A.set_block_dimx(block_dimx);
        }
        else if (complex_conversion > 0 && complex_conversion < 5)
        {
            // general ERF
            A.set_num_rows(rows * 2);
            A.set_num_cols(cols * 2);
            A.set_block_dimy(block_dimy);
            A.set_block_dimx(block_dimx);
        }
        else if (complex_conversion > 220 && complex_conversion < 225)
        {
            // 2x2 block ERF
            A.set_num_rows(rows);
            A.set_num_cols(cols);
            A.set_block_dimy(block_dimy * 2); // complex 1x1 only supported, which converts to 2x2 real blocks
            A.set_block_dimx(block_dimx * 2);
        }
        else
        {
            FatalError("Unsupported complex_conversion mode", AMGX_ERR_IO);
        }

        int num_entries = 0;

        if (symmetric || hermitian)
        {
            // Pre-scan the data to count diagonal entries so the mirrored
            // nonzero count (2*entries - diag) can be reported.
            int i, j;
            int idiag = 0;
            ValueTypeA v;

            for (int e = 0; e < entries * (block_dimx * block_dimy); e++)
            {
                fin >> i >> j;
                LoadValueFromStream(fin, v);

                // skip explicit zeroes, only block_size=1 is supported
                if (block_dimx == 1 && block_dimy == 1 && types::util<ValueTypeA>::is_zero(v))
                {
                    continue;
                }

                if (i == j)
                {
                    idiag++;
                }
            }

            num_entries = 2 * entries - idiag / (block_dimx * block_dimy);
        }
        else
        {
            if (isClassical && diag_prop)
            {
                // external diagonal gets folded into the matrix -> one extra entry per row
                num_entries = entries + rows;
            }
            else
            {
                num_entries = entries;
            }
        }

        if (complex_conversion == 0)
        {
            A.set_num_nz(num_entries);
        }
        else if (complex_conversion > 0 && complex_conversion < 5)
        {
            // general ERF
            A.set_num_nz(num_entries * 4);
        }
        else if (complex_conversion > 220 && complex_conversion < 225)
        {
            // 2x2 block ERF
            A.set_num_nz(num_entries);
        }
        else
        {
            FatalError("Unsupported complex_conversion mode", AMGX_ERR_IO);
        }

        return true;
    }

    warning = "Reading data...\n";

    if (isClassical && diag_prop)
    {
        warning += "Warning: external diagonal will be converted into internal for CLASSICAL path\n";
    }

    amgx_output(warning.c_str(), warning.length());

    // check for consistent input
    if (io_config::hasProps(io_config::MTX, props))
    {
        if (!mtx)
        {
            FatalError("Expecting 'matrix' keyword in %%MatrixMarket format string", AMGX_ERR_IO);
        }
    }
    else
    {
        // Caller does not want the matrix: skip past its data section
        // (3 numbers per scalar entry: i, j, value) and external diagonal.
        if (mtx)
        {
            skip_vals(fin, 3 * entries * (block_dimy * block_dimx));

            if (diag_prop)
            {
                skip_vals(fin, rows * block_dimy * block_dimx);
            }
        }
    }

    bool read_all = (rank_rows.size() == 0) ? true : false;
    const IVector_h &partRowVec = rank_rows;
    int n_rows_part = (read_all) ? rows : partRowVec.size();
    std::map<const int, int> GlobalToLocalRowMap; // should try unordered_map
    std::map<const int, int>::const_iterator gtl_i;
    std::map<const int, int>::const_iterator gtl_j;

    // Generate inverse map for faster searching during the read
    if (!read_all)
        for (int i = 0; i < n_rows_part; i++)
        {
            GlobalToLocalRowMap.insert(std::pair<const int, int>(partRowVec[i], i));
        }

    if (io_config::hasProps(io_config::MTX, props))
    {
        int ival = 0, idiag = 0;
        int block_size = block_dimy * block_dimx;
        // Unsorted input is accumulated per-row into ordered maps (col -> block
        // values); the "sorted" fast path appends directly into flat arrays.
        typedef std::map<const int, std::vector<ValueTypeA> > ColValuesMap_t;
        typedef std::pair<const int, std::vector<ValueTypeA> > ColValuesPair_t;
        typedef std::vector<ValueTypeA> ValuesVector_t;
        typedef std::vector<int> ColVector_t;
        std::vector<ColValuesMap_t> input;
        std::vector<int> nnz_per_row;
        //typename Matrix_h::MVector input_sorted_v;
        //IVector_h input_sorted_c;
        ValuesVector_t input_sorted_v;
        ColVector_t input_sorted_c;
        std::vector<int> trackDiag;

        if (check_zero_diagonal)
        {
            trackDiag.resize(n_rows_part, 0);
        }

        if (sorted)
        {
            nnz_per_row.resize(n_rows_part, 0);

            if (read_all)
            {
                input_sorted_v.resize(entries * block_size);
                input_sorted_c.resize(entries);
            }
        }
        else
        {
            input.resize(n_rows_part);
        }

        typename Matrix_h::MVector diag(n_rows_part * block_size, types::util<ValueTypeA>::get_zero());
        std::vector<ValueTypeA> block_vals(block_size);
        //for each entry
        int i, j, ii, jj, i_old = -1;
        bool skip = false;
        bool has_ii = true, has_jj = false;

        if (symmetric || hermitian) { has_jj = true; }

        int explicit_zeroes = 0;

        for (int e = 0; e < entries; e++)
        {
            // Read one block: block_dimx*block_dimy "i j value" lines.
            // The (i, j) of the last scalar read identifies the block.
            for (int kx = 0; kx < block_dimx; kx++)
                for (int ky = 0; ky < block_dimy; ky++)
                {
                    //read entry
                    fin >> i >> j;
                    LoadValueFromStream(fin, block_vals[kx * block_dimy + ky]);

                    // check we haven't been given a 0-indexed matrix
                    if ((i == 0 || j == 0) && index_base == 1)
                    {
                        FatalError("Matrix Market format requires 1-based indexing. Use 'base0' AMGX format option to override.", AMGX_ERR_IO);
                    }
                }

            // skip explicit zeroes, only block_size=1 is supported
            if (block_dimx == 1 && block_dimy == 1 && types::util<ValueTypeA>::is_zero(block_vals[0]))
            {
                explicit_zeroes++;

                if (i == j)
                {
                    idiag++;
                    has_zero_diagonal_element = true;

                    if (check_zero_diagonal)
                    {
                        trackDiag[i - index_base] = 0;
                    }
                }

                continue;
            }
            else
            {
                if (i == j)
                {
                    if (check_zero_diagonal)
                    {
                        trackDiag[i - index_base] = 1;
                    }
                }
            }

            // Convert to 0-based block row/col indices.
            i = (i - index_base) / block_dimx;
            j = (j - index_base) / block_dimy;

            // Map global indices to local ones (partitioned read) and decide
            // which side(s) of a symmetric entry this rank owns.
            // Braces added to make the if/else pairing explicit (behavior unchanged).
            if (!read_all)
            {
                if (!symmetric && !hermitian)
                {
                    if (i != i_old) // reduce overhead of searching in GlobalToLocalRowMap
                    {
                        has_ii = false;
                        i_old = i;
                        gtl_i = GlobalToLocalRowMap.find(i);

                        if (gtl_i == GlobalToLocalRowMap.end())
                        {
                            skip = true;
                            continue;
                        }
                        else
                        {
                            has_ii = true;
                            skip = false;
                            ii = gtl_i->second;
                        }
                    }
                    else if (skip)
                    {
                        continue;
                    }
                }
                else
                {
                    ii = i;
                    jj = j;

                    if (!read_all)
                    {
                        gtl_i = GlobalToLocalRowMap.find(i);
                        gtl_j = GlobalToLocalRowMap.find(j);
                        has_ii = has_jj = false;

                        if (gtl_i != GlobalToLocalRowMap.end()) { has_ii = true; }

                        if (gtl_j != GlobalToLocalRowMap.end()) { has_jj = true; }

                        if (!has_ii && !has_jj)
                        {
                            continue;
                        }
                        else
                        {
                            if (has_ii)
                            {
                                ii = gtl_i->second;
                            }

                            if (has_jj)
                            {
                                jj = gtl_j->second;
                            }
                        }
                    }
                }
            }
            else
            {
                ii = i;

                if (symmetric || hermitian)
                {
                    jj = j;
                }
            }

            if (sorted)
            {
                nnz_per_row[ii]++;

                if (!read_all)
                {
                    input_sorted_v.insert(input_sorted_v.end(), block_vals.begin(), block_vals.end());
                    input_sorted_c.push_back(j);
                }
                else
                {
                    std::copy(block_vals.begin(), block_vals.end(), &input_sorted_v[ival * block_size]);
                    input_sorted_c[ival] = j;
                }

                ival++;
            }
            else
            {
                if (has_ii)
                {
                    ival++;
                    input[ii].insert(ColValuesPair_t(j, block_vals));
                }

                // Mirror the entry for symmetric/hermitian input; negate for
                // skew-symmetric, conjugate for hermitian (off-diagonal only).
                if (has_jj)
                {
                    ival++;

                    if ((skew_symmetric || hermitian) && i != j)
                        for (int k = 0; k < block_dimx * block_dimy; k++)
                        {
                            if (skew_symmetric)
                            {
                                block_vals[k] = types::util<ValueTypeA>::invert(block_vals[k]);
                            }
                            else if (hermitian)
                            {
                                block_vals[k] = types::util<ValueTypeA>::conjugate(block_vals[k]);
                            }
                        }

                    input[jj].insert(ColValuesPair_t(i, block_vals));
                }
            }

            if (i == j)
            {
                idiag++;
                std::copy(block_vals.begin(), block_vals.end(), &diag[ii * block_size]);
            }
        } // end of entries loop

        // trackDiag is reused in-place: it now collects the row ids that
        // never received a nonzero diagonal entry.
        int diagIdx = 0;

        if (check_zero_diagonal)
        {
            for (int i = 0; i < rows; i++)
            {
                if (trackDiag[i] == 0)
                {
                    trackDiag[diagIdx] = i;
                    diagIdx++;
                }
            }
        }
        else
        {
            diagIdx = idiag;
        }

        if (has_zero_diagonal_element && block_dimx == 1 && block_dimy == 1)
        {
            if (check_zero_diagonal)
            {
                printf("Warning! Input matrix has zeroes on diagonal: %d %d\nZero diagonal elements are:\n", rows, diagIdx);

                for (int i = 0; i < diagIdx; i++)
                {
                    printf("%d ", trackDiag[i]);
                }

                printf("\n");
            }
        }

        // NOTE(review): this branch is dead (boost_zero_diagonal is a
        // compile-time false). If ever enabled, `input[ii]` uses the stale
        // loop variable `ii` -- presumably input[trackDiag[i]] was intended;
        // confirm before enabling.
        if (boost_zero_diagonal && has_zero_diagonal_element && block_dimx == 1 && block_dimy == 1)
        {
            for (int i = 0; i < diagIdx; i++)
            {
                block_vals[0] = boostValue;
                input[ii].insert(ColValuesPair_t(trackDiag[i], block_vals));
            }
        }

        if (!(symmetric || hermitian) && (ival + explicit_zeroes) != entries && read_all)
        {
            FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO);
        }

        IndexType n_nonzeros_part;

        if (symmetric || hermitian)
        {
            // Each off-diagonal entry was counted twice (once per mirror);
            // diagonal entries only once, hence the correction.
            n_nonzeros_part = ival - idiag;
        }
        else
        {
            n_nonzeros_part = ival;
        }

        //if (symmetric)
        //    printf("Matrix is symmetric. Counted %d entries and %d diag elements, corresponding to %d nonzeroes\n ", ival, idiag, n_nonzeros_part);
        if (sorted && input_sorted_c.size() != n_nonzeros_part)
        {
            //printf("input_sorted_c.size() = %d n_nonzeros_part = %d\n", input_sorted_c.size(), n_nonzeros_part);
            FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO);
        }

        if (sorted && input_sorted_v.size() != n_nonzeros_part * block_size)
        {
            //printf("input_sorted_v.size() = %d n_nonzeros_part*block_size = %d\n", input_sorted_v.size(), n_nonzeros_part*block_size);
            FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO);
        }

        A.resize(0, 0, 0);
        //A.delProps(COO);
        A.addProps(CSR);

        // External diagonal stays external (DIAG prop) except on the
        // CLASSICAL path, where it is folded into the CSR structure below.
        if (diag_prop && !isClassical)
        {
            A.addProps(DIAG);
        }
        else
        {
            A.delProps(DIAG);
        }

        if (diag_prop)
        {
            LoadVector(fin, read_all, rows, block_size, diag, GlobalToLocalRowMap);
        }

        if (isClassical && diag_prop)
        {
            // CLASSICAL workaround: insert the external diagonal as explicit
            // (row, row) entries, one per local row.
            n_nonzeros_part = n_nonzeros_part + n_rows_part;

            for (int i = 0; i < n_rows_part; i++)
            {
                std::copy(&diag[i * block_size], &diag[i * block_size] + block_size, block_vals.begin());
                input[i].insert(ColValuesPair_t(read_all ? i : rank_rows[i], block_vals));
            }
        }

        A.resize(n_rows_part, cols, n_nonzeros_part, block_dimx, block_dimy);
        // Pointer to the external-diagonal storage that follows the nnz
        // blocks in A.values.
        // NOTE(review): when A lacks the DIAG prop this indexes one past the
        // nnz region; it is only dereferenced under hasProps(DIAG) below, but
        // the element access itself is worth confirming against MVector's
        // resize policy.
        ValueTypeA *dia_values_ptr = thrust::raw_pointer_cast(&(A.values[block_dimx * block_dimy * n_nonzeros_part]));

        if (A.hasProps(CSR))
        {
            A.row_offsets[0] = 0;
            ival = 0;

            if (!sorted)
            {
                // Flatten the per-row maps (already column-ordered) into CSR.
                for (int i = 0; i < n_rows_part; i++)
                {
                    for (auto it = input[i].begin(); it != input[i].end(); it++)
                    {
                        A.col_indices[ival] = it->first;

                        for (int k = 0; k < block_size; k++)
                        {
                            A.values[ival * block_size + k] = it->second[k];
                        }

                        ival++;
                    }

                    A.row_offsets[i + 1] = ival;
                }
            }
            else
            {
                // Sorted fast path: prefix-sum the per-row counts and bulk-copy.
                A.row_offsets[0] = 0;

                for (int i = 0; i < n_rows_part; i++)
                {
                    A.row_offsets[i + 1] = A.row_offsets[i] + nnz_per_row[i];
                }

                if (A.row_offsets[n_rows_part] != n_nonzeros_part)
                {
                    FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO);
                }

                std::copy(input_sorted_c.begin(), input_sorted_c.end(), A.col_indices.begin());
                std::copy(input_sorted_v.begin(), input_sorted_v.end(), A.values.begin());
            }
        }
        else
        {
            FatalError("Matrix Market reader COO output is not supported", AMGX_ERR_IO);
        }

        if (diag_prop && !isClassical)
        {
            A.computeDiagonal();
        }

        if (A.hasProps(DIAG) && !isClassical)
            for (int i = 0; i < diag.size(); i++)
            {
                dia_values_ptr[i] = diag[i];
            }
    }// End of load matrix

    // RHS not requested but present in the file: skip past it so any
    // subsequent solution section lines up.
    if (!io_config::hasProps(io_config::RHS, props))
        if (rhs)
        {
            skip_vals(fin, rows * block_dimy);
        }

    if (io_config::hasProps(io_config::RHS, props))
    {
        b.resize(n_rows_part * block_dimy);
        b.set_block_dimy(block_dimy);
        b.set_block_dimx(1);

        if (rhs)
        {
            LoadVector(fin, read_all, rows, block_dimy, b, GlobalToLocalRowMap);
        }
        else
        {
            //initialize RHS
            if (io_config::hasProps(io_config::GEN_RHS, props))
            {
                Vector_h b0(n_rows_part * block_dimy, types::util<ValueTypeB>::get_one());
                b0.set_block_dimy(block_dimy);
                b0.set_block_dimx(1);
                warning = "RHS vector was not found. Using RHS b=A*e where e=[1,…,1]^T\n";
                A.set_initialized(true);
                multiply(A, b0, b);
                A.set_initialized(false);
            }
            else
            {
                warning = "RHS vector was not found. Using RHS b=[1,…,1]^T\n";

                for (int i = 0; i < n_rows_part * block_dimy; i++)
                {
                    b[i] = types::util<ValueTypeB>::get_one();
                }
            }

            amgx_output(warning.c_str(), warning.length());
        }
    }

    // try to read initial guess
    if (io_config::hasProps(io_config::SOLN, props))
    {
        x.resize(n_rows_part * block_dimx);
        x.set_block_dimy(block_dimy);
        x.set_block_dimx(1);

        if (soln)
        {
            LoadVector(fin, read_all, rows, block_dimx, x, GlobalToLocalRowMap);
        }
        else
        {
            warning = "Solution vector was not found. Setting initial solution to x=[0,…,0]^T\n";

            for (int i = 0; i < n_rows_part * block_dimx; i++)
            {
                x[i] = types::util<ValueTypeB>::get_zero();
            }
        }

        amgx_output(warning.c_str(), warning.length());
    }

    if (rank_rows.size() > 0)
    {
        A.set_is_matrix_read_partitioned(true);
        b.set_is_vector_read_partitioned(true);

        if (x.size() > 0)
        {
            x.set_is_vector_read_partitioned(true);
        }
    }

    warning = "";

    if (has_zero_diagonal_element || skew_symmetric)
    {
        warning += "Warning: Matrix has at least one zero on its diagonal\n";
    }

    // BUGFIX: was `warning = +"Finished reading\n";` -- the unary `+` made it
    // an assignment, discarding the zero-diagonal warning appended above.
    warning += "Finished reading\n";
    amgx_output(warning.c_str(), warning.length());
    return true;
}
// Distributed version
// Reads a matrix system in the AMGX "MatrixMarket V2" layout: a size line
//   rows cols entries block_dimx block_dimy diag_prop
// followed by `entries` blocked (i, j, block values) records, then the
// explicitly stored diagonal (if diag_prop), the RHS and the initial guess.
// When rank_rows is non-empty only the rows owned by this rank are kept
// (distributed read); otherwise the whole matrix is loaded.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
bool ReadMatrixMarket<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::readMatrixMarketV2(std::ifstream &fin, const char *fname, Matrix_h &A
        , Vector_h &b
        , Vector_h &x
        , const AMG_Config &cfg
        , unsigned int props
        , const IVector_h &rank_rows // row indices for given rank
                                    )
{
    // NOTE(review): single-argument seekg treats std::ios::beg (value 0) as an
    // absolute offset — works by accident; seekg(0, std::ios::beg) is the
    // conventional form.
    fin.seekg(std::ios::beg);
    typedef typename Matrix_h::index_type IndexType;
    typedef typename Matrix_h::value_type ValueTypeA;// change later back to load in high precision!
    typedef typename TConfig_h::VecPrec ValueTypeB;
    //skip comments
    while (fin.peek() == '%')
    {
        fin.ignore(INT_MAX, '\n');
    }
    int rows, cols, entries, block_dimx, block_dimy, diag_prop;
    //read rows cols entries
    fin >> rows >> cols >> entries >> block_dimx >> block_dimy >> diag_prop;
    // SIZE-only query: record the global dimensions on A and return without
    // reading any data.
    if (io_config::hasProps(io_config::SIZE, props))
    {
        A.set_num_rows(rows);
        A.set_num_cols(cols);
        A.set_num_nz(entries);
        A.set_block_dimy(block_dimy);
        A.set_block_dimx(block_dimx);
        return true;
    }
    fflush(stdout);
    // read_all == true -> serial read (keep every row); otherwise keep only
    // the rows listed in rank_rows.
    bool read_all = (rank_rows.size() == 0) ? true : false;
    const IVector_h &partRowVec = rank_rows;
    int n_rows_part = (read_all) ? rows : partRowVec.size();
    std::map<const int, int> GlobalToLocalRowMap; // should try unordered_map
    std::map<const int, int>::const_iterator gtl_it;
    // Generate inverse map for faster searching during the read
    if (!read_all)
        for (int i = 0; i < n_rows_part; i++)
        {
            GlobalToLocalRowMap.insert(std::pair<const int, int>(partRowVec[i], i));
        }
    // Per-row map of column -> block values; std::map keeps the columns
    // sorted, so the CSR arrays built below come out ordered by column.
    typedef std::map<const int, std::vector<ValueTypeA> > ColValuesMap_t;
    typedef std::pair<const int, std::vector<ValueTypeA> > ColValuesPair_t;
    std::vector<ColValuesMap_t> input(n_rows_part);
    int ival = 0;
    int block_size = block_dimy * block_dimx;
    typename Matrix_h::MVector diag(n_rows_part * block_size, types::util<ValueTypeA>::get_zero());
    std::vector<ValueTypeA> block_vals(block_size);
    //for each entry
    for (int e = 0; e < entries; e++)
    {
        int i, j;
        //read entry
        fin >> i >> j;
        // check we haven't been given a 0-indexed matrix
        if (i == 0 || j == 0)
        {
            FatalError("Matrix Market format requires 1-based indexing", AMGX_ERR_IO);
        }
        for (int k = 0; k < block_size; k++)
        {
            LoadValueFromStream(fin, block_vals[k]);
        }
        if (!read_all)
        {
            // Distributed read: keep the entry only if its row is owned by
            // this rank; ival counts the entries actually kept.
            gtl_it = GlobalToLocalRowMap.find(i - 1);
            if (gtl_it != GlobalToLocalRowMap.end())
            {
                input[gtl_it->second].insert(ColValuesPair_t(j - 1, block_vals));
                if (i == j)
                    for (int k = 0; k < block_size; k++)
                    {
                        diag[block_size * gtl_it->second + k] = block_vals[k];
                    }
                ival++;
            }
        }
        else
        {
            input[i - 1].insert(ColValuesPair_t(j - 1, block_vals));
            if (i == j)
                for (int k = 0; k < block_size; k++)
                {
                    diag[block_size * (i - 1) + k] = block_vals[k];
                }
            ival++;
        }
    }
    // In a serial read every entry must have been kept; a shortfall means
    // duplicate (i, j) records were collapsed by the map insert above.
    if (ival != entries && read_all)
    {
        FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO);
    }
    IndexType n_nonzeros_part = ival;
    A.resize(0, 0, 0);
    //A.delProps(COO);
    A.addProps(CSR);
    if (diag_prop)
    {
        A.addProps(DIAG);
    }
    else
    {
        A.delProps(DIAG);
    }
    A.resize(n_rows_part, cols, n_nonzeros_part, block_dimx, block_dimy);
    // With DIAG, the external diagonal blocks live at the tail of A.values,
    // right after the n_nonzeros_part off-diagonal blocks.
    ValueTypeA *dia_values_ptr = thrust::raw_pointer_cast(&(A.values[block_dimx * block_dimy * n_nonzeros_part]));
    if (A.hasProps(CSR))
    {
        // Flatten the per-row maps into CSR row_offsets/col_indices/values.
        A.row_offsets[0] = 0;
        ival = 0;
        for (int i = 0; i < n_rows_part; i++)
        {
            for (auto it = input[i].begin(); it != input[i].end(); it++)
            {
                A.col_indices[ival] = it->first;
                for (int k = 0; k < block_size; k++)
                {
                    A.values[ival * block_size + k] = it->second[k];
                }
                ival++;
            }
            A.row_offsets[i + 1] = ival;
        }
    }
    else
    {
        FatalError("Matrix Market reader COO output is not supported", AMGX_ERR_IO);
    }
    if (diag_prop)
    {
        A.computeDiagonal();
        // Overwrite `diag` with the explicitly stored diagonal blocks that
        // follow the matrix entries in the file.
        LoadVector(fin, read_all, rows, block_size, diag, GlobalToLocalRowMap);
    }
    if (A.hasProps(DIAG))
        for (int i = 0; i < diag.size(); i++)
        {
            dia_values_ptr[i] = diag[i];
        }
    if (io_config::hasProps(io_config::RHS, props))
    {
        b.resize(n_rows_part * block_dimy);
        b.set_block_dimy(block_dimy);
        //initialize RHS
        for (int i = 0; i < n_rows_part * block_dimy; i++)
        {
            b[i] = types::util<ValueTypeB>::get_one();
        }
        //read num rows
        fin >> rows;
        LoadVector(fin, read_all, rows / block_dimy, block_dimy, b, GlobalToLocalRowMap);
    }
    // try to read initial guess
    if (io_config::hasProps(io_config::SOLN, props))
    {
        fin >> rows;
        if (rows)
        {
            x.resize(n_rows_part * block_dimx);
            // NOTE(review): sets the y block dimension from block_dimx; the
            // serial reader path uses set_block_dimy(block_dimy) plus
            // set_block_dimx(1) here — confirm which is intended.
            x.set_block_dimy(block_dimx);
            LoadVector(fin, read_all, rows / block_dimx, block_dimx, x, GlobalToLocalRowMap);
        }
        else
        {
            x.resize(0);
        }
    }
    // A non-empty rank_rows means the data was read pre-partitioned.
    if (rank_rows.size() > 0)
    {
        A.set_is_matrix_read_partitioned(true);
        b.set_is_vector_read_partitioned(true);
        if (x.size() > 0)
        {
            x.set_is_vector_read_partitioned(true);
        }
    }
    return true;
}
// Element-wise copy of `size` values from src to dst, converting each value
// to the destination type with static_cast.
template <typename TSRC, typename TDST>
void val_copy(const TSRC *src, TDST *dst, int size)
{
    const TSRC *src_end = src + size;
    while (src != src_end)
    {
        *dst++ = static_cast<TDST>(*src++);
    }
}
// Full specialization for narrowing complex data: converts each
// double-precision complex value to single precision via the project helper.
template <>
void val_copy<cuDoubleComplex, cuComplex>(const cuDoubleComplex *src, cuComplex *dst, int size)
{
    int remaining = size;
    while (remaining-- > 0)
    {
        *dst++ = types::util<cuDoubleComplex>::to_downtype(*src++);
    }
}
// Reads a matrix system from the NVAMG binary format: a "%%NVAMGBinary\n"
// text header, nine uint32 flags, then row offsets (int), column indices
// (int), values, optional external diagonal, optional RHS and optional
// solution.  Values are stored in the "up" precision (double / double
// complex) and cast down to ValueTypeA/ValueTypeB on load.  When rank_rows
// is non-empty only the listed rows are read (partitioned read), using
// fseek to skip data belonging to other ranks.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
bool ReadNVAMGBinary<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::read(std::ifstream &finstr, const char *fnamec
        , Matrix_h &A
        , Vector_h &b, Vector_h &x
        , const AMG_Config &cfg
        , unsigned int props
        , const IVector_h &rank_rows
                                                                                       )
{
    typedef typename Matrix_h::index_type IndexType;
    typedef typename Matrix_h::value_type ValueTypeA;
    typedef typename Vector_h::value_type ValueTypeB; // change back to matrix type later
    typedef typename types::util<ValueTypeA>::uptype UpValueTypeA;
    size_t is_read;
    std::string err;
    // The stream interface is not used here; reopen the file with stdio so
    // raw records can be fseek'd/fread'd.
    finstr.close();
    FILE *fin = fopen(fnamec, "rb");
    if (fin == NULL)
    {
        err = "Error: couldn't open file " + std::string(fnamec);
        // Fix: previously the error string was built but execution fell
        // through to fread(..., NULL), crashing on a missing file.
        FatalError(err, AMGX_ERR_IO);
    }
    char text_header[255];
    uint32_t system_flags [9];
    // Fix: the header reads were previously unchecked, so a truncated file
    // yielded garbage flags instead of a clean error.
    is_read = fread(text_header, sizeof(char), strlen("%%NVAMGBinary\n"), fin);
    if (is_read != strlen("%%NVAMGBinary\n"))
    {
        FatalError("fread failed reading binary header, exiting", AMGX_ERR_IO);
    }
    is_read = fread(system_flags, sizeof(uint32_t), 9, fin);
    if (is_read != 9)
    {
        FatalError("fread failed reading system flags, exiting", AMGX_ERR_IO);
    }
    //bool is_mtx = system_flags[0];
    bool is_rhs = system_flags[1];
    bool is_soln = system_flags[2];
    uint32_t matrix_format = system_flags[3];
    bool diag = system_flags[4];
    uint32_t block_dimx = system_flags[5];
    uint32_t block_dimy = system_flags[6];
    uint32_t num_rows = system_flags[7];
    uint32_t num_nz = system_flags[8];
    // SIZE-only query: report global dimensions without reading the data.
    if (io_config::hasProps(io_config::SIZE, props))
    {
        A.set_num_rows(num_rows);
        A.set_num_cols(num_rows);
        A.set_num_nz(num_nz);
        A.set_block_dimy(block_dimy);
        A.set_block_dimx(block_dimx);
        fclose(fin);
        return true;
    }
    long int data_pos = ftell(fin);
    // Empty rank_rows means "read everything": build an identity partition.
    IVector_h *partRowVec_p = NULL;
    if (rank_rows.size() == 0)
    {
        partRowVec_p = new IVector_h(num_rows);
        thrust::sequence(partRowVec_p->begin(), partRowVec_p->end());
        cudaCheckError();
    }
    else
    {
        partRowVec_p = (IVector_h *) &rank_rows;
    }
    IVector_h &partRowVec = *partRowVec_p;
    int n_rows_part = partRowVec.size();
    IVector_h row_offsets_part(n_rows_part + 1);
    IVector_h row_start_glb(n_rows_part); // Store global row start positions here
    int beginEnd[2];
    int n_nonzeros_part = 0;
    // Pass 1: fetch the [begin, end) global offsets of each owned row to size
    // the local CSR structure.
    for (int i = 0; i < partRowVec.size(); i++)
    {
        if (fseek(fin, data_pos + partRowVec[i]*sizeof(int), SEEK_SET) != 0)
        {
            FatalError("fseek error", AMGX_ERR_IO);
        }
        is_read = fread(beginEnd, sizeof(int), 2, fin);
        if (is_read != 2)
        {
            err = "fread failed reading row_offsets, exiting";
            FatalError(err, AMGX_ERR_IO);
        }
        row_start_glb[i] = beginEnd[0];
        row_offsets_part[i] = n_nonzeros_part;
        n_nonzeros_part += beginEnd[1] - beginEnd[0];
    }
    row_offsets_part[n_rows_part] = n_nonzeros_part;
    A.delProps(DIAG | COLORING);
    if ((matrix_format & COMPLEX) && types::util<ValueTypeA>::is_real)
    {
        FatalError("Matrix is in complex format, but reading as real AMGX mode", AMGX_ERR_IO);
    }
    if (!(matrix_format & COMPLEX) && types::util<ValueTypeA>::is_complex)
    {
        FatalError("Matrix is in real format, but reading as complex AMGX mode", AMGX_ERR_IO);
    }
    if (diag)
    {
        A.addProps(DIAG);
    }
    // Bit 0 of matrix_format distinguishes CSR (0) from COO (1).
    if (!(matrix_format & 1))
    {
        A.addProps(CSR);
    }
    else
    {
        FatalError("COO matrix binary format is not supported for reading.", AMGX_ERR_IO);
    }
    A.resize(n_rows_part, num_rows, n_nonzeros_part, block_dimx, block_dimy);
    IndexType *row_offsets_ptr = A.row_offsets.raw();
    IndexType *column_indices_ptr = A.col_indices.raw();
    ValueTypeA *nonzero_values_ptr = A.values.raw();
    // External diagonal blocks live at the tail of A.values, after the
    // n_nonzeros_part off-diagonal blocks.
    ValueTypeA *dia_values_ptr = thrust::raw_pointer_cast(&(A.values[block_dimy * block_dimx * n_nonzeros_part]));
    //Transfer row_offsets to matrix
    thrust::copy(row_offsets_part.begin(), row_offsets_part.end(), A.row_offsets.begin());
    cudaCheckError();
    data_pos += (num_rows + 1) * sizeof(int);
    // Pass 2: column indices of the owned rows.
    n_nonzeros_part = 0;
    int row_nnz;
    for (int i = 0; i < partRowVec.size(); i++)
    {
        if (fseek(fin, data_pos + sizeof(int)*row_start_glb[i], SEEK_SET) != 0)
        {
            FatalError("fseek error", AMGX_ERR_IO);
        }
        row_nnz = row_offsets_part[i + 1] - row_offsets_part[i];
        is_read = fread(column_indices_ptr + n_nonzeros_part, sizeof(int), row_nnz, fin);
        n_nonzeros_part += row_nnz;
        if (is_read != row_nnz)
        {
            err = "fread failed reading column_indices, exiting";
            FatalError(err, AMGX_ERR_IO);
        }
    }
    data_pos += num_nz * sizeof(int);
    // Temporary buffer holding values in the stored ("up") precision;
    // double-sized storage in complex builds.
    std::vector< UpValueTypeA > temp(n_nonzeros_part * block_dimy * block_dimx);
    n_nonzeros_part = 0;
    // Pass 3: off-diagonal values of the owned rows.
    for (int i = 0; i < partRowVec.size(); i++)
    {
        if (fseek(fin, data_pos + sizeof(UpValueTypeA)*row_start_glb[i] * block_dimy * block_dimx, SEEK_SET) != 0)
        {
            FatalError("fseek error", AMGX_ERR_IO);
        }
        row_nnz = row_offsets_part[i + 1] - row_offsets_part[i];
        //read in data as UpValueTypeA
        is_read = fread(&temp[n_nonzeros_part * block_dimy * block_dimx], sizeof(UpValueTypeA), row_nnz * block_dimy * block_dimx, fin);
        n_nonzeros_part += row_nnz;
        if (is_read != row_nnz * block_dimy * block_dimx)
        {
            err = "fread failed reading off-diagonal values, exiting";
            FatalError(err, AMGX_ERR_IO);
        }
    }
    //copy with cast data to ValueTypeA
    val_copy(temp.data(), nonzero_values_ptr, n_nonzeros_part * block_dimy * block_dimx);
    data_pos += sizeof(UpValueTypeA) * num_nz * block_dimx * block_dimy;
    if (diag)
    {
        temp.resize(n_rows_part * block_dimx * block_dimy);
        //read in diagonal data as UpValueTypeA
        for (int i = 0; i < partRowVec.size(); i++)
        {
            if (fseek(fin, data_pos + sizeof(UpValueTypeA) * partRowVec[i] * block_dimx * block_dimy, SEEK_SET) != 0)
            {
                FatalError("fseek error", AMGX_ERR_IO);
            }
            is_read = fread(&temp[i * block_dimx * block_dimy], sizeof(UpValueTypeA), block_dimx * block_dimy, fin);
            if (is_read != block_dimx * block_dimy)
            {
                err = "fread failed reading diagonal values, exiting";
                FatalError(err, AMGX_ERR_IO);
            }
        }
        //copy with cast data to ValueTypeA
        val_copy(temp.data(), dia_values_ptr, n_rows_part * block_dimx * block_dimy);
        // Fix: advance by the stored element size; sizeof(double) was wrong
        // for complex builds, where UpValueTypeA is a double complex and the
        // reads above consumed sizeof(UpValueTypeA) bytes per value.
        data_pos += sizeof(UpValueTypeA) * num_rows * block_dimx * block_dimy;
    }
    else // fill last values item with zeros
    {
        thrust::fill(A.values.begin() + A.get_num_nz() * block_dimy * block_dimx, A.values.end(), types::util<ValueTypeA>::get_zero());
        cudaCheckError();
    }
    //printf("Reading values\n"); fflush(stdout);
    b.resize(n_rows_part * block_dimy);
    b.set_block_dimy(block_dimy);
    b.set_block_dimx(1);
    temp.resize(n_rows_part * block_dimy);
    if (is_rhs)
    {
        for (int i = 0; i < partRowVec.size(); i++)
        {
            if (fseek(fin, data_pos + sizeof(UpValueTypeA) * partRowVec[i] * block_dimy, SEEK_SET) != 0)
            {
                FatalError("fseek error", AMGX_ERR_IO);
            }
            //read in data as a double (doublecomplex)
            is_read = fread(&temp[i * block_dimy], sizeof(UpValueTypeA), block_dimy, fin);
            // if the rhs exists, we must have read the whole thing
            if (is_read != block_dimy)
            {
                err = "fread failed reading rhs, exiting";
                FatalError(err, AMGX_ERR_IO);
            }
        }
        //cast data to ValueTypeB
        val_copy(temp.data(), b.raw(), n_rows_part * block_dimy);
        data_pos += sizeof(UpValueTypeA) * num_rows * block_dimy;
    }
    else
    {
        // No RHS stored: default to b = [1,...,1]^T.
        thrust::fill(b.begin(), b.end(), types::util<ValueTypeB>::get_one());
        cudaCheckError();
    }
    x.resize(0);
    if (is_soln)
    {
        x.resize(n_rows_part * block_dimx);
        x.set_block_dimx(1);
        x.set_block_dimy(block_dimy);
        temp.resize(n_rows_part * block_dimx);
        for (int i = 0; i < partRowVec.size(); i++)
        {
            if (fseek(fin, data_pos + sizeof(UpValueTypeA) * partRowVec[i] * block_dimx, SEEK_SET) != 0)
            {
                FatalError("fseek error", AMGX_ERR_IO);
            }
            //read in data as a double
            is_read = fread(&temp[i * block_dimx], sizeof(UpValueTypeA), block_dimx, fin);
            if (is_read != block_dimx)
            {
                // Fix: the message previously said "rhs" for a solution read.
                err = "fread failed reading solution, exiting";
                FatalError(err, AMGX_ERR_IO);
            }
        }
        val_copy(temp.data(), x.raw(), n_rows_part * block_dimx);
    }
    fclose(fin);
    if (rank_rows.size() > 0)
    {
        A.set_is_matrix_read_partitioned(true);
        b.set_is_vector_read_partitioned(true);
        if (x.size() > 0)
        {
            x.set_is_vector_read_partitioned(true);
        }
    }
    else
    {
        // The identity partition vector was heap-allocated above.
        delete partRowVec_p;
    }
    return true;
}
/****************************************
 * Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class ReadMatrixMarket<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class ReadNVAMGBinary<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
|
2c459a3dd07af895ea3c910d3082ab5993b0e5be.hip | // !!! This is a file automatically generated by hipify!!!
#define DIM 8192
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <chrono>
// Naive dense matrix multiply C = A * B for square row-major size x size
// matrices.  Expects a 2-D launch covering at least size x size threads;
// out-of-range threads exit early.
__global__
void matrixMul(double *A, double *B, double *C, int size) {
	int Row = blockIdx.y*blockDim.y + threadIdx.y;
	int Col = blockIdx.x*blockDim.x + threadIdx.x;
	// Guard: the grid is rounded up to a block-size multiple, so edge threads
	// must not touch memory.
	if (Row >= size || Col >= size) return;
	double Cvalue = 0.0;
	for (int k = 0; k < size; k++) {
		// Fix: the original read B[size * k + Row] (column `Row` of B), which
		// only happens to give the right answer for symmetric inputs such as
		// the all-ones matrices used in main().
		Cvalue += A[size * Row + k] * B[size * k + Col];
	}
	C[Row * size + Col] = Cvalue;
}
// Host-side driver: copies h_A/h_B to the device, runs matrixMul on a 16x16
// thread grid covering DIM x DIM elements, and copies the product into h_C.
// (Despite the name, this performs a matrix multiplication.)
void addKernel(double *h_A, double *h_B, double *h_C, int size) {
	int nbytes = size * size * sizeof(double);
	double *d_A, *d_B, *d_C;
	hipMalloc((void **)&d_A, nbytes);
	hipMalloc((void **)&d_B, nbytes);
	hipMalloc((void **)&d_C, nbytes);
	hipMemcpy(d_A, h_A, nbytes, hipMemcpyHostToDevice);
	hipMemcpy(d_B, h_B, nbytes, hipMemcpyHostToDevice);
	dim3 block(16, 16, 1);
	dim3 grid((int)ceil((double)DIM / 16), (int)ceil((double)DIM / 16), 1);
	matrixMul<<<grid, block>>>(d_A, d_B, d_C, size);
	// The blocking device-to-host copy also synchronizes with the kernel.
	hipMemcpy(h_C, d_C, nbytes, hipMemcpyDeviceToHost);
	hipFree(d_A);
	hipFree(d_B);
	hipFree(d_C);
}
// Fills the matrix with constant double values (every cell is set to 1.0)
// Sets every entry of M to the constant 1.0.
void populateMatrix(double M[DIM][DIM]) {
	for (int r = 0; r < DIM; r++)
		for (int c = 0; c < DIM; c++)
			M[r][c] = 1.0;
}
// Prints a matrix to stdout, one row per line
// Dumps M to stdout: space-separated values, one matrix row per line.
void printMatrix(double M[DIM][DIM]) {
	for (int r = 0; r < DIM; r++) {
		for (int c = 0; c < DIM; c++)
			printf("%f ", M[r][c]);
		printf("\n");
	}
}
// Allocates three DIM x DIM matrices, fills the operands with 1.0, times the
// full GPU multiply (transfers included) and prints the elapsed seconds.
int main() {
	double *A = (double *)malloc(DIM * DIM * sizeof(double));
	double *B = (double *)malloc(DIM * DIM * sizeof(double));
	double *C = (double *)malloc(DIM * DIM * sizeof(double));
	// Deterministic operands: every entry is 1.0.
	for (int i = 0; i < DIM; i++) {
		for (int j = 0; j < DIM; j++) {
			A[i * DIM + j] = 1.0;
			B[i * DIM + j] = 1.0;
		}
	}
	std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
	addKernel(A, B, C, DIM);
	std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
	double seconds = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count();
	printf("%f\n", seconds);
	free(A);
	free(B);
	free(C);
	return 0;
}
| 2c459a3dd07af895ea3c910d3082ab5993b0e5be.cu | #define DIM 8192
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <chrono>
// Naive dense matrix multiply C = A * B for square row-major size x size
// matrices.  Expects a 2-D launch covering at least size x size threads;
// out-of-range threads exit early.
__global__
void matrixMul(double *A, double *B, double *C, int size) {
	int Row = blockIdx.y*blockDim.y + threadIdx.y;
	int Col = blockIdx.x*blockDim.x + threadIdx.x;
	// Guard: the grid is rounded up to a block-size multiple, so edge threads
	// must not touch memory.
	if (Row >= size || Col >= size) return;
	double Cvalue = 0.0;
	for (int k = 0; k < size; k++) {
		// Fix: the original read B[size * k + Row] (column `Row` of B), which
		// only happens to give the right answer for symmetric inputs such as
		// the all-ones matrices used in main().
		Cvalue += A[size * Row + k] * B[size * k + Col];
	}
	C[Row * size + Col] = Cvalue;
}
// Host-side driver: copies h_A/h_B to the device, runs matrixMul on a 16x16
// thread grid covering DIM x DIM elements, and copies the product into h_C.
// (Despite the name, this performs a matrix multiplication.)
void addKernel(double *h_A, double *h_B, double *h_C, int size) {
	int nbytes = size * size * sizeof(double);
	double *d_A, *d_B, *d_C;
	cudaMalloc((void **)&d_A, nbytes);
	cudaMalloc((void **)&d_B, nbytes);
	cudaMalloc((void **)&d_C, nbytes);
	cudaMemcpy(d_A, h_A, nbytes, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, h_B, nbytes, cudaMemcpyHostToDevice);
	dim3 block(16, 16, 1);
	dim3 grid((int)ceil((double)DIM / 16), (int)ceil((double)DIM / 16), 1);
	matrixMul<<<grid, block>>>(d_A, d_B, d_C, size);
	// The blocking device-to-host copy also synchronizes with the kernel.
	cudaMemcpy(h_C, d_C, nbytes, cudaMemcpyDeviceToHost);
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
}
// Fills the matrix with constant double values (every cell is set to 1.0)
// Sets every entry of M to the constant 1.0.
void populateMatrix(double M[DIM][DIM]) {
	for (int r = 0; r < DIM; r++)
		for (int c = 0; c < DIM; c++)
			M[r][c] = 1.0;
}
// Prints a matrix to stdout, one row per line
// Dumps M to stdout: space-separated values, one matrix row per line.
void printMatrix(double M[DIM][DIM]) {
	for (int r = 0; r < DIM; r++) {
		for (int c = 0; c < DIM; c++)
			printf("%f ", M[r][c]);
		printf("\n");
	}
}
// Allocates three DIM x DIM matrices, fills the operands with 1.0, times the
// full GPU multiply (transfers included) and prints the elapsed seconds.
int main() {
	double *A = (double *)malloc(DIM * DIM * sizeof(double));
	double *B = (double *)malloc(DIM * DIM * sizeof(double));
	double *C = (double *)malloc(DIM * DIM * sizeof(double));
	// Deterministic operands: every entry is 1.0.
	for (int i = 0; i < DIM; i++) {
		for (int j = 0; j < DIM; j++) {
			A[i * DIM + j] = 1.0;
			B[i * DIM + j] = 1.0;
		}
	}
	std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
	addKernel(A, B, C, DIM);
	std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
	double seconds = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count();
	printf("%f\n", seconds);
	free(A);
	free(B);
	free(C);
	return 0;
}
|
c3abb5998fe6a76e6dd70876877c54ea1ccec955.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "TimingGPU.cuh"
#include "Utilities.cuh"
#define RG 10
#define BLOCKSIZE 8
/****************/
/* CPU FUNCTION */
/****************/
/* Reference CPU implementation of "same"-size 1-D convolution: for each
   output index i, accumulates h_Signal[i - K/2 + j] * h_ConvKernel[j] over
   the taps that fall inside the signal (implicit zero padding at borders). */
void h_convolution_1D(const float * __restrict__ h_Signal, const float * __restrict__ h_ConvKernel, float * __restrict__ h_Result_CPU,
                      const int N, const int K) {
    const int half = K / 2;
    for (int i = 0; i < N; i++) {
        float acc = 0.f;
        for (int j = 0; j < K; j++) {
            const int s = i - half + j;
            if (s >= 0 && s < N) acc += h_Signal[s] * h_ConvKernel[j];
        }
        h_Result_CPU[i] = acc;
    }
}
/********************/
/* BASIC GPU KERNEL */
/********************/
// 1-D convolution, one output element per thread, reading directly from
// global memory.  Launch with at least N threads in total.
__global__ void d_convolution_1D_basic(const float * __restrict__ d_Signal, const float * __restrict__ d_ConvKernel, float * __restrict__ d_Result_GPU,
                                       const int N, const int K) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Fix: the grid is rounded up to a multiple of BLOCKSIZE, so threads with
    // i >= N previously read and wrote past the end of the N-element buffers.
    if (i >= N) return;
    float temp = 0.f;
    int N_start_point = i - (K / 2);
    for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {
        temp += d_Signal[N_start_point + j] * d_ConvKernel[j];
    }
    d_Result_GPU[i] = temp;
}
/***************************/
/* GPU KERNEL WITH CACHING */
/***************************/
// 1-D convolution where each block stages its BLOCKSIZE signal elements in
// shared memory; taps that fall inside the block's tile are served from
// shared memory, the rest from global memory.  Launch with >= N threads and
// blockDim.x == BLOCKSIZE.
__global__ void d_convolution_1D_caching(const float * __restrict__ d_Signal, const float * __restrict__ d_ConvKernel, float * __restrict__ d_Result_GPU,
                                         const int N, const int K) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ float d_Tile[BLOCKSIZE];
    // Fix: guard the global load — on the last (rounded-up) block, i can be
    // >= N and the unguarded d_Signal[i] read was out of bounds.  Out-of-range
    // slots are zero-filled; they are never consumed below because the
    // N-bound check on N_start_point + j excludes them.
    d_Tile[threadIdx.x] = (i < N) ? d_Signal[i] : 0.f;
    __syncthreads();
    // Exit only after the barrier so every thread in the block reaches
    // __syncthreads() (no divergent barrier), and out-of-range threads no
    // longer write d_Result_GPU[i] out of bounds.
    if (i >= N) return;
    float temp = 0.f;
    int N_start_point = i - (K / 2);
    for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {
        if ((N_start_point + j >= blockIdx.x * blockDim.x) && (N_start_point + j < (blockIdx.x + 1) * blockDim.x))
            // --- The signal element is in the tile loaded in the shared memory
            temp += d_Tile[threadIdx.x + j - (K / 2)] * d_ConvKernel[j];
        else
            // --- The signal element is not in the tile loaded in the shared memory
            temp += d_Signal[N_start_point + j] * d_ConvKernel[j];
    }
    d_Result_GPU[i] = temp;
}
/********/
/* MAIN */
/********/
// Builds a small random signal/kernel pair, computes a CPU reference
// convolution, then checks the basic and shared-memory GPU kernels against
// it element-by-element.
int main(){
    const int N = 15;           // --- Signal length
    const int K = 5;            // --- Convolution kernel length
    float *h_Signal         = (float *)malloc(N * sizeof(float));
    float *h_Result_CPU     = (float *)malloc(N * sizeof(float));
    float *h_Result_GPU     = (float *)malloc(N * sizeof(float));
    float *h_ConvKernel     = (float *)malloc(K * sizeof(float));
    float *d_Signal;        gpuErrchk(hipMalloc(&d_Signal,     N * sizeof(float)));
    float *d_Result_GPU;    gpuErrchk(hipMalloc(&d_Result_GPU, N * sizeof(float)));
    float *d_ConvKernel;    gpuErrchk(hipMalloc(&d_ConvKernel, K * sizeof(float)));
    // Small random integers: the float sums stay exact, so CPU and GPU
    // results can be compared with ==.
    for (int i=0; i < N; i++) { h_Signal[i] = (float)(rand() % RG); }
    for (int i=0; i < K; i++) { h_ConvKernel[i] = (float)(rand() % RG); }
    gpuErrchk(hipMemcpy(d_Signal,     h_Signal,     N * sizeof(float), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_ConvKernel, h_ConvKernel, K * sizeof(float), hipMemcpyHostToDevice));
    h_convolution_1D(h_Signal, h_ConvKernel, h_Result_CPU, N, K);
    hipLaunchKernelGGL(( d_convolution_1D_basic), dim3(iDivUp(N, BLOCKSIZE)), dim3(BLOCKSIZE), 0, 0, d_Signal, d_ConvKernel, d_Result_GPU, N, K);
    gpuErrchk(hipPeekAtLastError());
    gpuErrchk(hipDeviceSynchronize());
    gpuErrchk(hipMemcpy(h_Result_GPU, d_Result_GPU, N * sizeof(float), hipMemcpyDeviceToHost));
    // Fix: the mismatch reports used %d for float varargs (undefined
    // behaviour, garbage output) and labelled both checks "mismatch2".
    for (int i = 0; i < N; i++) if (h_Result_CPU[i] != h_Result_GPU[i]) {printf("mismatch basic at %d, cpu: %f, gpu %f\n", i, h_Result_CPU[i], h_Result_GPU[i]); return 1;}
    printf("Test basic passed\n");
    hipLaunchKernelGGL(( d_convolution_1D_caching), dim3(iDivUp(N, BLOCKSIZE)), dim3(BLOCKSIZE), 0, 0, d_Signal, d_ConvKernel, d_Result_GPU, N, K);
    gpuErrchk(hipPeekAtLastError());
    gpuErrchk(hipDeviceSynchronize());
    gpuErrchk(hipMemcpy(h_Result_GPU, d_Result_GPU, N * sizeof(float), hipMemcpyDeviceToHost));
    for (int i = 0; i < N; i++) if (h_Result_CPU[i] != h_Result_GPU[i]) {printf("mismatch caching at %d, cpu: %f, gpu %f\n", i, h_Result_CPU[i], h_Result_GPU[i]); return 1;}
    printf("Test caching passed\n");
    return 0;
}
| c3abb5998fe6a76e6dd70876877c54ea1ccec955.cu | #include <stdio.h>
#include <stdlib.h>
#include "TimingGPU.cuh"
#include "Utilities.cuh"
#define RG 10
#define BLOCKSIZE 8
/****************/
/* CPU FUNCTION */
/****************/
/* Reference CPU implementation of "same"-size 1-D convolution: for each
   output index i, accumulates h_Signal[i - K/2 + j] * h_ConvKernel[j] over
   the taps that fall inside the signal (implicit zero padding at borders). */
void h_convolution_1D(const float * __restrict__ h_Signal, const float * __restrict__ h_ConvKernel, float * __restrict__ h_Result_CPU,
                      const int N, const int K) {
    const int half = K / 2;
    for (int i = 0; i < N; i++) {
        float acc = 0.f;
        for (int j = 0; j < K; j++) {
            const int s = i - half + j;
            if (s >= 0 && s < N) acc += h_Signal[s] * h_ConvKernel[j];
        }
        h_Result_CPU[i] = acc;
    }
}
/********************/
/* BASIC GPU KERNEL */
/********************/
// 1-D convolution, one output element per thread, reading directly from
// global memory.  Launch with at least N threads in total.
__global__ void d_convolution_1D_basic(const float * __restrict__ d_Signal, const float * __restrict__ d_ConvKernel, float * __restrict__ d_Result_GPU,
                                       const int N, const int K) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Fix: the grid is rounded up to a multiple of BLOCKSIZE, so threads with
    // i >= N previously read and wrote past the end of the N-element buffers.
    if (i >= N) return;
    float temp = 0.f;
    int N_start_point = i - (K / 2);
    for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {
        temp += d_Signal[N_start_point + j] * d_ConvKernel[j];
    }
    d_Result_GPU[i] = temp;
}
/***************************/
/* GPU KERNEL WITH CACHING */
/***************************/
// 1-D convolution where each block stages its BLOCKSIZE signal elements in
// shared memory; taps that fall inside the block's tile are served from
// shared memory, the rest from global memory.  Launch with >= N threads and
// blockDim.x == BLOCKSIZE.
__global__ void d_convolution_1D_caching(const float * __restrict__ d_Signal, const float * __restrict__ d_ConvKernel, float * __restrict__ d_Result_GPU,
                                         const int N, const int K) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ float d_Tile[BLOCKSIZE];
    // Fix: guard the global load — on the last (rounded-up) block, i can be
    // >= N and the unguarded d_Signal[i] read was out of bounds.  Out-of-range
    // slots are zero-filled; they are never consumed below because the
    // N-bound check on N_start_point + j excludes them.
    d_Tile[threadIdx.x] = (i < N) ? d_Signal[i] : 0.f;
    __syncthreads();
    // Exit only after the barrier so every thread in the block reaches
    // __syncthreads() (no divergent barrier), and out-of-range threads no
    // longer write d_Result_GPU[i] out of bounds.
    if (i >= N) return;
    float temp = 0.f;
    int N_start_point = i - (K / 2);
    for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {
        if ((N_start_point + j >= blockIdx.x * blockDim.x) && (N_start_point + j < (blockIdx.x + 1) * blockDim.x))
            // --- The signal element is in the tile loaded in the shared memory
            temp += d_Tile[threadIdx.x + j - (K / 2)] * d_ConvKernel[j];
        else
            // --- The signal element is not in the tile loaded in the shared memory
            temp += d_Signal[N_start_point + j] * d_ConvKernel[j];
    }
    d_Result_GPU[i] = temp;
}
/********/
/* MAIN */
/********/
// Builds a small random signal/kernel pair, computes a CPU reference
// convolution, then checks the basic and shared-memory GPU kernels against
// it element-by-element.
int main(){
    const int N = 15;           // --- Signal length
    const int K = 5;            // --- Convolution kernel length
    float *h_Signal         = (float *)malloc(N * sizeof(float));
    float *h_Result_CPU     = (float *)malloc(N * sizeof(float));
    float *h_Result_GPU     = (float *)malloc(N * sizeof(float));
    float *h_ConvKernel     = (float *)malloc(K * sizeof(float));
    float *d_Signal;        gpuErrchk(cudaMalloc(&d_Signal,     N * sizeof(float)));
    float *d_Result_GPU;    gpuErrchk(cudaMalloc(&d_Result_GPU, N * sizeof(float)));
    float *d_ConvKernel;    gpuErrchk(cudaMalloc(&d_ConvKernel, K * sizeof(float)));
    // Small random integers: the float sums stay exact, so CPU and GPU
    // results can be compared with ==.
    for (int i=0; i < N; i++) { h_Signal[i] = (float)(rand() % RG); }
    for (int i=0; i < K; i++) { h_ConvKernel[i] = (float)(rand() % RG); }
    gpuErrchk(cudaMemcpy(d_Signal,     h_Signal,     N * sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_ConvKernel, h_ConvKernel, K * sizeof(float), cudaMemcpyHostToDevice));
    h_convolution_1D(h_Signal, h_ConvKernel, h_Result_CPU, N, K);
    d_convolution_1D_basic<<<iDivUp(N, BLOCKSIZE), BLOCKSIZE>>>(d_Signal, d_ConvKernel, d_Result_GPU, N, K);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpy(h_Result_GPU, d_Result_GPU, N * sizeof(float), cudaMemcpyDeviceToHost));
    // Fix: the mismatch reports used %d for float varargs (undefined
    // behaviour, garbage output) and labelled both checks "mismatch2".
    for (int i = 0; i < N; i++) if (h_Result_CPU[i] != h_Result_GPU[i]) {printf("mismatch basic at %d, cpu: %f, gpu %f\n", i, h_Result_CPU[i], h_Result_GPU[i]); return 1;}
    printf("Test basic passed\n");
    d_convolution_1D_caching<<<iDivUp(N, BLOCKSIZE), BLOCKSIZE>>>(d_Signal, d_ConvKernel, d_Result_GPU, N, K);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpy(h_Result_GPU, d_Result_GPU, N * sizeof(float), cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; i++) if (h_Result_CPU[i] != h_Result_GPU[i]) {printf("mismatch caching at %d, cpu: %f, gpu %f\n", i, h_Result_CPU[i], h_Result_GPU[i]); return 1;}
    printf("Test caching passed\n");
    return 0;
}
|
4419d7dfae21abfaabb0974aae45f4ef1a5e43f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Small synthetic kernel using single-block indexing (i = threadIdx.x only).
// The loop reads A[i+1] and writes A[i], so adjacent in-range threads race on
// the same element — the "// DR" marker suggests this is deliberate input for
// a data-race detector, as are the unused locals (foo, bar, abc) and the
// float-to-int narrowing of bar.
// NOTE(review): at i == N-1 the loop reads A[N], one past the guarded range —
// confirm the launcher allocates at least N+1 floats.
__global__ void test(float *A, const int N){
	int i = threadIdx.x;
	if (i < N){
		int foo = 3 * 5 + 2;
		for(int j = 0; j < 10; j++){
			float x = A[i+1];
			A[i] = x; // DR
		}
		int bar = A[N-1] + A[N-2];
	}else{
		int abc = 3 * 5 + 2;
		int bar = A[N-1] + A[N-2];
	}
}
4419d7dfae21abfaabb0974aae45f4ef1a5e43f8.cu | __global__ void test(float *A, const int N){
	// CUDA twin of the HIP `test` kernel above: the A[i] = A[i+1] loop races
	// between adjacent threads ("// DR" marker — presumably deliberate
	// race-detector test input, like the unused locals and narrowing stores).
	int i = threadIdx.x;
	if (i < N){
		int foo = 3 * 5 + 2;
		for(int j = 0; j < 10; j++){
			float x = A[i+1];
			A[i] = x; // DR
		}
		int bar = A[N-1] + A[N-2];
	}else{
		int abc = 3 * 5 + 2;
		int bar = A[N-1] + A[N-2];
	}
}
|
3c6edd63da68204c2e8dbc8d380ba5b29272c701.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*--------------------------------------------------------------------------*\
Copyright (c) 2008-2010, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _CAST_FLOAT4_H_
#define _CAST_FLOAT4_H_
#include "memcpy.cu"
//--------------------------------------------------------------------------
// Declare the interleaved copu CUDA kernel
//--------------------------------------------------------------------------
// Casts one slice of 3-channel interleaved voxel data (type T) into a pitched
// float4 slice: each channel is scaled by 1/Multiplier<T>() and the fourth
// component is set to 1.  One thread per voxel; expects a 2-D launch covering
// width x height.
template<class T> __global__ void CopyCastInterleaved(uchar* destination, const T* source, uint pitch, uint width)
{
	uint2 index = make_uint2(
		__umul24(blockIdx.x, blockDim.x) + threadIdx.x,
		__umul24(blockIdx.y, blockDim.y) + threadIdx.y);
	// First of the three interleaved source channels for this voxel.
	uint index3 = 3 * (index.y * width + index.x);
	float4* dest = (float4*)(destination + index.y * pitch) + index.x;
	// Multiplier<T>() presumably is the normalization constant for type T
	// (defined in memcpy.cu) — confirm there.
	float mult = 1.0f / Multiplier<T>();
	*dest = make_float4(
		mult * (float)source[index3],
		mult * (float)source[index3+1],
		mult * (float)source[index3+2], 1.0f);
}
//--------------------------------------------------------------------------
// Declare the typecast templated function
// This function can be called directly in C++ programs
//--------------------------------------------------------------------------
//! Allocate GPU memory and copy a voxel volume from CPU to GPU memory
//! and cast it to the normalized floating point format
//! @return the pointer to the GPU copy of the voxel volume
//! @param host pointer to the voxel volume in CPU (host) memory
//! @param width volume width in number of voxels
//! @param height volume height in number of voxels
//! @param depth volume depth in number of voxels
template<class T> extern hipPitchedPtr CastVolumeHost3ToDevice4(const T* host, uint width, uint height, uint depth)
{
	hipPitchedPtr device = {0};
	const hipExtent extent = make_hipExtent(width * sizeof(float4), height, depth);
	CUDA_SAFE_CALL(hipMalloc3D(&device, extent));
	const size_t pitchedBytesPerSlice = device.pitch * device.ysize;
	// Staging buffer: one slice of raw 3-channel data in the source type.
	T* temp = 0;
	const uint voxelsPerSlice = width * height;
	const size_t nrOfBytesTemp = voxelsPerSlice * 3 * sizeof(T);
	CUDA_SAFE_CALL(hipMalloc((void**)&temp, nrOfBytesTemp));
	// Launch config: power-of-two block dims; the grid division below assumes
	// width/height are multiples of the chosen block dims.
	uint dimX = min(PowTwoDivider(width), 64);
	dim3 dimBlock(dimX, min(PowTwoDivider(height), 512 / dimX));
	dim3 dimGrid(width / dimBlock.x, height / dimBlock.y);
	size_t offsetHost = 0;
	size_t offsetDevice = 0;
	// Stream the volume slice by slice: H2D copy of the raw data, then an
	// on-device cast/normalize into the pitched float4 volume.
	for (uint slice = 0; slice < depth; slice++)
	{
		CUDA_SAFE_CALL(hipMemcpy(temp, host + offsetHost, nrOfBytesTemp, hipMemcpyHostToDevice));
		hipLaunchKernelGGL(( CopyCastInterleaved<T>), dim3(dimGrid), dim3(dimBlock), 0, 0, (uchar*)device.ptr + offsetDevice, temp, (uint)device.pitch, width);
		CUT_CHECK_ERROR("Cast kernel failed");
		// NOTE(review): each slice holds 3 * voxelsPerSlice T values, but the
		// host offset advances by only voxelsPerSlice elements — confirm this
		// stride is intended for 3-channel input.
		offsetHost += voxelsPerSlice;
		offsetDevice += pitchedBytesPerSlice;
	}
	CUDA_SAFE_CALL(hipFree(temp)); //free the temp GPU volume
	return device;
}
#endif //_CAST_FLOAT4_H_
| 3c6edd63da68204c2e8dbc8d380ba5b29272c701.cu | /*--------------------------------------------------------------------------*\
Copyright (c) 2008-2010, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _CAST_FLOAT4_H_
#define _CAST_FLOAT4_H_
#include "memcpy.cu"
//--------------------------------------------------------------------------
// Declare the interleaved copy CUDA kernel
//--------------------------------------------------------------------------
// Copy one 2D slice of 3-channel interleaved voxel data into a pitched float4
// destination, scaling each sample by 1/Multiplier<T>() (normalization) and
// forcing the alpha channel to 1.0f.
// Launch expectation: one thread per output texel; the caller sizes the grid
// so width/height are exact multiples of the block dims (no bounds check here).
template<class T> __global__ void CopyCastInterleaved(uchar* destination, const T* source, uint pitch, uint width)
{
	// 2D texel coordinate of this thread (__umul24: fast 24-bit multiply).
	uint2 index = make_uint2(
		__umul24(blockIdx.x, blockDim.x) + threadIdx.x,
		__umul24(blockIdx.y, blockDim.y) + threadIdx.y);
	// Source is tightly packed with 3 samples per voxel.
	uint index3 = 3 * (index.y * width + index.x);
	// Destination rows are 'pitch' bytes apart; step in float4 units within a row.
	float4* dest = (float4*)(destination + index.y * pitch) + index.x;
	float mult = 1.0f / Multiplier<T>();  // e.g. 1/255 for 8-bit input
	*dest = make_float4(
		mult * (float)source[index3],
		mult * (float)source[index3+1],
		mult * (float)source[index3+2], 1.0f);
}
//--------------------------------------------------------------------------
// Declare the typecast templated function
// This function can be called directly in C++ programs
//--------------------------------------------------------------------------
//! Allocate GPU memory and copy a voxel volume from CPU to GPU memory
//! and cast it to the normalized floating point format
//! @return the pointer to the GPU copy of the voxel volume
//! @param host pointer to the voxel volume in CPU (host) memory
//! @param width volume width in number of voxels
//! @param height volume height in number of voxels
//! @param depth volume depth in number of voxels
// Allocate a pitched float4 GPU volume and fill it slice-by-slice from a host
// volume of interleaved 3-channel samples, normalizing each channel and
// setting alpha to 1.  Returns the cudaPitchedPtr describing the GPU copy.
// @param host    pointer to the voxel volume in CPU memory (3 T's per voxel)
// @param width   volume width in voxels
// @param height  volume height in voxels
// @param depth   volume depth in voxels
template<class T> extern cudaPitchedPtr CastVolumeHost3ToDevice4(const T* host, uint width, uint height, uint depth)
{
	// Pitched float4 destination volume (one row = width float4 texels).
	cudaPitchedPtr device = {0};
	const cudaExtent extent = make_cudaExtent(width * sizeof(float4), height, depth);
	CUDA_SAFE_CALL(cudaMalloc3D(&device, extent));
	const size_t pitchedBytesPerSlice = device.pitch * device.ysize;

	// Temporary device buffer holding one slice of raw interleaved input.
	T* temp = 0;
	const uint voxelsPerSlice = width * height;
	const size_t nrOfBytesTemp = voxelsPerSlice * 3 * sizeof(T);
	CUDA_SAFE_CALL(cudaMalloc((void**)&temp, nrOfBytesTemp));

	// Block dims are power-of-two divisors of width/height, so the grid tiles
	// the slice exactly and CopyCastInterleaved needs no bounds checks.
	uint dimX = min(PowTwoDivider(width), 64);
	dim3 dimBlock(dimX, min(PowTwoDivider(height), 512 / dimX));
	dim3 dimGrid(width / dimBlock.x, height / dimBlock.y);

	size_t offsetHost = 0;    // host offset in T elements
	size_t offsetDevice = 0;  // device offset in bytes
	for (uint slice = 0; slice < depth; slice++)
	{
		CUDA_SAFE_CALL(cudaMemcpy(temp, host + offsetHost, nrOfBytesTemp, cudaMemcpyHostToDevice));
		CopyCastInterleaved<T><<<dimGrid, dimBlock>>>((uchar*)device.ptr + offsetDevice, temp, (uint)device.pitch, width);
		CUT_CHECK_ERROR("Cast kernel failed");
		// Bugfix: each slice occupies 3 samples per voxel in the host buffer
		// (the copy above moves voxelsPerSlice*3 T's), so the host offset must
		// advance by 3*voxelsPerSlice elements.  Previously it advanced by only
		// voxelsPerSlice, so slices past the first re-read overlapping data.
		offsetHost += voxelsPerSlice * 3;
		offsetDevice += pitchedBytesPerSlice;
	}

	CUDA_SAFE_CALL(cudaFree(temp));  //free the temp GPU volume
	return device;
}
#endif //_CAST_FLOAT4_H_
|
d59c6d03c35c0f45e9a5b78f1de4b727258e2d0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//n<=4096, m<=1024
// Approximate Earth Mover's Distance matching (annealed soft assignment).
// One thread block processes one batch element at a time (grid-stride over b).
// Mass is iteratively transported from the n points of xyz1 onto the m points
// of xyz2 through a Gaussian affinity whose bandwidth is annealed from very
// sharp (j=7) to uniform (j=-2, level=0).  Assignment weights accumulate into
// match, laid out as [b][m][n].  Shared-memory tables cap n<=4096, m<=1024.
__global__ void approxmatch(int b,
                            int n,
                            int m,
                            const float * __restrict__ xyz1,
                            const float * __restrict__ xyz2,
                            float * __restrict__ match){
	const int MaxN=4096,MaxM=1024;
	// remainL/remainR: mass not yet assigned on each side.
	// ratioL/ratioR: per-point scaling factors for the current round.
	// listR: indices of right-hand points that still have capacity; lc = count.
	__shared__ float remainL[MaxN],remainR[MaxM],ratioR[MaxM],ratioL[MaxN];
	__shared__ int listR[MaxM],lc;
	// Each point on the smaller side carries proportionally more mass.
	// NOTE(review): n/m and m/n are integer divisions — presumably n and m are
	// expected to divide each other evenly; confirm with callers.
	float multiL,multiR;
	if (n>=m){
		multiL=1;
		multiR=n/m;
	}else{
		multiL=m/n;
		multiR=1;
	}
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		// Reset the output slice and the remaining-mass tables.
		for (int j=threadIdx.x;j<n*m;j+=blockDim.x)
			match[i*n*m+j]=0;
		for (int j=threadIdx.x;j<n;j+=blockDim.x)
			remainL[j]=multiL;
		for (int j=threadIdx.x;j<m;j+=blockDim.x)
			remainR[j]=multiR;
		__syncthreads();
		// Annealing schedule: level = -4^j, ending with level = 0 (uniform).
		for (int j=7;j>=-2;j--){
			float level=-powf(4.0f,j);
			if (j==-2){
				level=0;
			}
			// Thread 0 gathers the right-hand points with remaining capacity.
			if (threadIdx.x==0){
				lc=0;
				for (int k=0;k<m;k++)
					if (remainR[k]>0)
						listR[lc++]=k;
			}
			__syncthreads();
			int _lc=lc;
			// Pass 1: for each left point, normalize its outgoing affinity so
			// it can distribute its remaining mass (ratioL = remainL / total weight).
			for (int k=threadIdx.x;k<n;k+=blockDim.x){
				float suml=1e-9f;  // epsilon guards the division below
				float x1=xyz1[(i*n+k)*3+0];
				float y1=xyz1[(i*n+k)*3+1];
				float z1=xyz1[(i*n+k)*3+2];
				for (int _l=0;_l<_lc;_l++){
					int l=listR[_l];
					float x2=xyz2[(i*m+l)*3+0]-x1;
					float y2=xyz2[(i*m+l)*3+1]-y1;
					float z2=xyz2[(i*m+l)*3+2]-z1;
					float w=expf(level*(x2*x2+y2*y2+z2*z2))*remainR[l];
					suml+=w;
				}
				ratioL[k]=remainL[k]/suml;
			}
			__syncthreads();
			// Pass 2: for each active right point, measure the incoming mass
			// and derive a scale factor capping it at the remaining capacity.
			for (int _k=threadIdx.x;_k<lc;_k+=blockDim.x){
				int k=listR[_k];
				float sumr=0;
				float x2=xyz2[(i*m+k)*3+0];
				float y2=xyz2[(i*m+k)*3+1];
				float z2=xyz2[(i*m+k)*3+2];
				for (int l=0;l<n;l++){
					float x1=xyz1[(i*n+l)*3+0]-x2;
					float y1=xyz1[(i*n+l)*3+1]-y2;
					float z1=xyz1[(i*n+l)*3+2]-z2;
					float w=expf(level*(x1*x1+y1*y1+z1*z1))*ratioL[l];
					sumr+=w;
				}
				sumr*=remainR[k];
				float consumption=fminf(remainR[k]/(sumr+1e-9f),1.0f);
				ratioR[k]=consumption*remainR[k];
				remainR[k]=fmaxf(0.0f,remainR[k]-sumr);
			}
			__syncthreads();
			// Pass 3: commit the transported mass into match and reduce the
			// left-hand remaining mass accordingly.
			for (int k=threadIdx.x;k<n;k+=blockDim.x){
				float suml=0;
				float x1=xyz1[(i*n+k)*3+0];
				float y1=xyz1[(i*n+k)*3+1];
				float z1=xyz1[(i*n+k)*3+2];
				for (int _l=0;_l<_lc;_l++){
					int l=listR[_l];
					float x2=xyz2[(i*m+l)*3+0]-x1;
					float y2=xyz2[(i*m+l)*3+1]-y1;
					float z2=xyz2[(i*m+l)*3+2]-z1;
					float w=expf(level*(x2*x2+y2*y2+z2*z2))*ratioL[k]*ratioR[l];
					match[i*n*m+l*n+k]+=w;
					suml+=w;
				}
				remainL[k]=fmaxf(0.0f,remainL[k]-suml);
			}
			__syncthreads();
		}
	}
}
// Host wrapper: runs the approximate-matching kernel on a fixed grid of
// 32 blocks x 512 threads; the kernel itself grid-strides over the b batches.
void approxmatchLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,float * match){
	const dim3 grid(32);
	const dim3 block(512);
	hipLaunchKernelGGL(approxmatch, grid, block, 0, 0, b, n, m, xyz1, xyz2, match);
}
// Per-batch EMD cost:
//   out[i] = sum over (j<n, k<m) of match[i][k][j] * ||xyz1[i][j] - xyz2[i][k]||.
// xyz2 is staged through shared memory in tiles of Block points; each thread
// keeps a private partial sum that is reduced in shared memory at the end.
// Launch expectation: blockDim.x <= 512 (size of the reduction scratch).
__global__ void matchcost(int b,
                          int n,
                          int m,
                          const float * __restrict__ xyz1,
                          const float * __restrict__ xyz2,
                          const float * __restrict__ match,
                          float * __restrict__ out){
	__shared__ float allsum[512];
	const int Block=256;
	__shared__ float buf[Block*3];
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		float subsum=0;
		for (int k0=0;k0<m;k0+=Block){
			int endk=min(m,k0+Block);
			// Cooperative load of one tile of target points (3 floats each).
			for (int k=threadIdx.x;k<(endk-k0)*3;k+=blockDim.x){
				buf[k]=xyz2[i*m*3+k0*3+k];
			}
			__syncthreads();
			// Each thread sweeps a strided subset of the n source points.
			for (int j=threadIdx.x;j<n;j+=blockDim.x){
				float x1=xyz1[(i*n+j)*3+0];
				float y1=xyz1[(i*n+j)*3+1];
				float z1=xyz1[(i*n+j)*3+2];
				for (int k=0;k<endk-k0;k++){
					float x2=buf[k*3+0]-x1;
					float y2=buf[k*3+1]-y1;
					float z2=buf[k*3+2]-z1;
					float d=sqrtf(x2*x2+y2*y2+z2*z2);
					subsum+=match[i*n*m+(k0+k)*n+j]*d;
				}
			}
			__syncthreads();  // buf is overwritten by the next tile
		}
		// Tree reduction of the per-thread partials into allsum[0].
		allsum[threadIdx.x]=subsum;
		for (int j=1;j<blockDim.x;j<<=1){
			__syncthreads();
			if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){
				allsum[threadIdx.x]+=allsum[threadIdx.x+j];
			}
		}
		if (threadIdx.x==0)
			out[i]=allsum[0];
		__syncthreads();
	}
}
// Host wrapper: evaluates the per-batch match cost with 32 blocks of
// 512 threads; the kernel grid-strides over the b batches.
void matchcostLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out){
	const dim3 grid(32);
	const dim3 block(512);
	hipLaunchKernelGGL(matchcost, grid, block, 0, 0, b, n, m, xyz1, xyz2, match, out);
}
// Gradient of the EMD cost with respect to xyz2 (grad2 only).
// Launch: 2D grid — blockIdx.x strides over batches, blockIdx.y partitions the
// m target points into gridDim.y contiguous ranges; blockDim.x <= 256.
// For each target k:  d cost / d xyz2[k] = sum_j match[k][j]*(xyz2[k]-xyz1[j])/dist.
__global__ void matchcostgrad(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * grad2){
	__shared__ float sum_grad[256*3];
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		// Contiguous slice of target indices owned by this blockIdx.y.
		int kbeg=m*blockIdx.y/gridDim.y;
		int kend=m*(blockIdx.y+1)/gridDim.y;
		for (int k=kbeg;k<kend;k++){
			float x2=xyz2[(i*m+k)*3+0];
			float y2=xyz2[(i*m+k)*3+1];
			float z2=xyz2[(i*m+k)*3+2];
			// Strided per-thread partial sums over the n source points.
			float subsumx=0,subsumy=0,subsumz=0;
			for (int j=threadIdx.x;j<n;j+=blockDim.x){
				float x1=x2-xyz1[(i*n+j)*3+0];
				float y1=y2-xyz1[(i*n+j)*3+1];
				float z1=z2-xyz1[(i*n+j)*3+2];
				// 1e-20 floor avoids division by zero for coincident points.
				float d=match[i*n*m+k*n+j]/fmaxf(sqrtf(x1*x1+y1*y1+z1*z1),1e-20f);
				subsumx+=x1*d;
				subsumy+=y1*d;
				subsumz+=z1*d;
			}
			// Tree reduction of the xyz partials across the block.
			sum_grad[threadIdx.x*3+0]=subsumx;
			sum_grad[threadIdx.x*3+1]=subsumy;
			sum_grad[threadIdx.x*3+2]=subsumz;
			for (int j=1;j<blockDim.x;j<<=1){
				__syncthreads();
				int j1=threadIdx.x;
				int j2=threadIdx.x+j;
				if ((j1&j)==0 && j2<blockDim.x){
					sum_grad[j1*3+0]+=sum_grad[j2*3+0];
					sum_grad[j1*3+1]+=sum_grad[j2*3+1];
					sum_grad[j1*3+2]+=sum_grad[j2*3+2];
				}
			}
			if (threadIdx.x==0){
				grad2[(i*m+k)*3+0]=sum_grad[0];
				grad2[(i*m+k)*3+1]=sum_grad[1];
				grad2[(i*m+k)*3+2]=sum_grad[2];
			}
			__syncthreads();
		}
	}
}
// Host wrapper: launches the gradient kernel on a 32x32 grid of 256-thread
// blocks (batches on x, target-point ranges on y).
void matchcostgradLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad2){
	const dim3 grid(32, 32);
	const dim3 block(256);
	hipLaunchKernelGGL(matchcostgrad, grid, block, 0, 0, b, n, m, xyz1, xyz2, match, grad2);
}
| d59c6d03c35c0f45e9a5b78f1de4b727258e2d0e.cu | //n<=4096, m<=1024
// Approximate Earth Mover's Distance matching (annealed soft assignment).
// One thread block processes one batch element at a time (grid-stride over b).
// Mass is iteratively transported from the n points of xyz1 onto the m points
// of xyz2 through a Gaussian affinity whose bandwidth is annealed from very
// sharp (j=7) to uniform (j=-2, level=0).  Assignment weights accumulate into
// match, laid out as [b][m][n].  Shared-memory tables cap n<=4096, m<=1024.
__global__ void approxmatch(int b,
                            int n,
                            int m,
                            const float * __restrict__ xyz1,
                            const float * __restrict__ xyz2,
                            float * __restrict__ match){
	const int MaxN=4096,MaxM=1024;
	// remainL/remainR: mass not yet assigned on each side.
	// ratioL/ratioR: per-point scaling factors for the current round.
	// listR: indices of right-hand points that still have capacity; lc = count.
	__shared__ float remainL[MaxN],remainR[MaxM],ratioR[MaxM],ratioL[MaxN];
	__shared__ int listR[MaxM],lc;
	// Each point on the smaller side carries proportionally more mass.
	// NOTE(review): n/m and m/n are integer divisions — presumably n and m are
	// expected to divide each other evenly; confirm with callers.
	float multiL,multiR;
	if (n>=m){
		multiL=1;
		multiR=n/m;
	}else{
		multiL=m/n;
		multiR=1;
	}
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		// Reset the output slice and the remaining-mass tables.
		for (int j=threadIdx.x;j<n*m;j+=blockDim.x)
			match[i*n*m+j]=0;
		for (int j=threadIdx.x;j<n;j+=blockDim.x)
			remainL[j]=multiL;
		for (int j=threadIdx.x;j<m;j+=blockDim.x)
			remainR[j]=multiR;
		__syncthreads();
		// Annealing schedule: level = -4^j, ending with level = 0 (uniform).
		for (int j=7;j>=-2;j--){
			float level=-powf(4.0f,j);
			if (j==-2){
				level=0;
			}
			// Thread 0 gathers the right-hand points with remaining capacity.
			if (threadIdx.x==0){
				lc=0;
				for (int k=0;k<m;k++)
					if (remainR[k]>0)
						listR[lc++]=k;
			}
			__syncthreads();
			int _lc=lc;
			// Pass 1: for each left point, normalize its outgoing affinity so
			// it can distribute its remaining mass (ratioL = remainL / total weight).
			for (int k=threadIdx.x;k<n;k+=blockDim.x){
				float suml=1e-9f;  // epsilon guards the division below
				float x1=xyz1[(i*n+k)*3+0];
				float y1=xyz1[(i*n+k)*3+1];
				float z1=xyz1[(i*n+k)*3+2];
				for (int _l=0;_l<_lc;_l++){
					int l=listR[_l];
					float x2=xyz2[(i*m+l)*3+0]-x1;
					float y2=xyz2[(i*m+l)*3+1]-y1;
					float z2=xyz2[(i*m+l)*3+2]-z1;
					float w=expf(level*(x2*x2+y2*y2+z2*z2))*remainR[l];
					suml+=w;
				}
				ratioL[k]=remainL[k]/suml;
			}
			__syncthreads();
			// Pass 2: for each active right point, measure the incoming mass
			// and derive a scale factor capping it at the remaining capacity.
			for (int _k=threadIdx.x;_k<lc;_k+=blockDim.x){
				int k=listR[_k];
				float sumr=0;
				float x2=xyz2[(i*m+k)*3+0];
				float y2=xyz2[(i*m+k)*3+1];
				float z2=xyz2[(i*m+k)*3+2];
				for (int l=0;l<n;l++){
					float x1=xyz1[(i*n+l)*3+0]-x2;
					float y1=xyz1[(i*n+l)*3+1]-y2;
					float z1=xyz1[(i*n+l)*3+2]-z2;
					float w=expf(level*(x1*x1+y1*y1+z1*z1))*ratioL[l];
					sumr+=w;
				}
				sumr*=remainR[k];
				float consumption=fminf(remainR[k]/(sumr+1e-9f),1.0f);
				ratioR[k]=consumption*remainR[k];
				remainR[k]=fmaxf(0.0f,remainR[k]-sumr);
			}
			__syncthreads();
			// Pass 3: commit the transported mass into match and reduce the
			// left-hand remaining mass accordingly.
			for (int k=threadIdx.x;k<n;k+=blockDim.x){
				float suml=0;
				float x1=xyz1[(i*n+k)*3+0];
				float y1=xyz1[(i*n+k)*3+1];
				float z1=xyz1[(i*n+k)*3+2];
				for (int _l=0;_l<_lc;_l++){
					int l=listR[_l];
					float x2=xyz2[(i*m+l)*3+0]-x1;
					float y2=xyz2[(i*m+l)*3+1]-y1;
					float z2=xyz2[(i*m+l)*3+2]-z1;
					float w=expf(level*(x2*x2+y2*y2+z2*z2))*ratioL[k]*ratioR[l];
					match[i*n*m+l*n+k]+=w;
					suml+=w;
				}
				remainL[k]=fmaxf(0.0f,remainL[k]-suml);
			}
			__syncthreads();
		}
	}
}
// Host wrapper: runs the approximate-matching kernel on a fixed grid of
// 32 blocks x 512 threads; the kernel itself grid-strides over the b batches.
void approxmatchLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,float * match){
	const dim3 grid(32);
	const dim3 block(512);
	approxmatch<<<grid, block>>>(b, n, m, xyz1, xyz2, match);
}
// Per-batch EMD cost:
//   out[i] = sum over (j<n, k<m) of match[i][k][j] * ||xyz1[i][j] - xyz2[i][k]||.
// xyz2 is staged through shared memory in tiles of Block points; each thread
// keeps a private partial sum that is reduced in shared memory at the end.
// Launch expectation: blockDim.x <= 512 (size of the reduction scratch).
__global__ void matchcost(int b,
                          int n,
                          int m,
                          const float * __restrict__ xyz1,
                          const float * __restrict__ xyz2,
                          const float * __restrict__ match,
                          float * __restrict__ out){
	__shared__ float allsum[512];
	const int Block=256;
	__shared__ float buf[Block*3];
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		float subsum=0;
		for (int k0=0;k0<m;k0+=Block){
			int endk=min(m,k0+Block);
			// Cooperative load of one tile of target points (3 floats each).
			for (int k=threadIdx.x;k<(endk-k0)*3;k+=blockDim.x){
				buf[k]=xyz2[i*m*3+k0*3+k];
			}
			__syncthreads();
			// Each thread sweeps a strided subset of the n source points.
			for (int j=threadIdx.x;j<n;j+=blockDim.x){
				float x1=xyz1[(i*n+j)*3+0];
				float y1=xyz1[(i*n+j)*3+1];
				float z1=xyz1[(i*n+j)*3+2];
				for (int k=0;k<endk-k0;k++){
					float x2=buf[k*3+0]-x1;
					float y2=buf[k*3+1]-y1;
					float z2=buf[k*3+2]-z1;
					float d=sqrtf(x2*x2+y2*y2+z2*z2);
					subsum+=match[i*n*m+(k0+k)*n+j]*d;
				}
			}
			__syncthreads();  // buf is overwritten by the next tile
		}
		// Tree reduction of the per-thread partials into allsum[0].
		allsum[threadIdx.x]=subsum;
		for (int j=1;j<blockDim.x;j<<=1){
			__syncthreads();
			if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){
				allsum[threadIdx.x]+=allsum[threadIdx.x+j];
			}
		}
		if (threadIdx.x==0)
			out[i]=allsum[0];
		__syncthreads();
	}
}
// Host wrapper: evaluates the per-batch match cost with 32 blocks of
// 512 threads; the kernel grid-strides over the b batches.
void matchcostLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out){
	const dim3 grid(32);
	const dim3 block(512);
	matchcost<<<grid, block>>>(b, n, m, xyz1, xyz2, match, out);
}
// Gradient of the EMD cost with respect to xyz2 (grad2 only).
// Launch: 2D grid — blockIdx.x strides over batches, blockIdx.y partitions the
// m target points into gridDim.y contiguous ranges; blockDim.x <= 256.
// For each target k:  d cost / d xyz2[k] = sum_j match[k][j]*(xyz2[k]-xyz1[j])/dist.
__global__ void matchcostgrad(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * grad2){
	__shared__ float sum_grad[256*3];
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		// Contiguous slice of target indices owned by this blockIdx.y.
		int kbeg=m*blockIdx.y/gridDim.y;
		int kend=m*(blockIdx.y+1)/gridDim.y;
		for (int k=kbeg;k<kend;k++){
			float x2=xyz2[(i*m+k)*3+0];
			float y2=xyz2[(i*m+k)*3+1];
			float z2=xyz2[(i*m+k)*3+2];
			// Strided per-thread partial sums over the n source points.
			float subsumx=0,subsumy=0,subsumz=0;
			for (int j=threadIdx.x;j<n;j+=blockDim.x){
				float x1=x2-xyz1[(i*n+j)*3+0];
				float y1=y2-xyz1[(i*n+j)*3+1];
				float z1=z2-xyz1[(i*n+j)*3+2];
				// 1e-20 floor avoids division by zero for coincident points.
				float d=match[i*n*m+k*n+j]/fmaxf(sqrtf(x1*x1+y1*y1+z1*z1),1e-20f);
				subsumx+=x1*d;
				subsumy+=y1*d;
				subsumz+=z1*d;
			}
			// Tree reduction of the xyz partials across the block.
			sum_grad[threadIdx.x*3+0]=subsumx;
			sum_grad[threadIdx.x*3+1]=subsumy;
			sum_grad[threadIdx.x*3+2]=subsumz;
			for (int j=1;j<blockDim.x;j<<=1){
				__syncthreads();
				int j1=threadIdx.x;
				int j2=threadIdx.x+j;
				if ((j1&j)==0 && j2<blockDim.x){
					sum_grad[j1*3+0]+=sum_grad[j2*3+0];
					sum_grad[j1*3+1]+=sum_grad[j2*3+1];
					sum_grad[j1*3+2]+=sum_grad[j2*3+2];
				}
			}
			if (threadIdx.x==0){
				grad2[(i*m+k)*3+0]=sum_grad[0];
				grad2[(i*m+k)*3+1]=sum_grad[1];
				grad2[(i*m+k)*3+2]=sum_grad[2];
			}
			__syncthreads();
		}
	}
}
// Host wrapper: launches the gradient kernel on a 32x32 grid of 256-thread
// blocks (batches on x, target-point ranges on y).
void matchcostgradLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad2){
	const dim3 grid(32, 32);
	const dim3 block(256);
	matchcostgrad<<<grid, block>>>(b, n, m, xyz1, xyz2, match, grad2);
}
|
a3d4e4dfdfe5a395dd6f30dc5246a446c3120b6f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <cuPrintf.cu>
#include <shrUtils.h>
#include "cutil_inline.h"
#define CUPRINTF cuPrintf
/***********DETECTS COLLISION**************/
// Returns true when the map cell (x,y) is impassable: out of bounds, already
// occupied by a moving agent, or covered by a building or a station.
// Channel layout used here: byte 0 = red (static area code in its top bits),
// byte 2 = blue (dynamic occupancy).  Green/alpha are not consulted.
// Cleanup: the unused locals g and a from the original were removed.
__device__ bool isCollision(TColor *dst, int imageW, int imageH, int x, int y){
	// Out-of-bounds cells (1-pixel border excluded) are treated as blocked.
	if (x >= imageW || x < 1)
		return true;
	if (y >= imageH || y < 1)
		return true;
	TColor color = dst[imageW * y + x];
	int r = (color >> 0) & 0xFF;
	int b = (color >> 16) & 0xFF;
	if (b==255) // a vehicle, pedestrian or bus already occupies the cell
		return true;
	int area= r & 0xE0;       // top three bits of red encode the area type
	if ( (area >> 5) == 7)    // building
		return true;
	if ( (area >> 6) == 3)    // station
		return true;
	return false;
}
/***********CALCULATE NEXT JUMP**************/
// Computes the next cell for an agent at (px,py) moving with unit direction
// (x,y).  If the cell straight ahead is free it is taken; otherwise a fixed
// preference order of alternatives is probed (ahead-diagonals first, then
// sideways, then backward-diagonals, then straight back).  (px,py) is updated
// in place; if every candidate is blocked it is left unchanged.
// The four branches cover the four movement axes: vertical (x==0),
// horizontal (y==0), and the two diagonals (x==y and x==-y).
__device__ void getNextStep(TColor *dst, int imageW, int imageH, int x, int y, int &px, int &py){
	if (isCollision(dst,imageW,imageH, px+x, py+y) ){// straight ahead is blocked
		if (x==0){ //moving vertically (up/down)
			//comments below assume the "up" direction
			if ( !isCollision(dst,imageW,imageH,px+y,py+y) ){ // (+,+) - ahead-right diagonal
				px+=y;
				py+=y;
			}else if (!isCollision(dst,imageW,imageH,px-y,py+y)){ // (-,+) - ahead-left diagonal
				px-=y;
				py+=y;
			}else if (!isCollision(dst,imageW,imageH,px+y,py)){ // (+,0) - right
				px+=y;
			}else if (!isCollision(dst,imageW,imageH,px-y,py)){ // (-,0) - left
				px-=y;
			}else if (!isCollision(dst,imageW,imageH,px+y,py-y)){ // (+,-) - back-right diagonal
				px+=y;
				py-=y;
			}else if (!isCollision(dst,imageW,imageH,px-y,py-y)){ // (-,-) - back-left diagonal
				px-=y;
				py-=y;
			}else if (!isCollision(dst,imageW,imageH,px,py-y)){ // (0,-) - straight back
				py-=y;
			}
		}else if (y==0){ //moving horizontally (left/right)
			//comments below assume the "right" direction
			if ( !isCollision(dst,imageW,imageH,px+x,py-x) ){ // (+,-) - ahead diagonal (right side)
				px+=x;
				py-=x;
			}else if (!isCollision(dst,imageW,imageH,px+x,py+x)){ // (+,+) - ahead diagonal (left side)
				px+=x;
				py+=x;
			}else if (!isCollision(dst,imageW,imageH,px,py-x)){ // (0,-) - sidestep right
				py-=x;
			}else if (!isCollision(dst,imageW,imageH,px,py+x)){ // (0,+) - sidestep left
				py+=x;
			}else if (!isCollision(dst,imageW,imageH,px-x,py-x)){ // (-,-) - back diagonal (right side)
				px-=x;
				py-=x;
			}else if (!isCollision(dst,imageW,imageH,px-x,py+x)){ // (-,+) - back diagonal (left side)
				px-=x;
				py+=x;
			}else if (!isCollision(dst,imageW,imageH,px-x,py)){ // (-,0) - straight back
				px-=x;
			}
		}else if (x==y){ //diagonal SW-NE axis
			// comments assume direction (1,1): up-right
			if ( !isCollision(dst,imageW,imageH,px+x,py) ){ // (+,0) - adjacent cell right of the diagonal
				px+=x;
			}else if (!isCollision(dst,imageW,imageH,px,py+y)){ // (0,+) - adjacent cell left of the diagonal
				py+=y;
			}else if (!isCollision(dst,imageW,imageH,px+x,py-y)){ // (+,-) - right
				px+=x;
				py-=y;
			}else if (!isCollision(dst,imageW,imageH,px-x,py+y)){ // (-,+) - left
				px-=x;
				py+=y;
			}else if (!isCollision(dst,imageW,imageH,px,py-y)){ // (0,-) - back diagonal (right side)
				py-=y;
			}else if (!isCollision(dst,imageW,imageH,px-x,py)){ // (-,0) - back diagonal (left side)
				px-=x;
			}else if (!isCollision(dst,imageW,imageH,px-x,py-y)){ // (-,-) - straight back
				px-=x;
				py-=y;
			}
		}else if (x==-y){ //diagonal SE-NW axis
			// comments assume direction (1,-1): down-right
			if ( !isCollision(dst,imageW,imageH,px,py+y) ){ // (0,-) - adjacent cell right of the diagonal (assumes y=-1)
				py+=y;
			}else if (!isCollision(dst,imageW,imageH,px+x,py)){ // (0,+) - adjacent cell left of the diagonal (assumes x=1)
				px+=x;
			}else if (!isCollision(dst,imageW,imageH,px-x,py+y)){ // (-,-) - right
				px-=x;
				py+=y;
			}else if (!isCollision(dst,imageW,imageH,px+x,py-y)){ // (+,+) - left
				px+=x;
				py-=y;
			}else if (!isCollision(dst,imageW,imageH,px-x,py)){ // (-,0) - back diagonal (right side)
				px-=x;
			}else if (!isCollision(dst,imageW,imageH,px,py-y)){ // (0,+) - back diagonal (left side)
				py-=y;
			}else if (!isCollision(dst,imageW,imageH,px-x,py-y)){ // (-,+) - straight back
				px-=x;
				py-=y;
			}
		}
	}else{
		// The cell straight ahead is free: advance one step.
		px+=x;
		py+=y;
	}
}
//
//
// AQUI EMPIEZAN LAS FASES DEL PASO DE SIMULACION
//
//
/********* SPEED MANAGER *******/
// Phase 0 — speed control: decides per agent whether it may attempt a move
// this simulation step by counting down a per-agent timeout (an agent only
// acts when devTimeOut[id] reaches 0).
// dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout packs scalar parameters;
// index [2] is the maximum speed.
__global__ void PedestrianPhase0(
									int maxPediestran,
									int *devTimeOut,
									float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout
									){
	const int id = blockDim.x * blockIdx.x + threadIdx.x;
	if(id < maxPediestran){
		if(devTimeOut[id]==-10){ //first time only: stagger agents into groups
			// NOTE(review): float->int truncation; cellSize is
			// maxPediestran * maxSpeed rounded toward zero — confirm intended.
			int cellSize=maxPediestran*dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[2];
			int cellNumber=id/cellSize;
			devTimeOut[id]=cellNumber;
			return;
		}
		if(devTimeOut[id]<0){
			// Reload the countdown with (1/maxSpeed - 1) idle steps so the
			// agent moves roughly once every 1/maxSpeed steps
			// (float->int truncation on assignment).
			devTimeOut[id]=1.f/dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[2]-1;
		}else{
			devTimeOut[id]--;
		}
	}
}
/********* PHASE 1: P'=f(p) *******/
// Phase 1 — speculative movement (P' = f(p)): every eligible agent computes
// and paints its proposed next cell, allowing temporary collisions; the
// per-cell ownership map devConflicted (last writer wins) is used by phases
// 2/3 to resolve them.
// Waypoints are stored 5 ints apiece in devLocalX/Y[id]; index 4 appears to
// be the cell coordinate consumed here — TODO confirm layout.
__global__ void PedestrianPhase1(
    TColor *dst,
    int imageW,
    int imageH,
	int maxPediestran,
	bool semaphore,
	int **devLocalX,
	int **devLocalY,
	int *devLocalStep,
	int *devMaxLocalStep,
	int *devNextX,
	int *devNextY,
	int *devPreviousX,
	int *devPreviousY,
	int *devConflicted,
	int *devTimeOut
){
	const int id = blockDim.x * blockIdx.x + threadIdx.x;
	if(id < maxPediestran){
		// Red traffic light at the current cell: extend the timeout (wait).
		int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 1);
		if(delay==-1 && devTimeOut[id]>=0){
			devTimeOut[id]=devTimeOut[id]+1;
		}
		if(devTimeOut[id]!=0)
			return;  // not this agent's turn (speed throttling from Phase 0)
		if (devLocalStep[id]<0){
			// Negative step counts down a spawn delay before the agent starts.
			devLocalStep[id]++;
			return;
		}
		if (devLocalStep[id]==0){
			// First real step: adopt the first waypoint as current position.
			devPreviousX[id]=devLocalX[id][5*devLocalStep[id] + 4];
			devPreviousY[id]=devLocalY[id][5*devLocalStep[id] + 4];
			devNextX[id]=devPreviousX[id];
			devNextY[id]=devPreviousY[id];
			devLocalStep[id]++;
			return;
		}
		// Reverse the local waypoint list once the global goal has been
		// reached, so the agent walks the route back.
		if (devLocalStep[id]==devMaxLocalStep[id]){
			for (int i = 0; i < devMaxLocalStep[id]/2; i++){
				for (int n = 0; n < 5; n++){
					int tempX=devLocalX[id][5*(devMaxLocalStep[id]-i-1)+n];
					devLocalX[id][5*(devMaxLocalStep[id]-i-1)+n]=devLocalX[id][5*i+n];
					devLocalX[id][5*i+n]=tempX;
					int tempY=devLocalY[id][5*(devMaxLocalStep[id]-i-1)+n];
					devLocalY[id][5*(devMaxLocalStep[id]-i-1)+n]=devLocalY[id][5*i+n];
					devLocalY[id][5*i+n]=tempY;
				}
			}
			devLocalStep[id]=0;
		}
		// Unit direction toward the current waypoint.
		int x=0;
		int y=0;
		if (!getNextDirection(id , x, y, devLocalX, devLocalY, devLocalStep, devNextX, devNextY, 1))
			return;
		int px=devNextX[id];
		int py=devNextY[id];
		getNextStep(dst,imageW,imageH,x,y,px,py);
		if (px != devNextX[id] || py != devNextY[id]){ // found a new position
			devPreviousX[id]=devNextX[id];
			devPreviousY[id]=devNextY[id];
			devNextX[id]=px;
			devNextY[id]=py;
			// Paint the agent (cyan; alpha=1 marks the cell as occupied).
			dst[imageW * devNextY[id] + devNextX[id]] = make_color(0.f, 1.f, 1.f, 1.f);
			// Advance to the next waypoint when within one cell of it.
			float disX=(float)(devLocalX[id][5*(devLocalStep[id]) + 4]-devNextX[id]);
			float disY=(float)(devLocalY[id][5*(devLocalStep[id]) + 4]-devNextY[id]);
			float hyp=sqrt(disX*disX+disY*disY);
			if ( hyp < 1.f ){
				devLocalStep[id]++;
			}
			// Claim the cell; the last writer wins and is resolved later.
			devConflicted[imageW * devNextY[id] + devNextX[id]] = id;
		}
	}
}
/********* PHASE 2: Se intenta solucionar conflictos en paralelo********/
// Phase 2 — parallel conflict resolution: agents that lost their proposed
// cell in Phase 1 (another id owns devConflicted at that cell) retry with an
// alternative step computed from their previous position; winners simply
// erase their old cell.  The fresh proposals may themselves conflict, which
// Phase 3 resolves by falling back to a stable state.
// Cleanup: an unused texture read (local newFresult) was removed; behavior
// is otherwise identical to the original.
__global__ void PedestrianPhase2(
	TColor *dst,
	int imageW,
	int imageH,
	int maxPediestran,
	bool semaphore,
	int **devLocalX,
	int **devLocalY,
	int *devLocalStep,
	int *devNextX,
	int *devNextY,
	int *devPreviousX,
	int *devPreviousY,
	int *devConflicted,
	int *devTimeOut,
	int *devClass
){
	const int id = blockDim.x * blockIdx.x + threadIdx.x;
	if((id < maxPediestran) && (devLocalStep[id] >= 0)){
		if(devTimeOut[id]!=0)
			return;
		//int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 1);
		//if(delay==-1)
		//	return;
		if (devConflicted[imageW * devNextY[id] + devNextX[id]] == id){
			// This agent owns the (possibly contested) cell: just erase its
			// previous cell by restoring the background from the texture.
			float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f);
			dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
			return;
		}
		if ( (devNextX[id]==devPreviousX[id]) && (devNextY[id]==devPreviousY[id]) )
			// In conflict but never actually moved (should not happen).
			return;
		int x=0;
		int y=0;
		if (!getNextDirection(id, x, y, devLocalX, devLocalY, devLocalStep, devNextX, devNextY, 1))
			return;
		// Recompute an alternative step starting from the previous cell.
		int px=devPreviousX[id];
		int py=devPreviousY[id];
		getNextStep(dst,imageW,imageH,x,y,px,py);
		// Release the contested cell: a higher-priority id occupies it.
		float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f);
		dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
		// Record the new proposal; the previous position is preserved.
		devNextX[id]=px;
		devNextY[id]=py;
		if ( (px!=devPreviousX[id]) || (py!=devPreviousY[id]) ){
			// Found an alternative cell: paint and claim it.  This may
			// introduce fresh conflicts, which Phase 3 resolves.
			dst[imageW * devNextY[id] + devNextX[id]] = make_color(0.f, 1.f, 1.f, 1.f);
			devConflicted[imageW * devNextY[id] + devNextX[id]] = id;
		}else{
			// The agent could not move at all; nothing to do.
		}
	}
}
/********* PHASE 3: Se intenta solucionar conflictos en paralelo********/
// Phase 3 — final parallel resolution: agents that still lost their proposed
// cell after Phase 2 give up and revert to their previous position; winners
// erase their old cell.  No new proposals are generated here, which
// guarantees a conflict-free (stable) state at the end of the step.
__global__ void PedestrianPhase3(
	TColor *dst,
	int imageW,
	int imageH,
	int maxPediestran,
	bool semaphore,
	int *devLocalStep,
	int *devNextX,
	int *devNextY,
	int *devPreviousX,
	int *devPreviousY,
	int *devConflicted,
	int *devTimeOut,
	int *devClass
){
	const int id = blockDim.x * blockIdx.x + threadIdx.x;
	if((id < maxPediestran) && (devLocalStep[id] >= 0)){
		if(devTimeOut[id]!=0)
			return;
		//int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 1);
		//if(delay==-1)
		//	return;
		//else
		//	devTimeOut[id]+=delay;
		if (devConflicted[imageW * devNextY[id] + devNextX[id]] == id){
			// Owns the contested cell: erase the previous cell, keep the move.
			float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f);
			dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
			return;
		}
		// Lost the cell: revert to the previous position (no new proposal).
		devNextX[id]=devPreviousX[id];
		devNextY[id]=devPreviousY[id];
	}
}
// SOLUCION INICIAL PARA RESOLUCION DE CONFLICTOS
/********* PHASE 2: Detecto Colisiones (serial)********/
// Serial collision detection (intended to run with a single thread): scans
// all agents in id order, using devClass as a per-cell claim map.  The first
// agent to claim a cell keeps devConflicted[id] == -1; later claimants store
// the winner's id in devConflicted[id].
// NOTE(review): here devConflicted is indexed per agent id, while the
// parallel pipeline (Phases 1-3) indexes it per cell — the two encodings are
// only consumed by their respective code paths.
__global__ void PedestrianCollision(
	TColor *dst,
	int imageW,
	int imageH,
	int maxPediestran,
	int *devClass,
	int *devLocalStep,
	int *devNextX,
	int *devNextY,
	int *devPreviousX,
	int *devPreviousY,
	int *devConflicted,
	int *devTimeOut
){
	for (int id=0; id < maxPediestran; id++){
		devConflicted[id]=-1;
		if(devTimeOut[id]!=0)
			continue;  // agent is idle this step
		if (devLocalStep[id] >= 0){
			if (devClass[imageW*devNextY[id] + devNextX[id]]==-1)
				devClass[imageW*devNextY[id] + devNextX[id]]=id;  // first claim wins
			else
				devConflicted[id]=devClass[imageW*devNextY[id] + devNextX[id]];
		}
	}
}
// Legacy Phase 2 (serial-detection pipeline): consumes the per-agent conflict
// info produced by PedestrianCollision.  Conflict-free / winning agents erase
// their previous cell; losers compute an alternative step from their previous
// position and claim it (possibly creating new conflicts for Phase 3 Old).
// devClass[0] is reused as a flag: set to 100 when at least one retry moved.
__global__ void PedestrianPhase2Old(
	TColor *dst,
	int imageW,
	int imageH,
	int maxPediestran,
	int **devLocalX,
	int **devLocalY,
	int *devLocalStep,
	int *devNextX,
	int *devNextY,
	int *devPreviousX,
	int *devPreviousY,
	int *devConflicted,
	int *devTimeOut,
	int *devClass
){
	const int id = blockDim.x * blockIdx.x + threadIdx.x;
	if((id < maxPediestran) && (devLocalStep[id] >= 0)){
		if(devTimeOut[id]!=0)
			return;
		if (devConflicted[id]==-1 || devConflicted[id]==id){
			// No conflict, or this agent has priority: safely erase the
			// previous cell (restore background from the texture).
			float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f);
			dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
			return;
		}
		if ( (devNextX[id]==devPreviousX[id]) && (devNextY[id]==devPreviousY[id]) )
			// In conflict but never actually moved (should not happen).
			return;
		int x=0;
		int y=0;
		if (!getNextDirection(id, x, y, devLocalX, devLocalY, devLocalStep, devNextX, devNextY, 1))
			return;
		// Recompute an alternative step starting from the previous cell.
		int px=devPreviousX[id];
		int py=devPreviousY[id];
		getNextStep(dst,imageW,imageH,x,y,px,py);
		// Release the contested cell: a higher-priority id occupies it.
		float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f);
		dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
		// Record the new proposal; the previous position is preserved.
		devNextX[id]=px;
		devNextY[id]=py;
		if ( (px!=devPreviousX[id]) || (py!=devPreviousY[id]) ){
			// Found an alternative cell: paint and claim it (new conflicts
			// are possible and handled by the serial Phase 3).
			// NOTE(review): newFresult below is never used — this texture
			// read could be deleted.
			float4 newFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f);
			dst[imageW * devNextY[id] + devNextX[id]] = make_color(0.f, 1.f, 1.f, 1.f);
			devConflicted[imageW * devNextY[id] + devNextX[id]] = id;
			devClass[0]=100; //devClass is otherwise unused here; slot 0 flags "at least one agent moved"
		}else{
			// The agent could not move at all; nothing to do.
		}
	}
}
/********* PHASE 3: Se solucionan los conflictos (serial) que no se pudo resolver en la fase anterior********/
// Legacy Phase 3 (serial): resolves the conflicts that the parallel retry in
// PedestrianPhase2Old could not.  Runs as one thread over all agents in id
// order, so every decision sees the effects of earlier ones.
// Skipped entirely when devClass[0] != 100 (Phase 2 made no new moves).
__global__ void PedestrianPhase3Old(
	TColor *dst,
	int imageW,
	int imageH,
	int maxPediestran,
	int *devLocalStep,
	int *devNextX,
	int *devNextY,
	int *devPreviousX,
	int *devPreviousY,
	int *devConflicted,
	int *devTimeOut,
	int *devClass
){
	if(devClass[0]!=100)
		return; //no conflicts left over from phase 2, so this phase is unnecessary
	for (int id=0; id < maxPediestran; id++){
		if(devTimeOut[id]!=0)
			continue;
		if (devLocalStep[id]<=0)
			continue;
		if (devConflicted[id]==-1 || devConflicted[id]==id){
			// No conflict, or this agent has priority: erase its previous cell.
			float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f);
			dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
			continue;
		}
		if ( (devNextX[id]==devPreviousX[id]) && (devNextY[id]==devPreviousY[id]) )
			// In conflict but never actually moved (should not happen).
			continue;
		// Retry using the agent's current heading.
		int x=devNextX[id]-devPreviousX[id];
		int y=devNextY[id]-devPreviousY[id];
		int px=devPreviousX[id];
		int py=devPreviousY[id];
		getNextStep(dst,imageW,imageH,x,y,px,py);
		// Release the contested cell: a higher-priority id occupies it.
		float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f);
		dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
		// Record the new position; the previous position is preserved.
		devNextX[id]=px;
		devNextY[id]=py;
		if ( (px!=devPreviousX[id]) || (py!=devPreviousY[id]) ){
			// Move to the new cell...
			// NOTE(review): newFresult below is never used — this texture
			// read could be deleted.
			float4 newFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f);
			dst[imageW * devNextY[id] + devNextX[id]] = make_color(0.f, 1.f, 1.f, 1.f);
			// ...and erase the previous one.
			float4 preFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f);
			dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(preFresult.x, preFresult.y, preFresult.z, 0.0f);
		}else{
			// The agent could not move at all; nothing to do.
		}
	}
}
// Host entry point for one pedestrian simulation step.  Selects between the
// fully parallel conflict-resolution pipeline (parallelDetection == true)
// and the older serial-detection fallback.  All kernels are launched with
// maxPediestran blocks of a single thread each.
extern "C" void run_Pedestrian(
	TColor *d_dst,
	int *devClass,
	int imageW,
	int imageH,
	int maxPediestran,
	bool parallelDetection,
	bool semaphore,
	int **devLocalX,
	int **devLocalY,
	int *devLocalStep,
	int *devMaxLocalStep,
	int *devCurrentX,
	int *devCurrentY,
	int *devPreviousX,
	int *devPreviousY,
	int *devConflicted,
	int **devRelated,
	int *devTimeOut,
	float *devSpeed,
	float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout
){
	/****** CONSOLE PRINTING ******/
	//cudaPrintfInit(); // uncomment this line to enable printing from kernels
	//CUPRINTF("message goes here; works just like printf"); // copy this call into any kernel that should print
	// Clear the per-cell conflict map (byte pattern 0xFF yields int -1).
	hipMemset(devConflicted,-1,imageW*imageH*sizeof(int));
	dim3 dimGrid(maxPediestran, 1);
	dim3 dimBlock(1, 1, 1); // total threads = product of all dims; Z unused
	if(parallelDetection){
		// Phase 0: speed control — decide whether each agent may try to move this step.
		hipLaunchKernelGGL(( PedestrianPhase0), dim3(dimGrid), dim3(dimBlock), 0, 0, maxPediestran, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
		// Phase 1: speculative moves; collisions are allowed here.
		hipLaunchKernelGGL(( PedestrianPhase1), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dst, imageW, imageH, maxPediestran, semaphore,
														devLocalX, devLocalY, devLocalStep, devMaxLocalStep,
														devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut);
		// Phase 2: detect and repair conflicting moves with new proposals (which may still conflict).
		hipLaunchKernelGGL(( PedestrianPhase2), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dst, imageW, imageH, maxPediestran, semaphore,
														devLocalX, devLocalY, devLocalStep,
														devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut, devClass);
		// Phase 3: repair remaining conflicts without new proposals — fall back to a stable state.
		hipLaunchKernelGGL(( PedestrianPhase3), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dst, imageW, imageH, maxPediestran, semaphore,
														devLocalStep,
														devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut, devClass);
	}else{
		// Phase 0: speed control — decide whether each agent may try to move this step.
		hipLaunchKernelGGL(( PedestrianPhase0), dim3(dimGrid), dim3(dimBlock), 0, 0, maxPediestran, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
		// Phase 1: speculative moves; collisions are allowed here.
		hipLaunchKernelGGL(( PedestrianPhase1), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dst, imageW, imageH, maxPediestran, semaphore,
														devLocalX, devLocalY, devLocalStep, devMaxLocalStep,
														devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut);
		// Intermediate phase: detect collisions serially (detection only, no repair).
		hipMemset(devClass,-1,imageW*imageH*sizeof(int));
		hipLaunchKernelGGL(( PedestrianCollision), dim3(1),dim3(1), 0, 0, d_dst, imageW, imageH, maxPediestran,
											devClass,devLocalStep,
											devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut);
		// Phase 2: detect and repair conflicting moves with new proposals (which may still conflict).
		hipLaunchKernelGGL(( PedestrianPhase2Old), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dst, imageW, imageH, maxPediestran,
														devLocalX, devLocalY, devLocalStep,
														devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut, devClass);
		// Intermediate phase: detect the new collisions left over from the previous phase.
		hipMemset(devClass,-1,imageW*imageH*sizeof(int));
		hipLaunchKernelGGL(( PedestrianCollision), dim3(1),dim3(1), 0, 0, d_dst, imageW, imageH, maxPediestran,
											devClass,devLocalStep,
											devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut);
		// Phase 3: repair the remaining collisions serially.
		hipLaunchKernelGGL(( PedestrianPhase3Old), dim3(1),dim3(1), 0, 0, d_dst, imageW, imageH, maxPediestran,
											devLocalStep,
											devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut, devClass);
	}
	/*********** CONSOLE PRINTING ********/
	//cudaPrintfDisplay(stdout, true); // uncomment to flush device printf output; do not change the arguments.
	//cudaPrintfEnd(); // uncomment to tear down device-side printing.
} | a3d4e4dfdfe5a395dd6f30dc5246a446c3120b6f.cu | #include <cuda.h>
#include <stdio.h>
#include <cuPrintf.cu>
#include <shrUtils.h>
#include "cutil_inline.h"
#define CUPRINTF cuPrintf
/***********DETECTS COLISSION**************/
/**
 * Returns true when a pedestrian can NOT occupy cell (x,y).
 *
 * A cell is blocked when it is outside the map (a 1-cell border counts as
 * blocked), already occupied by a moving agent, or covered by a static
 * structure encoded in the map colour.
 *
 * Colour encoding (as used by this check): the blue channel equals 255 for
 * any moving agent (vehicle, pedestrian or bus); the top bits of the red
 * channel encode static areas (0b111 -> building, 0b110 -> station).
 *
 * Fix vs. previous revision: removed the unused locals `g` (green channel)
 * and `a` (alpha channel), which were extracted but never read.
 */
__device__ bool isCollision(TColor *dst, int imageW, int imageH, int x, int y){
    // Out of bounds (the outermost row/column is treated as blocked).
    if (x >= imageW || x < 1)
        return true;
    if (y >= imageH || y < 1)
        return true;
    TColor color = dst[imageW * y + x];
    int r = (color >> 0) & 0xFF;   // red: encodes static map areas
    int b = (color >> 16) & 0xFF;  // blue: 255 marks a moving agent
    if (b==255) // a vehicle, pedestrian or bus occupies the cell
        return true;
    // Top three bits of red select the static area type.
    int area= r & 0xE0;
    if ( (area >> 5) == 7) // there is a building here
        return true;
    if ( (area >> 6) == 3) // there is a station here
        return true;
    return false;
}
/***********CALCULATE NEXT JUMP**************/
/**
 * Computes the next cell for a pedestrian at (px,py) heading in unit
 * direction (x,y); (px,py) is updated in place.
 *
 * If the cell straight ahead is free, the pedestrian moves there. Otherwise
 * a fixed fallback order of alternative cells is probed (diagonals toward
 * the goal first, then sideways, then diagonally backwards, then straight
 * back); the first free cell wins. If every probe is blocked, (px,py) is
 * left unchanged. The four branches handle the four direction classes:
 * vertical (x==0), horizontal (y==0), SW-NE diagonal (x==y) and
 * SE-NW diagonal (x==-y); the probe offsets are built from x and y so each
 * branch works for both orientations of its axis.
 *
 * NOTE(review): the probe order encodes right-hand preference (comments in
 * the original assumed "up", "right", etc.) — the exact tie-breaking is
 * behavioural, do not reorder.
 */
__device__ void getNextStep(TColor *dst, int imageW, int imageH, int x, int y, int &px, int &py){
    if (isCollision(dst,imageW,imageH, px+x, py+y) ){// straight ahead is blocked
        if (x==0){ // vertical direction (up/down)
            // Offsets written assuming the "up" orientation; y carries the sign.
            if ( !isCollision(dst,imageW,imageH,px+y,py+y) ){ // (+,+) - front-right
                px+=y;
                py+=y;
            }else if (!isCollision(dst,imageW,imageH,px-y,py+y)){ // (-,+) - front-left
                px-=y;
                py+=y;
            }else if (!isCollision(dst,imageW,imageH,px+y,py)){ // right
                px+=y;
            }else if (!isCollision(dst,imageW,imageH,px-y,py)){ // left
                px-=y;
            }else if (!isCollision(dst,imageW,imageH,px+y,py-y)){ // (+,-) - back-right diagonal
                px+=y;
                py-=y;
            }else if (!isCollision(dst,imageW,imageH,px-y,py-y)){ // (-,-) - back-left diagonal
                px-=y;
                py-=y;
            }else if (!isCollision(dst,imageW,imageH,px,py-y)){ // (0,-) straight back
                py-=y;
            }
        }else if (y==0){ // horizontal direction (left/right)
            // Offsets written assuming the "right" orientation; x carries the sign.
            if ( !isCollision(dst,imageW,imageH,px+x,py-x) ){ // (+,-) - front-right diagonal
                px+=x;
                py-=x;
            }else if (!isCollision(dst,imageW,imageH,px+x,py+x)){ // (+,+) - front-left diagonal
                px+=x;
                py+=x;
            }else if (!isCollision(dst,imageW,imageH,px,py-x)){ // (0,-) - right
                py-=x;
            }else if (!isCollision(dst,imageW,imageH,px,py+x)){ // (0,+) - left
                py+=x;
            }else if (!isCollision(dst,imageW,imageH,px-x,py-x)){ // (-,-) - back-right diagonal
                px-=x;
                py-=x;
            }else if (!isCollision(dst,imageW,imageH,px-x,py+x)){ // (-,+) - back-left diagonal
                px-=x;
                py+=x;
            }else if (!isCollision(dst,imageW,imageH,px-x,py)){ // (-,0) - straight back
                px-=x;
            }
        }else if (x==y){ // SW-NE diagonal
            // Offsets written assuming direction (1,1), i.e. up-right.
            if ( !isCollision(dst,imageW,imageH,px+x,py) ){ // (+,0) - right of the diagonal
                px+=x;
            }else if (!isCollision(dst,imageW,imageH,px,py+y)){ // (0,+) - left of the diagonal
                py+=y;
            }else if (!isCollision(dst,imageW,imageH,px+x,py-y)){ // (+,-) - right
                px+=x;
                py-=y;
            }else if (!isCollision(dst,imageW,imageH,px-x,py+y)){ // (-,+) - left
                px-=x;
                py+=y;
            }else if (!isCollision(dst,imageW,imageH,px,py-y)){ // (0,-) - back-right of the diagonal
                py-=y;
            }else if (!isCollision(dst,imageW,imageH,px-x,py)){ // (-,0) - back-left of the diagonal
                px-=x;
            }else if (!isCollision(dst,imageW,imageH,px-x,py-y)){ // (-,-) - straight back
                px-=x;
                py-=y;
            }
        }else if (x==-y){ // SE-NW diagonal
            // Offsets written assuming direction (1,-1), i.e. down-right.
            if ( !isCollision(dst,imageW,imageH,px,py+y) ){ // right of the diagonal (assumes y=-1)
                py+=y;
            }else if (!isCollision(dst,imageW,imageH,px+x,py)){ // left of the diagonal (assumes x=1)
                px+=x;
            }else if (!isCollision(dst,imageW,imageH,px-x,py+y)){ // (-,-) - right
                px-=x;
                py+=y;
            }else if (!isCollision(dst,imageW,imageH,px+x,py-y)){ // (+,+) - left
                px+=x;
                py-=y;
            }else if (!isCollision(dst,imageW,imageH,px-x,py)){ // (-,0) - back-right of the diagonal
                px-=x;
            }else if (!isCollision(dst,imageW,imageH,px,py-y)){ // (0,+) - back-left of the diagonal
                py-=y;
            }else if (!isCollision(dst,imageW,imageH,px-x,py-y)){ // (-,+) - straight back
                px-=x;
                py-=y;
            }
        }
    }else{
        // The cell straight ahead is free: take it.
        px+=x;
        py+=y;
    }
}
//
//
// AQUI EMPIEZAN LAS FASES DEL PASO DE SIMULACION
//
//
/********* SPEED MANAGER *******/
/**
 * Phase 0 — speed manager. One thread per pedestrian.
 *
 * Each pedestrian carries a countdown in devTimeOut[id]; only pedestrians
 * whose countdown is 0 move during this simulation step (checked by the
 * later phases). dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[2] is the
 * maximum speed used to derive both the initial stagger and the reload
 * value of the countdown.
 */
__global__ void PedestrianPhase0(
    int maxPediestran,
    int *devTimeOut,
    float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout
){
    const int id = blockDim.x * blockIdx.x + threadIdx.x;
    if(id < maxPediestran){
        if(devTimeOut[id]==-10){ //first time only: -10 is the "uninitialized" sentinel
            // Stagger pedestrians into cells so they do not all move on the
            // same step. NOTE(review): int*float product truncates — confirm intended.
            int cellSize=maxPediestran*dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[2];
            int cellNumber=id/cellSize;
            devTimeOut[id]=cellNumber;
            return;
        }
        if(devTimeOut[id]<0){
            // Countdown ran past zero: reload it from the max speed
            // (1/speed - 1 steps of waiting, truncated to int).
            devTimeOut[id]=1.f/dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[2]-1;
        }else{
            devTimeOut[id]--;
        }
    }
}
/********* PHASE 1: P'=f(p) *******/
/**
 * Phase 1: every active pedestrian proposes its next move (P' = f(P)).
 * One thread per pedestrian. Moves are applied optimistically — several
 * pedestrians may claim the same cell; devConflicted records the last
 * claimant per cell and later phases arbitrate.
 *
 * Position state per pedestrian: (devNextX/Y) current/proposed cell,
 * (devPreviousX/Y) the cell it came from. devLocalX/Y hold the route as
 * groups of 5 entries per local step; slot 4 of each group is the goal cell.
 * devLocalStep < 0 delays a pedestrian's entry into the simulation.
 */
__global__ void PedestrianPhase1(
    TColor *dst,
    int imageW,
    int imageH,
    int maxPediestran,
    bool semaphore,
    int **devLocalX,
    int **devLocalY,
    int *devLocalStep,
    int *devMaxLocalStep,
    int *devNextX,
    int *devNextY,
    int *devPreviousX,
    int *devPreviousY,
    int *devConflicted,
    int *devTimeOut
){
    const int id = blockDim.x * blockIdx.x + threadIdx.x;
    if(id < maxPediestran){
        // Traffic-light check at the current cell; -1 presumably means "red,
        // wait here" — the countdown is pushed up so the pedestrian stays put.
        int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 1);
        if(delay==-1 && devTimeOut[id]>=0){
            devTimeOut[id]=devTimeOut[id]+1;
        }
        if(devTimeOut[id]!=0)
            return; // speed control: not this pedestrian's turn
        if (devLocalStep[id]<0){
            // Staged entry: count up toward 0 before spawning.
            devLocalStep[id]++;
            return;
        }
        if (devLocalStep[id]==0){
            // Spawn at the first local goal (slot 4 of group 0).
            devPreviousX[id]=devLocalX[id][5*devLocalStep[id] + 4];
            devPreviousY[id]=devLocalY[id][5*devLocalStep[id] + 4];
            devNextX[id]=devPreviousX[id];
            devNextY[id]=devPreviousY[id];
            devLocalStep[id]++;
            return;
        }
        // Reverse the local-goal list once the global goal has been reached,
        // so the pedestrian walks the route back.
        if (devLocalStep[id]==devMaxLocalStep[id]){
            for (int i = 0; i < devMaxLocalStep[id]/2; i++){
                for (int n = 0; n < 5; n++){
                    int tempX=devLocalX[id][5*(devMaxLocalStep[id]-i-1)+n];
                    devLocalX[id][5*(devMaxLocalStep[id]-i-1)+n]=devLocalX[id][5*i+n];
                    devLocalX[id][5*i+n]=tempX;
                    int tempY=devLocalY[id][5*(devMaxLocalStep[id]-i-1)+n];
                    devLocalY[id][5*(devMaxLocalStep[id]-i-1)+n]=devLocalY[id][5*i+n];
                    devLocalY[id][5*i+n]=tempY;
                }
            }
            devLocalStep[id]=0;
        }
        // Direction toward the current local goal (set by getNextDirection).
        int x=0;
        int y=0;
        if (!getNextDirection(id , x, y, devLocalX, devLocalY, devLocalStep, devNextX, devNextY, 1))
            return;
        int px=devNextX[id];
        int py=devNextY[id];
        getNextStep(dst,imageW,imageH,x,y,px,py);
        if (px != devNextX[id] || py != devNextY[id]){ // a new position was found
            devPreviousX[id]=devNextX[id];
            devPreviousY[id]=devNextY[id];
            devNextX[id]=px;
            devNextY[id]=py;
            // Paint the claimed cell (cyan-ish marker, blue=255 -> occupied).
            dst[imageW * devNextY[id] + devNextX[id]] = make_color(0.f, 1.f, 1.f, 1.f);
            // Advance to the next local goal once within 1 cell of it.
            float disX=(float)(devLocalX[id][5*(devLocalStep[id]) + 4]-devNextX[id]);
            float disY=(float)(devLocalY[id][5*(devLocalStep[id]) + 4]-devNextY[id]);
            float hyp=sqrt(disX*disX+disY*disY);
            if ( hyp < 1.f ){
                devLocalStep[id]++;
            }
            // Record the claim; last writer wins the cell in this phase.
            devConflicted[imageW * devNextY[id] + devNextX[id]] = id;
        }
    }
}
/********* PHASE 2: Se intenta solucionar conflictos en paralelo********/
/**
 * Phase 2 (parallel variant): conflict repair. One thread per pedestrian.
 *
 * The pedestrian whose id is recorded in devConflicted for its target cell
 * won that cell in phase 1 and only erases the cell it left behind. Losers
 * retry from their previous position via getNextStep; their new move may
 * itself create fresh conflicts (handled by phase 3).
 *
 * Fix vs. previous revision: removed a dead local (`float4 newFresult`, a
 * tex2D read whose result was never used) inside the success branch.
 */
__global__ void PedestrianPhase2(
    TColor *dst,
    int imageW,
    int imageH,
    int maxPediestran,
    bool semaphore,
    int **devLocalX,
    int **devLocalY,
    int *devLocalStep,
    int *devNextX,
    int *devNextY,
    int *devPreviousX,
    int *devPreviousY,
    int *devConflicted,
    int *devTimeOut,
    int *devClass
){
    const int id = blockDim.x * blockIdx.x + threadIdx.x;
    if((id < maxPediestran) && (devLocalStep[id] >= 0)){
        if(devTimeOut[id]!=0)
            return;
        //int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 1);
        //if(delay==-1)
        //	return;
        if (devConflicted[imageW * devNextY[id] + devNextX[id]] == id){ // may be in conflict, but has priority over the others
            // Winner: erase the cell it left behind (restore map texel).
            float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f);
            dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
            return;
        }
        if ( (devNextX[id]==devPreviousX[id]) && (devNextY[id]==devPreviousY[id]) )
            // in conflict but has not moved (should never happen)
            return;
        int x=0;
        int y=0;
        if (!getNextDirection(id, x, y, devLocalX, devLocalY, devLocalStep, devNextX, devNextY, 1))
            return;
        // Retry from the previous position.
        int px=devPreviousX[id];
        int py=devPreviousY[id];
        getNextStep(dst,imageW,imageH,x,y,px,py);
        // Release the contested cell: the higher-priority id occupies it.
        float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f);
        dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
        // Store the new position; the previous one is kept for phase 3.
        devNextX[id]=px;
        devNextY[id]=py;
        if ( (px!=devPreviousX[id]) || (py!=devPreviousY[id]) ){
            // Moved to an alternative cell; this may generate new conflicts.
            dst[imageW * devNextY[id] + devNextX[id]] = make_color(0.f, 1.f, 1.f, 1.f);
            devConflicted[imageW * devNextY[id] + devNextX[id]] = id;
        }else{
            // Could not move at all: nothing to do.
        }
    }
}
/********* PHASE 3: Se intenta solucionar conflictos en paralelo********/
/**
 * Phase 3 (parallel variant): final arbitration. One thread per pedestrian.
 *
 * A pedestrian that owns its target cell (its id is stored in devConflicted
 * for that cell) keeps the move and clears the trail it left behind; every
 * other pedestrian rolls its proposed position back to its previous cell,
 * returning the system to a stable state.
 */
__global__ void PedestrianPhase3(
    TColor *dst,
    int imageW,
    int imageH,
    int maxPediestran,
    bool semaphore,
    int *devLocalStep,
    int *devNextX,
    int *devNextY,
    int *devPreviousX,
    int *devPreviousY,
    int *devConflicted,
    int *devTimeOut,
    int *devClass
){
    const int ped = blockDim.x * blockIdx.x + threadIdx.x;
    // Guard clauses: out of range, not yet spawned, or not its turn this step.
    if (ped >= maxPediestran)
        return;
    if (devLocalStep[ped] < 0 || devTimeOut[ped] != 0)
        return;
    const int targetCell = imageW * devNextY[ped] + devNextX[ped];
    if (devConflicted[targetCell] == ped) {
        // Winner of the cell: restore the map texel at the cell it vacated.
        float4 prevTexel = tex2D(texImage, (float)devPreviousX[ped] + 0.5f, (float)devPreviousY[ped] + 0.5f);
        dst[imageW * devPreviousY[ped] + devPreviousX[ped]] = make_color(prevTexel.x, prevTexel.y, prevTexel.z, 0.0f);
    } else {
        // Loser: undo the proposed move.
        devNextX[ped] = devPreviousX[ped];
        devNextY[ped] = devPreviousY[ped];
    }
}
// SOLUCION INICIAL PARA RESOLUCION DE CONFLICTOS
/********* PHASE 2: Detecto Colisiones (serial)********/
/**
 * Serial collision detection (launched <<<1,1>>>): detection only, no repair.
 *
 * devClass has one slot per grid cell (host pre-fills it with -1) and ends
 * up holding the id of the FIRST pedestrian that claims each cell.
 * devConflicted has one slot per pedestrian: -1 when that pedestrian has no
 * conflict, otherwise the id of the pedestrian that claimed the cell first.
 */
__global__ void PedestrianCollision(
    TColor *dst,
    int imageW,
    int imageH,
    int maxPediestran,
    int *devClass,
    int *devLocalStep,
    int *devNextX,
    int *devNextY,
    int *devPreviousX,
    int *devPreviousY,
    int *devConflicted,
    int *devTimeOut
){
    for (int ped = 0; ped < maxPediestran; ped++){
        devConflicted[ped] = -1;
        // Skip pedestrians that did not move this step or are not spawned yet.
        if (devTimeOut[ped] != 0 || devLocalStep[ped] < 0)
            continue;
        const int cell = imageW * devNextY[ped] + devNextX[ped];
        if (devClass[cell] == -1)
            devClass[cell] = ped;                 // first claimant keeps the cell
        else
            devConflicted[ped] = devClass[cell];  // remember who got there first
    }
}
/**
 * Phase 2 (serial-detection variant): conflict repair after
 * PedestrianCollision. One thread per pedestrian.
 *
 * devConflicted here is indexed per PEDESTRIAN (as filled by
 * PedestrianCollision): -1 or its own id means "no conflict / has priority".
 * Losers retry from their previous position via getNextStep.
 * devClass[0]=100 flags to phase 3 that at least one new move happened.
 *
 * Fix vs. previous revision: removed a dead local (`float4 newFresult`, a
 * tex2D read whose result was never used) inside the success branch.
 */
__global__ void PedestrianPhase2Old(
    TColor *dst,
    int imageW,
    int imageH,
    int maxPediestran,
    int **devLocalX,
    int **devLocalY,
    int *devLocalStep,
    int *devNextX,
    int *devNextY,
    int *devPreviousX,
    int *devPreviousY,
    int *devConflicted,
    int *devTimeOut,
    int *devClass
){
    const int id = blockDim.x * blockIdx.x + threadIdx.x;
    if((id < maxPediestran) && (devLocalStep[id] >= 0)){
        if(devTimeOut[id]!=0)
            return;
        if (devConflicted[id]==-1 || devConflicted[id]==id){ // no conflict, or has priority: safely erase the previous cell
            float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f);
            dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
            return;
        }
        if ( (devNextX[id]==devPreviousX[id]) && (devNextY[id]==devPreviousY[id]) )
            // in conflict but has not moved (should never happen)
            return;
        int x=0;
        int y=0;
        if (!getNextDirection(id, x, y, devLocalX, devLocalY, devLocalStep, devNextX, devNextY, 1))
            return;
        // Retry from the previous position.
        int px=devPreviousX[id];
        int py=devPreviousY[id];
        getNextStep(dst,imageW,imageH,x,y,px,py);
        // Release the contested cell: the higher-priority id occupies it.
        float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f);
        dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
        // Store the new position; the previous one is kept for phase 3.
        devNextX[id]=px;
        devNextY[id]=py;
        if ( (px!=devPreviousX[id]) || (py!=devPreviousY[id]) ){
            // Moved to an alternative cell; this may generate new conflicts.
            // NOTE(review): this write indexes devConflicted per CELL while the
            // rest of this path indexes it per pedestrian — confirm intended.
            dst[imageW * devNextY[id] + devNextX[id]] = make_color(0.f, 1.f, 1.f, 1.f);
            devConflicted[imageW * devNextY[id] + devNextX[id]] = id;
            devClass[0]=100; // devClass is no longer needed, so slot 0 flags that at least one move happened
        }else{
            // Could not move at all: nothing to do.
        }
    }
}
/********* PHASE 3: Se solucionan los conflictos (serial) que no se pudo resolver en la fase anterior********/
/**
 * Phase 3 (serial variant, launched <<<1,1>>>): resolves the conflicts left
 * over from PedestrianPhase2Old one pedestrian at a time, so the retries
 * here cannot race each other.
 *
 * Skipped entirely unless devClass[0]==100 (set by phase 2 when at least
 * one new move happened). Winners erase their trail; losers retry via
 * getNextStep and, on success, move and clean both cells.
 *
 * Fix vs. previous revision: removed a dead local (`float4 newFresult`, a
 * tex2D read whose result was never used) inside the success branch.
 */
__global__ void PedestrianPhase3Old(
    TColor *dst,
    int imageW,
    int imageH,
    int maxPediestran,
    int *devLocalStep,
    int *devNextX,
    int *devNextY,
    int *devPreviousX,
    int *devPreviousY,
    int *devConflicted,
    int *devTimeOut,
    int *devClass
){
    if(devClass[0]!=100)
        return; // no conflicts were produced in phase 2, so this phase is unnecessary
    for (int id=0; id < maxPediestran; id++){
        if(devTimeOut[id]!=0)
            continue;
        if (devLocalStep[id]<=0)
            continue;
        if (devConflicted[id]==-1 || devConflicted[id]==id){ // no conflict, or has priority over the others
            // Erase the previous step's cell.
            float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f);
            dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
            continue;
        }
        if ( (devNextX[id]==devPreviousX[id]) && (devNextY[id]==devPreviousY[id]) )
            // in conflict but has not moved (should never happen)
            continue;
        // Reconstruct the intended direction from the attempted move.
        int x=devNextX[id]-devPreviousX[id];
        int y=devNextY[id]-devPreviousY[id];
        int px=devPreviousX[id];
        int py=devPreviousY[id];
        getNextStep(dst,imageW,imageH,x,y,px,py);
        // Release the contested cell: the higher-priority id occupies it.
        float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f);
        dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
        // Store the new position; the previous one is kept below.
        devNextX[id]=px;
        devNextY[id]=py;
        if ( (px!=devPreviousX[id]) || (py!=devPreviousY[id]) ){
            // Move to the new cell...
            dst[imageW * devNextY[id] + devNextX[id]] = make_color(0.f, 1.f, 1.f, 1.f);
            // ...and restore the map texel at the cell being vacated.
            float4 preFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f);
            dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(preFresult.x, preFresult.y, preFresult.z, 0.0f);
        }else{
            // Could not move at all: nothing to do.
        }
    }
}
/**
 * Host entry point: executes one pedestrian simulation step.
 *
 * Two conflict-resolution strategies are selectable via parallelDetection:
 *  - true : fully parallel phases 0-3 (per-cell devConflicted arbitration);
 *  - false: parallel move proposal plus serial collision detection/repair
 *           (PedestrianCollision / Phase2Old / Phase3Old), using devClass
 *           as a per-cell scratch buffer reset to -1 between passes.
 *
 * NOTE(review): the launch configuration uses one thread per block
 * (dimGrid = maxPediestran blocks, dimBlock = 1) — functional but very low
 * occupancy. No cudaGetLastError() after launches; errors surface later.
 */
extern "C" void run_Pedestrian(
    TColor *d_dst,
    int *devClass,
    int imageW,
    int imageH,
    int maxPediestran,
    bool parallelDetection,
    bool semaphore,
    int **devLocalX,
    int **devLocalY,
    int *devLocalStep,
    int *devMaxLocalStep,
    int *devCurrentX,
    int *devCurrentY,
    int *devPreviousX,
    int *devPreviousY,
    int *devConflicted,
    int **devRelated,
    int *devTimeOut,
    float *devSpeed,
    float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout
){
    /****** CONSOLE PRINTING ******/
    //cudaPrintfInit(); // uncomment this line to enable console printing
    //CUPRINTF("message goes here; works just like printf"); // copy this call into the kernels where printing is desired
    // Clear the per-cell conflict map (0xFF bytes -> every int becomes -1).
    cudaMemset(devConflicted,-1,imageW*imageH*sizeof(int));
    dim3 dimGrid(maxPediestran, 1);
    dim3 dimBlock(1, 1, 1); // total threads = product of all the values; Z is unused.
    if(parallelDetection){
        //Phase 0: speed control — decide whether each pedestrian attempts to move this step.
        PedestrianPhase0<<<dimGrid, dimBlock>>>(maxPediestran, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
        //Phase 1: perform the moves even if collisions occur.
        PedestrianPhase1<<<dimGrid, dimBlock>>>(d_dst, imageW, imageH, maxPediestran, semaphore,
            devLocalX, devLocalY, devLocalStep, devMaxLocalStep,
            devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut);
        //Phase 2: detect and correct conflicting moves with new moves; the new moves may still conflict.
        PedestrianPhase2<<<dimGrid, dimBlock>>>(d_dst, imageW, imageH, maxPediestran, semaphore,
            devLocalX, devLocalY, devLocalStep,
            devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut, devClass);
        //Phase 3: detect and correct the remaining conflicts, but generate no new moves — roll back to a stable state instead.
        PedestrianPhase3<<<dimGrid, dimBlock>>>(d_dst, imageW, imageH, maxPediestran, semaphore,
            devLocalStep,
            devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut, devClass);
    }else{
        //Phase 0: speed control — decide whether each pedestrian attempts to move this step.
        PedestrianPhase0<<<dimGrid, dimBlock>>>(maxPediestran, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
        //Phase 1: perform the moves even if collisions occur.
        PedestrianPhase1<<<dimGrid, dimBlock>>>(d_dst, imageW, imageH, maxPediestran, semaphore,
            devLocalX, devLocalY, devLocalStep, devMaxLocalStep,
            devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut);
        //Intermediate phase: detect collisions (detection only — nothing is fixed yet).
        cudaMemset(devClass,-1,imageW*imageH*sizeof(int));
        PedestrianCollision<<<1,1>>>(d_dst, imageW, imageH, maxPediestran,
            devClass,devLocalStep,
            devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut);
        //Phase 2: detect and correct conflicting moves with new moves; the new moves may still conflict.
        PedestrianPhase2Old<<<dimGrid, dimBlock>>>(d_dst, imageW, imageH, maxPediestran,
            devLocalX, devLocalY, devLocalStep,
            devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut, devClass);
        //Intermediate phase: detect the new collisions left over from the previous phase.
        cudaMemset(devClass,-1,imageW*imageH*sizeof(int));
        PedestrianCollision<<<1,1>>>(d_dst, imageW, imageH, maxPediestran,
            devClass,devLocalStep,
            devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut);
        //Phase 3: fix the resulting collisions serially.
        PedestrianPhase3Old<<<1,1>>>(d_dst, imageW, imageH, maxPediestran,
            devLocalStep,
            devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTimeOut, devClass);
    }
    /*********** CONSOLE PRINTING ********/
    //cudaPrintfDisplay(stdout, true); // uncomment this line to print to the console; do not modify the arguments.
    //cudaPrintfEnd(); // uncomment this line to finish console printing.
}
09497dce2667bb8af0768646f4d206bdc42dae97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "init_config.h"
// Table of the first 168 primes (all primes below 1000). Used by the
// factor_decompose* helpers to factorise quantities such as the number of
// grid points or the number of polymers into three integers Nx, Ny, Nz
// suitable as CUDA launch dimensions.
int prime[168]={2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997};
// find the maximal factor in a integer which is smaller than 1024(Maximal thread number in cuda)
/**
 * Returns the largest divisor of N that does not exceed 1024 (the maximum
 * number of threads per CUDA block), by enumerating products of subsets of
 * N's prime factorisation. Returns 1 when N is 1 or has no usable factor.
 *
 * gpu_info is currently unused but kept for interface compatibility.
 * Assumes N factors completely over the primes below 1000 (as guaranteed
 * for inputs validated by factor_decompose()).
 *
 * Fix vs. previous revision: the subset-size loop ran j = 1..10 even when N
 * has fewer than 10 prime factors, writing elements[j-1] past the end of
 * the malloc'd buffer (heap overflow). j is now bounded by the number of
 * factors actually found.
 */
extern int factor_decompose_1024(GPU_info *gpu_info,long N){
    long temp;
    temp=N;
    int decom[10000],index=0;
    // Trial division by the first 168 primes (all primes < 1000);
    // decom[] collects the prime factors in ascending order.
    for(int i=0;i<168;i++){
        while(temp%prime[i]==0){
            temp=temp/prime[i];
            decom[index++]=prime[i];
        };
    }
    // Selector mask over decom[]; a 1 marks a factor included in the product.
    int *elements;
    elements=(int*)malloc(sizeof(int)*index);
    for(int i=0;i<index;i++) elements[i]=0;
    int temp_1024=1;
    // Try every subset of j prime factors for j = 1..min(10, index);
    // prev_permutation enumerates all placements of the j selected bits.
    for(int j=1;j<=10 && j<=index;j++){
        elements[j-1]=1;
        const size_t N_t = index;
        std::vector<int> selectors(elements, elements + N_t);
        do{
            int combo=1;
            for (size_t i = 0; i < selectors.size(); ++i){
                if (selectors[i]){
                    combo*=decom[i];
                }
            }
            // Keep the best product that still fits into a block.
            if(combo>temp_1024&&combo<=1024) temp_1024=combo;
            if(combo==1024) break; // cannot do better than exactly 1024
        } while (prev_permutation(selectors.begin(), selectors.end()));
    }
    free(elements);
    return temp_1024;
}
/**
 * Decomposes N into three integers with Nx*Ny*Nz == N, written to
 * *Nx_a, *Ny_a, *Nz_a, for use as a 3D CUDA launch configuration.
 *
 * The prime factors of N (found by trial division over the primes < 1000)
 * are distributed across the three outputs; the pairing logic below pairs
 * small factors with large ones so Nx and Ny stay balanced. Exits the
 * program if N has a prime factor >= 1000 ("bad" polymer number).
 *
 * gpu_info is currently unused but kept for interface compatibility.
 */
extern void factor_decompose(GPU_info *gpu_info,long N, int *Nx_a,int *Ny_a,int *Nz_a){
    int Nx,Ny,Nz;
    long temp;
    temp=N;
    int decom[10000],index=0;
    // Trial division: decom[0..index) holds N's prime factors in ascending order.
    for(int i=0;i<168;i++){
        while(temp%prime[i]==0){
            temp=temp/prime[i];
            decom[index++]=prime[i];
        };
    }
    //printf("%ld prime is ",N);
    //for(int i=0;i<index;i++) printf(" %d ",decom[i]);
    //printf("\n");
    if(temp!=1) {
        // A prime factor >= 1000 remains: the input cannot be decomposed.
        printf("please give a \"good\" polymer number!\n");
        exit(0);
    }
    if(index==1) {
        // Single prime factor: everything goes into Nx.
        Nx=N;
        Ny=1;
        Nz=1;
    }
    else if(index==2){
        // Two factors: split between Nx and Ny.
        Nz=1;//decom[index-1]
        Ny=decom[0];
        Nx=decom[1];
        //printf("%d %d\n",Nx,Ny);
    }
    else if(index>2){
        Nx=1;
        Ny=1;
        Nz=1;
        if(index%2==0){
            // Even number of factors: Nz takes the largest and the smallest;
            // the rest are paired (small x large) alternately into Nx and Ny.
            Nz=decom[index-1]*decom[0];
            if((index-2)%4==0){
                for(int i=0;i<(index-2)/4;i++){
                    Nx*=decom[i+1]*decom[index-1-i-1];
                    Ny*=decom[(index-2)/4+1+i]*decom[index-1-(index-2)/4-1-i];
                }
                //printf("%d %d %d\n",Nx,Ny,Nz);
            }
            else if((index-2)==2){
                // Exactly two factors remain after Nz: one each to Ny and Nx.
                Ny=decom[1];
                Nx=decom[2];
                //printf("%d %d %d\n",Nx,Ny,Nz);
            }
            else {
                // Remaining count not divisible by 4: give Nz two more factors,
                // then pair the rest as above.
                Nz*=decom[1]*decom[2];
                for(int i=0;i<(index-4)/4;i++){
                    Nx*=decom[i+3]*decom[index-1-i-1];
                    Ny*=decom[(index-2)/4+3+i]*decom[index-1-(index-2)/4-1-i];
                }
                //printf("%d %d %d\n",Nx,Ny,Nz);
            }
        }
        else{
            // Odd number of factors: Nz takes the largest; pair the rest.
            Nz=decom[index-1];
            if((index-1)%4==0){
                for(int i=0;i<(index-1)/4;i++){
                    Nx*=decom[i]*decom[index-1-i-1];
                    Ny*=decom[(index-1)/4+i]*decom[index-1-(index-1)/4-i-1];
                }
                //printf("%d: %d %d %d\n",index,Nx,Ny,Nz);
            }
            else if((index-1)==2){
                Ny=decom[0];
                Nx=decom[1];
                //printf("%d %d %d\n",Nx,Ny,Nz);
            }
            else {
                // Remaining count not divisible by 4: give Nz the two smallest,
                // then pair the rest with a stride of 2.
                Nz*=decom[0]*decom[1];
                for(int i=0;i<(index-3)/4;i++){
                    Nx*=decom[i*2+2]*decom[index-1-i*2-1];
                    Ny*=decom[i*2+3]*decom[index-3-i*2];
                }
                //printf("%d %d %d\n",Nx,Ny,Nz);
            }
        }
    }
    if(N==1) {
        Nx=1;
        Ny=1;
        Nz=1;
    }
    // Sanity check: the decomposition must reproduce N exactly.
    if(Nx*Ny*Nz==N) {
        *Nx_a=Nx;
        *Ny_a=Ny;
        *Nz_a=Nz;
    }
    else {
        printf("Error Nx %d *Ny %d *Nz %d!= N %ld\n",Nx,Ny,Nz,N);
        exit(0);
    }
}
//<! Read in CPU and GPU polymer structure infomation in phase->poly_arch
/**
 * Reads the polymer architecture from "polymer.dat" and replicates it into
 * unified (managed) memory for every GPU in phase->poly_arch. GPU 0 parses
 * the file; the remaining GPUs copy GPU 0's data. Also derives the per-node
 * and per-GPU polymer/bead counts.
 *
 * Fixes vs. previous revision:
 *  - every hipMallocManaged() size is now in BYTES (count * sizeof(element));
 *    previously only the element count was passed, under-allocating buffers.
 *  - conection_list[i] is allocated once, after the neighbour count is
 *    final, instead of being re-allocated (and leaked) every inner iteration.
 *  - fopen() failure is detected (same pattern as initialize_values()), and
 *    the file handle is closed on exit (was leaked).
 */
extern void Read_polymer_config(MPI_info *mpi_info,GPU_info *gpu_info,Phase *phase){
    FILE *dp;
    dp=fopen("polymer.dat","r");
    if(dp==NULL) {printf("Empty pointer allocate at line number %d in file %s\n", __LINE__, __FILE__);exit(0);}
    fscanf(dp,"Number of polymer type:%d\n",&phase->polymer_type_number);
    fscanf(dp,"Number of total polymer:%d\n",&phase->n_polymers);
    phase->n_polymers_per_node=phase->n_polymers/mpi_info->total_nodes;
    phase->n_polymers_per_gpu=phase->n_polymers_per_node/gpu_info->GPU_N;
    //printf("total: %d Node: %d GPU: %d\n",phase->n_polymers,phase->n_polymers_per_node,phase->n_polymers_per_gpu);
    phase->poly_arch.resize(gpu_info->GPU_N);
    phase->n_polymer_type=(unsigned int *)malloc(sizeof(unsigned int)*phase->polymer_type_number);
    phase->n_polymers_type_per_node=(unsigned int *)malloc(sizeof(unsigned int)*phase->polymer_type_number);
    phase->n_polymers_type_per_gpu=(unsigned int *)malloc(sizeof(unsigned int)*phase->polymer_type_number);
    for(int i=0;i<phase->polymer_type_number;i++){
        fscanf(dp,"%d ",&phase->n_polymer_type[i]);
        phase->n_polymers_type_per_node[i]=phase->n_polymer_type[i]/mpi_info->total_nodes;
        phase->n_polymers_type_per_gpu[i]=phase->n_polymers_type_per_node[i]/gpu_info->GPU_N;
    }
    fscanf(dp,"\n--------\n");
    if(mpi_info->current_node==0)
        for(int i=0;i<phase->polymer_type_number;i++){
            //printf("Type %d :node: %d GPU: %d\n",i,phase->n_polymers_type_per_node[i],phase->n_polymers_type_per_gpu[i]);
        }
    for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
        checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index]));
        // Managed allocation sizes are in bytes: count * sizeof(element).
        checkCudaErrors(hipMallocManaged(&phase->poly_arch[gpu_index],phase->polymer_type_number*sizeof(*phase->poly_arch[gpu_index])));
        if(gpu_index==0){
            // GPU 0 reads the architecture from the file.
            for(int polymer_index=0;polymer_index<phase->polymer_type_number;polymer_index++){
                int temp;
                fscanf(dp,"Polymer type: %d\n",&temp);//phase->poly_arch[gpu_index][polymer_index].polymer_type_index
                fscanf(dp,"Polymer length: %d\n",&phase->poly_arch[gpu_index][polymer_index].poly_length);
                fscanf(dp,"Polymer length unit: %d\n",&phase->reference_Nbeads);
                checkCudaErrors(hipMallocManaged(&phase->poly_arch[gpu_index][polymer_index].Monotype,phase->poly_arch[gpu_index][polymer_index].poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].Monotype)));
                fscanf(dp,"Monomer type: ");
                for(int i=0;i<phase->poly_arch[gpu_index][polymer_index].poly_length;i++){
                    fscanf(dp,"%d ",&phase->poly_arch[gpu_index][polymer_index].Monotype[i]);
                }
                int poly_length=phase->poly_arch[gpu_index][polymer_index].poly_length;
                fscanf(dp,"\n");
                // Bond adjacency matrix, stored column-major: connection[i + j*poly_length].
                checkCudaErrors(hipMallocManaged(&phase->poly_arch[gpu_index][polymer_index].connection,poly_length*poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].connection)));
                for(int i=0;i<poly_length;i++){
                    for(int j=0;j<poly_length;j++){
                        fscanf(dp,"%d ",&phase->poly_arch[gpu_index][polymer_index].connection[i+j*poly_length]);
                    }//! end for j
                    fscanf(dp,"\n");
                }//! end for i
                checkCudaErrors(hipMallocManaged(&phase->poly_arch[gpu_index][polymer_index].neigh_num,poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].neigh_num)));
                checkCudaErrors(hipMallocManaged(&phase->poly_arch[gpu_index][polymer_index].conection_list,poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].conection_list)));
                int index;
                for(int i=0;i<poly_length;i++){
                    // Count the neighbours of bead i...
                    phase->poly_arch[gpu_index][polymer_index].neigh_num[i]=0;
                    for(int j=0;j<poly_length;j++){
                        if(phase->poly_arch[gpu_index][polymer_index].connection[i+j*poly_length]==1){
                            phase->poly_arch[gpu_index][polymer_index].neigh_num[i]++;
                        }//!< end if connnected
                    }//!< for loop j
                    // ...then allocate and fill the adjacency list once.
                    checkCudaErrors(hipMallocManaged(&phase->poly_arch[gpu_index][polymer_index].conection_list[i],phase->poly_arch[gpu_index][polymer_index].neigh_num[i]*sizeof(*phase->poly_arch[gpu_index][polymer_index].conection_list[i])));
                    index=0;
                    for(int j=0;j<poly_length;j++){
                        if(phase->poly_arch[gpu_index][polymer_index].connection[i+j*poly_length]==1)
                            phase->poly_arch[gpu_index][polymer_index].conection_list[i][index++]=j;
                    }
                }//!< for loop i
            }//! end for polymer index
            // Derived bead counts (totals, per node, per GPU).
            phase->num_all_beads=0;
            phase->num_all_beads_per_node=0;
            phase->num_all_beads_per_gpu=0;
            phase->num_bead_polymer_type=(unsigned int *)malloc(sizeof(unsigned int)*phase->polymer_type_number);
            phase->num_bead_polymer_type_per_node=(unsigned int *)malloc(sizeof(unsigned int)*phase->polymer_type_number);
            phase->num_bead_polymer_type_per_gpu=(unsigned int *)malloc(sizeof(unsigned int)*phase->polymer_type_number);
            // NOTE(review): phase->n_mono_types is read from configuration.dat in
            // initialize_values(); if this function runs first, these sizes use
            // an uninitialized value — confirm the call order.
            phase->num_bead_type=(unsigned int *)malloc(sizeof(unsigned int)*phase->n_mono_types);
            phase->num_bead_type_per_node=(unsigned int *)malloc(sizeof(unsigned int)*phase->n_mono_types);
            phase->num_bead_type_per_gpu=(unsigned int *)malloc(sizeof(unsigned int)*phase->n_mono_types);
            for(int polymer_index=0;polymer_index<phase->polymer_type_number;polymer_index++){
                int length=phase->poly_arch[gpu_index][polymer_index].poly_length;
                phase->num_bead_polymer_type[polymer_index]=length*phase->n_polymer_type[polymer_index];
                phase->num_bead_polymer_type_per_node[polymer_index]=length*phase->n_polymers_type_per_node[polymer_index];
                phase->num_bead_polymer_type_per_gpu[polymer_index]=length*phase->n_polymers_type_per_gpu[polymer_index];
                phase->num_all_beads+=length*phase->n_polymer_type[polymer_index];
                phase->num_all_beads_per_node+=length*phase->n_polymers_type_per_node[polymer_index];
                phase->num_all_beads_per_gpu+=length*phase->n_polymers_type_per_gpu[polymer_index];
            }
        }// ! end if (gpu_index==0)
        else{
            // Replicate GPU 0's architecture into this GPU's managed memory.
            for(int polymer_index=0;polymer_index<phase->polymer_type_number;polymer_index++){
                phase->poly_arch[gpu_index][polymer_index].polymer_type_index=phase->poly_arch[0][polymer_index].polymer_type_index;
                phase->poly_arch[gpu_index][polymer_index].poly_length=phase->poly_arch[0][polymer_index].poly_length;
                checkCudaErrors(hipMallocManaged(&phase->poly_arch[gpu_index][polymer_index].Monotype,phase->poly_arch[gpu_index][polymer_index].poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].Monotype)));
                for(int i=0;i<phase->poly_arch[gpu_index][polymer_index].poly_length;i++){
                    phase->poly_arch[gpu_index][polymer_index].Monotype[i]=phase->poly_arch[0][polymer_index].Monotype[i];
                }
                int poly_length=phase->poly_arch[gpu_index][polymer_index].poly_length;
                checkCudaErrors(hipMallocManaged(&phase->poly_arch[gpu_index][polymer_index].connection,poly_length*poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].connection)));
                for(int i=0;i<poly_length;i++){
                    for(int j=0;j<poly_length;j++){
                        phase->poly_arch[gpu_index][polymer_index].connection[i+j*poly_length]=phase->poly_arch[0][polymer_index].connection[i+j*poly_length];
                    }//! end for j
                }//! end for i
                checkCudaErrors(hipMallocManaged(&phase->poly_arch[gpu_index][polymer_index].neigh_num,poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].neigh_num)));
                checkCudaErrors(hipMallocManaged(&phase->poly_arch[gpu_index][polymer_index].conection_list,poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].conection_list)));
                int index;
                for(int i=0;i<poly_length;i++){
                    phase->poly_arch[gpu_index][polymer_index].neigh_num[i]=0;
                    for(int j=0;j<poly_length;j++){
                        if(phase->poly_arch[gpu_index][polymer_index].connection[i+j*poly_length]==1){
                            phase->poly_arch[gpu_index][polymer_index].neigh_num[i]++;
                        }//!< end if connnected
                    }//!< for loop j
                    checkCudaErrors(hipMallocManaged(&phase->poly_arch[gpu_index][polymer_index].conection_list[i],phase->poly_arch[gpu_index][polymer_index].neigh_num[i]*sizeof(*phase->poly_arch[gpu_index][polymer_index].conection_list[i])));
                    index=0;
                    for(int j=0;j<poly_length;j++){
                        if(phase->poly_arch[gpu_index][polymer_index].connection[i+j*poly_length]==1)
                            phase->poly_arch[gpu_index][polymer_index].conection_list[i][index++]=j;
                    }
                }//!< for loop i
            }// end for polymer
        } // end if gpu_index ==0
    }
    fclose(dp); // previously leaked
}
extern void initialize_values(GPU_info *gpu_info,Phase *phase){
phase->start_clock = time(NULL);
phase->n_accepts = 0;
phase->n_moves =0;
phase->GPU_N=gpu_info->GPU_N;
phase->gridNx=gpu_info->gridNx;
phase->gridNy=gpu_info->gridNy;
phase->gridNz=gpu_info->gridNz;
phase->polymerNx=gpu_info->polymerNx;
phase->polymerNy=gpu_info->polymerNy;
phase->polymerNz=gpu_info->polymerNz;
phase->ana_info.delta_mc_Re=10;
phase->ana_info.filename = (char*)malloc( 200*sizeof(char) );
sprintf(phase->ana_info.filename,"Re.dat");
// Reference Harmonic Spring Cste
for(int i=0;i<gpu_info->GPU_N;i++)
for(int j=0;j<phase->polymer_type_number;j++)
phase->poly_arch[i][j].reference_Nbeads=phase->reference_Nbeads;
phase->harmonic_spring_Cste =1.0 / sqrt(3.0 * (phase->reference_Nbeads - 1.0));
//Reference energy scale for harmonic springs.
phase->harmonic_normb =1.0 / (2.0 * phase->harmonic_spring_Cste * phase->harmonic_spring_Cste);
FILE *dp;
dp=fopen("configuration.dat","r");
if(dp==NULL) {printf("Empty pointer allocate at line number %d in file %s\n", __LINE__, __FILE__);exit(0);}
fscanf(dp,"Monomer type number: %d\n",&phase->n_mono_types);
phase->xn=(double **)malloc(sizeof(double *)*phase->n_mono_types);
for(int i=0;i<phase->n_mono_types;i++) phase->xn[i]=(double *)malloc(sizeof(double )*phase->n_mono_types);
for(int j=0;j<phase->n_mono_types;j++){
for(int i=0;i<phase->n_mono_types;i++) {
fscanf(dp,"%lg ",&phase->xn[i][j]);
}
fscanf(dp,"\n");
}
fscanf(dp,"D0 = %lg; LY = %lg; LZ = %lg;\n",&phase->Lx,&phase->Ly,&phase->Lz);
fscanf(dp,"N = %d %d %d \n",&phase->nx,&phase->ny,&phase->nz);
phase->n_cells = phase->nx * phase->ny * phase->nz;
fscanf(dp,"time = %d;\n",&phase->time);
//phase->start_time = phase->time;
fscanf(dp,"xiN = %lg;\n",&phase->xiN);
fscanf(dp,"dt*N/xi = %lg;\n",&phase->dtNxi);
fscanf(dp,"kT = %lg;\n",&phase->kT);
//fscanf(dp,"TCHECK = %d;\n",&phase->Tcheck);
//fscanf(dp,"TWRITE = %d\n;",&phase->Twrite);
//MPI_Allreduce(&(p->n_polymers), &n_polymers_global_sum, 1,MPI_UNSIGNED, MPI_SUM, info_MPI->SOMA_MPI_Comm);
//assert(p->n_polymers_global == n_polymers_global_sum);
phase->area51.resize(gpu_info->GPU_N);
phase->phase_info_gpu.resize(gpu_info->GPU_N);
phase->external_field_unified.resize(gpu_info->GPU_N);
phase->umbrella_field_unified.resize(gpu_info->GPU_N);
phase->average_field_unified.resize(gpu_info->GPU_N);
phase->temp_average_field_unified.resize(gpu_info->GPU_N);
phase->MaxThreadDensity=factor_decompose_1024(gpu_info,phase->nx*phase->ny*phase->nz);
phase->MaxThreadPolymer=factor_decompose_1024(gpu_info,phase->n_polymers_per_gpu);
//printf("phase->MaxThreadDensity=%d phase->MaxThreadPolymer=%d\n",phase->MaxThreadDensity,phase->MaxThreadPolymer);
for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){// gpu_info->GPU_N
checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index]));
phase->area51[gpu_index]=NULL;
phase->external_field_unified[gpu_index]=NULL;
phase->umbrella_field_unified[gpu_index]=NULL;
phase->average_field_unified[gpu_index]=NULL;
phase->temp_average_field_unified[gpu_index]=NULL;
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]),sizeof(Phase_info_gpu)));
phase->phase_info_gpu[gpu_index]->nx=phase->nx;
phase->phase_info_gpu[gpu_index]->ny=phase->ny;
phase->phase_info_gpu[gpu_index]->nz=phase->nz;
phase->phase_info_gpu[gpu_index]->n_cells=phase->nx*phase->ny*phase->nz;
//printf("%d: %d %d %d\n",phase->phase_info_gpu[gpu_index]->n_cells,phase->phase_info_gpu[gpu_index]->nx,phase->phase_info_gpu[gpu_index]->ny,phase->phase_info_gpu[gpu_index]->nz);
phase->phase_info_gpu[gpu_index]->Lx=phase->Lx;
phase->phase_info_gpu[gpu_index]->Ly=phase->Ly;
phase->phase_info_gpu[gpu_index]->Lz=phase->Lz;
phase->phase_info_gpu[gpu_index]->iLx=1/phase->Lx;
phase->phase_info_gpu[gpu_index]->iLy=1/phase->Ly;
phase->phase_info_gpu[gpu_index]->iLz=1/phase->Lz;
phase->phase_info_gpu[gpu_index]->dx=phase->Lx/phase->nx;
phase->phase_info_gpu[gpu_index]->dy=phase->Ly/phase->ny;
phase->phase_info_gpu[gpu_index]->dz=phase->Lz/phase->nz;
phase->phase_info_gpu[gpu_index]->polymer_type_number=phase->polymer_type_number;
phase->phase_info_gpu[gpu_index]->n_polymers=phase->n_polymers;
phase->phase_info_gpu[gpu_index]->n_polymers_per_node=phase->n_polymers_per_node;
phase->phase_info_gpu[gpu_index]->n_polymers_per_gpu=phase->n_polymers_per_gpu;
phase->phase_info_gpu[gpu_index]->n_mono_types=phase->n_mono_types;
phase->phase_info_gpu[gpu_index]->num_all_beads=phase->num_all_beads;
phase->phase_info_gpu[gpu_index]->num_all_beads_per_node=phase->num_all_beads_per_node;
phase->phase_info_gpu[gpu_index]->num_all_beads_per_gpu=phase->num_all_beads_per_gpu;
phase->phase_info_gpu[gpu_index]->MaxThreadDensity=phase->MaxThreadDensity;
phase->phase_info_gpu[gpu_index]->MaxThreadPolymer=phase->MaxThreadPolymer;
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->n_polymer_type),sizeof(unsigned int)*phase->polymer_type_number));
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->n_polymers_type_per_node),sizeof(unsigned int)*phase->polymer_type_number));
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->n_polymers_type_per_gpu),sizeof(unsigned int)*phase->polymer_type_number));
for(int i=0;i<phase->polymer_type_number;i++){
phase->phase_info_gpu[gpu_index]->n_polymer_type[i]=phase->n_polymer_type[i];
phase->phase_info_gpu[gpu_index]->n_polymers_type_per_node[i]=phase->n_polymers_type_per_node[i];
phase->phase_info_gpu[gpu_index]->n_polymers_type_per_gpu[i]=phase->n_polymers_type_per_gpu[i];
}
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->num_bead_type),sizeof(unsigned int)*phase->n_mono_types));
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->num_bead_type_per_node),sizeof(unsigned int)*phase->n_mono_types));
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->num_bead_type_per_gpu),sizeof(unsigned int)*phase->n_mono_types));
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->num_bead_polymer_type),sizeof(unsigned int)*phase->polymer_type_number));
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->num_bead_polymer_type_per_node),sizeof(unsigned int)*phase->polymer_type_number));
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->num_bead_polymer_type_per_gpu),sizeof(unsigned int)*phase->polymer_type_number));
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->polymer_basis_gpu),sizeof(unsigned int)*(phase->polymer_type_number+1)));
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->monomer_poly_basis_gpu),sizeof(unsigned int)*(phase->polymer_type_number+1)));
phase->phase_info_gpu[gpu_index]->polymer_basis_gpu[0]=0;
phase->phase_info_gpu[gpu_index]->monomer_poly_basis_gpu[0]=0;
for(int i=0;i<phase->polymer_type_number;i++){
phase->phase_info_gpu[gpu_index]->num_bead_polymer_type[i]=phase->num_bead_polymer_type[i];
phase->phase_info_gpu[gpu_index]->num_bead_polymer_type_per_node[i]=phase->num_bead_polymer_type_per_node[i];
phase->phase_info_gpu[gpu_index]->num_bead_polymer_type_per_gpu[i]=phase->num_bead_polymer_type_per_gpu[i];
phase->phase_info_gpu[gpu_index]->monomer_poly_basis_gpu[i+1]=phase->phase_info_gpu[gpu_index]->monomer_poly_basis_gpu[i]+phase->num_bead_polymer_type_per_gpu[i];
phase->phase_info_gpu[gpu_index]->polymer_basis_gpu[i+1]=phase->phase_info_gpu[gpu_index]->polymer_basis_gpu[i]+phase->n_polymers_type_per_gpu[i];
}
for(int polymer_index=0; polymer_index<phase->polymer_type_number;polymer_index++){
checkCudaErrors(hipMallocManaged(&phase->poly_arch[gpu_index][polymer_index].mono_type_length,phase->n_mono_types));
for(int i=0;i<phase->n_mono_types;i++) phase->poly_arch[gpu_index][polymer_index].mono_type_length[i]=0;
for(int i=0;i<phase->poly_arch[gpu_index][polymer_index].poly_length;i++) phase->poly_arch[gpu_index][polymer_index].mono_type_length[phase->poly_arch[gpu_index][polymer_index].Monotype[i]]++;
}
for(int i=0;i<phase->n_mono_types;i++){
phase->num_bead_type[i]=0;
phase->num_bead_type_per_node[i]=0;
phase->num_bead_type_per_gpu[i]=0;
for(int j=0;j<phase->polymer_type_number;j++){
phase->num_bead_type[i]+=phase->poly_arch[gpu_index][j].mono_type_length[i]*phase->n_polymer_type[j];
phase->num_bead_type_per_node[i]+=phase->poly_arch[gpu_index][j].mono_type_length[i]*phase->n_polymers_type_per_node[j];
phase->num_bead_type_per_gpu[i]+=phase->poly_arch[gpu_index][j].mono_type_length[i]*phase->n_polymers_type_per_gpu[j];
}
phase->phase_info_gpu[gpu_index]->num_bead_type[i]=phase->num_bead_type[i];
phase->phase_info_gpu[gpu_index]->num_bead_type_per_node[i]=phase->num_bead_type_per_node[i];
phase->phase_info_gpu[gpu_index]->num_bead_type_per_gpu[i]=phase->num_bead_type_per_gpu[i];
}
//printf("%d %d %d %d %d\n",phase->num_bead_type_per_node[0],phase->num_bead_type_per_node[1],phase->n_polymers_type_per_node[0],phase->n_polymers_type_per_node[1],phase->n_polymers_type_per_node[2]);
phase->phase_info_gpu[gpu_index]->reference_Nbeads=phase->reference_Nbeads;
phase->phase_info_gpu[gpu_index]->inverse_refbeads=1/(double)phase->reference_Nbeads;
phase->phase_info_gpu[gpu_index]->harmonic_normb=phase->harmonic_normb;
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->xn),sizeof(double)*phase->n_mono_types*phase->n_mono_types));
checkCudaErrors(hipMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->field_scaling_type),sizeof(double)*phase->n_mono_types));
for(int t_type=0;t_type<phase->n_mono_types;t_type++)
for(int s_type=0;s_type<phase->n_mono_types;s_type++)
phase->phase_info_gpu[gpu_index]->xn[t_type+phase->n_mono_types*s_type]=phase->xn[t_type][s_type];
}
// Max safe move distance
phase->max_safe_jump = phase->Lx/phase->nx < phase->Ly / phase->ny ? phase->Lx/phase->nx : phase->Ly / phase->ny;
phase->max_safe_jump = phase->max_safe_jump < phase->Lz / phase->nz ? phase->max_safe_jump : phase->Lz/phase->nz;
phase->max_safe_jump *= 0.95;
int Nx,Ny,Nz;
factor_decompose(gpu_info,phase->n_cells/phase->MaxThreadDensity,&Nx,&Ny,&Nz);
gpu_info->gridNx=Nx;
gpu_info->gridNy=Ny*Nz;
gpu_info->gridNz=phase->n_mono_types;
phase->gridNx=gpu_info->gridNx;
phase->gridNy=gpu_info->gridNy;
phase->gridNz=gpu_info->gridNz;
factor_decompose(gpu_info,phase->n_polymers_per_gpu/phase->MaxThreadPolymer,&Nx,&Ny,&Nz);
gpu_info->polymerNx=Nx;
gpu_info->polymerNy=Ny;
gpu_info->polymerNz=Nz;
phase->polymerNx=gpu_info->polymerNx;
phase->polymerNy=gpu_info->polymerNy;
phase->polymerNz=gpu_info->polymerNz;
//printf("%d %d %d %d\n",phase->MaxThreadDensity,gpu_info->gridNx,gpu_info->gridNy,gpu_info->gridNz);
//printf("%d %d %d %d\n",phase->MaxThreadPolymer,gpu_info->polymerNx,gpu_info->polymerNy,gpu_info->polymerNz);
fclose(dp);
//---------------------------test--------------------------
/*
printf("D0 = %lg; LY = %lg; LZ = %lg;\n",p->Lx,p->Ly,p->Lz);
printf("N = %d %d %d \n",p->nx,p->ny,p->nz);
printf("time = %d\n",p->time);
printf("dt*N/xi = %lg\n",p->dtNxi);
printf("kT = %lg\n",p->kT);
printf("TCHECK = %d\n",p->Tcheck);
printf("TWRITE = %d\n",p->Twrite);
for(int j=0;j<p->n_mono_types;j++){
for(int i=0;i<p->n_mono_types;i++) {
printf("%g ",p->xn[i][j]);
}
printf("\n");
}
*/
}
extern int initialize_structure_GPU(GPU_info *gpu_info,Phase *phase){
    // Allocate the per-GPU simulation arrays (bead positions, RNG states,
    // density/omega fields) in managed memory and fill in the per-monomer-type
    // constants (field scaling, mobility A, RMS trial displacement R) on the
    // host and on every device.
    // Returns 1 on success, -1 if a host-side malloc fails.
    phase->pos.resize(gpu_info->GPU_N);
    phase->fields_unified.resize(gpu_info->GPU_N);
    phase->fields_32.resize(gpu_info->GPU_N);
    phase->omega_field_unified.resize(gpu_info->GPU_N);
    phase->temp_average_field_unified.resize(gpu_info->GPU_N);
    phase->average_field_unified.resize(gpu_info->GPU_N);
    for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
        checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index]));
        // 3 floats (x,y,z) per bead owned by this GPU.
        checkCudaErrors(hipMallocManaged(&(phase->pos[gpu_index]),sizeof(float)*phase->num_all_beads_per_gpu*3));
        // One Philox RNG state per polymer (each polymer is driven by one thread).
        checkCudaErrors(hipMallocManaged(&(gpu_info->state[gpu_index]),sizeof(hiprandStatePhilox4_32_10_t)*phase->n_polymers_per_gpu));
        checkCudaErrors(hipMallocManaged(&(phase->fields_unified[gpu_index]),phase->n_cells*phase->n_mono_types*sizeof(int)));
        checkCudaErrors(hipMallocManaged(&(phase->fields_32[gpu_index]),phase->n_cells*phase->n_mono_types*sizeof(uint32_t)));
        checkCudaErrors(hipMallocManaged(&(phase->omega_field_unified[gpu_index]),phase->n_mono_types*phase->n_cells*sizeof(double)));
        // BUGFIX: this buffer was sized n_mono_types*n_mono_types ints while its
        // companion average_field_unified holds n_cells*n_mono_types entries.
        // Size the temporary identically: over-allocation is harmless, a
        // too-small buffer is a heap overflow waiting to happen.
        checkCudaErrors(hipMallocManaged(&(phase->temp_average_field_unified[gpu_index]),phase->n_cells*phase->n_mono_types*sizeof(int)));
        checkCudaErrors(hipMallocManaged(&(phase->average_field_unified[gpu_index]),phase->n_cells*phase->n_mono_types*sizeof(int)));
    }//!< loop for gpu_index gpu device
    phase->field_scaling_type = (double *) malloc(phase->n_mono_types * sizeof(double));
    if (phase->field_scaling_type == NULL) {
        fprintf(stderr, "ERROR: Malloc %s:%d\n", __FILE__, __LINE__);
        return -1;
    }
    // Scaling factor converting an integer cell occupation count into a
    // normalized density (cells per bead); identical for every monomer type.
    long ncells = phase->n_cells;
    for (unsigned int i = 0; i < phase->n_mono_types; i++)
        phase->field_scaling_type[i] =(ncells / ((double) phase->num_all_beads));
    // Mirror the scaling factors into each device's Phase_info_gpu struct.
    for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
        for(int i=0;i<phase->n_mono_types;i++)
            phase->phase_info_gpu[gpu_index]->field_scaling_type[i]=phase->field_scaling_type[i];
    }
    phase->R = (double*) malloc( phase->n_mono_types * sizeof(double));
    phase->A = (double*) malloc( phase->n_mono_types * sizeof(double));
    // BUGFIX: phase->A was allocated but never NULL-checked.
    if(phase->R == NULL || phase->A == NULL){
        fprintf(stderr, "ERROR: By malloc TT800 , %s %d ",__FILE__, __LINE__ );
        return -1;
    }
    for (unsigned int i = 0; i < phase->n_mono_types; i++){
        //! \todo kBT required.
        phase->A[i] = phase->dtNxi / phase->reference_Nbeads; // per-bead MC mobility
        phase->R[i] = sqrt( phase->A[i]* 2);                  // RMS trial displacement
    }
    phase->n_accepts = 0;
    phase->n_moves = 0;
    for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
        checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index]));
        // BUGFIX: the original allocated only n_mono_types BYTES here; the copy
        // loop below writes n_mono_types doubles, overflowing the allocation.
        checkCudaErrors(hipMallocManaged(&(phase->phase_info_gpu[gpu_index]->A),phase->n_mono_types*sizeof(double)));
        checkCudaErrors(hipMallocManaged(&(phase->phase_info_gpu[gpu_index]->R),phase->n_mono_types*sizeof(double)));
        phase->phase_info_gpu[gpu_index]->max_safe_jump=phase->max_safe_jump;
        for (unsigned int i = 0; i < phase->n_mono_types; i++){
            phase->phase_info_gpu[gpu_index]->A[i]=phase->A[i];
            phase->phase_info_gpu[gpu_index]->R[i]=phase->R[i];
        }
    }// end loop gpu
    // Cache inverse box lengths and grid spacings for the kernels.
    phase->iLx = 1.0/phase->Lx;
    phase->iLy = 1.0/phase->Ly;
    phase->iLz = 1.0/phase->Lz;
    phase->dx=phase->Lx/phase->nx;
    phase->dy=phase->Ly/phase->ny;
    phase->dz=phase->Lz/phase->nz;
    //phase->sets = NULL; // Default init of the sets
    phase->max_set_members = 0;
    return 1;
}// end routine
extern int initialize_random_generator(MPI_info *mpi_info,GPU_info *gpu_info,Phase *phase){
    // Seed the per-polymer Philox RNG states on every GPU via the
    // setup_curand kernel.  With use_fixed_seed != 0 the seed is the constant
    // 1 (reproducible runs); otherwise the wall clock provides the seed.
    const int use_fixed_seed = 0;   // flip to 1 for a deterministic run
    const unsigned int seed = use_fixed_seed ? 1u : (unsigned int)time(NULL);
    for(int dev = 0; dev < gpu_info->GPU_N; dev++){
        checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[dev]));
        // The setup kernel also needs to know which MPI rank owns this device.
        phase->phase_info_gpu[dev]->current_node = mpi_info->current_node;
        phase->phase_info_gpu[dev]->total_nodes  = mpi_info->total_nodes;
        dim3 launch_grid(gpu_info->polymerNx, gpu_info->polymerNy, gpu_info->polymerNz);
        hipLaunchKernelGGL(( setup_curand), dim3(launch_grid),dim3(phase->MaxThreadPolymer),0,gpu_info->stream[dev], phase->phase_info_gpu[dev],dev, seed,gpu_info->state[dev]);
        checkCudaErrors( hipDeviceSynchronize());
    }
    return 0;
}
extern void Generate_init_coord(MPI_info *mpi_info,GPU_info *gpu_info,Phase *phase){
    // Create the initial bead coordinates.
    //   read_file == 0: random configurations generated by the GPU kernel.
    //   read_file == 1: run the same kernel as a fallback, then overwrite the
    //                   positions with coordinates read from coord.dat
    //                   (one "x y z" line per bead, all nodes' beads in order).
    int read_file=phase->read_file;
    if(read_file==0){
        // Launch the init kernel on every device first ...
        for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
            checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index]));
            dim3 grid(gpu_info->polymerNx,gpu_info->polymerNy,gpu_info->polymerNz);
            hipLaunchKernelGGL(( initialize_coord), dim3(grid),dim3(phase->MaxThreadPolymer),0,gpu_info->stream[gpu_index], phase->pos[gpu_index],phase->phase_info_gpu[gpu_index],phase->poly_arch[gpu_index],gpu_info->state[gpu_index]);
        }
        // ... then wait for all of them, so the launches overlap.
        for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
            checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index]));
            checkCudaErrors( hipDeviceSynchronize());
        }
    }
    else if(read_file==1){
        FILE *dp;
        for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
            checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index]));
            dim3 grid(gpu_info->polymerNx,gpu_info->polymerNy,gpu_info->polymerNz);
            hipLaunchKernelGGL(( initialize_coord), dim3(grid),dim3(phase->MaxThreadPolymer),0,gpu_info->stream[gpu_index], phase->pos[gpu_index],phase->phase_info_gpu[gpu_index],phase->poly_arch[gpu_index],gpu_info->state[gpu_index]);
            checkCudaErrors( hipDeviceSynchronize());
        }
        dp=fopen("coord.dat","r");
        if(dp==NULL) {printf("coord.dat did not exit ^_^, program smartly change config to generate a random coord.\n");return;}
        int cpu_index=mpi_info->current_node;
        // Skip past the beads that belong to the nodes ranked before this one.
        int move_to=cpu_index*phase->num_all_beads_per_node;
        double a,b,c;
        // BUGFIX: the original passed a 4th argument (&d) to the 3-specifier
        // format "%lg %lg %lg"; the stray argument was never consumed.  The
        // fscanf return value is now checked so a truncated file cannot leave
        // stale values silently propagating into the positions.
        for(int i=0;i<move_to;i++)
            if(fscanf(dp,"%lg %lg %lg\n",&a,&b,&c)!=3) break;
        for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
            for(int i=0;i<phase->num_all_beads_per_gpu;i++){
                if(fscanf(dp,"%lg %lg %lg\n",&a,&b,&c)!=3) break;
                phase->pos[gpu_index][i*3]=a;
                phase->pos[gpu_index][i*3+1]=b;
                phase->pos[gpu_index][i*3+2]=c;
            }
        }//end loop gpu
        fclose(dp);
    }// end read_file
}
extern void init_all_config(GPU_info *gpu_info,Phase *phase, MPI_info *mpi_info,int argc, char **argv){
// Master initialisation sequence.  The call order matters: the polymer
// architecture (polymer.dat) must exist before initialize_values() sizes the
// per-GPU Phase_info_gpu structs from configuration.dat, the per-GPU arrays
// must exist before the RNG setup kernel runs, and the RNG must be seeded
// before the initial coordinates are generated.
init_scmf(phase,gpu_info,argc, argv);                 // command-line / SCMF parameters
init_cuda(mpi_info,gpu_info,0);                       // device selection; 3rd-arg semantics: TODO confirm
Read_polymer_config(mpi_info,gpu_info,phase);         // polymer.dat: chain architecture
initialize_values(gpu_info,phase);                    // configuration.dat: box, grid, xn matrix
initialize_structure_GPU(gpu_info,phase);             // allocate per-GPU arrays
initialize_random_generator(mpi_info,gpu_info,phase); // seed per-polymer RNG states
Generate_init_coord(mpi_info, gpu_info,phase);        // random or coord.dat positions
//printf("density %d %d %d %d\n",phase->gridNx,phase->gridNy,phase->gridNz,phase->MaxThreadDensity);
//printf("polymer %d %d %d %d\n",phase->polymerNx,phase->polymerNy,phase->polymerNz,phase->MaxThreadPolymer);
}
| 09497dce2667bb8af0768646f4d206bdc42dae97.cu | #include "init_config.h"
// Decompose an integer (e.g. the number of grid points or of polymers) into three factors Nx, Ny, Nz suitable for a CUDA launch grid.
int prime[168]={2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997};
// Find the largest factor of an integer that does not exceed 1024 (the maximum number of threads per CUDA block).
// Return the largest divisor of N that does not exceed 1024 (the maximum
// number of threads per block), so a block size can be chosen that tiles N
// exactly.  gpu_info is unused but kept for interface compatibility.
//
// BUGFIX: the original malloc'd `elements` with `index` slots (one per prime
// factor found) but unconditionally wrote elements[0..9] — a heap overflow
// whenever N has fewer than 10 prime factors.  It also silently dropped prime
// factors >= 1000 (only the first 168 primes were tried).  A direct divisor
// search is equivalent (any divisor <= 1024 is a product of at most 10 prime
// factors, since 2^10 = 1024) and has neither problem.
extern int factor_decompose_1024(GPU_info *gpu_info,long N){
    (void)gpu_info;                       // unused; part of the established interface
    if(N <= 0) return 1;                  // degenerate input: fall back to 1 thread
    long limit = N < 1024 ? N : 1024;
    for(long d = limit; d > 1; d--)
        if(N % d == 0) return (int)d;
    return 1;                             // N is 1 or prime > 1024
}
// Decompose N into three factors (Nx, Ny, Nz) with Nx*Ny*Nz == N, intended to
// shape a 3D CUDA launch grid.  N is first split into its prime factors
// (trial division by the first 168 primes); the factors are then paired from
// both ends of the sorted factor list so the three outputs stay roughly
// balanced.  Exits the program if N contains a prime factor >= 1000 or if the
// recombination does not reproduce N (self-check at the bottom).
extern void factor_decompose(GPU_info *gpu_info,long N, int *Nx_a,int *Ny_a,int *Nz_a){
int Nx,Ny,Nz;
long temp;
temp=N;
// decom[] collects the prime factorisation of N in ascending order;
// index is the number of prime factors (with multiplicity).
int decom[10000],index=0;
for(int i=0;i<168;i++){
while(temp%prime[i]==0){
temp=temp/prime[i];
decom[index++]=prime[i];
};
}
//printf("%ld prime is ",N);
//for(int i=0;i<index;i++) printf(" %d ",decom[i]);
//printf("\n");
// temp != 1 means N has a prime factor larger than the trial-division table.
if(temp!=1) {
printf("please give a \"good\" polymer number!\n");
exit(0);
}
// Single prime factor: everything goes into Nx.
if(index==1) {
Nx=N;
Ny=1;
Nz=1;
}
else if(index==2){
Nz=1;//decom[index-1]
Ny=decom[0];
Nx=decom[1];
//printf("%d %d\n",Nx,Ny);
}
else if(index>2){
// Three or more prime factors: pair the smallest with the largest so each
// of Nx, Ny, Nz gets a balanced share.  The branches below distinguish the
// residue of index mod 4 so the leftover factors land in Nz.
// NOTE(review): the pairing scheme is intricate; the Nx*Ny*Nz==N check at
// the end is the authoritative guard on its correctness.
Nx=1;
Ny=1;
Nz=1;
if(index%2==0){
// Even count: Nz takes the extreme pair (largest * smallest).
Nz=decom[index-1]*decom[0];
if((index-2)%4==0){
for(int i=0;i<(index-2)/4;i++){
Nx*=decom[i+1]*decom[index-1-i-1];
Ny*=decom[(index-2)/4+1+i]*decom[index-1-(index-2)/4-1-i];
}
//printf("%d %d %d\n",Nx,Ny,Nz);
}
else if((index-2)==2){
Ny=decom[1];
Nx=decom[2];
//printf("%d %d %d\n",Nx,Ny,Nz);
}
else {
// (index-2) ≡ 2 (mod 4) with more than 4 factors: Nz absorbs two more.
Nz*=decom[1]*decom[2];
for(int i=0;i<(index-4)/4;i++){
Nx*=decom[i+3]*decom[index-1-i-1];
Ny*=decom[(index-2)/4+3+i]*decom[index-1-(index-2)/4-1-i];
}
//printf("%d %d %d\n",Nx,Ny,Nz);
}
}
else{
// Odd count: Nz takes the single largest factor.
Nz=decom[index-1];
if((index-1)%4==0){
for(int i=0;i<(index-1)/4;i++){
Nx*=decom[i]*decom[index-1-i-1];
Ny*=decom[(index-1)/4+i]*decom[index-1-(index-1)/4-i-1];
}
//printf("%d: %d %d %d\n",index,Nx,Ny,Nz);
}
else if((index-1)==2){
Ny=decom[0];
Nx=decom[1];
//printf("%d %d %d\n",Nx,Ny,Nz);
}
else {
// (index-1) ≡ 2 (mod 4): Nz absorbs the two smallest factors as well.
Nz*=decom[0]*decom[1];
for(int i=0;i<(index-3)/4;i++){
Nx*=decom[i*2+2]*decom[index-1-i*2-1];
Ny*=decom[i*2+3]*decom[index-3-i*2];
}
//printf("%d %d %d\n",Nx,Ny,Nz);
}
}
}
// N == 1 produces index == 0 and skips every branch above.
if(N==1) {
Nx=1;
Ny=1;
Nz=1;
}
// Self-check: only publish the decomposition if it reproduces N exactly.
if(Nx*Ny*Nz==N) {
*Nx_a=Nx;
*Ny_a=Ny;
*Nz_a=Nz;
}
else {
printf("Error Nx %d *Ny %d *Nz %d!= N %ld\n",Nx,Ny,Nz,N);
exit(0);
}
}
//!< Read the polymer architecture information into phase->poly_arch for both CPU and GPU (managed memory).
extern void Read_polymer_config(MPI_info *mpi_info,GPU_info *gpu_info,Phase *phase){
    // Read polymer.dat: the number of polymer types and chains, and for every
    // polymer type its length, per-bead monomer types and bond connectivity
    // matrix.  The architecture is stored in managed memory on every GPU:
    // GPU 0 parses the file, the remaining devices get a replica of GPU 0's
    // data.  From the connectivity matrix a per-bead neighbour count and
    // adjacency list are derived.
    FILE *dp;
    dp=fopen("polymer.dat","r");
    // BUGFIX: fopen() was previously unchecked; a missing file crashed in fscanf.
    if(dp==NULL) {printf("Empty pointer allocate at line number %d in file %s\n", __LINE__, __FILE__);exit(0);}
    fscanf(dp,"Number of polymer type:%d\n",&phase->polymer_type_number);
    fscanf(dp,"Number of total polymer:%d\n",&phase->n_polymers);
    phase->n_polymers_per_node=phase->n_polymers/mpi_info->total_nodes;
    phase->n_polymers_per_gpu=phase->n_polymers_per_node/gpu_info->GPU_N;
    phase->poly_arch.resize(gpu_info->GPU_N);
    phase->n_polymer_type=(unsigned int *)malloc(sizeof(unsigned int)*phase->polymer_type_number);
    phase->n_polymers_type_per_node=(unsigned int *)malloc(sizeof(unsigned int)*phase->polymer_type_number);
    phase->n_polymers_type_per_gpu=(unsigned int *)malloc(sizeof(unsigned int)*phase->polymer_type_number);
    for(int i=0;i<phase->polymer_type_number;i++){
        fscanf(dp,"%d ",&phase->n_polymer_type[i]);
        phase->n_polymers_type_per_node[i]=phase->n_polymer_type[i]/mpi_info->total_nodes;
        phase->n_polymers_type_per_gpu[i]=phase->n_polymers_type_per_node[i]/gpu_info->GPU_N;
    }
    fscanf(dp,"\n--------\n");
    for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
        checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index]));
        // BUGFIX: every cudaMallocManaged() below used an ELEMENT COUNT where
        // the API takes a size in BYTES, under-allocating each buffer by a
        // factor of the element size.  All sizes now carry sizeof(...).
        checkCudaErrors(cudaMallocManaged(&phase->poly_arch[gpu_index],phase->polymer_type_number*sizeof(*phase->poly_arch[gpu_index])));
        if(gpu_index==0){
            // GPU 0: parse the architecture from the file.
            for(int polymer_index=0;polymer_index<phase->polymer_type_number;polymer_index++){
                int temp;
                fscanf(dp,"Polymer type: %d\n",&temp);
                fscanf(dp,"Polymer length: %d\n",&phase->poly_arch[gpu_index][polymer_index].poly_length);
                fscanf(dp,"Polymer length unit: %d\n",&phase->reference_Nbeads);
                int poly_length=phase->poly_arch[gpu_index][polymer_index].poly_length;
                checkCudaErrors(cudaMallocManaged(&phase->poly_arch[gpu_index][polymer_index].Monotype,poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].Monotype)));
                fscanf(dp,"Monomer type: ");
                for(int i=0;i<poly_length;i++){
                    fscanf(dp,"%d ",&phase->poly_arch[gpu_index][polymer_index].Monotype[i]);
                }
                fscanf(dp,"\n");
                // Connectivity matrix, stored column-major: connection[i + j*poly_length].
                checkCudaErrors(cudaMallocManaged(&phase->poly_arch[gpu_index][polymer_index].connection,poly_length*poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].connection)));
                for(int i=0;i<poly_length;i++){
                    for(int j=0;j<poly_length;j++){
                        fscanf(dp,"%d ",&phase->poly_arch[gpu_index][polymer_index].connection[i+j*poly_length]);
                    }//! end for j
                    fscanf(dp,"\n");
                }//! end for i
                checkCudaErrors(cudaMallocManaged(&phase->poly_arch[gpu_index][polymer_index].neigh_num,poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].neigh_num)));
                checkCudaErrors(cudaMallocManaged(&phase->poly_arch[gpu_index][polymer_index].conection_list,poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].conection_list)));
                for(int i=0;i<poly_length;i++){
                    // Count bead i's bonds first ...
                    phase->poly_arch[gpu_index][polymer_index].neigh_num[i]=0;
                    for(int j=0;j<poly_length;j++){
                        if(phase->poly_arch[gpu_index][polymer_index].connection[i+j*poly_length]==1){
                            phase->poly_arch[gpu_index][polymer_index].neigh_num[i]++;
                        }
                    }
                    // ... then allocate its adjacency list ONCE and fill it.
                    // BUGFIX: this allocation used to sit inside the counting
                    // loop, leaking poly_length-1 buffers per bead and sizing
                    // the final one from a not-yet-final neigh_num[i].
                    checkCudaErrors(cudaMallocManaged(&phase->poly_arch[gpu_index][polymer_index].conection_list[i],phase->poly_arch[gpu_index][polymer_index].neigh_num[i]*sizeof(*phase->poly_arch[gpu_index][polymer_index].conection_list[i])));
                    int list_pos=0;
                    for(int j=0;j<poly_length;j++){
                        if(phase->poly_arch[gpu_index][polymer_index].connection[i+j*poly_length]==1)
                            phase->poly_arch[gpu_index][polymer_index].conection_list[i][list_pos++]=j;
                    }
                }//!< for loop i
            }//! end for polymer index
            // Derived global / per-node / per-GPU bead counts.
            phase->num_all_beads=0;
            phase->num_all_beads_per_node=0;
            phase->num_all_beads_per_gpu=0;
            phase->num_bead_polymer_type=(unsigned int *)malloc(sizeof(unsigned int)*phase->polymer_type_number);
            phase->num_bead_polymer_type_per_node=(unsigned int *)malloc(sizeof(unsigned int)*phase->polymer_type_number);
            phase->num_bead_polymer_type_per_gpu=(unsigned int *)malloc(sizeof(unsigned int)*phase->polymer_type_number);
            // NOTE(review): phase->n_mono_types is read from configuration.dat
            // in initialize_values(), which init_all_config() calls AFTER this
            // function -- these three sizes rely on n_mono_types already being
            // valid.  Verify the initialization order / input source.
            phase->num_bead_type=(unsigned int *)malloc(sizeof(unsigned int)*phase->n_mono_types);
            phase->num_bead_type_per_node=(unsigned int *)malloc(sizeof(unsigned int)*phase->n_mono_types);
            phase->num_bead_type_per_gpu=(unsigned int *)malloc(sizeof(unsigned int)*phase->n_mono_types);
            for(int polymer_index=0;polymer_index<phase->polymer_type_number;polymer_index++){
                int length=phase->poly_arch[gpu_index][polymer_index].poly_length;
                phase->num_bead_polymer_type[polymer_index]=length*phase->n_polymer_type[polymer_index];
                phase->num_bead_polymer_type_per_node[polymer_index]=length*phase->n_polymers_type_per_node[polymer_index];
                phase->num_bead_polymer_type_per_gpu[polymer_index]=length*phase->n_polymers_type_per_gpu[polymer_index];
                phase->num_all_beads+=length*phase->n_polymer_type[polymer_index];
                phase->num_all_beads_per_node+=length*phase->n_polymers_type_per_node[polymer_index];
                phase->num_all_beads_per_gpu+=length*phase->n_polymers_type_per_gpu[polymer_index];
            }
        }// ! end if (gpu_index==0)
        else{
            // Remaining GPUs: replicate the architecture parsed on GPU 0.
            for(int polymer_index=0;polymer_index<phase->polymer_type_number;polymer_index++){
                phase->poly_arch[gpu_index][polymer_index].polymer_type_index=phase->poly_arch[0][polymer_index].polymer_type_index;
                phase->poly_arch[gpu_index][polymer_index].poly_length=phase->poly_arch[0][polymer_index].poly_length;
                int poly_length=phase->poly_arch[gpu_index][polymer_index].poly_length;
                checkCudaErrors(cudaMallocManaged(&phase->poly_arch[gpu_index][polymer_index].Monotype,poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].Monotype)));
                for(int i=0;i<poly_length;i++){
                    phase->poly_arch[gpu_index][polymer_index].Monotype[i]=phase->poly_arch[0][polymer_index].Monotype[i];
                }
                checkCudaErrors(cudaMallocManaged(&phase->poly_arch[gpu_index][polymer_index].connection,poly_length*poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].connection)));
                for(int i=0;i<poly_length;i++){
                    for(int j=0;j<poly_length;j++){
                        phase->poly_arch[gpu_index][polymer_index].connection[i+j*poly_length]=phase->poly_arch[0][polymer_index].connection[i+j*poly_length];
                    }//! end for j
                }//! end for i
                checkCudaErrors(cudaMallocManaged(&phase->poly_arch[gpu_index][polymer_index].neigh_num,poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].neigh_num)));
                checkCudaErrors(cudaMallocManaged(&phase->poly_arch[gpu_index][polymer_index].conection_list,poly_length*sizeof(*phase->poly_arch[gpu_index][polymer_index].conection_list)));
                for(int i=0;i<poly_length;i++){
                    phase->poly_arch[gpu_index][polymer_index].neigh_num[i]=0;
                    for(int j=0;j<poly_length;j++){
                        if(phase->poly_arch[gpu_index][polymer_index].connection[i+j*poly_length]==1){
                            phase->poly_arch[gpu_index][polymer_index].neigh_num[i]++;
                        }
                    }
                    // Same count-then-allocate-once fix as in the GPU 0 branch.
                    checkCudaErrors(cudaMallocManaged(&phase->poly_arch[gpu_index][polymer_index].conection_list[i],phase->poly_arch[gpu_index][polymer_index].neigh_num[i]*sizeof(*phase->poly_arch[gpu_index][polymer_index].conection_list[i])));
                    int list_pos=0;
                    for(int j=0;j<poly_length;j++){
                        if(phase->poly_arch[gpu_index][polymer_index].connection[i+j*poly_length]==1)
                            phase->poly_arch[gpu_index][polymer_index].conection_list[i][list_pos++]=j;
                    }
                }//!< for loop i
            }// end for polymer
        }// end if gpu_index==0
    }
    fclose(dp);   // BUGFIX: polymer.dat was never closed (handle leak)
}
extern void initialize_values(GPU_info *gpu_info,Phase *phase){
phase->start_clock = time(NULL);
phase->n_accepts = 0;
phase->n_moves =0;
phase->GPU_N=gpu_info->GPU_N;
phase->gridNx=gpu_info->gridNx;
phase->gridNy=gpu_info->gridNy;
phase->gridNz=gpu_info->gridNz;
phase->polymerNx=gpu_info->polymerNx;
phase->polymerNy=gpu_info->polymerNy;
phase->polymerNz=gpu_info->polymerNz;
phase->ana_info.delta_mc_Re=10;
phase->ana_info.filename = (char*)malloc( 200*sizeof(char) );
sprintf(phase->ana_info.filename,"Re.dat");
// Reference Harmonic Spring Cste
for(int i=0;i<gpu_info->GPU_N;i++)
for(int j=0;j<phase->polymer_type_number;j++)
phase->poly_arch[i][j].reference_Nbeads=phase->reference_Nbeads;
phase->harmonic_spring_Cste =1.0 / sqrt(3.0 * (phase->reference_Nbeads - 1.0));
//Reference energy scale for harmonic springs.
phase->harmonic_normb =1.0 / (2.0 * phase->harmonic_spring_Cste * phase->harmonic_spring_Cste);
FILE *dp;
dp=fopen("configuration.dat","r");
if(dp==NULL) {printf("Empty pointer allocate at line number %d in file %s\n", __LINE__, __FILE__);exit(0);}
fscanf(dp,"Monomer type number: %d\n",&phase->n_mono_types);
phase->xn=(double **)malloc(sizeof(double *)*phase->n_mono_types);
for(int i=0;i<phase->n_mono_types;i++) phase->xn[i]=(double *)malloc(sizeof(double )*phase->n_mono_types);
for(int j=0;j<phase->n_mono_types;j++){
for(int i=0;i<phase->n_mono_types;i++) {
fscanf(dp,"%lg ",&phase->xn[i][j]);
}
fscanf(dp,"\n");
}
fscanf(dp,"D0 = %lg; LY = %lg; LZ = %lg;\n",&phase->Lx,&phase->Ly,&phase->Lz);
fscanf(dp,"N = %d %d %d \n",&phase->nx,&phase->ny,&phase->nz);
phase->n_cells = phase->nx * phase->ny * phase->nz;
fscanf(dp,"time = %d;\n",&phase->time);
//phase->start_time = phase->time;
fscanf(dp,"xiN = %lg;\n",&phase->xiN);
fscanf(dp,"dt*N/xi = %lg;\n",&phase->dtNxi);
fscanf(dp,"kT = %lg;\n",&phase->kT);
//fscanf(dp,"TCHECK = %d;\n",&phase->Tcheck);
//fscanf(dp,"TWRITE = %d\n;",&phase->Twrite);
//MPI_Allreduce(&(p->n_polymers), &n_polymers_global_sum, 1,MPI_UNSIGNED, MPI_SUM, info_MPI->SOMA_MPI_Comm);
//assert(p->n_polymers_global == n_polymers_global_sum);
phase->area51.resize(gpu_info->GPU_N);
phase->phase_info_gpu.resize(gpu_info->GPU_N);
phase->external_field_unified.resize(gpu_info->GPU_N);
phase->umbrella_field_unified.resize(gpu_info->GPU_N);
phase->average_field_unified.resize(gpu_info->GPU_N);
phase->temp_average_field_unified.resize(gpu_info->GPU_N);
phase->MaxThreadDensity=factor_decompose_1024(gpu_info,phase->nx*phase->ny*phase->nz);
phase->MaxThreadPolymer=factor_decompose_1024(gpu_info,phase->n_polymers_per_gpu);
//printf("phase->MaxThreadDensity=%d phase->MaxThreadPolymer=%d\n",phase->MaxThreadDensity,phase->MaxThreadPolymer);
for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){// gpu_info->GPU_N
checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index]));
phase->area51[gpu_index]=NULL;
phase->external_field_unified[gpu_index]=NULL;
phase->umbrella_field_unified[gpu_index]=NULL;
phase->average_field_unified[gpu_index]=NULL;
phase->temp_average_field_unified[gpu_index]=NULL;
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]),sizeof(Phase_info_gpu)));
phase->phase_info_gpu[gpu_index]->nx=phase->nx;
phase->phase_info_gpu[gpu_index]->ny=phase->ny;
phase->phase_info_gpu[gpu_index]->nz=phase->nz;
phase->phase_info_gpu[gpu_index]->n_cells=phase->nx*phase->ny*phase->nz;
//printf("%d: %d %d %d\n",phase->phase_info_gpu[gpu_index]->n_cells,phase->phase_info_gpu[gpu_index]->nx,phase->phase_info_gpu[gpu_index]->ny,phase->phase_info_gpu[gpu_index]->nz);
phase->phase_info_gpu[gpu_index]->Lx=phase->Lx;
phase->phase_info_gpu[gpu_index]->Ly=phase->Ly;
phase->phase_info_gpu[gpu_index]->Lz=phase->Lz;
phase->phase_info_gpu[gpu_index]->iLx=1/phase->Lx;
phase->phase_info_gpu[gpu_index]->iLy=1/phase->Ly;
phase->phase_info_gpu[gpu_index]->iLz=1/phase->Lz;
phase->phase_info_gpu[gpu_index]->dx=phase->Lx/phase->nx;
phase->phase_info_gpu[gpu_index]->dy=phase->Ly/phase->ny;
phase->phase_info_gpu[gpu_index]->dz=phase->Lz/phase->nz;
phase->phase_info_gpu[gpu_index]->polymer_type_number=phase->polymer_type_number;
phase->phase_info_gpu[gpu_index]->n_polymers=phase->n_polymers;
phase->phase_info_gpu[gpu_index]->n_polymers_per_node=phase->n_polymers_per_node;
phase->phase_info_gpu[gpu_index]->n_polymers_per_gpu=phase->n_polymers_per_gpu;
phase->phase_info_gpu[gpu_index]->n_mono_types=phase->n_mono_types;
phase->phase_info_gpu[gpu_index]->num_all_beads=phase->num_all_beads;
phase->phase_info_gpu[gpu_index]->num_all_beads_per_node=phase->num_all_beads_per_node;
phase->phase_info_gpu[gpu_index]->num_all_beads_per_gpu=phase->num_all_beads_per_gpu;
phase->phase_info_gpu[gpu_index]->MaxThreadDensity=phase->MaxThreadDensity;
phase->phase_info_gpu[gpu_index]->MaxThreadPolymer=phase->MaxThreadPolymer;
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->n_polymer_type),sizeof(unsigned int)*phase->polymer_type_number));
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->n_polymers_type_per_node),sizeof(unsigned int)*phase->polymer_type_number));
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->n_polymers_type_per_gpu),sizeof(unsigned int)*phase->polymer_type_number));
for(int i=0;i<phase->polymer_type_number;i++){
phase->phase_info_gpu[gpu_index]->n_polymer_type[i]=phase->n_polymer_type[i];
phase->phase_info_gpu[gpu_index]->n_polymers_type_per_node[i]=phase->n_polymers_type_per_node[i];
phase->phase_info_gpu[gpu_index]->n_polymers_type_per_gpu[i]=phase->n_polymers_type_per_gpu[i];
}
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->num_bead_type),sizeof(unsigned int)*phase->n_mono_types));
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->num_bead_type_per_node),sizeof(unsigned int)*phase->n_mono_types));
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->num_bead_type_per_gpu),sizeof(unsigned int)*phase->n_mono_types));
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->num_bead_polymer_type),sizeof(unsigned int)*phase->polymer_type_number));
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->num_bead_polymer_type_per_node),sizeof(unsigned int)*phase->polymer_type_number));
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->num_bead_polymer_type_per_gpu),sizeof(unsigned int)*phase->polymer_type_number));
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->polymer_basis_gpu),sizeof(unsigned int)*(phase->polymer_type_number+1)));
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->monomer_poly_basis_gpu),sizeof(unsigned int)*(phase->polymer_type_number+1)));
phase->phase_info_gpu[gpu_index]->polymer_basis_gpu[0]=0;
phase->phase_info_gpu[gpu_index]->monomer_poly_basis_gpu[0]=0;
for(int i=0;i<phase->polymer_type_number;i++){
phase->phase_info_gpu[gpu_index]->num_bead_polymer_type[i]=phase->num_bead_polymer_type[i];
phase->phase_info_gpu[gpu_index]->num_bead_polymer_type_per_node[i]=phase->num_bead_polymer_type_per_node[i];
phase->phase_info_gpu[gpu_index]->num_bead_polymer_type_per_gpu[i]=phase->num_bead_polymer_type_per_gpu[i];
phase->phase_info_gpu[gpu_index]->monomer_poly_basis_gpu[i+1]=phase->phase_info_gpu[gpu_index]->monomer_poly_basis_gpu[i]+phase->num_bead_polymer_type_per_gpu[i];
phase->phase_info_gpu[gpu_index]->polymer_basis_gpu[i+1]=phase->phase_info_gpu[gpu_index]->polymer_basis_gpu[i]+phase->n_polymers_type_per_gpu[i];
}
for(int polymer_index=0; polymer_index<phase->polymer_type_number;polymer_index++){
checkCudaErrors(cudaMallocManaged(&phase->poly_arch[gpu_index][polymer_index].mono_type_length,phase->n_mono_types));
for(int i=0;i<phase->n_mono_types;i++) phase->poly_arch[gpu_index][polymer_index].mono_type_length[i]=0;
for(int i=0;i<phase->poly_arch[gpu_index][polymer_index].poly_length;i++) phase->poly_arch[gpu_index][polymer_index].mono_type_length[phase->poly_arch[gpu_index][polymer_index].Monotype[i]]++;
}
for(int i=0;i<phase->n_mono_types;i++){
phase->num_bead_type[i]=0;
phase->num_bead_type_per_node[i]=0;
phase->num_bead_type_per_gpu[i]=0;
for(int j=0;j<phase->polymer_type_number;j++){
phase->num_bead_type[i]+=phase->poly_arch[gpu_index][j].mono_type_length[i]*phase->n_polymer_type[j];
phase->num_bead_type_per_node[i]+=phase->poly_arch[gpu_index][j].mono_type_length[i]*phase->n_polymers_type_per_node[j];
phase->num_bead_type_per_gpu[i]+=phase->poly_arch[gpu_index][j].mono_type_length[i]*phase->n_polymers_type_per_gpu[j];
}
phase->phase_info_gpu[gpu_index]->num_bead_type[i]=phase->num_bead_type[i];
phase->phase_info_gpu[gpu_index]->num_bead_type_per_node[i]=phase->num_bead_type_per_node[i];
phase->phase_info_gpu[gpu_index]->num_bead_type_per_gpu[i]=phase->num_bead_type_per_gpu[i];
}
//printf("%d %d %d %d %d\n",phase->num_bead_type_per_node[0],phase->num_bead_type_per_node[1],phase->n_polymers_type_per_node[0],phase->n_polymers_type_per_node[1],phase->n_polymers_type_per_node[2]);
phase->phase_info_gpu[gpu_index]->reference_Nbeads=phase->reference_Nbeads;
phase->phase_info_gpu[gpu_index]->inverse_refbeads=1/(double)phase->reference_Nbeads;
phase->phase_info_gpu[gpu_index]->harmonic_normb=phase->harmonic_normb;
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->xn),sizeof(double)*phase->n_mono_types*phase->n_mono_types));
checkCudaErrors(cudaMallocManaged((void**)&(phase->phase_info_gpu[gpu_index]->field_scaling_type),sizeof(double)*phase->n_mono_types));
for(int t_type=0;t_type<phase->n_mono_types;t_type++)
for(int s_type=0;s_type<phase->n_mono_types;s_type++)
phase->phase_info_gpu[gpu_index]->xn[t_type+phase->n_mono_types*s_type]=phase->xn[t_type][s_type];
}
// Max safe move distance
phase->max_safe_jump = phase->Lx/phase->nx < phase->Ly / phase->ny ? phase->Lx/phase->nx : phase->Ly / phase->ny;
phase->max_safe_jump = phase->max_safe_jump < phase->Lz / phase->nz ? phase->max_safe_jump : phase->Lz/phase->nz;
phase->max_safe_jump *= 0.95;
int Nx,Ny,Nz;
factor_decompose(gpu_info,phase->n_cells/phase->MaxThreadDensity,&Nx,&Ny,&Nz);
gpu_info->gridNx=Nx;
gpu_info->gridNy=Ny*Nz;
gpu_info->gridNz=phase->n_mono_types;
phase->gridNx=gpu_info->gridNx;
phase->gridNy=gpu_info->gridNy;
phase->gridNz=gpu_info->gridNz;
factor_decompose(gpu_info,phase->n_polymers_per_gpu/phase->MaxThreadPolymer,&Nx,&Ny,&Nz);
gpu_info->polymerNx=Nx;
gpu_info->polymerNy=Ny;
gpu_info->polymerNz=Nz;
phase->polymerNx=gpu_info->polymerNx;
phase->polymerNy=gpu_info->polymerNy;
phase->polymerNz=gpu_info->polymerNz;
//printf("%d %d %d %d\n",phase->MaxThreadDensity,gpu_info->gridNx,gpu_info->gridNy,gpu_info->gridNz);
//printf("%d %d %d %d\n",phase->MaxThreadPolymer,gpu_info->polymerNx,gpu_info->polymerNy,gpu_info->polymerNz);
fclose(dp);
//---------------------------test--------------------------
/*
printf("D0 = %lg; LY = %lg; LZ = %lg;\n",p->Lx,p->Ly,p->Lz);
printf("N = %d %d %d \n",p->nx,p->ny,p->nz);
printf("time = %d\n",p->time);
printf("dt*N/xi = %lg\n",p->dtNxi);
printf("kT = %lg\n",p->kT);
printf("TCHECK = %d\n",p->Tcheck);
printf("TWRITE = %d\n",p->Twrite);
for(int j=0;j<p->n_mono_types;j++){
for(int i=0;i<p->n_mono_types;i++) {
printf("%g ",p->xn[i][j]);
}
printf("\n");
}
*/
}
/**
 * Allocate per-GPU managed buffers (bead positions, density fields, cuRAND
 * states) and fill the per-monomer-type scaling and step-size tables on both
 * host and device.
 *
 * Returns 1 on success, -1 if a host-side malloc fails.
 */
extern int initialize_structure_GPU(GPU_info *gpu_info,Phase *phase){
    phase->pos.resize(gpu_info->GPU_N);
    phase->fields_unified.resize(gpu_info->GPU_N);
    phase->fields_32.resize(gpu_info->GPU_N);
    phase->omega_field_unified.resize(gpu_info->GPU_N);
    phase->temp_average_field_unified.resize(gpu_info->GPU_N);
    phase->average_field_unified.resize(gpu_info->GPU_N);
    for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
        checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index]));
        // Bead positions: 3 floats (x,y,z) per bead handled by this GPU.
        checkCudaErrors(cudaMallocManaged(&(phase->pos[gpu_index]),sizeof(float)*phase->num_all_beads_per_gpu*3));
        // One Philox RNG state per polymer chain.
        checkCudaErrors(cudaMallocManaged(&(gpu_info->state[gpu_index]),sizeof(curandStatePhilox4_32_10_t)*phase->n_polymers_per_gpu));
        // Per-cell, per-monomer-type field buffers.
        checkCudaErrors(cudaMallocManaged(&(phase->fields_unified[gpu_index]),phase->n_cells*phase->n_mono_types*sizeof(int)));
        checkCudaErrors(cudaMallocManaged(&(phase->fields_32[gpu_index]),phase->n_cells*phase->n_mono_types*sizeof(uint32_t)));
        checkCudaErrors(cudaMallocManaged(&(phase->omega_field_unified[gpu_index]),phase->n_mono_types*phase->n_cells*sizeof(double)));
        // BUGFIX: was n_mono_types*n_mono_types ints, inconsistent with the
        // parallel average_field_unified buffer below. Sized per-cell now so a
        // whole field fits; this only enlarges the allocation, so no existing
        // access can break.
        checkCudaErrors(cudaMallocManaged(&(phase->temp_average_field_unified[gpu_index]),phase->n_cells*phase->n_mono_types*sizeof(int)));
        checkCudaErrors(cudaMallocManaged(&(phase->average_field_unified[gpu_index]),phase->n_cells*phase->n_mono_types*sizeof(int)));
    }
    // Field scaling: number of grid cells per bead (identical for every type).
    phase->field_scaling_type = (double *) malloc(phase->n_mono_types * sizeof(double));
    if (phase->field_scaling_type == NULL) {
        fprintf(stderr, "ERROR: Malloc %s:%d\n", __FILE__, __LINE__);
        return -1;
    }
    long ncells = phase->n_cells;
    for (unsigned int i = 0; i < phase->n_mono_types; i++)
        phase->field_scaling_type[i] =(ncells / ((double) phase->num_all_beads));
    for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
        for(int i=0;i<phase->n_mono_types;i++)
            phase->phase_info_gpu[gpu_index]->field_scaling_type[i]=phase->field_scaling_type[i];
    }
    // Per-type step amplitude A = (dt*N/xi)/N_ref and R = sqrt(2*A).
    phase->R = (double*) malloc( phase->n_mono_types * sizeof(double));
    phase->A = (double*)malloc( phase->n_mono_types * sizeof(double));
    if(phase->R == NULL || phase->A == NULL){ // BUGFIX: phase->A was never checked
        fprintf(stderr, "ERROR: By malloc TT800 , %s %d ",__FILE__, __LINE__ );
        return -1;
    }
    for (unsigned int i = 0; i < phase->n_mono_types; i++){
        //! \todo kBT required.
        phase->A[i] =phase->dtNxi/ phase->reference_Nbeads;
        phase->R[i] = sqrt( phase->A[i]* 2);
    }
    phase->n_accepts = 0;
    phase->n_moves = 0;
    for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
        checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index]));
        // BUGFIX: both sizes were missing the element size (only n_mono_types
        // *bytes* were allocated while n_mono_types doubles are written below).
        // sizeof(double) mirrors the host arrays; confirm against the A/R
        // member types declared in Phase_info_gpu.
        checkCudaErrors(cudaMallocManaged(&(phase->phase_info_gpu[gpu_index]->A),phase->n_mono_types*sizeof(double)));
        checkCudaErrors(cudaMallocManaged(&(phase->phase_info_gpu[gpu_index]->R),phase->n_mono_types*sizeof(double)));
        phase->phase_info_gpu[gpu_index]->max_safe_jump=phase->max_safe_jump;
        for (unsigned int i = 0; i < phase->n_mono_types; i++){
            phase->phase_info_gpu[gpu_index]->A[i]=phase->A[i];
            phase->phase_info_gpu[gpu_index]->R[i]=phase->R[i];
        }
    }
    // Precompute inverse box lengths and grid spacings.
    phase->iLx = 1.0/phase->Lx;
    phase->iLy = 1.0/phase->Ly;
    phase->iLz = 1.0/phase->Lz;
    phase->dx=phase->Lx/phase->nx;
    phase->dy=phase->Ly/phase->ny;
    phase->dz=phase->Lz/phase->nz;
    phase->max_set_members = 0;
    return 1;
}
/* Seed one cuRAND Philox state per polymer on every GPU.
 * The seed is taken from the wall clock (set use_fixed_seed to 1 for a
 * reproducible run); setup_curand also receives the GPU index and the node
 * rank via phase_info_gpu. Returns 0. */
extern int initialize_random_generator(MPI_info *mpi_info,GPU_info *gpu_info,Phase *phase){
    const int use_fixed_seed = 0;
    const unsigned int seed = use_fixed_seed ? 1u : (unsigned int)time(NULL);
    for(int g = 0; g < gpu_info->GPU_N; ++g){
        checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[g]));
        phase->phase_info_gpu[g]->current_node = mpi_info->current_node;
        phase->phase_info_gpu[g]->total_nodes  = mpi_info->total_nodes;
        const dim3 grid(gpu_info->polymerNx, gpu_info->polymerNy, gpu_info->polymerNz);
        setup_curand<<<grid, phase->MaxThreadPolymer, 0, gpu_info->stream[g]>>>(
            phase->phase_info_gpu[g], g, seed, gpu_info->state[g]);
        checkCudaErrors(cudaDeviceSynchronize());
    }
    return 0;
}
/**
 * Produce the initial bead coordinates on every GPU.
 *
 * phase->read_file == 0: coordinates are generated on-device by the
 * initialize_coord kernel (one launch per GPU, then a synchronization pass).
 * phase->read_file == 1: the kernel still runs first (so every position is
 * defined even if the file is short), then coordinates are overwritten from
 * "coord.dat", skipping the beads owned by lower-ranked MPI nodes.
 */
extern void Generate_init_coord(MPI_info *mpi_info,GPU_info *gpu_info,Phase *phase){
    int read_file=phase->read_file;
    if(read_file==0){
        // Launch on every device first so the kernels overlap across GPUs...
        for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
            checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index]));
            dim3 grid(gpu_info->polymerNx,gpu_info->polymerNy,gpu_info->polymerNz);
            initialize_coord<<<grid,phase->MaxThreadPolymer,0,gpu_info->stream[gpu_index]>>>(phase->pos[gpu_index],phase->phase_info_gpu[gpu_index],phase->poly_arch[gpu_index],gpu_info->state[gpu_index]);
        }
        // ...then wait for all of them.
        for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
            checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index]));
            checkCudaErrors( cudaDeviceSynchronize());
        }
    }
    else if(read_file==1){
        FILE *dp;
        for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
            checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index]));
            dim3 grid(gpu_info->polymerNx,gpu_info->polymerNy,gpu_info->polymerNz);
            initialize_coord<<<grid,phase->MaxThreadPolymer,0,gpu_info->stream[gpu_index]>>>(phase->pos[gpu_index],phase->phase_info_gpu[gpu_index],phase->poly_arch[gpu_index],gpu_info->state[gpu_index]);
            checkCudaErrors( cudaDeviceSynchronize());
        }
        dp=fopen("coord.dat","r");
        if(dp==NULL) {printf("coord.dat did not exit ^_^, program smartly change config to generate a random coord.\n");return;}
        // Skip the beads that belong to nodes ranked below this one.
        int cpu_index=mpi_info->current_node;
        int move_to=cpu_index*phase->num_all_beads_per_node;
        double a,b,c;
        // BUGFIX: the format string has three conversions but the old calls
        // passed a spurious fourth argument (&d). The unused int is gone and
        // fscanf's return value is now checked, so a truncated file no longer
        // silently fills positions with stale values.
        for(int i=0;i<move_to;i++){
            if(fscanf(dp,"%lg %lg %lg\n",&a,&b,&c)!=3){
                printf("coord.dat ended early while skipping to this node's offset; keeping random coordinates.\n");
                fclose(dp);
                return;
            }
        }
        for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
            for(int i=0;i<phase->num_all_beads_per_gpu;i++){
                if(fscanf(dp,"%lg %lg %lg\n",&a,&b,&c)!=3){
                    printf("coord.dat ended early; remaining beads keep their random coordinates.\n");
                    fclose(dp);
                    return;
                }
                phase->pos[gpu_index][i*3]=a;
                phase->pos[gpu_index][i*3+1]=b;
                phase->pos[gpu_index][i*3+2]=c;
            }
        }
        fclose(dp);
    }
}
/**
 * One-stop initialization entry point.
 * The call order is load-bearing: each step consumes state populated by the
 * previous one (devices before the config read, Phase values before GPU
 * buffers, buffers before RNG seeding, RNG before coordinate generation).
 */
extern void init_all_config(GPU_info *gpu_info,Phase *phase, MPI_info *mpi_info,int argc, char **argv){
// Parse command-line arguments / SCMF setup.
init_scmf(phase,gpu_info,argc, argv);
// Select and initialize the CUDA device(s).
init_cuda(mpi_info,gpu_info,0);
// Read the polymer architecture/topology.
Read_polymer_config(mpi_info,gpu_info,phase);
// Read simulation parameters and fill the per-GPU Phase_info_gpu structs.
initialize_values(gpu_info,phase);
// Allocate managed buffers (positions, fields, RNG states).
initialize_structure_GPU(gpu_info,phase);
// Seed one cuRAND state per polymer on every GPU.
initialize_random_generator(mpi_info,gpu_info,phase);
// Random coordinates, or read from coord.dat, per phase->read_file.
Generate_init_coord(mpi_info, gpu_info,phase);
//printf("density %d %d %d %d\n",phase->gridNx,phase->gridNy,phase->gridNz,phase->MaxThreadDensity);
//printf("polymer %d %d %d %d\n",phase->polymerNx,phase->polymerNy,phase->polymerNz,phase->MaxThreadPolymer);
}
|
296272a286e4a73e9cdb00980ae5d784d016f50b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <time.h>
#include "vec3.h"
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
// Abort with a diagnostic when a HIP runtime call fails.
// `func` is the stringified call site supplied by the checkCudaErrors macro.
void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) {
    if (!result) {
        return; // success
    }
    std::cerr << "CUDA error = " << static_cast<unsigned int>(result)
              << " at " << file << ":" << line << " '" << func << "' \n";
    // Reset the device before exiting so driver state is cleaned up.
    hipDeviceReset();
    exit(99);
}
// Fill framebuffer fb (row-major, max_x * max_y entries) with a gradient:
// red ramps with x, green with y, blue is fixed at 0.2.
// Expects a 2D launch; threads beyond the image bounds exit early.
__global__ void render(vec3 *fb, int max_x, int max_y) {
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= max_x || py >= max_y)
        return; // guard the grid tail
    fb[py * max_x + px] = vec3(float(px) / max_x, float(py) / max_y, 0.2f);
}
// Render a width x height gradient image and write it to stdout as a plain
// PPM (P3); progress and timing go to stderr.
int main() {
    const int width = 1200;
    const int height = 600;
    const int block_w = 8;  // thread-block dimensions
    const int block_h = 8;
    std::cerr << "Rendering a " << width << "x" << height << " image ";
    std::cerr << "in " << block_w << "x" << block_h << " blocks.\n";
    const int num_pixels = width * height;
    const size_t fb_size = num_pixels * sizeof(vec3);
    // Framebuffer in managed memory: written by the kernel, read back on the
    // host after the synchronization below.
    vec3 *fb;
    checkCudaErrors(hipMallocManaged((void **)&fb, fb_size));
    clock_t start = clock();
    // One extra block per axis covers sizes not divisible by the block dims.
    dim3 blocks(width / block_w + 1, height / block_h + 1);
    dim3 threads(block_w, block_h);
    hipLaunchKernelGGL(render, blocks, threads, 0, 0, fb, width, height);
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
    clock_t stop = clock();
    double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
    std::cerr << "took " << timer_seconds << " seconds.\n";
    // Emit rows from the top of the image down (row index counts backwards).
    std::cout << "P3\n" << width << " " << height << "\n255\n";
    for (int row = height - 1; row >= 0; row--) {
        for (int col = 0; col < width; col++) {
            size_t idx = (size_t)row * width + col;
            std::cout << int(255.99 * fb[idx].r()) << " "
                      << int(255.99 * fb[idx].g()) << " "
                      << int(255.99 * fb[idx].b()) << "\n";
        }
    }
    checkCudaErrors(hipFree(fb));
}
| 296272a286e4a73e9cdb00980ae5d784d016f50b.cu | #include <iostream>
#include <time.h>
#include "vec3.h"
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
// Abort with a diagnostic when a CUDA runtime call fails.
// `func` is the stringified call site supplied by the checkCudaErrors macro.
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
    if (!result) {
        return; // success
    }
    std::cerr << "CUDA error = " << static_cast<unsigned int>(result)
              << " at " << file << ":" << line << " '" << func << "' \n";
    // Reset the device before exiting so driver state is cleaned up.
    cudaDeviceReset();
    exit(99);
}
// Fill framebuffer fb (row-major, max_x * max_y entries) with a gradient:
// red ramps with x, green with y, blue is fixed at 0.2.
// Expects a 2D launch; threads beyond the image bounds exit early.
__global__ void render(vec3 *fb, int max_x, int max_y) {
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= max_x || py >= max_y)
        return; // guard the grid tail
    fb[py * max_x + px] = vec3(float(px) / max_x, float(py) / max_y, 0.2f);
}
// Render a width x height gradient image and write it to stdout as a plain
// PPM (P3); progress and timing go to stderr.
int main() {
    const int width = 1200;
    const int height = 600;
    const int block_w = 8;  // thread-block dimensions
    const int block_h = 8;
    std::cerr << "Rendering a " << width << "x" << height << " image ";
    std::cerr << "in " << block_w << "x" << block_h << " blocks.\n";
    const int num_pixels = width * height;
    const size_t fb_size = num_pixels * sizeof(vec3);
    // Framebuffer in managed memory: written by the kernel, read back on the
    // host after the synchronization below.
    vec3 *fb;
    checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size));
    clock_t start = clock();
    // One extra block per axis covers sizes not divisible by the block dims.
    dim3 blocks(width / block_w + 1, height / block_h + 1);
    dim3 threads(block_w, block_h);
    render<<<blocks, threads>>>(fb, width, height);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());
    clock_t stop = clock();
    double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
    std::cerr << "took " << timer_seconds << " seconds.\n";
    // Emit rows from the top of the image down (row index counts backwards).
    std::cout << "P3\n" << width << " " << height << "\n255\n";
    for (int row = height - 1; row >= 0; row--) {
        for (int col = 0; col < width; col++) {
            size_t idx = (size_t)row * width + col;
            std::cout << int(255.99 * fb[idx].r()) << " "
                      << int(255.99 * fb[idx].g()) << " "
                      << int(255.99 * fb[idx].b()) << "\n";
        }
    }
    checkCudaErrors(cudaFree(fb));
}
|
2e6ddd68533a8a5d42e9aa2e082bd0a38cb7806f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "PSAkernel.h"
#include <vector>
// PSA forward, "collect" variant: expand the per-position attention masks
// into a dense pairwise buffer.
// Read layout (from the index expression): mask_data is
//   [N, mask_H_*mask_W_, feature_H_, feature_W_].
// Write layout: buffer_data is [N, H*W, H*W] with
//   buffer[n][other_cell][this_cell] = mask value of `other` relative to
//   (h, w), where other = (hidx + h - half_mask_H_, widx + w - half_mask_W_).
// One loop iteration per (n, h, w) position; nthreads must equal N*H*W.
// CUDA_KERNEL_LOOP (defined in PSAkernel.h, not shown) presumably
// grid-strides over [0, nthreads) -- confirm against that header.
template <typename Dtype>
__global__ void PSAForward_buffer_mask_collect_gpu(const int nthreads,
const int feature_H_, const int feature_W_,
const int mask_H_, const int mask_W_,
const int half_mask_H_, const int half_mask_W_,
const Dtype* mask_data, Dtype* buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat index into (n, h, w).
const int w = index % feature_W_;
const int h = (index / feature_W_) % feature_H_;
const int n = index / feature_W_ / feature_H_;
// effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
// (clipped so the referenced `other` position stays inside the feature map)
const int hstart = max(0, half_mask_H_ - h);
const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
const int wstart = max(0, half_mask_W_ - w);
const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
// (hidx, widx ) with mask-indexed
// (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
for (int hidx = hstart; hidx < hend; hidx++) {
for (int widx = wstart; widx < wend; widx++) {
// mask(n, hidx, widx, h, w) -> buffer(n, other_cell, this_cell)
buffer_data[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w] =
mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w];
}
}
}
}
// PSA forward, "distribute" variant: identical to the collect kernel except
// the pair axes are swapped on the write side:
//   buffer[n][this_cell][other_cell] = mask value of `other` relative to
//   (h, w).
// Layouts as in the collect kernel: mask_data is
// [N, mask_H_*mask_W_, feature_H_, feature_W_], buffer_data is [N, H*W, H*W].
template <typename Dtype>
__global__ void PSAForward_buffer_mask_distribute_gpu(const int nthreads,
const int feature_H_, const int feature_W_,
const int mask_H_, const int mask_W_,
const int half_mask_H_, const int half_mask_W_,
const Dtype* mask_data, Dtype* buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat index into (n, h, w).
const int w = index % feature_W_;
const int h = (index / feature_W_) % feature_H_;
const int n = index / feature_W_ / feature_H_;
// effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
// (clipped so the referenced `other` position stays inside the feature map)
const int hstart = max(0, half_mask_H_ - h);
const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
const int wstart = max(0, half_mask_W_ - w);
const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
// (hidx, widx ) with mask-indexed
// (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
// printf("hstart: %d hend: %d wstart: %d wend:%d\n",hstart,hend,wstart,wend);
for (int hidx = hstart; hidx < hend; hidx++) {
for (int widx = wstart; widx < wend; widx++) {
// mask(n, hidx, widx, h, w) -> buffer(n, this_cell, other_cell)
buffer_data[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)] =
mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w];
}
}
}
}
// Backward pass of the "collect" forward kernel: copies each gradient from
// the dense [N, H*W, H*W] buffer (buffer[n][other_cell][this_cell]) back to
// its source slot in the mask layout [N, mask_H_*mask_W_, H, W].
// Exact mirror of the forward indexing with read and write sides swapped.
template <typename Dtype>
__global__ void PSABackward_buffer_mask_collect_gpu(const int nthreads,
const int feature_H_, const int feature_W_,
const int mask_H_, const int mask_W_,
const int half_mask_H_, const int half_mask_W_,
const Dtype* buffer_diff, Dtype* mask_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat index into (n, h, w).
const int w = index % feature_W_;
const int h = (index / feature_W_) % feature_H_;
const int n = index / feature_W_ / feature_H_;
// effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
const int hstart = max(0, half_mask_H_ - h);
const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
const int wstart = max(0, half_mask_W_ - w);
const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
// (hidx, widx ) with mask-indexed
// (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
for (int hidx = hstart; hidx < hend; hidx++) {
for (int widx = wstart; widx < wend; widx++) {
// gradient w.r.t. mask(n, hidx, widx, h, w)
mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] =
buffer_diff[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w];
}
}
}
}
// Backward pass of the "distribute" forward kernel: copies each gradient
// from the dense [N, H*W, H*W] buffer (buffer[n][this_cell][other_cell])
// back to its source slot in the mask layout [N, mask_H_*mask_W_, H, W].
// Exact mirror of the forward indexing with read and write sides swapped.
template <typename Dtype>
__global__ void PSABackward_buffer_mask_distribute_gpu(const int nthreads,
const int feature_H_, const int feature_W_,
const int mask_H_, const int mask_W_,
const int half_mask_H_, const int half_mask_W_,
const Dtype* buffer_diff, Dtype* mask_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat index into (n, h, w).
const int w = index % feature_W_;
const int h = (index / feature_W_) % feature_H_;
const int n = index / feature_W_ / feature_H_;
// effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
const int hstart = max(0, half_mask_H_ - h);
const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
const int wstart = max(0, half_mask_W_ - w);
const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
// (hidx, widx ) with mask-indexed
// (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
for (int hidx = hstart; hidx < hend; hidx++) {
for (int widx = wstart; widx < wend; widx++) {
// gradient w.r.t. mask(n, hidx, widx, h, w)
mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] =
buffer_diff[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)];
}
}
}
}
/**
 * Launch the PSA forward kernel of the requested type on `stream`.
 * forward_type selects PSA_TYPE_COLLECT or PSA_TYPE_DISTRIBUTE; any other
 * value is reported and ignored. Mask dimensions are derived from the
 * feature map as (2*H-1, 2*W-1) so every relative offset is representable.
 * One thread is launched per (n, h, w) position (nthreads = num_ * H * W).
 */
void PSAkernel_Forward_Launcher(const float * bottom1, const int num_,
const int feature_H_,const int feature_W_,
float * mask_buffer_,const int forward_type,hipStream_t stream) {
    const int mask_H_ = 2 * feature_H_ - 1;
    const int mask_W_ = 2 * feature_W_ - 1;
    const int half_mask_H_ = (mask_H_ - 1) / 2;
    const int half_mask_W_ = (mask_W_ - 1) / 2;
    const int nthreads = num_ * feature_H_ * feature_W_;
    switch (forward_type) {
    case PSA_TYPE_COLLECT:
        hipLaunchKernelGGL(( PSAForward_buffer_mask_collect_gpu<float>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream,
            nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_,
            bottom1, mask_buffer_);
        break;
    case PSA_TYPE_DISTRIBUTE:
        hipLaunchKernelGGL(( PSAForward_buffer_mask_distribute_gpu<float>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream,
            nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_,
            bottom1, mask_buffer_);
        break;
    default:
        printf("Unknown PSA type.");
        return; // no launch happened, nothing to check
    }
    // BUGFIX: the error string used to be passed as the printf *format*
    // string; a '%' in the message would be undefined behavior. Print it as
    // data instead.
    hipError_t error = hipPeekAtLastError();
    if (error != hipSuccess) {
        printf("%s", hipGetErrorString(error));
    }
}
/**
 * Launch the PSA backward kernel of the requested type on `stream`:
 * gradients in mask_buffer_diff ([num_, H*W, H*W] buffer layout) are copied
 * back into bottom1_diff (mask layout [num_, mask_H_*mask_W_, H, W]).
 * forward_type selects PSA_TYPE_COLLECT or PSA_TYPE_DISTRIBUTE; any other
 * value is reported and ignored.
 */
void PSAkernel_Backward_Launcher(
float * bottom1_diff, const int num_,
const int feature_H_,const int feature_W_,
float * mask_buffer_diff, const int forward_type,hipStream_t stream) {
    const int nthreads = num_ * feature_H_ * feature_W_;
    const int mask_H_ = 2 * feature_H_ - 1;
    const int mask_W_ = 2 * feature_W_ - 1;
    const int half_mask_H_ = (mask_H_ - 1) / 2;
    const int half_mask_W_ = (mask_W_ - 1) / 2;
    switch (forward_type) {
    case PSA_TYPE_COLLECT:
        hipLaunchKernelGGL(( PSABackward_buffer_mask_collect_gpu<float>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream,
            nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_,
            mask_buffer_diff, bottom1_diff);
        break;
    case PSA_TYPE_DISTRIBUTE:
        hipLaunchKernelGGL(( PSABackward_buffer_mask_distribute_gpu<float>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream,
            nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_,
            mask_buffer_diff, bottom1_diff);
        break;
    default:
        printf("Unknown PSA type.");
        return; // no launch happened, nothing to check
    }
    // BUGFIX: the error string used to be passed as the printf *format*
    // string; a '%' in the message would be undefined behavior. Print it as
    // data instead.
    hipError_t error = hipPeekAtLastError();
    if (error != hipSuccess) {
        printf("%s", hipGetErrorString(error));
    }
} | 2e6ddd68533a8a5d42e9aa2e082bd0a38cb7806f.cu | #include <stdlib.h>
#include <stdio.h>
#include "PSAkernel.h"
#include <vector>
// PSA forward, "collect" variant: expand the per-position attention masks
// into a dense pairwise buffer.
// Read layout (from the index expression): mask_data is
//   [N, mask_H_*mask_W_, feature_H_, feature_W_].
// Write layout: buffer_data is [N, H*W, H*W] with
//   buffer[n][other_cell][this_cell] = mask value of `other` relative to
//   (h, w), where other = (hidx + h - half_mask_H_, widx + w - half_mask_W_).
// One loop iteration per (n, h, w) position; nthreads must equal N*H*W.
// CUDA_KERNEL_LOOP (defined in PSAkernel.h, not shown) presumably
// grid-strides over [0, nthreads) -- confirm against that header.
template <typename Dtype>
__global__ void PSAForward_buffer_mask_collect_gpu(const int nthreads,
const int feature_H_, const int feature_W_,
const int mask_H_, const int mask_W_,
const int half_mask_H_, const int half_mask_W_,
const Dtype* mask_data, Dtype* buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat index into (n, h, w).
const int w = index % feature_W_;
const int h = (index / feature_W_) % feature_H_;
const int n = index / feature_W_ / feature_H_;
// effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
// (clipped so the referenced `other` position stays inside the feature map)
const int hstart = max(0, half_mask_H_ - h);
const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
const int wstart = max(0, half_mask_W_ - w);
const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
// (hidx, widx ) with mask-indexed
// (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
for (int hidx = hstart; hidx < hend; hidx++) {
for (int widx = wstart; widx < wend; widx++) {
// mask(n, hidx, widx, h, w) -> buffer(n, other_cell, this_cell)
buffer_data[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w] =
mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w];
}
}
}
}
// PSA forward, "distribute" variant: identical to the collect kernel except
// the pair axes are swapped on the write side:
//   buffer[n][this_cell][other_cell] = mask value of `other` relative to
//   (h, w).
// Layouts as in the collect kernel: mask_data is
// [N, mask_H_*mask_W_, feature_H_, feature_W_], buffer_data is [N, H*W, H*W].
template <typename Dtype>
__global__ void PSAForward_buffer_mask_distribute_gpu(const int nthreads,
const int feature_H_, const int feature_W_,
const int mask_H_, const int mask_W_,
const int half_mask_H_, const int half_mask_W_,
const Dtype* mask_data, Dtype* buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat index into (n, h, w).
const int w = index % feature_W_;
const int h = (index / feature_W_) % feature_H_;
const int n = index / feature_W_ / feature_H_;
// effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
// (clipped so the referenced `other` position stays inside the feature map)
const int hstart = max(0, half_mask_H_ - h);
const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
const int wstart = max(0, half_mask_W_ - w);
const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
// (hidx, widx ) with mask-indexed
// (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
// printf("hstart: %d hend: %d wstart: %d wend:%d\n",hstart,hend,wstart,wend);
for (int hidx = hstart; hidx < hend; hidx++) {
for (int widx = wstart; widx < wend; widx++) {
// mask(n, hidx, widx, h, w) -> buffer(n, this_cell, other_cell)
buffer_data[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)] =
mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w];
}
}
}
}
// Backward pass of the "collect" forward kernel: copies each gradient from
// the dense [N, H*W, H*W] buffer (buffer[n][other_cell][this_cell]) back to
// its source slot in the mask layout [N, mask_H_*mask_W_, H, W].
// Exact mirror of the forward indexing with read and write sides swapped.
template <typename Dtype>
__global__ void PSABackward_buffer_mask_collect_gpu(const int nthreads,
const int feature_H_, const int feature_W_,
const int mask_H_, const int mask_W_,
const int half_mask_H_, const int half_mask_W_,
const Dtype* buffer_diff, Dtype* mask_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat index into (n, h, w).
const int w = index % feature_W_;
const int h = (index / feature_W_) % feature_H_;
const int n = index / feature_W_ / feature_H_;
// effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
const int hstart = max(0, half_mask_H_ - h);
const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
const int wstart = max(0, half_mask_W_ - w);
const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
// (hidx, widx ) with mask-indexed
// (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
for (int hidx = hstart; hidx < hend; hidx++) {
for (int widx = wstart; widx < wend; widx++) {
// gradient w.r.t. mask(n, hidx, widx, h, w)
mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] =
buffer_diff[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w];
}
}
}
}
// Backward pass of the "distribute" forward kernel: copies each gradient
// from the dense [N, H*W, H*W] buffer (buffer[n][this_cell][other_cell])
// back to its source slot in the mask layout [N, mask_H_*mask_W_, H, W].
// Exact mirror of the forward indexing with read and write sides swapped.
template <typename Dtype>
__global__ void PSABackward_buffer_mask_distribute_gpu(const int nthreads,
const int feature_H_, const int feature_W_,
const int mask_H_, const int mask_W_,
const int half_mask_H_, const int half_mask_W_,
const Dtype* buffer_diff, Dtype* mask_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat index into (n, h, w).
const int w = index % feature_W_;
const int h = (index / feature_W_) % feature_H_;
const int n = index / feature_W_ / feature_H_;
// effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
const int hstart = max(0, half_mask_H_ - h);
const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
const int wstart = max(0, half_mask_W_ - w);
const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
// (hidx, widx ) with mask-indexed
// (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
for (int hidx = hstart; hidx < hend; hidx++) {
for (int widx = wstart; widx < wend; widx++) {
// gradient w.r.t. mask(n, hidx, widx, h, w)
mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] =
buffer_diff[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)];
}
}
}
}
/**
 * Host-side launcher for the PSA forward pass.
 *
 * @param bottom1      device pointer to the input attention logits
 * @param num_         batch size
 * @param feature_H_   feature map height
 * @param feature_W_   feature map width
 * @param mask_buffer_ device pointer to the output attention buffer
 * @param forward_type PSA_TYPE_COLLECT or PSA_TYPE_DISTRIBUTE
 * @param stream       CUDA stream the kernel is enqueued on (asynchronous)
 */
void PSAkernel_Forward_Launcher(const float * bottom1, const int num_,
      const int feature_H_,const int feature_W_,
      float * mask_buffer_,const int forward_type,cudaStream_t stream) {
  // The over-complete mask covers every relative offset: (2H-1) x (2W-1).
  const int mask_H_ = 2 * feature_H_ - 1;
  const int mask_W_ = 2 * feature_W_ - 1;
  const int half_mask_H_ = (mask_H_ - 1) / 2;
  const int half_mask_W_ = (mask_W_ - 1) / 2;
  // One logical thread per (batch, y, x) output position.
  const int nthreads = num_ * feature_H_ * feature_W_;
  switch (forward_type) {
    case PSA_TYPE_COLLECT:
      PSAForward_buffer_mask_collect_gpu<float><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
          nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_,
          bottom1, mask_buffer_);
      break;
    case PSA_TYPE_DISTRIBUTE:
      PSAForward_buffer_mask_distribute_gpu<float><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
          nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_,
          bottom1, mask_buffer_);
      break;
    default:
      printf("Unknown PSA type.");
      return;
  }
  // Catch launch-configuration errors; peek does not clear the sticky error.
  cudaError_t error = cudaPeekAtLastError();
  if (error != cudaSuccess) {
    // Bug fix: the error text must not be used as the printf FORMAT string —
    // a '%' inside the message would invoke undefined behavior.
    printf("%s\n", cudaGetErrorString(error));
  }
}
/**
 * Host-side launcher for the PSA backward pass: scatters the gradient of the
 * attention buffer back into the mask gradient.
 *
 * @param bottom1_diff     device pointer receiving the mask gradient
 * @param num_             batch size
 * @param feature_H_       feature map height
 * @param feature_W_       feature map width
 * @param mask_buffer_diff device pointer to the incoming buffer gradient
 * @param forward_type     PSA_TYPE_COLLECT or PSA_TYPE_DISTRIBUTE
 * @param stream           CUDA stream the kernel is enqueued on (asynchronous)
 */
void PSAkernel_Backward_Launcher(
      float * bottom1_diff, const int num_,
      const int feature_H_,const int feature_W_,
      float * mask_buffer_diff, const int forward_type,cudaStream_t stream) {
  // One logical thread per (batch, y, x) position.
  const int nthreads = num_ * feature_H_ * feature_W_;
  // The over-complete mask covers every relative offset: (2H-1) x (2W-1).
  const int mask_H_ = 2 * feature_H_ - 1;
  const int mask_W_ = 2 * feature_W_ - 1;
  const int half_mask_H_ = (mask_H_ - 1) / 2;
  const int half_mask_W_ = (mask_W_ - 1) / 2;
  switch (forward_type) {
    case PSA_TYPE_COLLECT:
      PSABackward_buffer_mask_collect_gpu<float><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
          nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_,
          mask_buffer_diff, bottom1_diff);
      break;
    case PSA_TYPE_DISTRIBUTE:
      PSABackward_buffer_mask_distribute_gpu<float><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
          nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_,
          mask_buffer_diff, bottom1_diff);
      break;
    default:
      printf("Unknown PSA type.");
      return;
  }
  // Catch launch-configuration errors; peek does not clear the sticky error.
  cudaError_t error = cudaPeekAtLastError();
  if (error != cudaSuccess) {
    // Bug fix: never pass a runtime string as the printf format string.
    printf("%s\n", cudaGetErrorString(error));
  }
}
df44232308594b38c1ef56d0bab12bfca508fad4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* CudaOperations.cu
*
* Created on: Feb 6, 2019
* Author: alexander
*/
// Fills energyTempor with the size*size pairwise interaction terms
// spin[i] * spin[j] * J[i,j] for the spin configuration stored at row
// `index` of devSpins. Uses a grid-stride loop, so any launch size works.
// NOTE(review): presumably summed later on the host/device to obtain the
// Hamiltonian energy — confirm against the caller.
__global__ void allocHamiltonian(float* devMat, float* devSpins, int index, int size, double* energyTempor) {
int i;
int j;
int wIndex = threadIdx.x + blockIdx.x * blockDim.x;
while (wIndex < size * size) {
// Flat index -> (i, j) matrix coordinates.
i = wIndex % size;
j = (int) (wIndex / size);
// Accumulate in double to limit rounding error in the later reduction.
energyTempor[wIndex] = (double) (devSpins[i + index * size]
* devSpins[j + index * size] * devMat[wIndex]);
wIndex = wIndex + blockDim.x * gridDim.x;
}
} | df44232308594b38c1ef56d0bab12bfca508fad4.cu | #include "includes.h"
/*
* CudaOperations.cu
*
* Created on: Feb 6, 2019
* Author: alexander
*/
// Computes every pairwise interaction term spin[row] * spin[col] * J[row,col]
// for the spin configuration stored at row `index` of devSpins, writing the
// size*size products into energyTempor (double precision, for later summation).
// Grid-stride loop: correct for any launch configuration.
__global__ void allocHamiltonian(float* devMat, float* devSpins, int index, int size, double* energyTempor) {
const int stride = blockDim.x * gridDim.x;
for (int cell = threadIdx.x + blockIdx.x * blockDim.x; cell < size * size; cell += stride) {
// Flat cell index -> (row, col) of the coupling matrix.
const int row = cell % size;
const int col = cell / size;
energyTempor[cell] = (double) (devSpins[row + index * size]
* devSpins[col + index * size] * devMat[cell]);
}
}
0178d1da04fc30664dd7559b0457284ef9039edd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "FreenectFusion.h"
#include "cudautils.h"
#include "cudamath.h"
#include <cuda_gl_interop.h>
#include <thrust/transform.h>
#include <thrust/fill.h>
texture<float, 2, hipReadModeElementType> depth_texture;
texture<float, 3, hipReadModeElementType> F_texture;
__constant__ float K[9];
__constant__ float invK[9];
__constant__ float Tgk[16];
__constant__ float Tk_1k[16];
// Multiplies a row-major 3x3 matrix by a 3-vector.
__device__ float3 transform3(const float* matrix, const float3& v)
{
    return make_float3(matrix[0]*v.x + matrix[1]*v.y + matrix[2]*v.z,
                       matrix[3]*v.x + matrix[4]*v.y + matrix[5]*v.z,
                       matrix[6]*v.x + matrix[7]*v.y + matrix[8]*v.z);
}
// Applies an affine transform stored as a row-major 3x4 (or 4x4) matrix:
// rotation/scale from columns 0-2, translation from column 3.
__device__ float3 transform3_affine(const float* matrix, const float3& v)
{
    return make_float3(matrix[0]*v.x + matrix[1]*v.y + matrix[2]*v.z + matrix[3],
                       matrix[4]*v.x + matrix[5]*v.y + matrix[6]*v.z + matrix[7],
                       matrix[8]*v.x + matrix[9]*v.y + matrix[10]*v.z + matrix[11]);
}
// Applies the inverse of an affine transform: subtracts the translation
// column, then multiplies by the transpose of the upper-left 3x3 block.
// Assumes that block is orthonormal (a rigid transform) — TODO confirm.
__device__ float3 transform3_affine_inverse(const float* matrix, const float3& v)
{
    const float dx = v.x - matrix[3];
    const float dy = v.y - matrix[7];
    const float dz = v.z - matrix[11];
    return make_float3(matrix[0]*dx + matrix[4]*dy + matrix[8]*dz,
                       matrix[1]*dx + matrix[5]*dy + matrix[9]*dz,
                       matrix[2]*dx + matrix[6]*dy + matrix[10]*dz);
}
// Unnormalized Gaussian weight exp(-t^2 / sigma^2).
// (Note: no factor of 2 in the denominator — callers rely on this shape.)
__device__ float gaussian(float t, float sigma)
{
    // expf avoids the implicit float->double promotion (and double-precision
    // transcendental) that plain exp() incurs in device code.
    return expf(-t*t/(sigma*sigma));
}
// Converts voxel-grid coordinates to world units; the grid is centered on the
// origin and each voxel center sits at a half-integer offset.
__host__ __device__ float3 gridToWorld(const float3& p, int side, float units_per_voxel)
{
    const int half = side/2;  // integer division, matching the grid indexing
    return make_float3((p.x - half + 0.5f) * units_per_voxel,
                       (p.y - half + 0.5f) * units_per_voxel,
                       (p.z - half + 0.5f) * units_per_voxel);
}
// Converts world coordinates to (fractional) voxel-grid coordinates;
// exact inverse of gridToWorld.
__host__ __device__ float3 worldToGrid(const float3& p, int side, float units_per_voxel)
{
    const int half = side/2;  // integer division, matching gridToWorld
    return make_float3(p.x/units_per_voxel + half - 0.5f,
                       p.y/units_per_voxel + half - 0.5f,
                       p.z/units_per_voxel + half - 0.5f);
}
// Bilateral-style smoothing of the depth map bound to depth_texture.
// Each output pixel is a weighted average over an 11x11 window, weighting by
// a spatial Gaussian (sigma1) times a range Gaussian on the depth difference
// (sigma2), so depth discontinuities are preserved.
// `smooth_depth` is a pitched allocation; `pitch` is in bytes.
// Expected launch: 2D grid covering width x height, one thread per pixel.
__global__ void compute_smooth_depth(float* smooth_depth,
                        int width, int height, size_t pitch,
                        float sigma1, float sigma2)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if(x >= width || y >= height)
        return;
    // Pitched addressing: advance by `pitch` bytes per row.
    float* current_smooth_depth = (float*)((char*)smooth_depth + pitch*y) + x;
    float depth1 = tex2D(depth_texture, x, y);
    float cum = 0.f;
    float weight_cum = 0.f;
    for(int i=-5; i<=5; ++i)
        for(int j=-5; j<=5; ++j)
        {
            float depth2 = tex2D(depth_texture, x+i, y+j);
            // Spatial weight (distance in pixels) x range weight (depth delta).
            float weight1 = gaussian(length2(make_float2(i,j)), sigma1);
            float weight2 = gaussian(depth1 - depth2, sigma2);
            weight_cum += weight1 * weight2;
            cum += depth2 * weight1 * weight2;
        }
    cum /= weight_cum;
    *current_smooth_depth = cum;
}
// Downsamples the depth map in depth_texture by a factor of 2 while applying
// a small bilateral-style filter (5x5 window around the source pixel,
// fixed spatial sigma 1.0 and range sigma 20.0).
// `output` is a tightly packed width x height buffer (no pitch).
// Expected launch: 2D grid covering the OUTPUT width x height.
__global__ void pyrdownSmoothDepth(float* output, int width, int height)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if(x >= width || y >= height)
        return;
    float* current = &output[y*width + x];
    // Source pixel at twice the output coordinate.
    float depth1 = tex2D(depth_texture, 2*x, 2*y);
    float cum = 0.f;
    float weight_cum = 0.f;
    for(int i=-2; i<=2; ++i)
        for(int j=-2; j<=2; ++j)
        {
            float depth2 = tex2D(depth_texture, 2*x+i, 2*y+j);
            float weight1 = gaussian(length2(make_float2(i,j)), 1.f);
            float weight2 = gaussian(depth1 - depth2, 20.f);
            weight_cum += weight1 * weight2;
            cum += depth2 * weight1 * weight2;
        }
    cum /= weight_cum;
    *current = cum;
}
/**
 * Generate vertices and normals from a depth stored in depth_texture.
 *
 * Back-projects each pixel through the inverse intrinsics (constant `invK`)
 * scaled by its depth, and estimates the normal as the cross product of the
 * forward differences toward the right and lower neighbors.
 * Pixels with (near-)zero depth get a zero vertex and the sentinel normal
 * (0,0,2), and mask[pixel] is set to 0/1 accordingly.
 * `vertices`/`normals` are pitched allocations; `pitch` is in bytes.
 */
__global__ void measure(float3* vertices, float3* normals, int* mask,
                    int width, int height, size_t pitch)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int thid = width*y + x;
    if(x >= width || y >= height)
        return;
    float3* current_vertex = (float3*)((char*)vertices + pitch*y) + x;
    float3* current_normal = (float3*)((char*)normals + pitch*y) + x;
    // Homogeneous pixel coordinates of this pixel and its two neighbors.
    float3 u = make_float3(float(x), float(y), 1.f);
    float3 v = make_float3(float(x+1), float(y), 1.f);
    float3 w = make_float3(float(x), float(y+1), 1.f);
    float depth = tex2D(depth_texture, x, y);
    // Back-project: 3D point = depth * K^-1 * pixel.
    u = depth * transform3(invK, u);
    v = tex2D(depth_texture, x+1, y) * transform3(invK, v);
    w = tex2D(depth_texture, x, y+1) * transform3(invK, w);
    // Normal from the two tangent vectors (forward differences).
    float3 n = normalize(cross(v - u, w - u));
    *current_vertex = u;
    *current_normal = n;
    if(depth < 0.01f)
    {
        // Invalid depth: sentinel normal z == 2 marks the pixel as unusable.
        *current_vertex = make_float3(0.f, 0.f, 0.f);
        *current_normal = make_float3(0.f, 0.f, 2.f);
    }
    mask[thid] = depth > 0.01f;
}
// TSDF volume update (KinectFusion-style). For each voxel of the slice range
// starting at init_slice: project the voxel center into the current depth map
// (constants K, invK, Tgk), compute the signed distance eta between the
// measured depth and the voxel's distance along the ray, truncate it at mu,
// and fold it into the running weighted average F with weight W (capped at 50).
// Voxels behind the surface by more than mu, or with no depth sample, are skipped.
__global__ void update_reconstruction(float* F, float* W,
                                int side, float units_per_voxel,
                                float mu, int init_slice)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z + init_slice;
    float* current_F = F + k*side*side + j*side + i;
    float* current_W = W + k*side*side + j*side + i;
    // Point 3D.
    float3 p = gridToWorld(make_float3(i,j,k), side, units_per_voxel);
    // Project the point.
    float3 x = transform3(K, transform3_affine_inverse(Tgk, p));
    x.x = round(x.x/x.z);
    x.y = round(x.y/x.z);
    x.z = 1.f;
    // Determine lambda (scale between depth along z and distance along the ray).
    float3 aux = transform3(invK, x);
    float lambda = length(aux);
    float R = tex2D(depth_texture, x.x, x.y);
    // Camera position = translation column of Tgk.
    float3 tgk = make_float3(Tgk[3], Tgk[7], Tgk[11]);
    float eta = R - length(tgk - p)/lambda;
    // Truncate the signed distance at +/- mu (values < -1 are rejected below).
    float F_rk = fminf(1.f, eta/mu);
    float W_rk = 1.f;
    if(F_rk < -1.f || R == 0.f)
        return;
    // F < -2 is the "never observed" sentinel: overwrite instead of averaging.
    if(*current_F < -2.f)
        *current_F = F_rk;
    else
        *current_F = (*current_W * *current_F + W_rk * F_rk)/(*current_W + W_rk);
    *current_W = min(*current_W + W_rk, 50.f);
}
// Raycasts the TSDF bound to F_texture from the camera pose in constant Tgk.
// Each thread marches one pixel's ray (step 3*mu/4) between mindistance and
// maxdistance, looking for a +/- zero crossing of the TSDF; on a hit it
// linearly interpolates the surface point and takes the normal from the
// backward-difference TSDF gradient. Misses get the sentinel normal (0,0,2).
// `vertices`/`normals` are pitched allocations; `pitch` is in bytes.
__global__ void raycast(float3* vertices, float3* normals,
                        int width, int height, size_t pitch,
                        int side, float units_per_voxel, float mu,
                        float mindistance, float maxdistance)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if(x >= width || y >= height)
        return;
    float3* current_vertex = (float3*)((char*)vertices + pitch*y) + x;
    float3* current_normal = (float3*)((char*)normals + pitch*y) + x;
    // Ray direction in world space; tgk is the camera position.
    float3 ray = normalize(transform3(invK, make_float3(float(x), float(y), 1.f)));
    float3 tgk = make_float3(Tgk[3], Tgk[7], Tgk[11]);
    ray = transform3_affine(Tgk, ray) - tgk;
    *current_normal = make_float3(1.f, 1.f, 1.f);
    float step = 3.f*mu/4.f;
    float3 p = worldToGrid(tgk + mindistance * ray, side, units_per_voxel);
    float old_value = tex3D(F_texture, p.x+0.5f, p.y+0.5f, p.z+0.5f);
    for(float distance = mindistance; distance < maxdistance; distance += step)
    {
        p = worldToGrid(tgk + distance * ray, side, units_per_voxel);
        float value = tex3D(F_texture, p.x+0.5f, p.y+0.5f, p.z+0.5f);
        // Left the observed region, or crossed back-to-front: give up.
        if(value < -2 || (old_value < 0 && value > 0))
            break;
        if(old_value >= 0 && value < 0)
        {
            // Zero crossing: interpolate the exact surface distance t.
            float t = distance - step - (step * old_value)/(value - old_value);
            *current_vertex = tgk + t * ray;
            // Normal from the (backward-difference) TSDF gradient.
            float valuex = tex3D(F_texture, p.x-1+0.5f, p.y+0.5f, p.z+0.5f);
            float valuey = tex3D(F_texture, p.x+0.5f, p.y-1+0.5f, p.z+0.5f);
            float valuez = tex3D(F_texture, p.x+0.5f, p.y+0.5f, p.z-1+0.5f);
            *current_normal = normalize(make_float3(valuex-value, valuey-value, valuez-value));
            return;
        }
        old_value = value;
    }
    // No surface hit along this ray.
    *current_vertex = make_float3(0.f, 0.f, 0.f);
    *current_normal = make_float3(0.f, 0.f, 2.f);
}
// Maps a world point through extrinsics T and intrinsics K; the result is a
// homogeneous pixel coordinate (divide by z to get the pixel).
__device__ float3 project(const float* K, const float* T, float3 point)
{
    float3 camera_point = transform3_affine(T, point);
    return transform3(K, camera_point);
}
// Dehomogenizes a pixel coordinate, rounding to the nearest integer pixel.
__device__ int2 hom2cart(float3 point)
{
    float px = roundf(point.x/point.z);
    float py = roundf(point.y/point.z);
    return make_int2(px, py);
}
// Projective data association for ICP: for each measured vertex, project it
// with constants K and Tk_1k into the raycast image and take the raycast
// vertex/normal at that pixel as its correspondence. Pairs that project
// outside the image, come from invalid measurements (z == 0), or are farther
// apart than threshold_distance get the sentinel normal (0,0,2).
__global__ void search_correspondences(float3* vertices_corresp, float3* normals_corresp,
                    const float3* vertices_measure, float3* normals_measure,
                    const float3* vertices_raycast, const float3* normals_raycast,
                    int width_measure, int height_measure,
                    int width_raycast, int height_raycast,
                    float threshold_distance)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int thid = width_measure*y + x;
    if(x >= width_measure || y >= height_measure)
        return;
    float3* current_vertex_corresp = &vertices_corresp[thid];
    float3* current_normal_corresp = &normals_corresp[thid];
    float3 vertex_measure = vertices_measure[thid];
    // Get the corresponding pixel in the raycast image.
    int2 u_raycast = hom2cart(project(K, Tk_1k, vertex_measure));
    if(u_raycast.x < 0 || u_raycast.y < 0 ||
        u_raycast.x >= width_raycast || u_raycast.y >= height_raycast)
    {
        // Projects outside the raycast image: no correspondence.
        *current_vertex_corresp = make_float3(0.f, 0.f, 0.f);
        *current_normal_corresp = make_float3(0.f, 0.f, 2.f);
        return;
    }
    int id_raycast = width_raycast*u_raycast.y + u_raycast.x;
    // Compare in global coordinates (measured vertex moved by current pose).
    float3 v = transform3_affine(Tgk, vertex_measure);
    float3 vdiff = vertices_raycast[id_raycast] - v;
    float vertex_distance = length(vdiff);
    // Prune invalid matches.
    if(vertex_measure.z==0.f || vertex_distance > threshold_distance)
    {
        *current_vertex_corresp = make_float3(0.f, 0.f, 0.f);
        *current_normal_corresp = make_float3(0.f, 0.f, 2.f);
        return;
    }
    *current_vertex_corresp = vertices_raycast[id_raycast];
    *current_normal_corresp = normals_raycast[id_raycast];
    // For debug only.
    normals_measure[thid] = make_float3(1.f, 1.f, 1.f);
}
// Builds the per-point contributions to the point-to-plane ICP normal
// equations. For each valid correspondence, writes the 21 entries of the
// upper triangle of the symmetric 6x6 matrix A^T A into AA[21*thid..] and the
// 6-entry right-hand side A^T b into Ab[6*thid..]; these are summed later.
// Points whose correspondence carries the sentinel normal (z == 2) leave
// their (pre-zeroed) slots untouched.
// Uses a 2D grid flattened via blockIdx.y*gridDim.x + blockIdx.x so that more
// than 65535 blocks can be launched.
__global__ void compute_tracking_matrices(float* AA, float* Ab,
                    const float3* vertices_measure, const float3* normals_measure,
                    const float3* vertices_corresp, const float3* normals_corresp,
                    int numVertices)
{
    int blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    int thid = __mul24(blockId, blockDim.x) + threadIdx.x;
    //int thid = blockDim.x * blockIdx.x + threadIdx.x;
    if(thid >= numVertices)
        return;
    float* current_AA = &AA[21*thid];
    float* current_Ab = &Ab[6*thid];
    // Sentinel normal => no valid correspondence for this point.
    if(normals_corresp[thid].z == 2.f)
        return;
    // Measured vertex in global coordinates; n is the destination normal.
    float3 v = transform3_affine(Tgk, vertices_measure[thid]);
    float3 n = normals_corresp[thid];
    // Point-to-plane residual.
    float b = dot(vertices_corresp[thid] - v, n);
    current_Ab[0] = b*(v.z*n.y - v.y*n.z);
    current_Ab[1] = b*(-v.z*n.x + v.x*n.z);
    current_Ab[2] = b*(v.y*n.x - v.x*n.y);
    current_Ab[3] = b*n.x;
    current_Ab[4] = b*n.y;
    current_Ab[5] = b*n.z;
    // Upper triangle of the symmetric 6x6 system, row by row.
    current_AA[0] = (v.z*n.y - v.y*n.z)*(v.z*n.y - v.y*n.z);
    current_AA[1] = (v.z*n.y - v.y*n.z)*(-v.z*n.x + v.x*n.z);
    current_AA[2] = (v.z*n.y - v.y*n.z)*(v.y*n.x - v.x*n.y);
    current_AA[3] = (v.z*n.y - v.y*n.z)*n.x;
    current_AA[4] = (v.z*n.y - v.y*n.z)*n.y;
    current_AA[5] = (v.z*n.y - v.y*n.z)*n.z;
    current_AA[6] = (-v.z*n.x + v.x*n.z)*(-v.z*n.x + v.x*n.z);
    current_AA[7] = (-v.z*n.x + v.x*n.z)*(v.y*n.x - v.x*n.y);
    current_AA[8] = (-v.z*n.x + v.x*n.z)*n.x;
    current_AA[9] = (-v.z*n.x + v.x*n.z)*n.y;
    current_AA[10] = (-v.z*n.x + v.x*n.z)*n.z;
    current_AA[11] = (v.y*n.x - v.x*n.y)*(v.y*n.x - v.x*n.y);
    current_AA[12] = (v.y*n.x - v.x*n.y)*n.x;
    current_AA[13] = (v.y*n.x - v.x*n.y)*n.y;
    current_AA[14] = (v.y*n.x - v.x*n.y)*n.z;
    current_AA[15] = n.x*n.x;
    current_AA[16] = n.x*n.y;
    current_AA[17] = n.x*n.z;
    current_AA[18] = n.y*n.y;
    current_AA[19] = n.y*n.z;
    current_AA[20] = n.z*n.z;
}
/// Converts a raw Kinect depth reading to millimeters; the sentinel raw value
/// 2047 ("no measurement") maps to 0.
struct transform_depth
{
    __host__ __device__
    float operator()(uint16_t a)
    {
        // Disparity-to-distance conversion; 2047 is the invalid marker.
        return (a == 2047) ? 0.f
                           : 1000.f / (a * -0.0030711016f + 3.3309495161f);
    }
};
// Uploads a raw depth frame to the GPU, converts it to millimeters in place
// on the device (thrust::transform with transform_depth), and rebuilds the
// three-level pyramid of depth maps / vertices / normals.
void Measurement::setDepth(uint16_t* depth)
{
    // Convert raw depth to milimeters.
    cudaSafeCall(hipMemcpy(mRawDepthGpu, depth, sizeof(uint16_t)*mNumVertices,
                hipMemcpyHostToDevice));
    thrust::transform(thrust::device_ptr<uint16_t>(mRawDepthGpu),
                        thrust::device_ptr<uint16_t>(mRawDepthGpu + mNumVertices),
                        thrust::device_ptr<float>(mDepthGpu),
                        transform_depth());
    // Generate the pyramid of depth maps and vertices/normals.
    for(int i=0; i<3; ++i)
        mPyramid[i]->update();
}
// Rebuilds this pyramid level from its parent: binds the parent depth to
// depth_texture, then either bilateral-smooths at full resolution (mParent)
// or downsamples by 2 (mParent2, the base level), and finally regenerates
// vertices and normals into the OpenGL-interop buffers via the measure kernel.
void PyramidMeasurement::update()
{
    const float* previousDepthGpu;
    int previousWidth, previousHeight;
    // Get info from the parent.
    if(mParent != 0)
    {
        previousDepthGpu = mParent->getDepthGpu();
        previousWidth = mParent->getWidth();
        previousHeight = mParent->getHeight();
    }
    else
    {
        previousDepthGpu = mParent2->getDepthGpu();
        previousWidth = mParent2->getWidth();
        previousHeight = mParent2->getHeight();
    }
    // Bind the depth into a texture for fast access.
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
    cudaSafeCall(hipBindTexture2D(0, &depth_texture, previousDepthGpu, &channelDesc,
                previousWidth, previousHeight, previousWidth*sizeof(float)));
    // Unnormalized, unfiltered coordinates; out-of-range reads return 0.
    depth_texture.normalized = false;
    depth_texture.filterMode = hipFilterModePoint;
    depth_texture.addressMode[0] = hipAddressModeBorder;
    depth_texture.addressMode[1] = hipAddressModeBorder;
    // Smooth or resize the image down.
    dim3 grid, block(16,16,1);
    grid.x = (mWidth-1)/block.x + 1;
    grid.y = (mHeight-1)/block.y + 1;
    if(mParent != 0)
        hipLaunchKernelGGL(( compute_smooth_depth), dim3(grid),dim3(block), 0, 0, mDepthGpu, previousWidth, previousHeight,
                            mWidth*sizeof(float), 2.f, 20.f);
    else
        hipLaunchKernelGGL(( pyrdownSmoothDepth), dim3(grid),dim3(block), 0, 0, mDepthGpu, mWidth, mHeight);
    // Bind the new reduced/smooth depth into a texture for fast access.
    cudaSafeCall(hipBindTexture2D(0, &depth_texture, mDepthGpu, &channelDesc,
                mWidth, mHeight, mWidth*sizeof(float)));
    // Determine vertices and normals from the depth map.
    cudaSafeCall(hipMemcpyToSymbol(invK, mKInv, sizeof(float)*9));
    float3* vertices;
    float3* normals;
    // Map the GL vertex/normal buffers for CUDA writes.
    hipGLMapBufferObject__((void**)&vertices, mVertexBuffer);
    hipGLMapBufferObject__((void**)&normals, mNormalBuffer);
    hipLaunchKernelGGL(( measure), dim3(grid),dim3(block), 0, 0, vertices, normals, mMaskGpu,
                        mWidth, mHeight, mWidth*3*sizeof(float));
    cudaSafeCall(hipGetLastError());
    hipGLUnmapBufferObject(mVertexBuffer);
    hipGLUnmapBufferObject(mNormalBuffer);
}
// Caches the world-space axis-aligned bounds of the voxel grid in
// mBoundingBox as {min.x, min.y, min.z, max.x, max.y, max.z}.
void VolumeFusion::initBoundingBox()
{
    const float3 lo = gridToWorld(make_float3(0,0,0), mSide, mUnitsPerVoxel);
    const float3 hi = gridToWorld(make_float3(mSide,mSide,mSide), mSide, mUnitsPerVoxel);
    mBoundingBox[0] = lo.x; mBoundingBox[1] = lo.y; mBoundingBox[2] = lo.z;
    mBoundingBox[3] = hi.x; mBoundingBox[4] = hi.y; mBoundingBox[5] = hi.z;
}
// Copies the TSDF volume into the cudaArray (mCopyParams/mFArray) and binds
// it to the given 3D texture with trilinear filtering and border addressing,
// so raycasting can sample the field with hardware interpolation.
template<typename texture>
void VolumeFusion::bindTextureToF(texture& tex) const
{
    // NOTE(review): mCopyParams is passed by value here — presumably it is
    // already a pointer/handle set up at construction; verify against header.
    cudaSafeCall(hipMemcpy3D(mCopyParams));
    tex.normalized = false;
    tex.filterMode = hipFilterModeLinear;
    tex.addressMode[0] = hipAddressModeBorder;
    tex.addressMode[1] = hipAddressModeBorder;
    tex.addressMode[2] = hipAddressModeBorder;
    static const hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
    cudaSafeCall(hipBindTextureToArray(tex, mFArray, channelDesc));
}
// Integrates one depth frame into the TSDF volume: uploads the camera
// intrinsics and pose T to constant memory, binds the depth map to
// depth_texture, and runs update_reconstruction slice by slice over the
// whole mSide^3 grid (truncation distance fixed at 30 units).
void VolumeFusion::update(const Measurement& measurement, const float* T)
{
    dim3 block(8,8,8);
    dim3 grid;
    grid.x = grid.y = mSide/block.x;
    grid.z = 1;
    // Set instrinsic and extrinsics in constant memory.
    const float* kdepth = measurement.getK();
    const float* kdepthinv = measurement.getKInverse();
    cudaSafeCall(hipMemcpyToSymbol(K, kdepth, sizeof(float)*9));
    cudaSafeCall(hipMemcpyToSymbol(invK, kdepthinv, sizeof(float)*9));
    cudaSafeCall(hipMemcpyToSymbol(Tgk, T, sizeof(float)*16));
    // Bind the depth map into the depth_texture.
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
    cudaSafeCall(hipBindTexture2D(0, &depth_texture, measurement.getDepthGpu(), &channelDesc,
                measurement.getWidth(), measurement.getHeight(),
                measurement.getWidth()*sizeof(float)));
    // Update the volume, one z-slab of block.z slices per launch.
    for(int i=0; i<mSide; i+=block.z)
        hipLaunchKernelGGL(( update_reconstruction), dim3(grid),dim3(block), 0, 0, mFGpu, mWGpu, mSide, mUnitsPerVoxel, 30.f, i);
    cudaSafeCall(hipGetLastError());
}
// Renders a predicted vertex/normal map by raycasting the TSDF volume from
// camera pose T. Ray march bounds come from the volume's distance to the
// camera position; results go into the OpenGL-interop buffers, and T is
// cached in mT for the next tracking step.
void VolumeMeasurement::measure(const VolumeFusion& volume, const float* T)
{
    volume.bindTextureToF(F_texture);
    // Camera position = translation column of T.
    float position[3];
    position[0] = T[3]; position[1] = T[7]; position[2] = T[11];
    float mindistance = volume.getMinimumDistanceTo(position);
    float maxdistance = volume.getMaximumDistanceTo(position);
    cudaSafeCall(hipMemcpyToSymbol(invK, mKdepth->getInverse(), sizeof(float)*9));
    cudaSafeCall(hipMemcpyToSymbol(Tgk, T, sizeof(float)*16));
    float3* vertices;
    float3* normals;
    hipGLMapBufferObject__((void**)&vertices, mVertexBuffer);
    hipGLMapBufferObject__((void**)&normals, mNormalBuffer);
    dim3 grid, block(16,16,1);
    grid.x = (mWidth-1)/block.x + 1;
    grid.y = (mHeight-1)/block.y + 1;
    hipLaunchKernelGGL(( raycast), dim3(grid),dim3(block), 0, 0, vertices, normals, mWidth, mHeight, mWidth*3*sizeof(float),
                        volume.getSide(), volume.getUnitsPerVoxel(), 30.f,
                        mindistance, maxdistance);
    cudaSafeCall(hipGetLastError());
    hipGLUnmapBufferObject(mVertexBuffer);
    hipGLUnmapBufferObject(mNormalBuffer);
    // Preserve the current T.
    std::copy(T, T+16, mT);
}
// Fixed-size float array usable on host and device; serves as the element
// type for the thrust reductions of the tracking matrices below.
template<int T>
struct floatN
{
    float a[T];
    // Zero-initializing default constructor.
    __host__ __device__
    floatN()
    {
        for(int k = 0; k != T; ++k)
            a[k] = 0.f;
    }
    // Broadcast constructor: every component set to v.
    __host__ __device__
    floatN(float v)
    {
        for(int k = 0; k != T; ++k)
            a[k] = v;
    }
};
// Component-wise addition for 21-element accumulators (A^T A upper triangle).
__device__
floatN<21> operator+(const floatN<21>& a, const floatN<21>& b)
{
    floatN<21> sum;
    for(int k = 0; k < 21; ++k)
        sum.a[k] = a.a[k] + b.a[k];
    return sum;
}
// Component-wise addition for 6-element accumulators (A^T b vector).
__device__
floatN<6> operator+(const floatN<6>& a, const floatN<6>& b)
{
    floatN<6> sum;
    for(int k = 0; k < 6; ++k)
        sum.a[k] = a.a[k] + b.a[k];
    return sum;
}
// Host wrapper for the projective data-association step: uploads the
// intrinsics and the two poses to constant memory (Tgk: current global pose;
// Tk_1k: current-to-initial transform used for projection) and launches
// search_correspondences with a fixed 100-unit distance threshold.
void Tracker::searchCorrespondences(float3* vertexCorresp, float3* normalsCorresp,
                        const float* K_,
                        const float* currentT, const float* current2InitT,
                        const float3* verticesMeasure, const float3* normalsMeasure,
                        const float3* verticesRaycast, const float3* normalsRaycast,
                        int widthMeasure, int heightMeasure, int widthRaycast, int heightRaycast)
{
    // Copy intrinsic and extrinsic matrices to constant memory.
    cudaSafeCall(hipMemcpyToSymbol(Tgk, currentT, sizeof(float)*16));
    cudaSafeCall(hipMemcpyToSymbol(Tk_1k, current2InitT, sizeof(float)*16));
    cudaSafeCall(hipMemcpyToSymbol(K, K_, sizeof(float)*9));
    dim3 block(16,16,1), grid;
    grid.x = (widthMeasure - 1)/block.x + 1;
    grid.y = (heightMeasure - 1)/block.y + 1;
    // Search the correspondences between device measurements and volume measurements.
    hipLaunchKernelGGL(( search_correspondences), dim3(grid),dim3(block), 0, 0, vertexCorresp, normalsCorresp,
            verticesMeasure, (float3*)normalsMeasure, verticesRaycast, normalsRaycast,
            widthMeasure, heightMeasure, widthRaycast, heightRaycast, 100.f);
    cudaSafeCall(hipGetLastError());
}
// One linearized point-to-plane ICP step: computes per-point contributions
// to the 6x6 normal equations on the GPU, reduces them with thrust, and
// returns the summed upper-triangular A^T A (21 floats) and A^T b (6 floats)
// on the host for the pose solve.
void Tracker::trackStep(float* AA, float* Ab, const float* currentT,
                const float3* verticesMeasure, const float3* normalsMeasure,
                const float3* verticesCorresp, const float3* normalsCorresp,
                int numVertices)
{
    // Set the result matrices to 0 (points without a correspondence leave
    // their slots untouched in the kernel).
    thrust::fill(thrust::device_ptr<float>(mAAGpu),
                 thrust::device_ptr<float>(mAAGpu + 21*numVertices), 0.f);
    thrust::fill(thrust::device_ptr<float>(mAbGpu),
                 thrust::device_ptr<float>(mAbGpu + 6*numVertices), 0.f);
    // Determine the grid and block sizes; gridDim.x is capped at 65535, so
    // fold the excess into gridDim.y (the kernel flattens the 2D block id).
    dim3 block(32,1,1), grid;
    grid.x = (numVertices - 1)/block.x + 1;
    while(grid.x > 65535)
    {
        // Bug fix: round UP when halving. Plain grid.x /= 2 with an odd
        // grid.x dropped a block, leaving tail vertices unprocessed.
        grid.x = (grid.x + 1)/2;
        grid.y *= 2;
    }
    // Copy the extrinsic matrix to the constant memory.
    cudaSafeCall(hipMemcpyToSymbol(Tgk, currentT, sizeof(float)*16));
    // Compute the matrices.
    hipLaunchKernelGGL(( compute_tracking_matrices), dim3(grid),dim3(block), 0, 0, mAAGpu, mAbGpu,
                                                verticesMeasure, normalsMeasure,
                                                verticesCorresp, normalsCorresp,
                                                numVertices);
    cudaSafeCall(hipGetLastError());
    // Sum AA and Ab on the device, fetching only the 27 reduced floats.
    floatN<21> _AA = thrust::reduce(thrust::device_ptr<floatN<21> >((floatN<21>*)mAAGpu),
                            thrust::device_ptr<floatN<21> >(((floatN<21>*)mAAGpu) + numVertices),
                            floatN<21>(0.f), thrust::plus<floatN<21> >());
    floatN<6> _Ab = thrust::reduce(thrust::device_ptr<floatN<6> >((floatN<6>*)mAbGpu),
                            thrust::device_ptr<floatN<6> >(((floatN<6>*)mAbGpu) + numVertices),
                            floatN<6>(0.f), thrust::plus<floatN<6> >());
    std::copy(_AA.a, _AA.a+21, AA);
    std::copy(_Ab.a, _Ab.a+6, Ab);
}
| 0178d1da04fc30664dd7559b0457284ef9039edd.cu |
#include "FreenectFusion.h"
#include "cudautils.h"
#include "cudamath.h"
#include <cuda_gl_interop.h>
#include <thrust/transform.h>
#include <thrust/fill.h>
texture<float, 2, cudaReadModeElementType> depth_texture;
texture<float, 3, cudaReadModeElementType> F_texture;
__constant__ float K[9];
__constant__ float invK[9];
__constant__ float Tgk[16];
__constant__ float Tk_1k[16];
// Multiplies a row-major 3x3 matrix by a 3-vector.
__device__ float3 transform3(const float* matrix, const float3& v)
{
    float3 res;
    res.x = matrix[0]*v.x + matrix[1]*v.y + matrix[2]*v.z;
    res.y = matrix[3]*v.x + matrix[4]*v.y + matrix[5]*v.z;
    res.z = matrix[6]*v.x + matrix[7]*v.y + matrix[8]*v.z;
    return res;
}
// Applies an affine transform stored as a row-major 3x4 (or 4x4) matrix:
// rotation/scale from columns 0-2, translation from column 3.
__device__ float3 transform3_affine(const float* matrix, const float3& v)
{
    float3 res;
    res.x = matrix[0]*v.x + matrix[1]*v.y + matrix[2]*v.z + matrix[3];
    res.y = matrix[4]*v.x + matrix[5]*v.y + matrix[6]*v.z + matrix[7];
    res.z = matrix[8]*v.x + matrix[9]*v.y + matrix[10]*v.z + matrix[11];
    return res;
}
// Applies the inverse of an affine transform: subtracts the translation
// column, then multiplies by the transpose of the upper-left 3x3 block
// (assumes that block is orthonormal, i.e. a rigid transform — TODO confirm).
__device__ float3 transform3_affine_inverse(const float* matrix, const float3& v)
{
    float3 res;
    float3 v2 = make_float3(v.x-matrix[3], v.y-matrix[7], v.z-matrix[11]);
    res.x = matrix[0]*v2.x + matrix[4]*v2.y + matrix[8]*v2.z;
    res.y = matrix[1]*v2.x + matrix[5]*v2.y + matrix[9]*v2.z;
    res.z = matrix[2]*v2.x + matrix[6]*v2.y + matrix[10]*v2.z;
    return res;
}
// Unnormalized Gaussian weight exp(-t^2 / sigma^2); note the missing
// conventional factor of 2 in the denominator.
__device__ float gaussian(float t, float sigma)
{
    return exp(-t*t/(sigma*sigma));
}
// Converts voxel-grid coordinates to world units; the grid is centered on
// the origin and each voxel center sits at a half-integer offset.
__host__ __device__ float3 gridToWorld(const float3& p, int side, float units_per_voxel)
{
    return make_float3((p.x - side/2 + 0.5f) * units_per_voxel,
                    (p.y - side/2 + 0.5f) * units_per_voxel,
                    (p.z - side/2 + 0.5f) * units_per_voxel);
}
// Converts world coordinates to (fractional) voxel-grid coordinates;
// exact inverse of gridToWorld.
__host__ __device__ float3 worldToGrid(const float3& p, int side, float units_per_voxel)
{
    return make_float3(p.x/units_per_voxel + side/2 - 0.5f,
                    p.y/units_per_voxel + side/2 - 0.5f,
                    p.z/units_per_voxel + side/2 - 0.5f);
}
// Bilateral-style smoothing of the depth map bound to depth_texture
// (spatial Gaussian sigma1 over an 11x11 window, range Gaussian sigma2 on
// depth differences). `pitch` is in bytes; one thread per output pixel.
__global__ void compute_smooth_depth(float* smooth_depth,
                        int width, int height, size_t pitch,
                        float sigma1, float sigma2)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if(x >= width || y >= height)
        return;
    float* current_smooth_depth = (float*)((char*)smooth_depth + pitch*y) + x;
    float depth1 = tex2D(depth_texture, x, y);
    float cum = 0.f;
    float weight_cum = 0.f;
    for(int i=-5; i<=5; ++i)
        for(int j=-5; j<=5; ++j)
        {
            float depth2 = tex2D(depth_texture, x+i, y+j);
            float weight1 = gaussian(length2(make_float2(i,j)), sigma1);
            float weight2 = gaussian(depth1 - depth2, sigma2);
            weight_cum += weight1 * weight2;
            cum += depth2 * weight1 * weight2;
        }
    cum /= weight_cum;
    *current_smooth_depth = cum;
}
// Downsamples the depth in depth_texture by 2 with a small bilateral-style
// filter (5x5 window, fixed sigmas 1.0 / 20.0); output is tightly packed.
__global__ void pyrdownSmoothDepth(float* output, int width, int height)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if(x >= width || y >= height)
        return;
    float* current = &output[y*width + x];
    float depth1 = tex2D(depth_texture, 2*x, 2*y);
    float cum = 0.f;
    float weight_cum = 0.f;
    for(int i=-2; i<=2; ++i)
        for(int j=-2; j<=2; ++j)
        {
            float depth2 = tex2D(depth_texture, 2*x+i, 2*y+j);
            float weight1 = gaussian(length2(make_float2(i,j)), 1.f);
            float weight2 = gaussian(depth1 - depth2, 20.f);
            weight_cum += weight1 * weight2;
            cum += depth2 * weight1 * weight2;
        }
    cum /= weight_cum;
    *current = cum;
}
/**
 * Generate vertices and normals from a depth stored in depth_texture.
 * Back-projects each pixel through invK scaled by depth; the normal is the
 * cross product of forward differences. Invalid depths (< 0.01) yield a zero
 * vertex and the sentinel normal (0,0,2); mask[pixel] records validity.
 */
__global__ void measure(float3* vertices, float3* normals, int* mask,
                    int width, int height, size_t pitch)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int thid = width*y + x;
    if(x >= width || y >= height)
        return;
    float3* current_vertex = (float3*)((char*)vertices + pitch*y) + x;
    float3* current_normal = (float3*)((char*)normals + pitch*y) + x;
    float3 u = make_float3(float(x), float(y), 1.f);
    float3 v = make_float3(float(x+1), float(y), 1.f);
    float3 w = make_float3(float(x), float(y+1), 1.f);
    float depth = tex2D(depth_texture, x, y);
    u = depth * transform3(invK, u);
    v = tex2D(depth_texture, x+1, y) * transform3(invK, v);
    w = tex2D(depth_texture, x, y+1) * transform3(invK, w);
    float3 n = normalize(cross(v - u, w - u));
    *current_vertex = u;
    *current_normal = n;
    if(depth < 0.01f)
    {
        *current_vertex = make_float3(0.f, 0.f, 0.f);
        *current_normal = make_float3(0.f, 0.f, 2.f);
    }
    mask[thid] = depth > 0.01f;
}
// TSDF volume update (KinectFusion-style): project each voxel into the depth
// map, compute the truncated signed distance (truncation mu) and fold it into
// the running weighted average F with weight W capped at 50.
__global__ void update_reconstruction(float* F, float* W,
                                int side, float units_per_voxel,
                                float mu, int init_slice)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z + init_slice;
    float* current_F = F + k*side*side + j*side + i;
    float* current_W = W + k*side*side + j*side + i;
    // Point 3D.
    float3 p = gridToWorld(make_float3(i,j,k), side, units_per_voxel);
    // Project the point.
    float3 x = transform3(K, transform3_affine_inverse(Tgk, p));
    x.x = round(x.x/x.z);
    x.y = round(x.y/x.z);
    x.z = 1.f;
    // Determine lambda.
    float3 aux = transform3(invK, x);
    float lambda = length(aux);
    float R = tex2D(depth_texture, x.x, x.y);
    float3 tgk = make_float3(Tgk[3], Tgk[7], Tgk[11]);
    float eta = R - length(tgk - p)/lambda;
    float F_rk = fminf(1.f, eta/mu);
    float W_rk = 1.f;
    if(F_rk < -1.f || R == 0.f)
        return;
    // F < -2 is the "never observed" sentinel: overwrite instead of averaging.
    if(*current_F < -2.f)
        *current_F = F_rk;
    else
        *current_F = (*current_W * *current_F + W_rk * F_rk)/(*current_W + W_rk);
    *current_W = min(*current_W + W_rk, 50.f);
}
// Raycasts the TSDF in F_texture from the pose in constant Tgk: marches each
// pixel's ray (step 3*mu/4), interpolates the +/- zero crossing and takes the
// normal from the TSDF gradient; misses get the sentinel normal (0,0,2).
__global__ void raycast(float3* vertices, float3* normals,
                        int width, int height, size_t pitch,
                        int side, float units_per_voxel, float mu,
                        float mindistance, float maxdistance)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if(x >= width || y >= height)
        return;
    float3* current_vertex = (float3*)((char*)vertices + pitch*y) + x;
    float3* current_normal = (float3*)((char*)normals + pitch*y) + x;
    float3 ray = normalize(transform3(invK, make_float3(float(x), float(y), 1.f)));
    float3 tgk = make_float3(Tgk[3], Tgk[7], Tgk[11]);
    ray = transform3_affine(Tgk, ray) - tgk;
    *current_normal = make_float3(1.f, 1.f, 1.f);
    float step = 3.f*mu/4.f;
    float3 p = worldToGrid(tgk + mindistance * ray, side, units_per_voxel);
    float old_value = tex3D(F_texture, p.x+0.5f, p.y+0.5f, p.z+0.5f);
    for(float distance = mindistance; distance < maxdistance; distance += step)
    {
        p = worldToGrid(tgk + distance * ray, side, units_per_voxel);
        float value = tex3D(F_texture, p.x+0.5f, p.y+0.5f, p.z+0.5f);
        if(value < -2 || (old_value < 0 && value > 0))
            break;
        if(old_value >= 0 && value < 0)
        {
            // Zero crossing: interpolate the exact surface distance t.
            float t = distance - step - (step * old_value)/(value - old_value);
            *current_vertex = tgk + t * ray;
            float valuex = tex3D(F_texture, p.x-1+0.5f, p.y+0.5f, p.z+0.5f);
            float valuey = tex3D(F_texture, p.x+0.5f, p.y-1+0.5f, p.z+0.5f);
            float valuez = tex3D(F_texture, p.x+0.5f, p.y+0.5f, p.z-1+0.5f);
            *current_normal = normalize(make_float3(valuex-value, valuey-value, valuez-value));
            return;
        }
        old_value = value;
    }
    *current_vertex = make_float3(0.f, 0.f, 0.f);
    *current_normal = make_float3(0.f, 0.f, 2.f);
}
// Maps a world point through extrinsics T then intrinsics K; the result is a
// homogeneous pixel coordinate (divide by z to get the pixel).
__device__ float3 project(const float* K, const float* T, float3 point)
{
    return transform3(K, transform3_affine(T, point));
}
// Dehomogenizes a pixel coordinate, rounding to the nearest integer pixel.
__device__ int2 hom2cart(float3 point)
{
    return make_int2(roundf(point.x/point.z), roundf(point.y/point.z));
}
// Projective data association for ICP: project each measured vertex (via K,
// Tk_1k) into the raycast image and take that pixel's raycast vertex/normal
// as correspondence. Out-of-image, invalid (z == 0), or too-distant pairs
// get the sentinel normal (0,0,2).
__global__ void search_correspondences(float3* vertices_corresp, float3* normals_corresp,
                    const float3* vertices_measure, float3* normals_measure,
                    const float3* vertices_raycast, const float3* normals_raycast,
                    int width_measure, int height_measure,
                    int width_raycast, int height_raycast,
                    float threshold_distance)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int thid = width_measure*y + x;
    if(x >= width_measure || y >= height_measure)
        return;
    float3* current_vertex_corresp = &vertices_corresp[thid];
    float3* current_normal_corresp = &normals_corresp[thid];
    float3 vertex_measure = vertices_measure[thid];
    // Get the corresponding pixel in the raycast image.
    int2 u_raycast = hom2cart(project(K, Tk_1k, vertex_measure));
    if(u_raycast.x < 0 || u_raycast.y < 0 ||
        u_raycast.x >= width_raycast || u_raycast.y >= height_raycast)
    {
        *current_vertex_corresp = make_float3(0.f, 0.f, 0.f);
        *current_normal_corresp = make_float3(0.f, 0.f, 2.f);
        return;
    }
    int id_raycast = width_raycast*u_raycast.y + u_raycast.x;
    float3 v = transform3_affine(Tgk, vertex_measure);
    float3 vdiff = vertices_raycast[id_raycast] - v;
    float vertex_distance = length(vdiff);
    // Prune invalid matches.
    if(vertex_measure.z==0.f || vertex_distance > threshold_distance)
    {
        *current_vertex_corresp = make_float3(0.f, 0.f, 0.f);
        *current_normal_corresp = make_float3(0.f, 0.f, 2.f);
        return;
    }
    *current_vertex_corresp = vertices_raycast[id_raycast];
    *current_normal_corresp = normals_raycast[id_raycast];
    // For debug only.
    normals_measure[thid] = make_float3(1.f, 1.f, 1.f);
}
// Build the per-vertex point-to-plane ICP linearization. For each valid
// correspondence it writes the 21 upper-triangular entries of A^T*A (6x6,
// A's row is [v x n ; n]) into AA and the 6 entries of A^T*b into Ab.
// Vertices without a correspondence (sentinel normal z == 2) are skipped;
// the host pre-fills AA/Ab with zeros so skipped rows contribute nothing
// to the subsequent reduction (see Tracker::trackStep).
__global__ void compute_tracking_matrices(float* AA, float* Ab,
const float3* vertices_measure, const float3* normals_measure,
const float3* vertices_corresp, const float3* normals_corresp,
int numVertices)
{
// Flatten the 2-D grid into a linear thread id (grids > 65535 blocks fold into y).
int blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
int thid = __mul24(blockId, blockDim.x) + threadIdx.x;
//int thid = blockDim.x * blockIdx.x + threadIdx.x;
if(thid >= numVertices)
return;
// 21 floats of A^T*A and 6 floats of A^T*b per vertex.
float* current_AA = &AA[21*thid];
float* current_Ab = &Ab[6*thid];
// Sentinel normal (0,0,2) marks an invalid correspondence.
if(normals_corresp[thid].z == 2.f)
return;
// v: measured vertex in the global frame; n: destination surface normal.
float3 v = transform3_affine(Tgk, vertices_measure[thid]);
float3 n = normals_corresp[thid];
// b: signed point-to-plane distance along n.
float b = dot(vertices_corresp[thid] - v, n);
// Ab = b * [v x n ; n]
current_Ab[0] = b*(v.z*n.y - v.y*n.z);
current_Ab[1] = b*(-v.z*n.x + v.x*n.z);
current_Ab[2] = b*(v.y*n.x - v.x*n.y);
current_Ab[3] = b*n.x;
current_Ab[4] = b*n.y;
current_Ab[5] = b*n.z;
// AA = upper triangle of [v x n ; n] * [v x n ; n]^T, row by row (6+5+4+3+2+1).
current_AA[0] = (v.z*n.y - v.y*n.z)*(v.z*n.y - v.y*n.z);
current_AA[1] = (v.z*n.y - v.y*n.z)*(-v.z*n.x + v.x*n.z);
current_AA[2] = (v.z*n.y - v.y*n.z)*(v.y*n.x - v.x*n.y);
current_AA[3] = (v.z*n.y - v.y*n.z)*n.x;
current_AA[4] = (v.z*n.y - v.y*n.z)*n.y;
current_AA[5] = (v.z*n.y - v.y*n.z)*n.z;
current_AA[6] = (-v.z*n.x + v.x*n.z)*(-v.z*n.x + v.x*n.z);
current_AA[7] = (-v.z*n.x + v.x*n.z)*(v.y*n.x - v.x*n.y);
current_AA[8] = (-v.z*n.x + v.x*n.z)*n.x;
current_AA[9] = (-v.z*n.x + v.x*n.z)*n.y;
current_AA[10] = (-v.z*n.x + v.x*n.z)*n.z;
current_AA[11] = (v.y*n.x - v.x*n.y)*(v.y*n.x - v.x*n.y);
current_AA[12] = (v.y*n.x - v.x*n.y)*n.x;
current_AA[13] = (v.y*n.x - v.x*n.y)*n.y;
current_AA[14] = (v.y*n.x - v.x*n.y)*n.z;
current_AA[15] = n.x*n.x;
current_AA[16] = n.x*n.y;
current_AA[17] = n.x*n.z;
current_AA[18] = n.y*n.y;
current_AA[19] = n.y*n.z;
current_AA[20] = n.z*n.z;
}
/// Transform Kinect depth measurements to milimeters.
/// Transform Kinect depth measurements to milimeters.
/// Functor usable from both host and device (fed to thrust::transform).
/// Raw value 2047 is the sensor's "no measurement" marker and maps to 0.
struct transform_depth
{
__host__ __device__
float operator()(uint16_t raw)
{
const bool invalid = (raw == 2047);
if(invalid)
return 0.f;
// Disparity-to-depth model: depth_mm = 1000 / (a*raw + b).
const float denominator = raw * -0.0030711016f + 3.3309495161f;
return 1000.f / denominator;
}
};
// Upload one raw depth frame and refresh the measurement pyramid.
// depth: host buffer of mNumVertices raw Kinect disparity values.
void Measurement::setDepth(uint16_t* depth)
{
// Convert raw depth to milimeters.
cudaSafeCall(cudaMemcpy(mRawDepthGpu, depth, sizeof(uint16_t)*mNumVertices,
cudaMemcpyHostToDevice));
// Element-wise disparity -> millimeter conversion on the GPU (transform_depth functor).
thrust::transform(thrust::device_ptr<uint16_t>(mRawDepthGpu),
thrust::device_ptr<uint16_t>(mRawDepthGpu + mNumVertices),
thrust::device_ptr<float>(mDepthGpu),
transform_depth());
// Generate the pyramid of depth maps and vertices/normals.
// NOTE(review): pyramid depth of 3 is hard-coded — confirm it matches mPyramid's size.
for(int i=0; i<3; ++i)
mPyramid[i]->update();
}
// Rebuild this pyramid level: bind the parent level's depth to a texture,
// smooth (same-resolution parent) or downsample it, then recompute vertices
// and normals into the OpenGL-shared buffers.
void PyramidMeasurement::update()
{
const float* previousDepthGpu;
int previousWidth, previousHeight;
// Get info from the parent.
// mParent: same-resolution source (smoothing path); mParent2: presumably the
// finer level to be downsampled — TODO confirm the ownership model.
if(mParent != 0)
{
previousDepthGpu = mParent->getDepthGpu();
previousWidth = mParent->getWidth();
previousHeight = mParent->getHeight();
}
else
{
previousDepthGpu = mParent2->getDepthGpu();
previousWidth = mParent2->getWidth();
previousHeight = mParent2->getHeight();
}
// Bind the depth into a texture for fast access.
// NOTE(review): the texture attributes below are set AFTER the bind call;
// texture reference attributes are normally configured before binding — confirm.
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaSafeCall(cudaBindTexture2D(0, &depth_texture, previousDepthGpu, &channelDesc,
previousWidth, previousHeight, previousWidth*sizeof(float)));
depth_texture.normalized = false;
depth_texture.filterMode = cudaFilterModePoint;
depth_texture.addressMode[0] = cudaAddressModeBorder;
depth_texture.addressMode[1] = cudaAddressModeBorder;
// Smooth or resize the image down.
dim3 grid, block(16,16,1);
grid.x = (mWidth-1)/block.x + 1;
grid.y = (mHeight-1)/block.y + 1;
if(mParent != 0)
compute_smooth_depth<<<grid,block>>>(mDepthGpu, previousWidth, previousHeight,
mWidth*sizeof(float), 2.f, 20.f);
else
pyrdownSmoothDepth<<<grid,block>>>(mDepthGpu, mWidth, mHeight);
// Bind the new reduced/smooth depth into a texture for fast access.
cudaSafeCall(cudaBindTexture2D(0, &depth_texture, mDepthGpu, &channelDesc,
mWidth, mHeight, mWidth*sizeof(float)));
// Determine vertices and normals from the depth map.
// invK (constant memory) holds this level's 3x3 inverse intrinsics.
cudaSafeCall(cudaMemcpyToSymbol(invK, mKInv, sizeof(float)*9));
float3* vertices;
float3* normals;
// Map the GL vertex/normal buffers so the kernel can write into them directly.
cudaGLMapBufferObject((void**)&vertices, mVertexBuffer);
cudaGLMapBufferObject((void**)&normals, mNormalBuffer);
measure<<<grid,block>>>(vertices, normals, mMaskGpu,
mWidth, mHeight, mWidth*3*sizeof(float));
cudaSafeCall(cudaGetLastError());
cudaGLUnmapBufferObject(mVertexBuffer);
cudaGLUnmapBufferObject(mNormalBuffer);
}
// Cache the world-space axis-aligned bounding box of the voxel grid in
// mBoundingBox as [min.x, min.y, min.z, max.x, max.y, max.z].
void VolumeFusion::initBoundingBox()
{
// World coordinates of the two extreme grid corners.
float3 corner_min = gridToWorld(make_float3(0,0,0), mSide, mUnitsPerVoxel);
float3 corner_max = gridToWorld(make_float3(mSide,mSide,mSide), mSide, mUnitsPerVoxel);
mBoundingBox[0] = corner_min.x;
mBoundingBox[1] = corner_min.y;
mBoundingBox[2] = corner_min.z;
mBoundingBox[3] = corner_max.x;
mBoundingBox[4] = corner_max.y;
mBoundingBox[5] = corner_max.z;
}
// Copy the TSDF volume into mFArray and bind it to the given 3-D texture
// reference with trilinear filtering and border addressing.
// NOTE(review): cudaMemcpy3D takes a const cudaMemcpy3DParms* — mCopyParams
// is presumably already a pointer member; confirm its declaration.
template<typename texture>
void VolumeFusion::bindTextureToF(texture& tex) const
{
cudaSafeCall(cudaMemcpy3D(mCopyParams));
// Linear filtering gives smooth trilinear TSDF samples during raycasting.
tex.normalized = false;
tex.filterMode = cudaFilterModeLinear;
tex.addressMode[0] = cudaAddressModeBorder;
tex.addressMode[1] = cudaAddressModeBorder;
tex.addressMode[2] = cudaAddressModeBorder;
static const cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaSafeCall(cudaBindTextureToArray(tex, mFArray, channelDesc));
}
// Fuse one depth measurement into the TSDF volume using pose T (16 floats).
// The volume is processed in z-slabs of block.z voxels; the slice offset i
// is passed to the kernel because grid.z is fixed at 1.
void VolumeFusion::update(const Measurement& measurement, const float* T)
{
dim3 block(8,8,8);
dim3 grid;
grid.x = grid.y = mSide/block.x;
grid.z = 1;
// Set instrinsic and extrinsics in constant memory.
const float* kdepth = measurement.getK();
const float* kdepthinv = measurement.getKInverse();
cudaSafeCall(cudaMemcpyToSymbol(K, kdepth, sizeof(float)*9));
cudaSafeCall(cudaMemcpyToSymbol(invK, kdepthinv, sizeof(float)*9));
cudaSafeCall(cudaMemcpyToSymbol(Tgk, T, sizeof(float)*16));
// Bind the depth map into the depth_texture.
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaSafeCall(cudaBindTexture2D(0, &depth_texture, measurement.getDepthGpu(), &channelDesc,
measurement.getWidth(), measurement.getHeight(),
measurement.getWidth()*sizeof(float)));
// Update the volume.
// 30.f is presumably the TSDF truncation distance in world units — confirm.
for(int i=0; i<mSide; i+=block.z)
update_reconstruction<<<grid,block>>>(mFGpu, mWGpu, mSide, mUnitsPerVoxel, 30.f, i);
cudaSafeCall(cudaGetLastError());
}
void VolumeMeasurement::measure(const VolumeFusion& volume, const float* T)
{
volume.bindTextureToF(F_texture);
float position[3];
position[0] = T[3]; position[1] = T[7]; position[2] = T[11];
float mindistance = volume.getMinimumDistanceTo(position);
float maxdistance = volume.getMaximumDistanceTo(position);
cudaSafeCall(cudaMemcpyToSymbol(invK, mKdepth->getInverse(), sizeof(float)*9));
cudaSafeCall(cudaMemcpyToSymbol(Tgk, T, sizeof(float)*16));
float3* vertices;
float3* normals;
cudaGLMapBufferObject((void**)&vertices, mVertexBuffer);
cudaGLMapBufferObject((void**)&normals, mNormalBuffer);
dim3 grid, block(16,16,1);
grid.x = (mWidth-1)/block.x + 1;
grid.y = (mHeight-1)/block.y + 1;
raycast<<<grid,block>>>(vertices, normals, mWidth, mHeight, mWidth*3*sizeof(float),
volume.getSide(), volume.getUnitsPerVoxel(), 30.f,
mindistance, maxdistance);
cudaSafeCall(cudaGetLastError());
cudaGLUnmapBufferObject(mVertexBuffer);
cudaGLUnmapBufferObject(mNormalBuffer);
// Preserve the current T.
std::copy(T, T+16, mT);
}
// Fixed-size float vector of T components, usable on host and device.
// Serves as a POD accumulator type for the thrust::reduce calls below.
template<int T>
struct floatN
{
float a[T];
// Zero-initialize every component.
__host__ __device__
floatN()
{
int k = T;
while(k--)
a[k] = 0.f;
}
// Fill every component with v.
__host__ __device__
floatN(float v)
{
int k = T;
while(k--)
a[k] = v;
}
};
// Component-wise sum of two 21-element accumulators (used by thrust::reduce).
__device__
floatN<21> operator+(const floatN<21>& a, const floatN<21>& b)
{
floatN<21> out;
for(int k=0; k<21; ++k)
{
out.a[k] = a.a[k] + b.a[k];
}
return out;
}
// Component-wise sum of two 6-element accumulators (used by thrust::reduce).
__device__
floatN<6> operator+(const floatN<6>& a, const floatN<6>& b)
{
floatN<6> out;
for(int k=0; k<6; ++k)
{
out.a[k] = a.a[k] + b.a[k];
}
return out;
}
// Launch the projective data-association kernel over the measurement image.
// currentT -> Tgk, current2InitT -> Tk_1k, K_ -> K (all constant memory).
// The 100.f literal is the correspondence distance threshold passed to the
// kernel (same units as the vertices — presumably millimeters; confirm).
void Tracker::searchCorrespondences(float3* vertexCorresp, float3* normalsCorresp,
const float* K_,
const float* currentT, const float* current2InitT,
const float3* verticesMeasure, const float3* normalsMeasure,
const float3* verticesRaycast, const float3* normalsRaycast,
int widthMeasure, int heightMeasure, int widthRaycast, int heightRaycast)
{
// Copy intrinsic and extrinsic matrices to constant memory.
cudaSafeCall(cudaMemcpyToSymbol(Tgk, currentT, sizeof(float)*16));
cudaSafeCall(cudaMemcpyToSymbol(Tk_1k, current2InitT, sizeof(float)*16));
cudaSafeCall(cudaMemcpyToSymbol(K, K_, sizeof(float)*9));
// One thread per measurement pixel; ceil-divide so the whole image is covered.
dim3 block(16,16,1), grid;
grid.x = (widthMeasure - 1)/block.x + 1;
grid.y = (heightMeasure - 1)/block.y + 1;
// Search the correspondences between device measurements and volume measurements.
search_correspondences<<<grid,block>>>(vertexCorresp, normalsCorresp,
verticesMeasure, (float3*)normalsMeasure, verticesRaycast, normalsRaycast,
widthMeasure, heightMeasure, widthRaycast, heightRaycast, 100.f);
cudaSafeCall(cudaGetLastError());
}
// One ICP normal-equation step: compute the per-vertex linearization with
// compute_tracking_matrices, then reduce it to the 21 upper-triangular
// entries of A^T*A and the 6 entries of A^T*b.
// AA/Ab: host output buffers (21 and 6 floats).
// currentT: current 4x4 pose (16 floats), copied into constant memory Tgk.
void Tracker::trackStep(float* AA, float* Ab, const float* currentT,
const float3* verticesMeasure, const float3* normalsMeasure,
const float3* verticesCorresp, const float3* normalsCorresp,
int numVertices)
{
// Set the result matrices to 0 so vertices the kernel skips (invalid
// correspondences or out-of-range ids) contribute nothing to the reduction.
thrust::fill(thrust::device_ptr<float>(mAAGpu),
thrust::device_ptr<float>(mAAGpu + 21*numVertices), 0.f);
thrust::fill(thrust::device_ptr<float>(mAbGpu),
thrust::device_ptr<float>(mAbGpu + 6*numVertices), 0.f);
// Determine the grid and block sizes.
dim3 block(32,1,1), grid;
grid.x = (numVertices - 1)/block.x + 1;
// Fold an oversized 1-D grid into 2-D. Halving must round UP: with the old
// truncating grid.x /= 2, an odd grid.x dropped one block per fold, leaving
// up to block.x*grid.y vertices unprocessed (silently zero in the result).
while(grid.x > 65535)
{
grid.x = (grid.x + 1)/2;
grid.y *= 2;
}
// Copy the extrinsic matrix to the constant memory.
cudaSafeCall(cudaMemcpyToSymbol(Tgk, currentT, sizeof(float)*16));
// Compute the per-vertex matrices.
compute_tracking_matrices<<<grid,block>>>(mAAGpu, mAbGpu,
verticesMeasure, normalsMeasure,
verticesCorresp, normalsCorresp,
numVertices);
cudaSafeCall(cudaGetLastError());
// Sum AA and Ab over all vertices with a parallel reduction.
floatN<21> _AA = thrust::reduce(thrust::device_ptr<floatN<21> >((floatN<21>*)mAAGpu),
thrust::device_ptr<floatN<21> >(((floatN<21>*)mAAGpu) + numVertices),
floatN<21>(0.f), thrust::plus<floatN<21> >());
floatN<6> _Ab = thrust::reduce(thrust::device_ptr<floatN<6> >((floatN<6>*)mAbGpu),
thrust::device_ptr<floatN<6> >(((floatN<6>*)mAbGpu) + numVertices),
floatN<6>(0.f), thrust::plus<floatN<6> >());
std::copy(_AA.a, _AA.a+21, AA);
std::copy(_Ab.a, _Ab.a+6, Ab);
}
|
10d1c06104141278158baead2bd52587edf5364d.hip | // !!! This is a file automatically generated by hipify!!!
/*_________________________________________________________________________
* ww2parCC_python_066DP.cu
*
* Python wrapping to manage data passing and call the CUDA kernel code.
* Pass the input data from python to C environment;
* Copy input data from host to device;
* Set parallel computation;
* Call device code;
* Copy output data back from device to host;
* Pass the output data from C back to Matlab environment.
*
* DUWIND- Delft University Wind Energy Research Institute
* Developer: Artur Palha
* Giuseppe Tescione
*
* Version: 0.8DP (alpha) - 20130508
* Gaussian kernel of order 1
* double precision (for GPUs of computing capability 2.x)
*________________________________________________________________________*/
#include "Python.h"
#include "arrayobject.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math.h> //C standard basic mathematical operations
#include <cstdlib> //C standard general utilities library
#include <stdio.h>
#include <string.h> //C standard constant and function declaration library
/*--------------------- CUDA Kernel Code - Device -----------------------*/
#include "ww2par_device.cu"
/*--------------- Python callable function defintions -------------------*/
extern "C" static PyObject *ww2par_gpu(PyObject *self, PyObject *args);
extern "C" void init_ww2par_gpu();
/*---------------- C utility function definitions ---------------------*/
/*---------------- cuda utility function definitions --------------------*/
__global__ void ww2par_kernel(void *cxBlob_gpu_ondevice, void *cyBlob_gpu_ondevice, void *cwBlob_gpu_ondevice, void *cxTarget_gpu_ondevice, void *cyTarget_gpu_ondevice,void *cvorticity_gpu_ondevice);
__device__ double ww2par_thread(double THR_vorticity, double THR_xTarget, double THR_yTarget, double THR_xBlob, double THR_yBlob, double THR_wBlob);
__device__ double ww2par_block(double BLK_xTarget, double BLK_yTarget, double BLK_vorticity);
// define the doctrings for the functions so that help is available in Python
static char ww2par_gpu_docs[] =
"ww2parGauss_gpu computes induced vorticity of particle distribution for a\n"
"Gaussian kernel using the GPU.\n"
"\n"
"Usage\n"
"-----\n"
" ww2parGauss_gpu(xBlob,yBlob,wBlob,xTarget,yTarget,sigmasqr,blocksize)\n"
"\n"
"Parameters\n"
"----------\n"
" xBlob :: the x coordinates of the vortex blobs (nBlobs=N*blocksize)\n"
" ----- (type: numpy.ndarray (float64); shape: (nBlobs,))\n"
"\n"
" yBlob :: the y coordinates of the vortex blobs (nBlobs=N*blocksize)\n"
" ----- (type: numpy.ndarray (float64); shape: (nBlobs,))\n"
"\n"
" wBlob :: the circulations associated to each of the vortex blobs\n"
" ----- (nBlobs=N*blocksize)\n"
" (type: numpy.ndarray (float64); shape: (nBlobs,))\n"
"\n"
" xTarget :: the x coordinates of the target points where to compute\n"
" ------- velocity\n"
" (type: numpy.ndarray (float64); shape: (nTargets,))\n"
"\n"
" yTarget :: the y coordinates of the target points where to compute\n"
" ------- velocity\n"
" (type: numpy.ndarray (float64); shape: (nTargets,))\n"
"\n"
" sigmasqr :: the square of the core size of all the vortex blobs\n"
" --------- (type: float (64bits); shape: single value)\n"
"\n"
" blocksize :: the blocksize of the memory used in the gpu\n"
" --------- (type: int (any); shape: single value)\n"
"\n"
"Returns\n"
"-------\n"
" vorticity :: the induced velocities in each of the (xTarget,yTarget)\n"
" points\n"
" (type: numpy.ndarray; shape: (nEval,2))\n"
"\n"
"First added: 2013-06-24\n"
"\n"
"Copyright (C) 2013 Artur Palha\n"
" pHyFlow";
/*------------------- Set up the methods table for Python ---------------*/
/*------------------- Set up the methods table for Python ---------------*/
// Maps the Python-visible name "ww2par_gpu" to the C implementation below.
static PyMethodDef _ww2par_gpu[] = {
{"ww2par_gpu", ww2par_gpu, METH_VARARGS,ww2par_gpu_docs},
{NULL} /*Sentinel - marks the end of this Python structure*/
};
/*--------------Initialize the ww2par functions -------------------------*/
// Module init entry point. Py_InitModule is the Python 2 C API; this module
// will not load under Python 3 without porting to PyModule_Create.
void init_ww2par_gpu(){
(void) Py_InitModule("_ww2par_gpu", _ww2par_gpu);
import_array(); /* Must be present for Numpy */
}
/*-------------------- Python wrapping function - HOST -----------------------*/
/*-------------------- Python wrapping function - HOST -----------------------*/
static PyObject *ww2par_gpu(PyObject *self, PyObject *args){
// Python wrapping function, pass data between Python and C and call CUDA Host Code.
// Parses (xBlob, yBlob, wBlob, xTarget, yTarget, sigmasqr, blocksize),
// uploads the arrays, launches ww2par_kernel and returns a 1-D float64
// array of induced vorticity at each target.
// NOTE(review): no error checking on any hip* call — failures are silent.
// declare variables
PyArrayObject *xBlob;
PyArrayObject *yBlob;
PyArrayObject *wBlob;
PyArrayObject *xTarget;
PyArrayObject *yTarget;
PyArrayObject *w;
double *cxBlob,*cyBlob,*cwBlob,*cxTarget,*cyTarget, *cw;
void *cxBlob_gpu, *cyBlob_gpu, *cwBlob_gpu, *cxTarget_gpu, *cyTarget_gpu, *cw_gpu;
double myeps = 1e-12;
double sigmasqr;
double inv_pi = 1.0/3.141592653589793;
int blocksize, nParticles, nTargets, nTargetBlocks, nParticleBlocks;
int dims[2];
/* Parse tuples separately since args will differ between C functions*/
if(!PyArg_ParseTuple(args, "O!O!O!O!O!di", &PyArray_Type, &xBlob, &PyArray_Type, &yBlob, &PyArray_Type, &wBlob, &PyArray_Type, &xTarget, &PyArray_Type, &yTarget, &sigmasqr, &blocksize)) return NULL;
if(NULL == xBlob) return NULL; // if something went wrong and xBlob matrix is null, exit
if(NULL == yBlob) return NULL; // if something went wrong and yBlob matrix is null, exit
if(NULL == wBlob) return NULL; // if something went wrong and wBlob matrix is null, exit
if(NULL == xTarget) return NULL; // if something went wrong and xTarget matrix is null, exit
if(NULL == yTarget) return NULL; // if something went wrong and yTarget matrix is null, exit
// get the number of particles
// NOTE(review): integer division — nParticles and nTargets are assumed to be
// exact multiples of blocksize (as stated in the docstring); remainder
// elements would not be covered by the grid below. Confirm callers enforce this.
nParticles = xBlob->dimensions[0];
nTargets = dims[0] = xTarget->dimensions[0];
nTargetBlocks = nTargets/blocksize;
nParticleBlocks = nParticles/blocksize;
dims[1] = 1; // single vorticity component per target (output is 1-D)
// allocate memory space for the induced vorticity array
// NOTE(review): PyArray_FromDims is a deprecated NumPy API (PyArray_SimpleNew
// is the modern equivalent).
w = (PyArrayObject *) PyArray_FromDims(1,dims,NPY_DOUBLE);
// pointers to arrays of double precision values of Python matrices
cxBlob = (double*)xBlob->data;
cyBlob = (double*)yBlob->data;
cwBlob = (double*)wBlob->data;
cxTarget = (double*)xTarget->data;
cyTarget = (double*)yTarget->data;
cw = (double*)w->data;
// Copy constants to Constant memory of device. (CUDA syntax)
hipMemcpyToSymbol(sigmasqr_gpu, &sigmasqr, sizeof(sigmasqr), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(inv_pi_gpu, &inv_pi, sizeof(inv_pi), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(blocksize_gpu, &blocksize, sizeof(blocksize), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(nParticles_gpu, &nParticles, sizeof(nParticles), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(nTargets_gpu, &nTargets, sizeof(nTargets), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(nParticleBlocks_gpu, &nParticleBlocks, sizeof(nParticleBlocks), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(nTargetBlocks_gpu, &nTargetBlocks, sizeof(nTargetBlocks), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(myeps_gpu, &myeps, sizeof(myeps), 0, hipMemcpyHostToDevice);
// Allocate space on device for the input (cxBlob_gpu,cyBlob_gpu,cwBlob_gpu,cxTarget_gpu,cyTarget_gpu,) and output (cvelocities_gpu) arrays.
size_t SIZE_POS = sizeof(double) * nParticles; //double
size_t SIZE_IND = sizeof(double) * nTargets; //double
hipMalloc(&cxBlob_gpu, SIZE_POS);
hipMalloc(&cyBlob_gpu, SIZE_POS);
hipMalloc(&cwBlob_gpu, SIZE_POS);
hipMalloc(&cxTarget_gpu, SIZE_IND);
hipMalloc(&cyTarget_gpu, SIZE_IND);
hipMalloc(&cw_gpu, SIZE_IND);
// Copy input array from host to device memory.
hipMemcpy(cxBlob_gpu, cxBlob, SIZE_POS, hipMemcpyHostToDevice);
hipMemcpy(cyBlob_gpu, cyBlob, SIZE_POS, hipMemcpyHostToDevice);
hipMemcpy(cwBlob_gpu, cwBlob, SIZE_POS, hipMemcpyHostToDevice);
hipMemcpy(cxTarget_gpu, cxTarget, SIZE_IND, hipMemcpyHostToDevice);
hipMemcpy(cyTarget_gpu, cyTarget, SIZE_IND, hipMemcpyHostToDevice);
// RUN KERNELL
// Shared memory holds one tile of (x, y, w) per particle block.
size_t Sharedmemsize = sizeof(double) * 3 * blocksize;
dim3 threads(blocksize, 1, 1);
dim3 grid(nTargets/blocksize, 1, 1);
hipLaunchKernelGGL(( ww2par_kernel) , dim3(grid), dim3(threads), Sharedmemsize, 0, cxBlob_gpu, cyBlob_gpu, cwBlob_gpu, cxTarget_gpu, cyTarget_gpu, cw_gpu);
// Copy induction array from device to host memory (implicitly synchronizes).
hipMemcpy(cw, cw_gpu, SIZE_IND, hipMemcpyDeviceToHost);
// Free memory space of particle position and induction arrays
hipFree(cxBlob_gpu);
hipFree(cyBlob_gpu);
hipFree(cwBlob_gpu);
hipFree(cxTarget_gpu);
hipFree(cyTarget_gpu);
hipFree(cw_gpu);
// return the Python array with induced vorticities
return PyArray_Return(w);
}
| 10d1c06104141278158baead2bd52587edf5364d.cu | /*_________________________________________________________________________
* ww2parCC_python_066DP.cu
*
* Python wrapping to manage data passing and call the CUDA kernel code.
* Pass the input data from python to C environment;
* Copy input data from host to device;
* Set parallel computation;
* Call device code;
* Copy output data back from device to host;
* Pass the output data from C back to Matlab environment.
*
* DUWIND- Delft University Wind Energy Research Institute
* Developer: Artur Palha
* Giuseppe Tescione
*
* Version: 0.8DP (alpha) - 20130508
* Gaussian kernel of order 1
* double precision (for GPUs of computing capability 2.x)
*________________________________________________________________________*/
#include "Python.h"
#include "arrayobject.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h> //C standard basic mathematical operations
#include <cstdlib> //C standard general utilities library
#include <stdio.h>
#include <string.h> //C standard constant and function declaration library
/*--------------------- CUDA Kernel Code - Device -----------------------*/
#include "ww2par_device.cu"
/*--------------- Python callable function defintions -------------------*/
extern "C" static PyObject *ww2par_gpu(PyObject *self, PyObject *args);
extern "C" void init_ww2par_gpu();
/*---------------- C utility function definitions ---------------------*/
/*---------------- cuda utility function definitions --------------------*/
__global__ void ww2par_kernel(void *cxBlob_gpu_ondevice, void *cyBlob_gpu_ondevice, void *cwBlob_gpu_ondevice, void *cxTarget_gpu_ondevice, void *cyTarget_gpu_ondevice,void *cvorticity_gpu_ondevice);
__device__ double ww2par_thread(double THR_vorticity, double THR_xTarget, double THR_yTarget, double THR_xBlob, double THR_yBlob, double THR_wBlob);
__device__ double ww2par_block(double BLK_xTarget, double BLK_yTarget, double BLK_vorticity);
// define the doctrings for the functions so that help is available in Python
static char ww2par_gpu_docs[] =
"ww2parGauss_gpu computes induced vorticity of particle distribution for a\n"
"Gaussian kernel using the GPU.\n"
"\n"
"Usage\n"
"-----\n"
" ww2parGauss_gpu(xBlob,yBlob,wBlob,xTarget,yTarget,sigmasqr,blocksize)\n"
"\n"
"Parameters\n"
"----------\n"
" xBlob :: the x coordinates of the vortex blobs (nBlobs=N*blocksize)\n"
" ----- (type: numpy.ndarray (float64); shape: (nBlobs,))\n"
"\n"
" yBlob :: the y coordinates of the vortex blobs (nBlobs=N*blocksize)\n"
" ----- (type: numpy.ndarray (float64); shape: (nBlobs,))\n"
"\n"
" wBlob :: the circulations associated to each of the vortex blobs\n"
" ----- (nBlobs=N*blocksize)\n"
" (type: numpy.ndarray (float64); shape: (nBlobs,))\n"
"\n"
" xTarget :: the x coordinates of the target points where to compute\n"
" ------- velocity\n"
" (type: numpy.ndarray (float64); shape: (nTargets,))\n"
"\n"
" yTarget :: the y coordinates of the target points where to compute\n"
" ------- velocity\n"
" (type: numpy.ndarray (float64); shape: (nTargets,))\n"
"\n"
" sigmasqr :: the square of the core size of all the vortex blobs\n"
" --------- (type: float (64bits); shape: single value)\n"
"\n"
" blocksize :: the blocksize of the memory used in the gpu\n"
" --------- (type: int (any); shape: single value)\n"
"\n"
"Returns\n"
"-------\n"
" vorticity :: the induced velocities in each of the (xTarget,yTarget)\n"
" points\n"
" (type: numpy.ndarray; shape: (nEval,2))\n"
"\n"
"First added: 2013-06-24\n"
"\n"
"Copyright (C) 2013 Artur Palha\n"
" pHyFlow";
/*------------------- Set up the methods table for Python ---------------*/
/*------------------- Set up the methods table for Python ---------------*/
// Maps the Python-visible name "ww2par_gpu" to the C implementation below.
static PyMethodDef _ww2par_gpu[] = {
{"ww2par_gpu", ww2par_gpu, METH_VARARGS,ww2par_gpu_docs},
{NULL} /*Sentinel - marks the end of this Python structure*/
};
/*--------------Initialize the ww2par functions -------------------------*/
// Module init entry point. Py_InitModule is the Python 2 C API; this module
// will not load under Python 3 without porting to PyModule_Create.
void init_ww2par_gpu(){
(void) Py_InitModule("_ww2par_gpu", _ww2par_gpu);
import_array(); /* Must be present for Numpy */
}
/*-------------------- Python wrapping function - HOST -----------------------*/
/*-------------------- Python wrapping function - HOST -----------------------*/
static PyObject *ww2par_gpu(PyObject *self, PyObject *args){
// Python wrapping function, pass data between Python and C and call CUDA Host Code.
// Parses (xBlob, yBlob, wBlob, xTarget, yTarget, sigmasqr, blocksize),
// uploads the arrays, launches ww2par_kernel and returns a 1-D float64
// array of induced vorticity at each target.
// NOTE(review): no error checking on any cuda* call — failures are silent.
// declare variables
PyArrayObject *xBlob;
PyArrayObject *yBlob;
PyArrayObject *wBlob;
PyArrayObject *xTarget;
PyArrayObject *yTarget;
PyArrayObject *w;
double *cxBlob,*cyBlob,*cwBlob,*cxTarget,*cyTarget, *cw;
void *cxBlob_gpu, *cyBlob_gpu, *cwBlob_gpu, *cxTarget_gpu, *cyTarget_gpu, *cw_gpu;
double myeps = 1e-12;
double sigmasqr;
double inv_pi = 1.0/3.141592653589793;
int blocksize, nParticles, nTargets, nTargetBlocks, nParticleBlocks;
int dims[2];
/* Parse tuples separately since args will differ between C functions*/
if(!PyArg_ParseTuple(args, "O!O!O!O!O!di", &PyArray_Type, &xBlob, &PyArray_Type, &yBlob, &PyArray_Type, &wBlob, &PyArray_Type, &xTarget, &PyArray_Type, &yTarget, &sigmasqr, &blocksize)) return NULL;
if(NULL == xBlob) return NULL; // if something went wrong and xBlob matrix is null, exit
if(NULL == yBlob) return NULL; // if something went wrong and yBlob matrix is null, exit
if(NULL == wBlob) return NULL; // if something went wrong and wBlob matrix is null, exit
if(NULL == xTarget) return NULL; // if something went wrong and xTarget matrix is null, exit
if(NULL == yTarget) return NULL; // if something went wrong and yTarget matrix is null, exit
// get the number of particles
// NOTE(review): integer division — nParticles and nTargets are assumed to be
// exact multiples of blocksize (as stated in the docstring); remainder
// elements would not be covered by the grid below. Confirm callers enforce this.
nParticles = xBlob->dimensions[0];
nTargets = dims[0] = xTarget->dimensions[0];
nTargetBlocks = nTargets/blocksize;
nParticleBlocks = nParticles/blocksize;
dims[1] = 1; // single vorticity component per target (output is 1-D)
// allocate memory space for the induced vorticity array
// NOTE(review): PyArray_FromDims is a deprecated NumPy API (PyArray_SimpleNew
// is the modern equivalent).
w = (PyArrayObject *) PyArray_FromDims(1,dims,NPY_DOUBLE);
// pointers to arrays of double precision values of Python matrices
cxBlob = (double*)xBlob->data;
cyBlob = (double*)yBlob->data;
cwBlob = (double*)wBlob->data;
cxTarget = (double*)xTarget->data;
cyTarget = (double*)yTarget->data;
cw = (double*)w->data;
// Copy constants to Constant memory of device. (CUDA syntax)
cudaMemcpyToSymbol(sigmasqr_gpu, &sigmasqr, sizeof(sigmasqr), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(inv_pi_gpu, &inv_pi, sizeof(inv_pi), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(blocksize_gpu, &blocksize, sizeof(blocksize), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(nParticles_gpu, &nParticles, sizeof(nParticles), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(nTargets_gpu, &nTargets, sizeof(nTargets), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(nParticleBlocks_gpu, &nParticleBlocks, sizeof(nParticleBlocks), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(nTargetBlocks_gpu, &nTargetBlocks, sizeof(nTargetBlocks), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(myeps_gpu, &myeps, sizeof(myeps), 0, cudaMemcpyHostToDevice);
// Allocate space on device for the input (cxBlob_gpu,cyBlob_gpu,cwBlob_gpu,cxTarget_gpu,cyTarget_gpu,) and output (cvelocities_gpu) arrays.
size_t SIZE_POS = sizeof(double) * nParticles; //double
size_t SIZE_IND = sizeof(double) * nTargets; //double
cudaMalloc(&cxBlob_gpu, SIZE_POS);
cudaMalloc(&cyBlob_gpu, SIZE_POS);
cudaMalloc(&cwBlob_gpu, SIZE_POS);
cudaMalloc(&cxTarget_gpu, SIZE_IND);
cudaMalloc(&cyTarget_gpu, SIZE_IND);
cudaMalloc(&cw_gpu, SIZE_IND);
// Copy input array from host to device memory.
cudaMemcpy(cxBlob_gpu, cxBlob, SIZE_POS, cudaMemcpyHostToDevice);
cudaMemcpy(cyBlob_gpu, cyBlob, SIZE_POS, cudaMemcpyHostToDevice);
cudaMemcpy(cwBlob_gpu, cwBlob, SIZE_POS, cudaMemcpyHostToDevice);
cudaMemcpy(cxTarget_gpu, cxTarget, SIZE_IND, cudaMemcpyHostToDevice);
cudaMemcpy(cyTarget_gpu, cyTarget, SIZE_IND, cudaMemcpyHostToDevice);
// RUN KERNELL
// Shared memory holds one tile of (x, y, w) per particle block.
size_t Sharedmemsize = sizeof(double) * 3 * blocksize;
dim3 threads(blocksize, 1, 1);
dim3 grid(nTargets/blocksize, 1, 1);
ww2par_kernel <<<grid, threads, Sharedmemsize>>> (cxBlob_gpu, cyBlob_gpu, cwBlob_gpu, cxTarget_gpu, cyTarget_gpu, cw_gpu);
// Copy induction array from device to host memory (implicitly synchronizes).
cudaMemcpy(cw, cw_gpu, SIZE_IND, cudaMemcpyDeviceToHost);
// Free memory space of particle position and induction arrays
cudaFree(cxBlob_gpu);
cudaFree(cyBlob_gpu);
cudaFree(cwBlob_gpu);
cudaFree(cxTarget_gpu);
cudaFree(cyTarget_gpu);
cudaFree(cw_gpu);
// return the Python array with induced vorticities
return PyArray_Return(w);
}
|
d77d54f285e6f2aec7caaf8059589e37a67dc280.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include "type_shim.h"
// Largest power of two strictly less than n (returns 0 for n == 1).
// Requires n > 0; __clz counts leading zeros of the 32-bit value.
__device__ __forceinline__ int lastpow2(int n)
{
int p = 1 << (31 - __clz(n));
return (p == n) ? (p >> 1) : p;
}
// Smallest power of two >= n (host-side helper). n == 0 yields 0.
__host__ __forceinline__ int h_next_pow2(unsigned int n) {
unsigned int v = n - 1;
// Smear the highest set bit into every lower position, then add one.
for (int shift = 1; shift <= 16; shift <<= 1)
v |= (v >> shift);
return v + 1;
}
// Largest power of two <= n (host-side helper). n == 0 yields 0.
__host__ __forceinline__ int h_last_pow2(unsigned int n) {
unsigned int v = n;
// Smear the highest set bit downward, then strip everything below it.
for (int shift = 1; shift <= 16; shift <<= 1)
v |= (v >> shift);
return v - (v >> 1);
}
#define WARP_SIZE 32
template<typename T>
__device__ __forceinline__ T warp_reduce_sum(T val)
{
#pragma unroll
for(int i = WARP_SIZE/2; i > 0; i >>= 1)
val = val + __shfl_down_sync(0xffffffff, val, i);
return val;
}
template<typename T>
__device__ __forceinline__ T reduce_block(T *x, T val)
{
int tid = threadIdx.y*blockDim.x + threadIdx.x;
int blockSize = blockDim.x * blockDim.y;
if (blockSize > 32) {
val = warp_reduce_sum(val);
if (tid % WARP_SIZE == 0)
x[tid/WARP_SIZE] = val;
__syncthreads();
val = (tid < blockSize / WARP_SIZE? x[tid%WARP_SIZE] : T(0));
}
if(tid/WARP_SIZE==0) val = warp_reduce_sum(val);
return val;
}
#define ELEMENTS_PER_ITER 4 // enables concurrency within each thread to hide latency
#define ELEMENTS_PER_THREAD 16
#define OPTIMAL_TILE_W 32
#define MAX_H_BLOCK 128
#define MAX_BLOCK_SIZE 512
// Ceil-divide x by y, then round the quotient down to a power of two.
// (Note: not a plain ceiling division — the result feeds power-of-two
// grid/block sizing in flexible_launch_configs.)
__host__ int div_ru(int x, int y) {
int blocks = 1 + (x - 1)/y;
return h_last_pow2(blocks);
}
// Pick a 2-D launch configuration for a (reduction x stride) problem:
// block.x/grid.x cover the stride dimension, block.y/grid.y the reduction
// dimension (each thread handling ELEMENTS_PER_THREAD reduction elements).
// All sizes are powers of two; the block is capped at MAX_BLOCK_SIZE threads
// and grid.y at MAX_H_BLOCK.
__host__ void flexible_launch_configs(
const int reduction,
const int stride,
dim3 &block,
dim3 &grid,
const bool coop_flag = false) {
int block_x = ::min(h_last_pow2(stride), OPTIMAL_TILE_W);
int block_y = ::min(h_last_pow2(div_ru(reduction , ELEMENTS_PER_THREAD)),
MAX_BLOCK_SIZE / block_x);
// If the block is not full, let block_x grow to use the remaining threads.
if (block_x * block_y != MAX_BLOCK_SIZE) {
block_x = ::min(h_last_pow2(stride), MAX_BLOCK_SIZE / block_y);
}
int grid_x = div_ru(stride, block_x);
int grid_y = ::min(div_ru(reduction, block_y * ELEMENTS_PER_THREAD), MAX_H_BLOCK);
if (coop_flag) {
// it's not worth having a grid reduction if the reduction dimension is not big enough
grid_y = grid_y < 8 ? 1 : grid_y;
}
block.x = block_x;
block.y = block_y;
block.z = 1;
grid.x = grid_x;
grid.y = grid_y;
grid.z = 1;
}
template<typename T, typename C>
__device__ __forceinline__ void welford_merge_element(C& count,
T& mean,
T& m2n,
const C& num_new,
const T& mean_new,
const T& m2n_new) {
T factor = T(1.0) / max(1, (count + num_new));
T delta0 = mean - mean_new;
mean = (mean_new * num_new + mean * count) * factor;
m2n += m2n_new + delta0 * delta0 * num_new * count * factor;
count += num_new;
}
template<typename T>
__device__ __forceinline__ void warp_reduce_mean_m2n(T &mean, T &m2n, int &num)
{
#pragma unroll
for(int i = WARP_SIZE/2; i > 0; i >>= 1) {
auto num_new = __shfl_down_sync(0xffffffff, num, i);
auto mean_new = __shfl_down_sync(0xffffffff, mean, i);
auto m2n_new = __shfl_down_sync(0xffffffff, m2n, i);
welford_merge_element(num, mean, m2n, num_new, mean_new, m2n_new);
}
}
template <typename T>
__device__ void welford_reduce_mean_m2n(
T* __restrict__ x,
int* __restrict__ count,
T &mean,
T &m2n,
int &num,
int block_size,
int thread_id)
{
int lane = thread_id % WARP_SIZE;
int wid = thread_id / WARP_SIZE;
if (block_size > 32) {
warp_reduce_mean_m2n(mean, m2n, num);
if (lane == 0) {
x[wid*2] = mean;
x[wid*2+1] = m2n;
count[wid] = num;
}
__syncthreads();
if (wid == 0) {
mean = (thread_id < block_size / WARP_SIZE)? x[lane*2] : T(0);
m2n = (thread_id < block_size / WARP_SIZE)? x[lane*2+1] : T(0);
num = (thread_id < block_size / WARP_SIZE)? count[lane] : int(0);
}
}
if (wid==0) warp_reduce_mean_m2n(mean, m2n, num);
return;
}
// return spatial size for NC+ Tensors
__host__ int get_tensor_spatial_size(const at::Tensor& input)
{
auto space_size = input.size(2);
for (int i = 3; i < input.ndimension(); i++) {
space_size *= input.size(i);
}
return space_size;
}
// promote accumulation scalar type. promote half to float.
__host__ at::ScalarType promote_scalartype(const at::Tensor& input)
{
return input.scalar_type() == at::ScalarType::Half ?
at::ScalarType::Float : input.scalar_type();
}
// return single element size, optional accumulation type promotion.
__host__ size_t get_element_data_size(const at::Tensor& input, bool accumulation = false)
{
auto scalar_type = accumulation ? promote_scalartype(input) : input.scalar_type();
return at::elementSize(scalar_type);
}
template<typename T, typename C>
__device__ __forceinline__ void welford_merge_block_vertical(C& count,
T& mean,
T& m2n,
C* shmem_count,
T* shmem_mean,
T* shmem_m2n) {
// write to shared memory
auto address_base = threadIdx.x + threadIdx.y * blockDim.x;
shmem_mean[address_base] = mean;
shmem_m2n[address_base] = m2n;
shmem_count[address_base] = count;
#pragma unroll
for (int offset = blockDim.y/2; offset > 0; offset >>= 1) {
__syncthreads();
if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
auto address = address_base + offset * blockDim.x;
// read shared memory back to register for reduction
auto num_new = shmem_count[address];
auto mean_new = shmem_mean[address];
auto m2n_new = shmem_m2n[address];
welford_merge_element(count, mean, m2n, num_new, mean_new, m2n_new);
// last write is not necessary
shmem_mean[address_base] = mean;
shmem_m2n[address_base] = m2n;
shmem_count[address_base] = count;
}
}
}
// Block-level tree sum of (sum_dy, sum_dy_xmu) along the y dimension of the
// thread block; row 0 ends up holding the per-column totals.
// Each shared buffer must hold blockDim.x * blockDim.y elements.
template<typename T>
__device__ __forceinline__ void merge_block_vertical(T& sum_dy,
T& sum_dy_xmu,
T* shmem_sum_dy,
T* shmem_sum_dy_xmu) {
// write to shared memory
auto address_base = threadIdx.x + threadIdx.y * blockDim.x;
shmem_sum_dy[address_base] = sum_dy;
shmem_sum_dy_xmu[address_base] = sum_dy_xmu;
// Tree reduction (the unroll pragma is advisory; the bound is runtime-valued).
#pragma unroll
for (int offset = blockDim.y/2; offset > 0; offset >>= 1) {
// Barrier sits outside the divergent branch so all threads reach it.
__syncthreads();
if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
auto address = address_base + offset * blockDim.x;
sum_dy += shmem_sum_dy[address];
sum_dy_xmu += shmem_sum_dy_xmu[address];
// last write is not necessary
shmem_sum_dy[address_base] = sum_dy;
shmem_sum_dy_xmu[address_base] = sum_dy_xmu;
}
}
}
// welford kernel calculating mean/biased_variance/unbiased_variance
// One block per channel (blockIdx.x). Each thread runs a sequential Welford
// update over its share of the batch (threadIdx.y) x spatial (threadIdx.x)
// elements; partials are then merged block-wide via welford_reduce_mean_m2n.
// bs = batch size, fs = feature/channel count, ss = spatial size.
template <typename scalar_t, typename accscalar_t, typename outscalar_t>
__global__ void welford_kernel(
const scalar_t* __restrict__ input,
outscalar_t* __restrict__ out_mean,
outscalar_t* __restrict__ out_var_biased,
const int bs,
const int fs,
const int ss) {
int block_size = blockDim.x * blockDim.y;
int count = 0;
accscalar_t x_mean = accscalar_t(0);
accscalar_t m_2_n = accscalar_t(0);
int thread_id = threadIdx.y*blockDim.x + threadIdx.x;
for (int batch_id = threadIdx.y; batch_id < bs; batch_id += blockDim.y) {
// Input layout is (bs, fs, ss); this block handles channel blockIdx.x.
int input_base = blockIdx.x*ss + batch_id*ss*fs;
// sequential welford
for (int offset = threadIdx.x; offset < ss ; offset += blockDim.x) {
count++;
auto x_n = static_cast<accscalar_t>(input[offset+input_base]);
auto d = x_n - x_mean;
x_mean += d / count;
m_2_n += d * (x_n - x_mean);
}
}
// Shared scratch: the first 32 ints hold per-warp counts; the tail starting
// at &s_mem[32] (a 128-byte offset) is reused as accscalar_t storage for the
// per-warp {mean, m2n} pairs.
// NOTE(review): assumes the shared buffer base is 8-byte aligned when
// accscalar_t is double -- confirm.
static __shared__ int s_mem[160];
accscalar_t* s_mem_ac = (accscalar_t*) &s_mem[32];
welford_reduce_mean_m2n<accscalar_t>(s_mem_ac, s_mem, x_mean, m_2_n, count, block_size, thread_id);
// Thread 0 now holds the merged triple; count is the full bs*ss element count.
if (thread_id == 0) {
out_mean[blockIdx.x] = static_cast<outscalar_t>(x_mean);
out_var_biased[blockIdx.x] = static_cast<outscalar_t>(m_2_n/count);
}
}
// Elementwise batchnorm forward: out = weight * (x - mean) * inv_std + shift.
// Launch layout: grid.x indexes the channel; grid.y/z together with block.y/x
// grid-stride over the batch (bs) and spatial (ss) dimensions.
template <typename scalar_t, typename accscalar_t, typename layerscalar_t>
__global__ void batchnorm_forward_kernel(
    const scalar_t* __restrict__ input,
    const accscalar_t* __restrict__ mean,
    const accscalar_t* __restrict__ inv_std,
    const layerscalar_t* __restrict__ weight,
    const layerscalar_t* __restrict__ shift,
    scalar_t* __restrict__ out,
    const int ss,
    const int bs) {
  // Per-channel statistics and affine parameters (channel == blockIdx.x);
  // missing weight/shift default to identity scale / zero bias.
  const auto channel_mean = mean[blockIdx.x];
  const auto channel_inv_std = inv_std[blockIdx.x];
  const auto gamma = weight == NULL ? accscalar_t(1.0)
                                    : static_cast<accscalar_t>(weight[blockIdx.x]);
  const auto beta = shift == NULL ? accscalar_t(0.0)
                                  : static_cast<accscalar_t>(shift[blockIdx.x]);
  // Grid-stride over the batch dimension.
  for (int batch = blockIdx.y * blockDim.y + threadIdx.y; batch < bs;
       batch += gridDim.y * blockDim.y) {
    const int base = blockIdx.x * ss + batch * gridDim.x * ss;
    // Grid-stride over the spatial dimension.
    for (int sp = threadIdx.x + blockIdx.z * blockDim.x; sp < ss;
         sp += gridDim.z * blockDim.x) {
      const accscalar_t x = static_cast<accscalar_t>(input[base + sp]);
      out[base + sp] =
          static_cast<scalar_t>(gamma * (x - channel_mean) * channel_inv_std + beta);
    }
  }
}
// Backward BN kernel, calculates grad_bias, grad_weight as well as intermediate
// results to calculating grad_input.
// Breaking the grad_input to two step to support sync BN, which requires all
// reduce of the intermediate results across processes.
// One block per channel (blockIdx.x); threads tile batch (y) x spatial (x).
// Per-thread accumulation uses Kahan (compensated) summation; the block-wide
// sum is done by reduce_block (defined elsewhere in this file).
template <typename scalar_t, typename accscalar_t, typename layerscalar_t>
__global__ void reduce_bn_kernel(
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ grad_output,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
accscalar_t* __restrict__ mean_dy,
accscalar_t* __restrict__ mean_dy_xmu,
layerscalar_t* __restrict__ grad_weight,
layerscalar_t* __restrict__ grad_bias,
const int bs,
const int fs,
const int ss) {
// Shared scratch reused by both reduce_block calls below.
// NOTE(review): 64 ints = 256 bytes = 32 doubles; assumes reduce_block needs
// at most one accscalar_t slot per warp and 8-byte alignment -- confirm.
static __shared__ int s_mem[64];
int total_item_num = bs * ss;
int thread_id = threadIdx.y*blockDim.x + threadIdx.x;
auto r_mean = mean[blockIdx.x];
auto factor = inv_std[blockIdx.x];
// Kahan sum
accscalar_t sum_dy = 0.0;
accscalar_t sum_dy_xmu = 0.0;
accscalar_t sum_dy_c = 0.0;
accscalar_t sum_dy_xmu_c = 0.0;
for (int batch_id = threadIdx.y; batch_id < bs; batch_id += blockDim.y) {
// Input layout is (bs, fs, ss); this block handles channel blockIdx.x.
int input_base = blockIdx.x*ss + batch_id*ss*fs;
for (int offset = threadIdx.x; offset < ss ; offset += blockDim.x) {
auto e_grad = static_cast<accscalar_t>(grad_output[offset+input_base]);
auto e_input = static_cast<accscalar_t>(input[offset+input_base]);
// calculating sum_dy
auto sum_dy_y = e_grad - sum_dy_c;
auto sum_dy_t = sum_dy + sum_dy_y;
sum_dy_c = (sum_dy_t - sum_dy) - sum_dy_y;
sum_dy = sum_dy_t;
// calculating sum_dy_xmu
auto sum_dy_xmu_y = e_grad * (e_input - r_mean) - sum_dy_xmu_c;
auto sum_dy_xmu_t = sum_dy_xmu + sum_dy_xmu_y;
sum_dy_xmu_c = (sum_dy_xmu_t - sum_dy_xmu) - sum_dy_xmu_y;
sum_dy_xmu = sum_dy_xmu_t;
}
}
// Two block-wide sums share s_mem; the barrier keeps the second reduction
// from clobbering the buffer while the first is still being read.
sum_dy = reduce_block((accscalar_t*)s_mem, sum_dy);
__syncthreads();
sum_dy_xmu = reduce_block((accscalar_t*)s_mem, sum_dy_xmu);
if (thread_id == 0) {
if (grad_bias != NULL) {
grad_bias[blockIdx.x] = static_cast<layerscalar_t>(sum_dy);
}
if (grad_weight != NULL) {
grad_weight[blockIdx.x] = static_cast<layerscalar_t>(sum_dy_xmu * factor);
}
// Per-channel means of dy and dy*(x - mean), consumed by the grad_input step.
mean_dy[blockIdx.x] = sum_dy / total_item_num;
mean_dy_xmu[blockIdx.x] = sum_dy_xmu / total_item_num;
}
}
// Elementwise batchnorm backward:
//   grad_input = (dy - mean_dy - (x - mean) * inv_std^2 * mean_dy_xmu)
//                * weight * inv_std
// Launch layout: grid.x indexes the channel; grid.y/z together with block.y/x
// grid-stride over the batch (bs) and spatial (ss) dimensions.
template <typename scalar_t, typename accscalar_t, typename layerscalar_t>
__global__ void batchnorm_backward_kernel(
    const scalar_t* __restrict__ grad_output,
    const scalar_t* __restrict__ input,
    const accscalar_t* __restrict__ mean,
    const accscalar_t* __restrict__ inv_std,
    const layerscalar_t* __restrict__ weight,
    const accscalar_t* __restrict__ mean_dy,
    const accscalar_t* __restrict__ mean_dy_xmu,
    scalar_t* __restrict__ grad_input,
    const int ss,
    const int bs) {
  // Per-channel constants (channel == blockIdx.x).
  const accscalar_t channel_mean = static_cast<accscalar_t>(mean[blockIdx.x]);
  const accscalar_t channel_mean_dy = static_cast<accscalar_t>(mean_dy[blockIdx.x]);
  const accscalar_t istd = inv_std[blockIdx.x];
  // Scale applied to the whole expression: weight * inv_std (weight defaults to 1).
  const accscalar_t out_scale =
      (weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[blockIdx.x])) * istd;
  // Projection coefficient for the (x - mean) term: inv_std^2 * mean_dy_xmu.
  const accscalar_t xmu_scale = istd * istd * mean_dy_xmu[blockIdx.x];
  for (int batch = blockIdx.y * blockDim.y + threadIdx.y; batch < bs;
       batch += gridDim.y * blockDim.y) {
    const int base = blockIdx.x * ss + batch * gridDim.x * ss;
    for (int sp = threadIdx.x + blockIdx.z * blockDim.x; sp < ss;
         sp += gridDim.z * blockDim.x) {
      const accscalar_t dy = static_cast<accscalar_t>(grad_output[base + sp]);
      const accscalar_t x = static_cast<accscalar_t>(input[base + sp]);
      grad_input[base + sp] =
          (dy - channel_mean_dy - (x - channel_mean) * xmu_scale) * out_scale;
    }
  }
}
// welford kernel for c last tensor calculating mean/biased_variance/unbiased_variance
// Treats the tensor as 2-D (reduction_size, stride): threadIdx.x/blockIdx.x
// index the channel (stride) dimension; threadIdx.y/blockIdx.y grid-stride the
// reduction dimension with PARALLEL_LOADS-way unrolling to hide latency.
// When gridDim.y > 1, per-block partials are combined through `staging_data`
// (layout: [means | m2ns | counts], each section stride*gridDim.y entries) and
// `semaphores` (one counter per blockIdx.x); the last block to arrive performs
// the final merge and writes the outputs.
template
<typename scalar_t,
typename accscalar_t,
typename outscalar_t,
int PARALLEL_LOADS>
__global__ void
welford_kernel_c_last(
const scalar_t* __restrict__ input,
outscalar_t* __restrict__ out_mean,
outscalar_t* __restrict__ out_var_biased,
volatile accscalar_t* staging_data,
int* semaphores,
const int reduction_size,
const int stride) {
// hide latency with concurrency
accscalar_t x_mean[PARALLEL_LOADS];
accscalar_t m_2_n[PARALLEL_LOADS];
int count[PARALLEL_LOADS];
#pragma unroll
for (int i = 0; i < PARALLEL_LOADS; i++) {
x_mean[i] = accscalar_t(0);
m_2_n[i] = accscalar_t(0);
count[i] = 0; // fix: was `accscalar_t(0)` assigned to an int slot
}
// tensor dimension (m,c)
// loop along m dimension
int inner_loop_stride = blockDim.y * gridDim.y;
// offset along m dimension
int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
int address_base = m_offset * stride + c_offset;
int address_increment = inner_loop_stride * stride;
for (int i = 0; i < loop_count; i++) {
accscalar_t x_math[PARALLEL_LOADS];
accscalar_t x_count_inv[PARALLEL_LOADS];
accscalar_t is_valid[PARALLEL_LOADS];
// load multiple data in
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
if (c_offset < stride && m_offset < reduction_size) {
x_math[j] = input[address_base];
count[j]++;
x_count_inv[j] = accscalar_t(1) / count[j];
is_valid[j] = accscalar_t(1);
} else {
// Out-of-range lanes contribute neutral elements to the Welford update.
x_math[j] = accscalar_t(0);
x_count_inv[j] = accscalar_t(0);
is_valid[j] = accscalar_t(0);
}
m_offset += inner_loop_stride;
address_base += address_increment;
}
// calculate mean/m2n with welford
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
accscalar_t delta0 = x_math[j] - x_mean[j];
x_mean[j] += delta0 * x_count_inv[j];
accscalar_t delta1 = x_math[j] - x_mean[j];
m_2_n[j] += delta0 * delta1 * is_valid[j];
}
}
// thread reduction to accumulate mean/m_2_n/count between PARALLEL_LOADS
#pragma unroll
for (int j = 1; j < PARALLEL_LOADS; j++) {
welford_merge_element(count[0], x_mean[0], m_2_n[0], count[j], x_mean[j], m_2_n[j]);
}
// release x_mean / m_2_n
auto mean_th = x_mean[0];
auto m2_th = m_2_n[0];
auto count_th = count[0];
// block-wise reduction with shared memory (since reduction cannot be done within a warp)
static __shared__ accscalar_t shmem_mean[MAX_BLOCK_SIZE];
static __shared__ accscalar_t shmem_m2n[MAX_BLOCK_SIZE];
static __shared__ int shmem_count[MAX_BLOCK_SIZE];
welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n);
// grid reduction if needed (coop launch used at the first place)
if (gridDim.y > 1) {
volatile accscalar_t* staging_mean = staging_data;
volatile accscalar_t* staging_m2n = &staging_data[stride*gridDim.y];
volatile int* staging_count = reinterpret_cast<volatile int*>(&staging_m2n[stride*gridDim.y]);
address_base = c_offset + blockIdx.y * stride;
// write data to staging_data;
if (threadIdx.y == 0 && c_offset < stride) {
staging_mean[address_base] = mean_th;
staging_m2n[address_base] = m2_th;
staging_count[address_base] = count_th;
}
// Publish the staging writes device-wide before signalling completion.
__threadfence();
__syncthreads(); // ensuring writes to staging_ is visible to all blocks
__shared__ bool is_last_block_done;
// mark block done
if (threadIdx.x == 0 && threadIdx.y == 0) {
int old = atomicAdd(&semaphores[blockIdx.x], 1);
is_last_block_done = (old == (gridDim.y-1));
}
__syncthreads();
// check that all data is now available in global memory
if (is_last_block_done) {
count_th = 0;
mean_th = accscalar_t(0.0);
m2_th = accscalar_t(0.0);
for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) {
address_base = c_offset + y * stride;
int num_new = c_offset < stride ? staging_count[address_base] : 0;
accscalar_t mean_new = c_offset < stride ? staging_mean[address_base] : accscalar_t(0.0);
accscalar_t m2n_new = c_offset < stride ? staging_m2n[address_base] : accscalar_t(0.0);
welford_merge_element(count_th, mean_th, m2_th, num_new, mean_new, m2n_new);
}
welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n);
if (threadIdx.y == 0 && c_offset < stride) {
out_mean[c_offset] = static_cast<outscalar_t>(mean_th);
out_var_biased[c_offset] = static_cast<outscalar_t>(m2_th / count_th);
}
}
} else {
if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) {
out_mean[c_offset] = static_cast<outscalar_t>(mean_th);
out_var_biased[c_offset] = static_cast<outscalar_t>(m2_th / count_th);
}
}
}
// parallel welford kernel to further reduce mean / biased_var
// into mean / unbiased_var / inv_std across multiple processes.
// Inputs are laid out (world_size, feature_size). Each thread grid-strides
// over features and Welford-merges the world_size per-process partials, where
// each process contributed `numel` elements.
template <typename scalar_t>
__global__ void welford_kernel_parallel(
const scalar_t* __restrict__ mean,
const scalar_t* __restrict__ var_biased,
scalar_t* __restrict__ out_mean,
scalar_t* __restrict__ out_var,
scalar_t* __restrict__ inv_std,
const int world_size,
const int feature_size,
const float eps,
const int numel) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < feature_size; i += gridDim.x * blockDim.x) {
// load data;
int address = i;
scalar_t x_mean = 0;
scalar_t m_2_n = 0;
int count = 0;
for (int j = 0; j < world_size; j++) {
// Each process contributed `numel` elements with m2n = biased_var * numel.
welford_merge_element(count, x_mean, m_2_n, numel, mean[address], var_biased[address]*numel);
address += feature_size;
}
out_mean[i] = x_mean;
// Unbiased variance (Bessel's correction) and inverse std with epsilon.
out_var[i] = m_2_n/ (count - 1);
inv_std[i] = scalar_t(1) / sqrt(m_2_n/count + eps);
}
}
// Elementwise channels-last BN forward:
//   out = weight * (x - mean) * inv_std + shift  [+ z] [then optional ReLU]
// 2-D (reduction_size, stride) view: threadIdx.x/blockIdx.x index the channel
// (stride) dimension; threadIdx.y/blockIdx.y grid-stride the reduction
// dimension with PARALLEL_LOADS-way unrolling.
template <
typename scalar_t,
typename accscalar_t,
typename layerscalar_t,
int PARALLEL_LOADS>
__global__ void batchnorm_forward_c_last_kernel(
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ z,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
const layerscalar_t* __restrict__ weight,
const layerscalar_t* __restrict__ shift,
scalar_t* __restrict__ out,
const int reduction_size,
const int stride,
const bool fuse_relu) {
// tensor dimension (m,c)
// loop along m dimension
int inner_loop_stride = blockDim.y * gridDim.y;
// offset along m dimension
int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
// Fix: bail out before touching the per-channel arrays. Threads whose channel
// index falls past `stride` would otherwise read mean/inv_std/weight/shift out
// of bounds. Early return is safe here: this kernel has no __syncthreads().
if (c_offset >= stride) {
return;
}
// Per-channel statistics and affine parameters (identity scale / zero shift
// when weight / shift are not provided).
auto m_c = mean[c_offset];
auto inv_std_c = static_cast<accscalar_t>(inv_std[c_offset]);
auto w_c = weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[c_offset]);
auto s_c = shift == NULL ? accscalar_t(0.0) : static_cast<accscalar_t>(shift[c_offset]);
int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
int address_base = m_offset * stride + c_offset;
int address_increment = inner_loop_stride * stride;
for (int i = 0; i < loop_count; i++) {
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
if (c_offset < stride && m_offset < reduction_size) {
auto tmp = w_c * (static_cast<accscalar_t>(input[address_base]) - m_c ) * inv_std_c + s_c;
if (z != NULL) {
// Fused residual add before the optional ReLU.
tmp += z[address_base];
}
out[address_base] = (fuse_relu && tmp <= accscalar_t(0.0) ? scalar_t(0.0) : static_cast<scalar_t>(tmp));
}
m_offset += inner_loop_stride;
address_base += address_increment;
}
}
}
// ReLU backward for the fused channels-last BN(+add)+ReLU forward above:
// recomputes the pre-ReLU activation (BN output plus optional residual `z`)
// and passes grad_output through where it was positive, zero elsewhere.
// Same 2-D (reduction_size, stride) launch layout as the forward kernel.
template <
typename scalar_t,
typename accscalar_t,
typename layerscalar_t,
int PARALLEL_LOADS>
__global__ void relu_backward_c_last_kernel(
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ z,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
const layerscalar_t* __restrict__ weight,
const layerscalar_t* __restrict__ shift,
scalar_t* __restrict__ out,
const int reduction_size,
const int stride) {
// tensor dimension (m,c)
// loop along m dimension
int inner_loop_stride = blockDim.y * gridDim.y;
// offset along m dimension
int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
// Fix: bail out before touching the per-channel arrays. Threads whose channel
// index falls past `stride` would otherwise read mean/inv_std/weight/shift out
// of bounds. Early return is safe here: this kernel has no __syncthreads().
if (c_offset >= stride) {
return;
}
// Per-channel statistics and affine parameters (identity scale / zero shift
// when weight / shift are not provided).
auto m_c = mean[c_offset];
auto inv_std_c = static_cast<accscalar_t>(inv_std[c_offset]);
auto w_c = weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[c_offset]);
auto s_c = shift == NULL ? accscalar_t(0.0) : static_cast<accscalar_t>(shift[c_offset]);
int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
int address_base = m_offset * stride + c_offset;
int address_increment = inner_loop_stride * stride;
for (int i = 0; i < loop_count; i++) {
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
if (c_offset < stride && m_offset < reduction_size) {
// Recompute the pre-ReLU activation to derive the ReLU mask.
auto tmp = w_c * (static_cast<accscalar_t>(input[address_base]) - m_c ) * inv_std_c + s_c;
if (z != NULL) {
tmp += z[address_base];
}
out[address_base] = (tmp <= accscalar_t(0.0) ? scalar_t(0.0) : grad_output[address_base]);
}
m_offset += inner_loop_stride;
address_base += address_increment;
}
}
}
// batchnorm backward kernel for c last tensor
// Computes per-channel sum_dy / sum_dy_xmu (and grad_weight / grad_bias when
// requested) over a 2-D (reduction_size, stride) view. threadIdx.x/blockIdx.x
// index the channel (stride) dimension; threadIdx.y/blockIdx.y grid-stride the
// reduction dimension with PARALLEL_LOADS-way unrolling. When gridDim.y > 1,
// per-block partials are combined through `staging_data` (layout:
// [sum_dy | sum_dy_xmu], each section stride*gridDim.y entries) and
// `semaphores` (one counter per blockIdx.x); the last block to arrive merges
// and writes the outputs.
template
<typename scalar_t,
typename accscalar_t,
typename layerscalar_t,
int PARALLEL_LOADS>
__global__ void reduce_bn_c_last_kernel(
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ grad_output,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
accscalar_t* __restrict__ mean_dy,
accscalar_t* __restrict__ mean_dy_xmu,
layerscalar_t* __restrict__ grad_weight,
layerscalar_t* __restrict__ grad_bias,
volatile accscalar_t* staging_data,
int* semaphores,
const int reduction_size,
const int stride) {
// hide latency with concurrency
accscalar_t sum_dy[PARALLEL_LOADS];
accscalar_t sum_dy_xmu[PARALLEL_LOADS];
#pragma unroll
for (int i = 0; i < PARALLEL_LOADS; i++) {
sum_dy[i] = accscalar_t(0);
sum_dy_xmu[i] = accscalar_t(0);
}
// tensor dimension (m,c)
// loop along m dimension
int inner_loop_stride = blockDim.y * gridDim.y;
// offset along m dimension
int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
int address_base = m_offset * stride + c_offset;
int address_increment = inner_loop_stride * stride;
// Fix: guard the per-channel loads. Threads whose channel index falls past
// `stride` must not read mean/inv_std out of bounds; they cannot return early
// because they still have to participate in the barriers below, so they get
// neutral values instead (their accumulated sums stay zero anyway).
auto r_mean = c_offset < stride ? mean[c_offset] : accscalar_t(0);
auto factor = c_offset < stride ? inv_std[c_offset] : accscalar_t(0);
for (int i = 0; i < loop_count; i++) {
accscalar_t x_input[PARALLEL_LOADS];
accscalar_t x_grad_output[PARALLEL_LOADS];
// load multiple data in
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
if (c_offset < stride && m_offset < reduction_size) {
x_input[j] = input[address_base];
x_grad_output[j] = grad_output[address_base];
} else {
x_input[j] = accscalar_t(0);
x_grad_output[j] = accscalar_t(0);
}
m_offset += inner_loop_stride;
address_base += address_increment;
}
// calculate sum_dy / sum_dy_xmu
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
sum_dy[j] += x_grad_output[j];
sum_dy_xmu[j] += x_grad_output[j] * (x_input[j] - r_mean);
}
}
// thread reduction to accumulate sum_dy / sum_dy_xmu between PARALLEL_LOADS
#pragma unroll
for (int j = 1; j < PARALLEL_LOADS; j++) {
sum_dy[0] += sum_dy[j];
sum_dy_xmu[0] += sum_dy_xmu[j];
}
// release array of registers
auto sum_dy_th = sum_dy[0];
auto sum_dy_xmu_th = sum_dy_xmu[0];
// block-wise reduction with shared memory (since reduction cannot be done within a warp)
static __shared__ accscalar_t shmem_sum_dy[MAX_BLOCK_SIZE];
static __shared__ accscalar_t shmem_sum_dy_xmu[MAX_BLOCK_SIZE];
merge_block_vertical(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu);
// grid reduction if needed (coop launch used at the first place)
if (gridDim.y > 1) {
volatile accscalar_t* staging_sum_dy = staging_data;
volatile accscalar_t* staging_sum_dy_xmu = &staging_data[stride*gridDim.y];
address_base = c_offset + blockIdx.y * stride;
// write data to staging_data;
if (threadIdx.y == 0 && c_offset < stride) {
staging_sum_dy[address_base] = sum_dy_th;
staging_sum_dy_xmu[address_base] = sum_dy_xmu_th;
}
// Publish the staging writes device-wide before signalling completion.
__threadfence();
__syncthreads(); // ensuring writes to staging_ is visible to all blocks
__shared__ bool is_last_block_done;
// mark block done
if (threadIdx.x == 0 && threadIdx.y == 0) {
int old = atomicAdd(&semaphores[blockIdx.x], 1);
is_last_block_done = (old == (gridDim.y-1));
}
__syncthreads();
// check that all data is now available in global memory
if (is_last_block_done) {
sum_dy_th = accscalar_t(0.0);
sum_dy_xmu_th = accscalar_t(0.0);
for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) {
address_base = c_offset + y * stride;
sum_dy_th += (c_offset < stride ? staging_sum_dy[address_base] : accscalar_t(0.0));
sum_dy_xmu_th += (c_offset < stride ? staging_sum_dy_xmu[address_base] : accscalar_t(0.0));
}
merge_block_vertical(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu);
if (threadIdx.y == 0 && c_offset < stride) {
if (grad_bias != NULL) {
grad_bias[c_offset] = static_cast<layerscalar_t>(sum_dy_th);
}
if (grad_weight != NULL) {
grad_weight[c_offset] = static_cast<layerscalar_t>(sum_dy_xmu_th * factor);
}
mean_dy[c_offset] = sum_dy_th / reduction_size;
mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size;
}
}
} else {
if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) {
if (grad_bias != NULL) {
grad_bias[c_offset] = static_cast<layerscalar_t>(sum_dy_th);
}
if (grad_weight != NULL) {
grad_weight[c_offset] = static_cast<layerscalar_t>(sum_dy_xmu_th * factor);
}
mean_dy[c_offset] = sum_dy_th / reduction_size;
mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size;
}
}
}
// Elementwise channels-last BN backward:
//   grad_input = (dy - mean_dy - (x - mean) * inv_std^2 * mean_dy_xmu)
//                * weight * inv_std
// Same 2-D (reduction_size, stride) launch layout as the other c_last kernels.
template <
typename scalar_t,
typename accscalar_t,
typename layerscalar_t,
int PARALLEL_LOADS>
__global__ void batchnorm_backward_c_last_kernel(
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
const layerscalar_t* __restrict__ weight,
const accscalar_t* __restrict__ mean_dy,
const accscalar_t* __restrict__ mean_dy_xmu,
scalar_t* __restrict__ grad_input,
const int reduction_size,
const int stride) {
// tensor dimension (m,c)
// loop along m dimension
int inner_loop_stride = blockDim.y * gridDim.y;
// offset along m dimension
int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
// Fix: bail out before touching the per-channel arrays. Threads whose channel
// index falls past `stride` would otherwise read mean/mean_dy/inv_std/weight/
// mean_dy_xmu out of bounds. Early return is safe: no __syncthreads() here.
if (c_offset >= stride) {
return;
}
// Per-channel constants; factor_1_c becomes inv_std^2 * mean_dy_xmu and
// factor_2_c is weight * inv_std (weight defaults to 1).
auto m_c = mean[c_offset];
auto m_dy_c = mean_dy[c_offset];
auto factor_1_c = inv_std[c_offset];
auto factor_2_c = (weight == NULL? accscalar_t(1.0) : static_cast<accscalar_t>(weight[c_offset])) * factor_1_c;
factor_1_c = factor_1_c * factor_1_c * mean_dy_xmu[c_offset];
int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
int address_base = m_offset * stride + c_offset;
int address_increment = inner_loop_stride * stride;
for (int i = 0; i < loop_count; i++) {
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
if (c_offset < stride && m_offset < reduction_size) {
grad_input[address_base] = static_cast<scalar_t>(
(static_cast<accscalar_t>(grad_output[address_base]) - m_dy_c -
(static_cast<accscalar_t>(input[address_base]) - m_c) * factor_1_c)
* factor_2_c);
}
m_offset += inner_loop_stride;
address_base += address_increment;
}
}
}
// Host entry: per-channel Welford mean / biased variance for an NC+ tensor.
// Returns {mean, var_biased}, each of shape {feature_size}, in the promoted
// accumulation dtype (half inputs accumulate in float).
std::vector<at::Tensor> welford_mean_var_CUDA(const at::Tensor input) {
const auto batch_size = input.size(0);
const auto feature_size = input.size(1);
auto space_size = get_tensor_spatial_size(input);
auto scalar_type = promote_scalartype(input);
at::Tensor out_var_biased = at::empty({feature_size}, input.options().dtype(scalar_type));
at::Tensor out_mean = at::empty({feature_size}, input.options().dtype(scalar_type));
// One block per channel; block shape trades batch (y) vs spatial (x) threads.
int block_y = min(h_last_pow2(batch_size), int(MAX_BLOCK_SIZE / 32));
int block_x = max(1, min(MAX_BLOCK_SIZE / block_y, h_last_pow2(space_size)));
const dim3 block(block_x, block_y);
const dim3 grid(feature_size);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
{
using namespace at;
// Dispatch on input dtype; accumulate in acc_type (float for half inputs).
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "welford_mean_var_kernel",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( welford_kernel<scalar_t_0, accscalar_t, accscalar_t>), dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
out_mean.data<accscalar_t>(),
out_var_biased.data<accscalar_t>(),
batch_size,
feature_size,
space_size);
);
}
return {out_mean, out_var_biased};
}
// Host entry: elementwise BN forward for NC+ tensors.
// Two dispatch paths: a mixed-precision path (half input with float
// weight/shift) and a same-dtype path (weight must match the input dtype).
at::Tensor batchnorm_forward_CUDA(
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight,
const at::optional<at::Tensor> shift) {
const auto batch_size = input.size(0);
const auto feature_size = input.size(1);
at::Tensor out = at::empty_like(input);
auto space_size = get_tensor_spatial_size(input);
// grid.x = channel; grid.y/z together with block.y/x tile batch and spatial.
int block_x = max(32, min(MAX_BLOCK_SIZE, h_last_pow2(space_size)/4));
int block_y = max(1, min(MAX_BLOCK_SIZE/block_x, h_last_pow2(batch_size)/4));
const dim3 block(block_x, block_y);
int grid_z = max(1, min(65535, h_last_pow2(space_size)/4/block_x));
int batch_group_size = max(1, min(65535, h_last_pow2(batch_size)/block_y));
const dim3 grid(feature_size, batch_group_size, grid_z);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Mixed-precision path: half input with float affine parameters.
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value() &&
weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( batchnorm_forward_kernel<scalar_t_0, accscalar_t, accscalar_t>), dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
shift.has_value() ? shift.value().data<accscalar_t>() : NULL,
out.data<scalar_t_0>(),
space_size,
batch_size);
);
} else {
// Same-dtype path: affine parameters stored in the input dtype.
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( batchnorm_forward_kernel<scalar_t_0, accscalar_t, scalar_t_0>), dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
shift.has_value() ? shift.value().data<scalar_t_0>() : NULL,
out.data<scalar_t_0>(),
space_size,
batch_size);
);
}
return out;
}
// Host entry: BN backward reduction for NC+ tensors.
// Returns {mean_dy, mean_dy_xmu, grad_weight, grad_bias}; the latter two are
// empty tensors when no weight was supplied. The per-channel mean_dy /
// mean_dy_xmu feed the second (elementwise) backward step.
std::vector<at::Tensor> reduce_bn_CUDA(
const at::Tensor grad_output,
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight)
{
const auto batch_size = input.size(0);
const auto feature_size = input.size(1);
auto scalar_type = promote_scalartype(input);
at::Tensor mean_dy = at::empty({feature_size}, mean.options());
at::Tensor mean_dy_xmu = at::empty({feature_size}, mean.options());
at::Tensor grad_weight;
at::Tensor grad_bias;
if (weight.has_value()) {
grad_weight = at::empty({feature_size}, weight.value().options());
grad_bias = at::empty({feature_size}, weight.value().options());
} else {
// No affine parameters: keep placeholders so the return shape is stable.
grad_weight = at::empty({0}, mean.options());
grad_bias = at::empty({0}, mean.options());
}
auto space_size = get_tensor_spatial_size(input);
// One block per channel; block shape trades batch (y) vs spatial (x) threads.
int block_y = min(h_last_pow2(batch_size), int(MAX_BLOCK_SIZE/ 32));
int block_x = max(1, min(MAX_BLOCK_SIZE/ block_y, h_last_pow2(space_size)));
const dim3 block(block_x, block_y);
const dim3 grid(feature_size);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Mixed-precision path: half input with float affine parameters.
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value() &&
weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( reduce_bn_kernel<scalar_t_0, accscalar_t, accscalar_t>), dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
grad_output.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
weight.has_value() ? grad_weight.data<accscalar_t>() : NULL,
weight.has_value() ? grad_bias.data<accscalar_t>() : NULL,
batch_size,
feature_size,
space_size);
);
} else {
// Same-dtype path: affine parameters stored in the input dtype.
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( reduce_bn_kernel<scalar_t_0, accscalar_t, scalar_t_0>), dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
grad_output.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
weight.has_value() ? grad_weight.data<scalar_t_0>() : NULL,
weight.has_value() ? grad_bias.data<scalar_t_0>() : NULL,
batch_size,
feature_size,
space_size);
);
}
return {mean_dy, mean_dy_xmu, grad_weight, grad_bias};
}
// Host entry: elementwise BN backward (grad_input) for NC+ tensors, consuming
// the per-channel mean_dy / mean_dy_xmu produced by reduce_bn_CUDA.
at::Tensor batchnorm_backward_CUDA(
const at::Tensor grad_output,
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight,
const at::Tensor mean_dy,
const at::Tensor mean_dy_xmu) {
const auto batch_size = input.size(0);
const auto feature_size = input.size(1);
at::Tensor grad_input = at::empty_like(input);
auto space_size = get_tensor_spatial_size(input);
// grid.x = channel; grid.y/z together with block.y/x tile batch and spatial.
int block_x = max(32, min(MAX_BLOCK_SIZE, h_last_pow2(space_size)/4));
int block_y = max(1, min(MAX_BLOCK_SIZE/block_x, h_last_pow2(batch_size)/4));
const dim3 block(block_x, block_y);
int grid_z = max(1, min(65535, h_last_pow2(space_size)/4/block_x));
int batch_group_size = max(1, min(65535, h_last_pow2(batch_size)/block_y));
const dim3 grid(feature_size, batch_group_size, grid_z);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Mixed-precision path: half input with float affine parameters.
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value() &&
weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( batchnorm_backward_kernel<scalar_t_0, accscalar_t, accscalar_t>), dim3(grid), dim3(block), 0, stream,
grad_output.data<scalar_t_0>(),
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
grad_input.data<scalar_t_0>(),
space_size,
batch_size);
);
} else {
// Same-dtype path: affine parameters stored in the input dtype.
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( batchnorm_backward_kernel<scalar_t_0, accscalar_t, scalar_t_0>), dim3(grid), dim3(block), 0, stream,
grad_output.data<scalar_t_0>(),
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
grad_input.data<scalar_t_0>(),
space_size,
batch_size);
);
}
return grad_input;
}
// Host entry: merge per-process (mean, biased_var) statistics, laid out
// (world_size, feature_size), into global {mean, unbiased var, inv_std}.
// `numel` is the per-process element count; `eps` the variance epsilon.
std::vector<at::Tensor> welford_parallel_CUDA(const at::Tensor mean_feature_nodes,
const at::Tensor var_biased,
int numel,
const float eps) {
const auto world_size = mean_feature_nodes.size(0);
const auto feature_size = mean_feature_nodes.size(1);
at::Tensor out_var = at::empty({feature_size}, var_biased.options());
at::Tensor inv_std = at::empty_like(out_var);
at::Tensor out_mean = at::empty_like(out_var);
// TODO(jie): tile this for memory coalescing!
const int block = ::min(h_last_pow2(feature_size), MAX_BLOCK_SIZE);
const int grid = std::max<int>(1, feature_size / block);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(mean_feature_nodes.scalar_type(), 0, "welford_parallel_kernel",
hipLaunchKernelGGL(( welford_kernel_parallel<scalar_t_0>), dim3(grid), dim3(block), 0, stream,
mean_feature_nodes.data<scalar_t_0>(),
var_biased.data<scalar_t_0>(),
out_mean.data<scalar_t_0>(),
out_var.data<scalar_t_0>(),
inv_std.data<scalar_t_0>(),
world_size,
feature_size,
eps,
numel);
);
}
return {out_mean, out_var, inv_std};
}
// Host entry: per-channel Welford mean / biased variance for channels-last
// tensors, viewed as (reduction_size, stride). Allocates staging buffers and
// semaphores only when the launch splits the reduction across grid.y blocks.
std::vector<at::Tensor> welford_mean_var_c_last_CUDA(const at::Tensor input) {
const auto stride = input.size(input.ndimension()-1);
const auto reduction_size = input.numel() / stride;
auto scalar_type = promote_scalartype(input);
auto option = input.options().dtype(scalar_type);
at::Tensor out_var_biased = at::empty({stride}, option);
at::Tensor out_mean = at::empty({stride}, option);
dim3 block;
dim3 grid;
flexible_launch_configs(reduction_size, stride, block, grid, true);
at::Tensor staging_data;
at::Tensor semaphores;
if (grid.y > 1) {
// Staging holds mean/m2n partials plus the int counts the kernel carves
// out of the tail; 4*stride*grid.y accumulation-dtype elements is sized
// to cover all three sections.
staging_data = at::empty({4*stride*grid.y}, option);
semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt));
}
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "welford_mean_var_c_last",
using accscalar_t = at::acc_type<scalar_t_0, true>;
accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr;
int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr;
hipLaunchKernelGGL(( welford_kernel_c_last<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>)
, dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
out_mean.data<accscalar_t>(),
out_var_biased.data<accscalar_t>(),
staging_data_ptr,
semaphores_ptr,
reduction_size,
stride);
);
}
return {out_mean, out_var_biased};
}
// Host launcher for channels-last BN forward, optionally fused with a residual
// add (`z`) and ReLU. Dispatches a dedicated instantiation for the mixed
// half-input / float-affine-parameter case; otherwise the affine parameters
// must match the input dtype.
at::Tensor batchnorm_forward_c_last_CUDA(
    const at::Tensor input,
    const at::optional<at::Tensor> z,
    const at::Tensor mean,
    const at::Tensor inv_std,
    const at::optional<at::Tensor> weight,
    const at::optional<at::Tensor> shift,
    const bool fuse_relu) {
  // Channel count (innermost dim, NHWC) and flattened reduction length.
  const auto stride = input.size(input.ndimension()-1);
  const auto reduction_size = input.numel() / stride;

  at::Tensor out = at::empty_like(input);

  dim3 block;
  dim3 grid;
  flexible_launch_configs(reduction_size, stride, block, grid);

  auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  if (input.scalar_type() == at::ScalarType::Half
      && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) {
    // Mixed precision: half input with float affine parameters
    // (layerscalar_t == accscalar_t == float).
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      hipLaunchKernelGGL(( batchnorm_forward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>)
        , dim3(grid), dim3(block), 0, stream,
          input.data<scalar_t_0>(),
          z.has_value() ? z.value().data<scalar_t_0>() : NULL,
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
          shift.has_value() ? shift.value().data<accscalar_t>(): NULL,
          out.data<scalar_t_0>(),
          reduction_size,
          stride,
          fuse_relu);
    );
  } else {
    if (weight.has_value()) {
      // Outside the mixed case, affine params must match the input dtype.
      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
        "input.scalar_type() is not supported with weight.scalar_type()");
    }
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      hipLaunchKernelGGL(( batchnorm_forward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>)
        , dim3(grid), dim3(block), 0, stream,
          input.data<scalar_t_0>(),
          z.has_value() ? z.value().data<scalar_t_0>() : NULL,
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
          shift.has_value() ? shift.value().data<scalar_t_0>(): NULL,
          out.data<scalar_t_0>(),
          reduction_size,
          stride,
          fuse_relu);
    );
  }

  return out;
}
// Channels-last backward reduction for (sync) batch norm: reduces grad_output
// over all non-channel dimensions to produce mean_dy and mean_dy_xmu, and —
// when an affine weight is present — grad_weight / grad_bias.
// Returns {mean_dy, mean_dy_xmu, grad_weight, grad_bias}; the last two are
// empty (size-0) tensors when no weight was given.
std::vector<at::Tensor> reduce_bn_c_last_CUDA(
    const at::Tensor grad_output,
    const at::Tensor input,
    const at::Tensor mean,
    const at::Tensor inv_std,
    const at::optional<at::Tensor> weight) {
  // Channel count (innermost dim for NHWC) and flattened reduction length.
  const auto stride = input.size(input.ndimension()-1);
  const auto reduction_size = input.numel() / stride;

  at::Tensor mean_dy = at::empty({stride}, mean.options());
  at::Tensor mean_dy_xmu = at::empty({stride}, mean.options());

  at::Tensor grad_weight;
  at::Tensor grad_bias;
  if (weight.has_value()) {
    grad_weight = at::empty({stride}, weight.value().options());
    grad_bias = at::empty({stride}, weight.value().options());
  } else {
    // because I cannot return an uninitialized at::Tensor
    grad_weight = at::empty({0}, mean.options());
    grad_bias = at::empty({0}, mean.options());
  }

  dim3 block;
  dim3 grid;
  // coop_flag=true: allow a grid.y split with a software grid-wide reduction.
  flexible_launch_configs(reduction_size, stride, block, grid, true);

  at::Tensor staging_data;
  at::Tensor semaphores;
  if (grid.y > 1) {
    // Scratch for cross-block partials (sum_dy + sum_dy_xmu per y-block)
    // plus one semaphore per x-block for the last-block-done handshake.
    staging_data = at::empty({2*stride*grid.y}, mean.options());
    semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt));
  }

  auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  if (input.scalar_type() == at::ScalarType::Half
      && weight.has_value()
      && weight.value().scalar_type() == at::ScalarType::Float) {
    // Mixed precision: half input, float affine params -> emit grad_weight /
    // grad_bias in accscalar_t (float).
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr;
      int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr;
      hipLaunchKernelGGL(( reduce_bn_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>)
        , dim3(grid), dim3(block), 0, stream,
          input.data<scalar_t_0>(),
          grad_output.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          mean_dy.data<accscalar_t>(),
          mean_dy_xmu.data<accscalar_t>(),
          weight.has_value() ? grad_weight.data<accscalar_t>() : NULL,
          weight.has_value() ?grad_bias.data<accscalar_t>() : NULL,
          staging_data_ptr,
          semaphores_ptr,
          reduction_size,
          stride);
    );
  } else {
    if (weight.has_value()) {
      // Outside the mixed case, affine params must match the input dtype.
      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
        "input.scalar_type() is not supported with weight.scalar_type()");
    }
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr;
      int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr;
      hipLaunchKernelGGL(( reduce_bn_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>)
        , dim3(grid), dim3(block), 0, stream,
          input.data<scalar_t_0>(),
          grad_output.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          mean_dy.data<accscalar_t>(),
          mean_dy_xmu.data<accscalar_t>(),
          weight.has_value() ? grad_weight.data<scalar_t_0>() : NULL,
          weight.has_value() ?grad_bias.data<scalar_t_0>() : NULL,
          staging_data_ptr,
          semaphores_ptr,
          reduction_size,
          stride);
    );
  }

  return {mean_dy, mean_dy_xmu, grad_weight, grad_bias};
}
// Host launcher for the channels-last BN backward elementwise pass: combines
// the precomputed per-channel reductions (mean_dy, mean_dy_xmu) with the saved
// statistics to produce grad_input.
at::Tensor batchnorm_backward_c_last_CUDA(
    const at::Tensor grad_output,
    const at::Tensor input,
    const at::Tensor mean,
    const at::Tensor inv_std,
    const at::optional<at::Tensor> weight,
    const at::Tensor mean_dy,
    const at::Tensor mean_dy_xmu) {
  // Channel count (innermost dim, NHWC) and flattened reduction length.
  const auto stride = input.size(input.ndimension()-1);
  const auto reduction_size = input.numel() / stride;

  at::Tensor grad_input = at::empty_like(input);

  dim3 block;
  dim3 grid;
  flexible_launch_configs(reduction_size, stride, block, grid);

  auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  if (input.scalar_type() == at::ScalarType::Half
      && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) {
    // Mixed precision: half input with float affine parameters.
    using namespace at;
    // Fix: dispatch label was copy-pasted as "batchnorm_forward"; use the
    // correct name so dispatch error messages identify this backward op.
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      hipLaunchKernelGGL(( batchnorm_backward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>)
        , dim3(grid), dim3(block), 0, stream,
          grad_output.data<scalar_t_0>(),
          input.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
          mean_dy.data<accscalar_t>(),
          mean_dy_xmu.data<accscalar_t>(),
          grad_input.data<scalar_t_0>(),
          reduction_size,
          stride);
    );
  } else {
    if (weight.has_value()) {
      // Outside the mixed case, affine params must match the input dtype.
      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
        "input.scalar_type() is not supported with weight.scalar_type()");
    }
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      hipLaunchKernelGGL(( batchnorm_backward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>)
        , dim3(grid), dim3(block), 0, stream,
          grad_output.data<scalar_t_0>(),
          input.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
          mean_dy.data<accscalar_t>(),
          mean_dy_xmu.data<accscalar_t>(),
          grad_input.data<scalar_t_0>(),
          reduction_size,
          stride);
    );
  }

  return grad_input;
}
// Host launcher for the backward of the fused BN(+z)+ReLU, channels-last:
// recomputes the pre-ReLU activation and masks grad_output where it was
// non-positive.
at::Tensor relu_backward_c_last_CUDA(
    const at::Tensor grad_output,
    const at::Tensor input,
    const at::optional<at::Tensor> z,
    const at::Tensor mean,
    const at::Tensor inv_std,
    const at::optional<at::Tensor> weight,
    const at::optional<at::Tensor> shift) {
  // Channel count (innermost dim, NHWC) and flattened reduction length.
  const auto stride = input.size(input.ndimension()-1);
  const auto reduction_size = input.numel() / stride;

  at::Tensor out = at::empty_like(input);

  dim3 block;
  dim3 grid;
  flexible_launch_configs(reduction_size, stride, block, grid);

  auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  if (input.scalar_type() == at::ScalarType::Half
      && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) {
    // Mixed precision: half input with float affine parameters.
    using namespace at;
    // Fix: dispatch label was copy-pasted as "batchnorm_forward"; use the
    // correct name so dispatch error messages identify this op.
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "relu_backward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      hipLaunchKernelGGL(( relu_backward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>)
        , dim3(grid), dim3(block), 0, stream,
          grad_output.data<scalar_t_0>(),
          input.data<scalar_t_0>(),
          z.has_value() ? z.value().data<scalar_t_0>() : NULL,
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
          shift.has_value() ? shift.value().data<accscalar_t>(): NULL,
          out.data<scalar_t_0>(),
          reduction_size,
          stride);
    );
  } else {
    if (weight.has_value()) {
      // Outside the mixed case, affine params must match the input dtype.
      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
        "input.scalar_type() is not supported with weight.scalar_type()");
    }
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "relu_backward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      hipLaunchKernelGGL(( relu_backward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>)
        , dim3(grid), dim3(block), 0, stream,
          grad_output.data<scalar_t_0>(),
          input.data<scalar_t_0>(),
          z.has_value() ? z.value().data<scalar_t_0>() : NULL,
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
          shift.has_value() ? shift.value().data<scalar_t_0>(): NULL,
          out.data<scalar_t_0>(),
          reduction_size,
          stride);
    );
  }

  return out;
}
| d77d54f285e6f2aec7caaf8059589e37a67dc280.cu | #include <iostream>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include "type_shim.h"
// Largest power of two <= n — except when n is itself a power of two, in
// which case n/2 is returned (the strictly smaller power). Requires n > 0.
__device__ __forceinline__ int lastpow2(int n)
{
    int p = 1 << (31 - __clz(n));   // highest set bit of n
    return (p == n) ? (p >> 1) : p;
}
// Round n up to the next power of two (n itself if already a power of two).
// Host-side counterpart of the bit-smearing trick: after OR-ing in all lower
// bits of n-1, adding one yields the next power of two. h_next_pow2(0) == 0.
__host__ __forceinline__ int h_next_pow2(unsigned int n) {
  unsigned int v = n - 1;
  // Smear the highest set bit into every lower position.
  for (int shift = 1; shift <= 16; shift <<= 1) {
    v |= (v >> shift);
  }
  return v + 1;
}
// Round n down to the largest power of two <= n (0 stays 0). After the
// bit-smear, n has all bits below its highest set bit on; subtracting the
// half leaves only the highest bit.
__host__ __forceinline__ int h_last_pow2(unsigned int n) {
  // Smear the highest set bit into every lower position.
  for (int shift = 1; shift <= 16; shift <<= 1) {
    n |= (n >> shift);
  }
  return n - (n >> 1);
}
#define WARP_SIZE 32
// Warp-wide sum: tree reduction via shuffle-down with a full-warp mask.
// After log2(WARP_SIZE) steps lane 0 holds the sum of all 32 lanes; other
// lanes hold partial sums. Callers must invoke this with the warp converged.
template<typename T>
__device__ __forceinline__ T warp_reduce_sum(T val)
{
    #pragma unroll
    for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
        val += __shfl_down_sync(0xffffffff, val, offset);
    }
    return val;
}
// Block-wide sum reduction. `x` is a shared-memory scratch buffer that must
// hold at least blockDim.x*blockDim.y/WARP_SIZE elements of T. Every thread
// contributes `val`; the total is valid in thread 0 on return (other threads
// hold partial values).
template<typename T>
__device__ __forceinline__ T reduce_block(T *x, T val)
{
  int tid = threadIdx.y*blockDim.x + threadIdx.x;
  int blockSize = blockDim.x * blockDim.y;

  if (blockSize > 32) {
    // Stage 1: reduce within each warp; warp leaders stash partials in shared mem.
    val = warp_reduce_sum(val);
    if (tid % WARP_SIZE == 0)
      x[tid/WARP_SIZE] = val;

    __syncthreads();
    // Stage 2: threads of warp 0 reload one per-warp partial each (for
    // tid < blockSize/32 <= 32, tid%WARP_SIZE == tid); everyone else gets 0.
    val = (tid < blockSize / WARP_SIZE? x[tid%WARP_SIZE] : T(0));
  }

  // Final warp-level pass in warp 0 produces the block total in lane 0.
  if(tid/WARP_SIZE==0) val = warp_reduce_sum(val);

  return val;
}
#define ELEMENTS_PER_ITER 4 // enables concurrency within each thread to hide latency
#define ELEMENTS_PER_THREAD 16
#define OPTIMAL_TILE_W 32
#define MAX_H_BLOCK 128
#define MAX_BLOCK_SIZE 512
// Ceil-divide x by y, then round the quotient down to a power of two.
// (Used to size power-of-two launch grids; requires x >= 1, y >= 1.)
__host__ int div_ru(int x, int y) {
  const int ceil_quot = 1 + (x - 1) / y;   // ceil(x / y) for positive x
  return h_last_pow2(ceil_quot);
}
// Pick a 2D launch configuration for a (reduction x stride) problem:
// block.x/grid.x tile the channel (stride) dimension, block.y/grid.y tile the
// reduction dimension (ELEMENTS_PER_THREAD items per thread). All sizes are
// rounded to powers of two via h_last_pow2/div_ru.
__host__ void flexible_launch_configs(
      const int reduction,
      const int stride,
      dim3 &block,
      dim3 &grid,
      const bool coop_flag = false) {
  int block_x = std::min(h_last_pow2(stride), OPTIMAL_TILE_W);
  int block_y = std::min(h_last_pow2(div_ru(reduction , ELEMENTS_PER_THREAD)),
                         MAX_BLOCK_SIZE / block_x);
  // If the block isn't full, grow the channel tile back up to fill it.
  if (block_x * block_y != MAX_BLOCK_SIZE) {
    block_x = std::min(h_last_pow2(stride), MAX_BLOCK_SIZE / block_y);
  }

  int grid_x = div_ru(stride, block_x);
  // Cap the reduction-dimension split; each y-block handles a slice.
  int grid_y = std::min(div_ru(reduction, block_y * ELEMENTS_PER_THREAD), MAX_H_BLOCK);
  if (coop_flag) {
    // it's not worth having a grid reduction if the reduction dimension is not big enough
    grid_y = grid_y < 8 ? 1 : grid_y;
  }

  block.x = block_x;
  block.y = block_y;
  block.z = 1;
  grid.x = grid_x;
  grid.y = grid_y;
  grid.z = 1;
}
// Merge one Welford partial (count, mean, m2n) in place with another
// (num_new, mean_new, m2n_new) using the numerically stable parallel update.
// Note the statement order matters: `count` must be updated last because
// the mean/m2n updates use the pre-merge count.
template<typename T, typename C>
__device__ __forceinline__ void welford_merge_element(C& count,
                                                      T& mean,
                                                      T& m2n,
                                                      const C& num_new,
                                                      const T& mean_new,
                                                      const T& m2n_new) {
      // max(1, ...) guards the division when both partials are empty.
      T factor = T(1.0) / max(1, (count + num_new));
      T delta0 = mean - mean_new;
      mean = (mean_new * num_new + mean * count) * factor;
      m2n += m2n_new + delta0 * delta0 * num_new * count * factor;
      count += num_new;
}
// Warp-level tree merge of per-lane Welford partials (mean, m2n, num) via
// shuffle-down with a full-warp mask; after log2(WARP_SIZE) steps lane 0
// holds the merged statistics for the whole warp.
template<typename T>
__device__ __forceinline__ void warp_reduce_mean_m2n(T &mean, T &m2n, int &num)
{
  #pragma unroll
  for(int i = WARP_SIZE/2; i > 0; i >>= 1) {
    // Pull the partial from the lane `i` positions higher, then merge it in.
    auto num_new = __shfl_down_sync(0xffffffff, num, i);
    auto mean_new = __shfl_down_sync(0xffffffff, mean, i);
    auto m2n_new = __shfl_down_sync(0xffffffff, m2n, i);
    welford_merge_element(num, mean, m2n, num_new, mean_new, m2n_new);
  }
}
// Block-level Welford reduction. `x` is shared scratch holding interleaved
// (mean, m2n) pairs, one pair per warp; `count` is shared scratch for the
// per-warp counts. On return, thread 0 holds the block-wide (mean, m2n, num).
template <typename T>
__device__ void welford_reduce_mean_m2n(
      T* __restrict__ x,
      int* __restrict__ count,
      T &mean,
      T &m2n,
      int &num,
      int block_size,
      int thread_id)
{
  int lane = thread_id % WARP_SIZE;
  int wid = thread_id / WARP_SIZE;

  if (block_size > 32) {
    // Stage 1: each warp reduces internally; warp leaders stash partials.
    warp_reduce_mean_m2n(mean, m2n, num);
    if (lane == 0) {
      x[wid*2] = mean;
      x[wid*2+1] = m2n;
      count[wid] = num;
    }
    __syncthreads();
    // Stage 2: warp 0 reloads one per-warp partial per lane; lanes beyond
    // the number of warps contribute empty (zero-count) partials.
    if (wid == 0) {
      mean = (thread_id < block_size / WARP_SIZE)? x[lane*2] : T(0);
      m2n = (thread_id < block_size / WARP_SIZE)? x[lane*2+1] : T(0);
      num = (thread_id < block_size / WARP_SIZE)? count[lane] : int(0);
    }
  }

  // Final warp-level merge in warp 0 leaves the result in lane 0 (thread 0).
  if (wid==0) warp_reduce_mean_m2n(mean, m2n, num);

  return;
}
// return spatial size for NC+ Tensors
__host__ int get_tensor_spatial_size(const at::Tensor& input)
{
auto space_size = input.size(2);
for (int i = 3; i < input.ndimension(); i++) {
space_size *= input.size(i);
}
return space_size;
}
// promote accumulation scalar type. promote half to float.
__host__ at::ScalarType promote_scalartype(const at::Tensor& input)
{
return input.scalar_type() == at::ScalarType::Half ?
at::ScalarType::Float : input.scalar_type();
}
// return single element size, optional accumulation type promotion.
__host__ size_t get_element_data_size(const at::Tensor& input, bool accumulation = false)
{
auto scalar_type = accumulation ? promote_scalartype(input) : input.scalar_type();
return at::elementSize(scalar_type);
}
// Merge Welford partials vertically (across threadIdx.y) within a block:
// every thread publishes its (count, mean, m2n) to shared memory, then rows
// are pairwise merged in a tree until row y==0 holds the per-column result.
// Each shared buffer must hold blockDim.x * blockDim.y elements.
template<typename T, typename C>
__device__ __forceinline__ void welford_merge_block_vertical(C& count,
                                                             T& mean,
                                                             T& m2n,
                                                             C* shmem_count,
                                                             T* shmem_mean,
                                                             T* shmem_m2n) {
  // write to shared memory
  auto address_base = threadIdx.x + threadIdx.y * blockDim.x;
  shmem_mean[address_base] = mean;
  shmem_m2n[address_base] = m2n;
  shmem_count[address_base] = count;

  #pragma unroll
  for (int offset = blockDim.y/2; offset > 0; offset >>= 1) {
    // Barrier before each level so every partner row's write is visible.
    __syncthreads();
    if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
      auto address = address_base + offset * blockDim.x;
      // read shared memory back to register for reduction
      auto num_new = shmem_count[address];
      auto mean_new = shmem_mean[address];
      auto m2n_new = shmem_m2n[address];
      welford_merge_element(count, mean, m2n, num_new, mean_new, m2n_new);

      // last write is not necessary
      shmem_mean[address_base] = mean;
      shmem_m2n[address_base] = m2n;
      shmem_count[address_base] = count;
    }
  }
}
// Sum (sum_dy, sum_dy_xmu) partials vertically (across threadIdx.y) within a
// block via a shared-memory tree; row y==0 ends up with the per-column total.
// Each shared buffer must hold blockDim.x * blockDim.y elements.
template<typename T>
__device__ __forceinline__ void merge_block_vertical(T& sum_dy,
                                                     T& sum_dy_xmu,
                                                     T* shmem_sum_dy,
                                                     T* shmem_sum_dy_xmu) {
  // write to shared memory
  auto address_base = threadIdx.x + threadIdx.y * blockDim.x;
  shmem_sum_dy[address_base] = sum_dy;
  shmem_sum_dy_xmu[address_base] = sum_dy_xmu;

  #pragma unroll
  for (int offset = blockDim.y/2; offset > 0; offset >>= 1) {
    // Barrier before each level so every partner row's write is visible.
    __syncthreads();
    if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
      auto address = address_base + offset * blockDim.x;

      sum_dy += shmem_sum_dy[address];
      sum_dy_xmu += shmem_sum_dy_xmu[address];

      // last write is not necessary
      shmem_sum_dy[address_base] = sum_dy;
      shmem_sum_dy_xmu[address_base] = sum_dy_xmu;
    }
  }
}
// welford kernel calculating mean/biased_variance/unbiased_variance
// One block per feature (blockIdx.x) of an NC+ tensor. Threads stream over
// batch (threadIdx.y) and spatial (threadIdx.x) elements with a sequential
// Welford update, then the block merges its partials.
// bs/fs/ss: batch, feature and spatial sizes.
template <typename scalar_t, typename accscalar_t, typename outscalar_t>
__global__ void welford_kernel(
      const scalar_t* __restrict__ input,
      outscalar_t* __restrict__ out_mean,
      outscalar_t* __restrict__ out_var_biased,
      const int bs,
      const int fs,
      const int ss) {
  int block_size = blockDim.x * blockDim.y;
  int count = 0;
  accscalar_t x_mean = accscalar_t(0);
  accscalar_t m_2_n = accscalar_t(0);

  int thread_id = threadIdx.y*blockDim.x + threadIdx.x;

  for (int batch_id = threadIdx.y; batch_id < bs; batch_id += blockDim.y) {
    // Base offset of this (feature, batch) plane in NC+ layout.
    int input_base = blockIdx.x*ss + batch_id*ss*fs;
    // sequential welford
    for (int offset = threadIdx.x; offset < ss ; offset += blockDim.x) {
      count++;
      auto x_n = static_cast<accscalar_t>(input[offset+input_base]);
      auto d = x_n - x_mean;
      x_mean += d / count;
      m_2_n += d * (x_n - x_mean);
    }
  }

  // Shared scratch: first 32 ints hold per-warp counts; the remaining 128
  // ints (512 bytes) are reinterpreted as accscalar_t (mean, m2n) pairs —
  // enough for 32 warps even with double accumulation. The 128-byte offset
  // keeps the accscalar_t region 8-byte aligned.
  static __shared__ int s_mem[160];
  accscalar_t* s_mem_ac = (accscalar_t*) &s_mem[32];

  welford_reduce_mean_m2n<accscalar_t>(s_mem_ac, s_mem, x_mean, m_2_n, count, block_size, thread_id);

  if (thread_id == 0) {
    out_mean[blockIdx.x] = static_cast<outscalar_t>(x_mean);
    // Biased variance: m2n / N (no Bessel correction here).
    out_var_biased[blockIdx.x] = static_cast<outscalar_t>(m_2_n/count);
  }
}
// elementwise BN kernel (NC+ layout): out = weight*(x - mean)*inv_std + shift,
// computed in accscalar_t. One blockIdx.x per feature; blockIdx.y/z together
// with the thread block tile the batch and spatial dimensions.
// NULL weight/shift mean identity affine (scale 1, shift 0).
template <typename scalar_t, typename accscalar_t, typename layerscalar_t>
__global__ void batchnorm_forward_kernel(
      const scalar_t* __restrict__ input,
      const accscalar_t* __restrict__ mean,
      const accscalar_t* __restrict__ inv_std,
      const layerscalar_t* __restrict__ weight,
      const layerscalar_t* __restrict__ shift,
      scalar_t* __restrict__ out,
      const int ss,
      const int bs) {
  // Per-feature parameters, loaded once per block.
  auto m_c = mean[blockIdx.x];
  auto inv_std_c = inv_std[blockIdx.x];
  auto w_c = weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[blockIdx.x]);
  auto s_c = shift == NULL ? accscalar_t(0.0) : static_cast<accscalar_t>(shift[blockIdx.x]);

  // Grid-stride loops over batch (y) and spatial (x with z-blocks) dims.
  for (int batch_offset = blockIdx.y*blockDim.y + threadIdx.y; batch_offset < bs; batch_offset += gridDim.y*blockDim.y) {
    int address_base = blockIdx.x*ss + batch_offset*gridDim.x*ss;
    for (int offset = threadIdx.x + blockIdx.z*blockDim.x; offset < ss ; offset+= gridDim.z*blockDim.x) {
      out[address_base+offset] = static_cast<scalar_t>(w_c * (static_cast<accscalar_t>(input[address_base+offset]) - m_c ) * inv_std_c + s_c);
    }
  }
}
// Backward BN kernel, calculates grad_bias, grad_weight as well as intermediate
// results to calculating grad_input.
// Breaking the grad_input to two step to support sync BN, which requires all
// reduce of the intermediate results across processes.
// One block per feature (blockIdx.x). Per-feature outputs:
//   mean_dy     = sum(dy) / (bs*ss)
//   mean_dy_xmu = sum(dy * (x - mean)) / (bs*ss)
//   grad_bias   = sum(dy)                     (skipped when NULL)
//   grad_weight = sum(dy * (x - mean)) * inv_std (skipped when NULL)
template <typename scalar_t, typename accscalar_t, typename layerscalar_t>
__global__ void reduce_bn_kernel(
      const scalar_t* __restrict__ input,
      const scalar_t* __restrict__ grad_output,
      const accscalar_t* __restrict__ mean,
      const accscalar_t* __restrict__ inv_std,
      accscalar_t* __restrict__ mean_dy,
      accscalar_t* __restrict__ mean_dy_xmu,
      layerscalar_t* __restrict__ grad_weight,
      layerscalar_t* __restrict__ grad_bias,
      const int bs,
      const int fs,
      const int ss) {
  // 64 ints = 256 bytes of scratch for reduce_block: room for one accscalar_t
  // partial per warp (up to 32 warps of doubles).
  static __shared__ int s_mem[64];
  int total_item_num = bs * ss;

  int thread_id = threadIdx.y*blockDim.x + threadIdx.x;

  auto r_mean = mean[blockIdx.x];
  auto factor = inv_std[blockIdx.x];

  // Kahan sum: the *_c variables carry the running compensation so many
  // small contributions do not get lost in low-precision accumulation.
  accscalar_t sum_dy = 0.0;
  accscalar_t sum_dy_xmu = 0.0;
  accscalar_t sum_dy_c = 0.0;
  accscalar_t sum_dy_xmu_c = 0.0;
  for (int batch_id = threadIdx.y; batch_id < bs; batch_id += blockDim.y) {
    int input_base = blockIdx.x*ss + batch_id*ss*fs;
    for (int offset = threadIdx.x; offset < ss ; offset += blockDim.x) {
      auto e_grad = static_cast<accscalar_t>(grad_output[offset+input_base]);
      auto e_input = static_cast<accscalar_t>(input[offset+input_base]);
      // calculating sum_dy
      auto sum_dy_y = e_grad - sum_dy_c;
      auto sum_dy_t = sum_dy + sum_dy_y;
      sum_dy_c = (sum_dy_t - sum_dy) - sum_dy_y;
      sum_dy = sum_dy_t;

      // calculating sum_dy_xmu
      auto sum_dy_xmu_y = e_grad * (e_input - r_mean) - sum_dy_xmu_c;
      auto sum_dy_xmu_t = sum_dy_xmu + sum_dy_xmu_y;
      sum_dy_xmu_c = (sum_dy_xmu_t - sum_dy_xmu) - sum_dy_xmu_y;
      sum_dy_xmu = sum_dy_xmu_t;
    }
  }

  // Block-wide totals; thread 0 holds the valid result after each call.
  sum_dy = reduce_block((accscalar_t*)s_mem, sum_dy);
  __syncthreads();  // s_mem is reused by the second reduction
  sum_dy_xmu = reduce_block((accscalar_t*)s_mem, sum_dy_xmu);

  if (thread_id == 0) {
    if (grad_bias != NULL) {
      grad_bias[blockIdx.x] = static_cast<layerscalar_t>(sum_dy);
    }
    if (grad_weight != NULL) {
      grad_weight[blockIdx.x] = static_cast<layerscalar_t>(sum_dy_xmu * factor);
    }
    mean_dy[blockIdx.x] = sum_dy / total_item_num;
    mean_dy_xmu[blockIdx.x] = sum_dy_xmu / total_item_num;
  }
}
// elementwise backward BN kernel (NC+ layout):
//   grad_input = (dy - mean_dy - (x - mean) * inv_std^2 * mean_dy_xmu)
//                * inv_std * weight
// using the per-feature reductions precomputed by reduce_bn_kernel.
// One blockIdx.x per feature; y/z tile the batch and spatial dimensions.
template <typename scalar_t, typename accscalar_t, typename layerscalar_t>
__global__ void batchnorm_backward_kernel(
      const scalar_t* __restrict__ grad_output,
      const scalar_t* __restrict__ input,
      const accscalar_t* __restrict__ mean,
      const accscalar_t* __restrict__ inv_std,
      const layerscalar_t* __restrict__ weight,
      const accscalar_t* __restrict__ mean_dy,
      const accscalar_t* __restrict__ mean_dy_xmu,
      scalar_t* __restrict__ grad_input,
      const int ss,
      const int bs) {
  auto m_c = static_cast<accscalar_t>(mean[blockIdx.x]);
  auto m_dy_c = static_cast<accscalar_t>(mean_dy[blockIdx.x]);
  // factor_2_c = weight * inv_std (outer scale); NULL weight means scale 1.
  auto factor_1_c = inv_std[blockIdx.x];
  auto factor_2_c = (weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[blockIdx.x])) * factor_1_c;
  // factor_1_c becomes inv_std^2 * mean_dy_xmu (coefficient of (x - mean)).
  factor_1_c = factor_1_c * factor_1_c * mean_dy_xmu[blockIdx.x];

  // Grid-stride loops over batch (y) and spatial (x with z-blocks) dims.
  for (int batch_offset = blockIdx.y*blockDim.y+threadIdx.y; batch_offset < bs; batch_offset += gridDim.y*blockDim.y) {
    int address_base = blockIdx.x*ss + batch_offset*gridDim.x*ss;
    for (int offset = threadIdx.x + blockIdx.z*blockDim.x; offset < ss ; offset+= gridDim.z*blockDim.x) {
      grad_input[address_base+offset] = (static_cast<accscalar_t>(grad_output[address_base+offset]) - m_dy_c - (static_cast<accscalar_t>(input[address_base+offset]) - m_c) * factor_1_c) * factor_2_c;
    }
  }
}
// welford kernel for c last tensor calculating mean/biased_variance/unbiased_variance
// Layout (m, c): threadIdx/blockIdx.x tile channels, y tiles the reduction
// dimension. Each thread keeps PARALLEL_LOADS independent Welford partials to
// hide load latency, merges them, reduces within the block, and — when
// gridDim.y > 1 — performs a software grid reduction through staging_data
// guarded by per-channel-block semaphores.
template
  <typename scalar_t,
   typename accscalar_t,
   typename outscalar_t,
   int PARALLEL_LOADS>
__global__ void
welford_kernel_c_last(
      const scalar_t* __restrict__ input,
      outscalar_t* __restrict__ out_mean,
      outscalar_t* __restrict__ out_var_biased,
      volatile accscalar_t* staging_data,
      int* semaphores,
      const int reduction_size,
      const int stride) {
  // hide latency with concurrency
  accscalar_t x_mean[PARALLEL_LOADS];
  accscalar_t m_2_n[PARALLEL_LOADS];
  int count[PARALLEL_LOADS];

  #pragma unroll
  for (int i = 0; i < PARALLEL_LOADS; i++) {
    x_mean[i] = accscalar_t(0);
    m_2_n[i] = accscalar_t(0);
    // NOTE(review): count is int; accscalar_t(0) converts to 0 so this is
    // harmless, but int(0) would be clearer.
    count[i] = accscalar_t(0);
  }
  // tensor dimension (m,c)

  // loop along m dimension
  int inner_loop_stride = blockDim.y * gridDim.y;

  // offset along m dimension
  int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
  int c_offset = blockIdx.x * blockDim.x + threadIdx.x;

  int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
  int address_base = m_offset * stride + c_offset;
  int address_increment = inner_loop_stride * stride;

  for (int i = 0; i < loop_count; i++) {
    accscalar_t x_math[PARALLEL_LOADS];
    accscalar_t x_count_inv[PARALLEL_LOADS];
    accscalar_t is_valid[PARALLEL_LOADS];

    // load multiple data in
    #pragma unroll
    for (int j = 0; j < PARALLEL_LOADS; j++) {
      if (c_offset < stride && m_offset < reduction_size) {
        x_math[j] = input[address_base];
        count[j]++;
        x_count_inv[j] = accscalar_t(1) / count[j];
        is_valid[j] = accscalar_t(1);
      } else {
        // Out-of-range lanes contribute a zero-weight sample.
        x_math[j] = accscalar_t(0);
        x_count_inv[j] = accscalar_t(0);
        is_valid[j] = accscalar_t(0);
      }
      m_offset += inner_loop_stride;
      address_base += address_increment;
    }

    // calculate mean/m2n with welford
    #pragma unroll
    for (int j = 0; j < PARALLEL_LOADS; j++) {
      accscalar_t delta0 = x_math[j] - x_mean[j];
      x_mean[j] += delta0 * x_count_inv[j];
      accscalar_t delta1 = x_math[j] - x_mean[j];
      m_2_n[j] += delta0 * delta1 * is_valid[j];
    }
  }

  // thread reduction to accumulate mean/m_2_n/count between PARALLEL_LOADS
  #pragma unroll
  for (int j = 1; j < PARALLEL_LOADS; j++) {
    welford_merge_element(count[0], x_mean[0], m_2_n[0], count[j], x_mean[j], m_2_n[j]);
  }

  // release x_mean / m_2_n
  auto mean_th = x_mean[0];
  auto m2_th = m_2_n[0];
  auto count_th = count[0];

  // block-wise reduction with shared memory (since reduction cannot be done within a warp)
  static __shared__ accscalar_t shmem_mean[MAX_BLOCK_SIZE];
  static __shared__ accscalar_t shmem_m2n[MAX_BLOCK_SIZE];
  static __shared__ int shmem_count[MAX_BLOCK_SIZE];

  welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n);

  // grid reduction if needed (coop launch used at the first place)
  if (gridDim.y > 1) {
    // Staging layout: [mean | m2n | counts], each region stride*gridDim.y
    // accscalar_t slots (counts reinterpreted as int).
    volatile accscalar_t* staging_mean = staging_data;
    volatile accscalar_t* staging_m2n = &staging_data[stride*gridDim.y];
    volatile int* staging_count = reinterpret_cast<volatile int*>(&staging_m2n[stride*gridDim.y]);

    address_base = c_offset + blockIdx.y * stride;
    // write data to staging_data;
    if (threadIdx.y == 0 && c_offset < stride) {
      staging_mean[address_base] = mean_th;
      staging_m2n[address_base] = m2_th;
      staging_count[address_base] = count_th;
    }

    __threadfence();  // make staged writes visible device-wide before signaling
    __syncthreads(); // ensuring writes to staging_ is visible to all blocks

    __shared__ bool is_last_block_done;
    // mark block done
    if (threadIdx.x == 0 && threadIdx.y == 0) {
      int old = atomicAdd(&semaphores[blockIdx.x], 1);
      // Only the last y-block for this channel tile performs the final merge.
      is_last_block_done = (old == (gridDim.y-1));
    }

    __syncthreads();

    // check that all data is now available in global memory
    if (is_last_block_done) {
      count_th = 0;
      mean_th = accscalar_t(0.0);
      m2_th = accscalar_t(0.0);

      for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) {
        address_base = c_offset + y * stride;
        int num_new = c_offset < stride ? staging_count[address_base] : 0;
        accscalar_t mean_new = c_offset < stride ? staging_mean[address_base] : accscalar_t(0.0);
        accscalar_t m2n_new = c_offset < stride ? staging_m2n[address_base] : accscalar_t(0.0);

        welford_merge_element(count_th, mean_th, m2_th, num_new, mean_new, m2n_new);
      }

      welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n);
      if (threadIdx.y == 0 && c_offset < stride) {
        out_mean[c_offset] = static_cast<outscalar_t>(mean_th);
        out_var_biased[c_offset] = static_cast<outscalar_t>(m2_th / count_th);
      }
    }
  } else {
    // Single y-block: row 0 already holds the final per-channel result.
    if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) {
      out_mean[c_offset] = static_cast<outscalar_t>(mean_th);
      out_var_biased[c_offset] = static_cast<outscalar_t>(m2_th / count_th);
    }
  }
}
// parallel welford kernel to further reduce mean / biased_var
// into mean / unbiased_var / inv_std across multiple processes.
// mean / var_biased are [world_size, feature_size]; each process contributed
// statistics over `numel` elements. Grid-stride loop over features.
template <typename scalar_t>
__global__ void welford_kernel_parallel(
      const scalar_t* __restrict__ mean,
      const scalar_t* __restrict__ var_biased,
      scalar_t* __restrict__ out_mean,
      scalar_t* __restrict__ out_var,
      scalar_t* __restrict__ inv_std,
      const int world_size,
      const int feature_size,
      const float eps,
      const int numel) {

  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < feature_size; i += gridDim.x * blockDim.x) {
    // load data;
    int address = i;
    scalar_t x_mean = 0;
    scalar_t m_2_n = 0;
    int count = 0;
    // Merge the per-process partials one at a time; a biased variance over
    // numel elements corresponds to m2n = var_biased * numel.
    for (int j = 0; j < world_size; j++) {
      welford_merge_element(count, x_mean, m_2_n, numel, mean[address], var_biased[address]*numel);
      address += feature_size;
    }
    out_mean[i] = x_mean;
    // Unbiased variance (Bessel's correction) and inv_std from the biased one.
    out_var[i] = m_2_n/ (count - 1);
    inv_std[i] = scalar_t(1) / sqrt(m_2_n/count + eps);
  }
}
// Elementwise BN forward for channels-last tensors, optionally fusing a
// residual add (z) and ReLU. Layout (m, c): x tiles channels, y tiles the
// flattened reduction dim; PARALLEL_LOADS iterations per pass hide latency.
// NULL weight/shift mean identity affine (scale 1, shift 0).
template <
  typename scalar_t,
  typename accscalar_t,
  typename layerscalar_t,
  int PARALLEL_LOADS>
__global__ void batchnorm_forward_c_last_kernel(
      const scalar_t* __restrict__ input,
      const scalar_t* __restrict__ z,
      const accscalar_t* __restrict__ mean,
      const accscalar_t* __restrict__ inv_std,
      const layerscalar_t* __restrict__ weight,
      const layerscalar_t* __restrict__ shift,
      scalar_t* __restrict__ out,
      const int reduction_size,
      const int stride,
      const bool fuse_relu) {
  // tensor dimension (m,c)
  // loop along m dimension
  int inner_loop_stride = blockDim.y * gridDim.y;

  // offset along m dimension
  int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
  int c_offset = blockIdx.x * blockDim.x + threadIdx.x;

  // Fix: the power-of-two-rounded grid can cover more channels than `stride`,
  // so bail out before indexing the per-channel arrays below to avoid
  // out-of-bounds reads. Early return is safe: no __syncthreads() here.
  if (c_offset >= stride) {
    return;
  }

  auto m_c = mean[c_offset];
  auto inv_std_c = static_cast<accscalar_t>(inv_std[c_offset]);
  auto w_c = weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[c_offset]);
  auto s_c = shift == NULL ? accscalar_t(0.0) : static_cast<accscalar_t>(shift[c_offset]);

  int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
  int address_base = m_offset * stride + c_offset;
  int address_increment = inner_loop_stride * stride;

  for (int i = 0; i < loop_count; i++) {
    #pragma unroll
    for (int j = 0; j < PARALLEL_LOADS; j++) {
      if (m_offset < reduction_size) {
        // Normalize + affine in accumulation precision.
        auto tmp = w_c * (static_cast<accscalar_t>(input[address_base]) - m_c ) * inv_std_c + s_c;
        if (z != NULL) {
          tmp += z[address_base];
        }
        out[address_base] = (fuse_relu && tmp <= accscalar_t(0.0) ? scalar_t(0.0) : static_cast<scalar_t>(tmp));
      }
      m_offset += inner_loop_stride;
      address_base += address_increment;
    }
  }
}
// Backward of the fused BN(+z)+ReLU for channels-last tensors: recompute the
// pre-ReLU activation and pass grad_output through only where it was
// positive, writing zero elsewhere. Layout (m, c) as in the forward kernel.
template <
  typename scalar_t,
  typename accscalar_t,
  typename layerscalar_t,
  int PARALLEL_LOADS>
__global__ void relu_backward_c_last_kernel(
      const scalar_t* __restrict__ grad_output,
      const scalar_t* __restrict__ input,
      const scalar_t* __restrict__ z,
      const accscalar_t* __restrict__ mean,
      const accscalar_t* __restrict__ inv_std,
      const layerscalar_t* __restrict__ weight,
      const layerscalar_t* __restrict__ shift,
      scalar_t* __restrict__ out,
      const int reduction_size,
      const int stride) {
  // tensor dimension (m,c)
  // loop along m dimension
  int inner_loop_stride = blockDim.y * gridDim.y;

  // offset along m dimension
  int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
  int c_offset = blockIdx.x * blockDim.x + threadIdx.x;

  // Fix: the power-of-two-rounded grid can cover more channels than `stride`,
  // so bail out before indexing the per-channel arrays below to avoid
  // out-of-bounds reads. Early return is safe: no __syncthreads() here.
  if (c_offset >= stride) {
    return;
  }

  auto m_c = mean[c_offset];
  auto inv_std_c = static_cast<accscalar_t>(inv_std[c_offset]);
  auto w_c = weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[c_offset]);
  auto s_c = shift == NULL ? accscalar_t(0.0) : static_cast<accscalar_t>(shift[c_offset]);

  int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
  int address_base = m_offset * stride + c_offset;
  int address_increment = inner_loop_stride * stride;

  for (int i = 0; i < loop_count; i++) {
    #pragma unroll
    for (int j = 0; j < PARALLEL_LOADS; j++) {
      if (m_offset < reduction_size) {
        // Recompute the pre-ReLU forward value to recover the ReLU mask.
        auto tmp = w_c * (static_cast<accscalar_t>(input[address_base]) - m_c ) * inv_std_c + s_c;
        if (z != NULL) {
          tmp += z[address_base];
        }
        out[address_base] = (tmp <= accscalar_t(0.0) ? scalar_t(0.0) : grad_output[address_base]);
      }
      m_offset += inner_loop_stride;
      address_base += address_increment;
    }
  }
}
// batchnorm backward reduction kernel for a channels-last ("c last") tensor,
// viewed as a 2-D (m, c) = (reduction_size, stride) matrix.
//
// Per channel c it accumulates
//   sum_dy[c]     = sum_m grad_output[m, c]
//   sum_dy_xmu[c] = sum_m grad_output[m, c] * (input[m, c] - mean[c])
// and writes mean_dy = sum_dy / reduction_size and
// mean_dy_xmu = sum_dy_xmu / reduction_size, plus the optional
// grad_bias (= sum_dy) and grad_weight (= sum_dy_xmu * inv_std).
//
// Launch layout: threadIdx.x/blockIdx.x walk the channel dimension while
// threadIdx.y/blockIdx.y tile the m dimension; each thread strides through m
// in steps of blockDim.y * gridDim.y, PARALLEL_LOADS elements per iteration
// to hide memory latency. Partial sums are combined within a block by
// merge_block_vertical (shared memory); across blockIdx.y they go through the
// global 'staging_data' buffer guarded by the per-blockIdx.x 'semaphores'
// counters — the last y-block to increment the counter re-reduces the staged
// partials and writes the final outputs.
template
   <typename scalar_t,
    typename accscalar_t,
    typename layerscalar_t,
    int PARALLEL_LOADS>
__global__ void reduce_bn_c_last_kernel(
    const scalar_t* __restrict__ input,
    const scalar_t* __restrict__ grad_output,
    const accscalar_t* __restrict__ mean,
    const accscalar_t* __restrict__ inv_std,
    accscalar_t* __restrict__ mean_dy,
    accscalar_t* __restrict__ mean_dy_xmu,
    layerscalar_t* __restrict__ grad_weight,
    layerscalar_t* __restrict__ grad_bias,
    volatile accscalar_t* staging_data,
    int* semaphores,
    const int reduction_size,
    const int stride) {
  // hide latency with concurrency
  accscalar_t sum_dy[PARALLEL_LOADS];
  accscalar_t sum_dy_xmu[PARALLEL_LOADS];
#pragma unroll
  for (int i = 0; i < PARALLEL_LOADS; i++) {
    sum_dy[i] = accscalar_t(0);
    sum_dy_xmu[i] = accscalar_t(0);
  }
  // tensor dimension (m,c); loop along m dimension
  int inner_loop_stride = blockDim.y * gridDim.y;
  // offsets along m and c
  int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
  int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
  int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
  int address_base = m_offset * stride + c_offset;
  int address_increment = inner_loop_stride * stride;
  // BUGFIX: guard the per-channel statistics loads. Threads with
  // c_offset >= stride exist whenever stride is not a multiple of blockDim.x
  // and previously read mean/inv_std out of bounds. Their accumulators stay
  // zero (the main loop zero-fills for them) and 'factor' is only consumed
  // under a c_offset < stride guard, so the substitute values are inert.
  accscalar_t r_mean = accscalar_t(0);
  accscalar_t factor = accscalar_t(1);
  if (c_offset < stride) {
    r_mean = mean[c_offset];
    factor = inv_std[c_offset];
  }
  for (int i = 0; i < loop_count; i++) {
    accscalar_t x_input[PARALLEL_LOADS];
    accscalar_t x_grad_output[PARALLEL_LOADS];
    // load multiple data in
#pragma unroll
    for (int j = 0; j < PARALLEL_LOADS; j++) {
      if (c_offset < stride && m_offset < reduction_size) {
        x_input[j] = input[address_base];
        x_grad_output[j] = grad_output[address_base];
      } else {
        x_input[j] = accscalar_t(0);
        x_grad_output[j] = accscalar_t(0);
      }
      m_offset += inner_loop_stride;
      address_base += address_increment;
    }
    // calculate sum_dy / sum_dy_xmu
#pragma unroll
    for (int j = 0; j < PARALLEL_LOADS; j++) {
      sum_dy[j] += x_grad_output[j];
      sum_dy_xmu[j] += x_grad_output[j] * (x_input[j] - r_mean);
    }
  }
  // thread reduction to accumulate sum_dy / sum_dy_xmu between PARALLEL_LOADS
#pragma unroll
  for (int j = 1; j < PARALLEL_LOADS; j++) {
    sum_dy[0] += sum_dy[j];
    sum_dy_xmu[0] += sum_dy_xmu[j];
  }
  // release array of registers
  auto sum_dy_th = sum_dy[0];
  auto sum_dy_xmu_th = sum_dy_xmu[0];
  // block-wise reduction with shared memory (since reduction cannot be done within a warp)
  static __shared__ accscalar_t shmem_sum_dy[MAX_BLOCK_SIZE];
  static __shared__ accscalar_t shmem_sum_dy_xmu[MAX_BLOCK_SIZE];
  merge_block_vertical(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu);
  // grid reduction if needed (coop launch used at the first place)
  if (gridDim.y > 1) {
    volatile accscalar_t* staging_sum_dy = staging_data;
    volatile accscalar_t* staging_sum_dy_xmu = &staging_data[stride*gridDim.y];
    address_base = c_offset + blockIdx.y * stride;
    // write data to staging_data;
    if (threadIdx.y == 0 && c_offset < stride) {
      staging_sum_dy[address_base] = sum_dy_th;
      staging_sum_dy_xmu[address_base] = sum_dy_xmu_th;
    }
    __threadfence();
    __syncthreads(); // ensuring writes to staging_ is visible to all blocks
    __shared__ bool is_last_block_done;
    // mark block done
    if (threadIdx.x == 0 && threadIdx.y == 0) {
      int old = atomicAdd(&semaphores[blockIdx.x], 1);
      is_last_block_done = (old == (gridDim.y-1));
    }
    __syncthreads();
    // check that all data is now available in global memory
    if (is_last_block_done) {
      sum_dy_th = accscalar_t(0.0);
      sum_dy_xmu_th = accscalar_t(0.0);
      for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) {
        address_base = c_offset + y * stride;
        sum_dy_th += (c_offset < stride ? staging_sum_dy[address_base] : accscalar_t(0.0));
        sum_dy_xmu_th += (c_offset < stride ? staging_sum_dy_xmu[address_base] : accscalar_t(0.0));
      }
      merge_block_vertical(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu);
      if (threadIdx.y == 0 && c_offset < stride) {
        if (grad_bias != NULL) {
          grad_bias[c_offset] = static_cast<layerscalar_t>(sum_dy_th);
        }
        if (grad_weight != NULL) {
          grad_weight[c_offset] = static_cast<layerscalar_t>(sum_dy_xmu_th * factor);
        }
        mean_dy[c_offset] = sum_dy_th / reduction_size;
        mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size;
      }
    }
  } else {
    if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) {
      if (grad_bias != NULL) {
        grad_bias[c_offset] = static_cast<layerscalar_t>(sum_dy_th);
      }
      if (grad_weight != NULL) {
        grad_weight[c_offset] = static_cast<layerscalar_t>(sum_dy_xmu_th * factor);
      }
      mean_dy[c_offset] = sum_dy_th / reduction_size;
      mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size;
    }
  }
}
// elementwise BN backward kernel for a channels-last tensor viewed as
// (m, c) = (reduction_size, stride):
//   grad_input[m,c] = (grad_output[m,c] - mean_dy[c]
//                      - (input[m,c] - mean[c]) * inv_std[c]^2 * mean_dy_xmu[c])
//                     * weight[c] * inv_std[c]
// (weight treated as 1 when NULL). Same launch layout as the other c-last
// kernels: x walks channels, y tiles m with a PARALLEL_LOADS-deep stride loop.
template <
    typename scalar_t,
    typename accscalar_t,
    typename layerscalar_t,
    int PARALLEL_LOADS>
__global__ void batchnorm_backward_c_last_kernel(
    const scalar_t* __restrict__ grad_output,
    const scalar_t* __restrict__ input,
    const accscalar_t* __restrict__ mean,
    const accscalar_t* __restrict__ inv_std,
    const layerscalar_t* __restrict__ weight,
    const accscalar_t* __restrict__ mean_dy,
    const accscalar_t* __restrict__ mean_dy_xmu,
    scalar_t* __restrict__ grad_input,
    const int reduction_size,
    const int stride) {
  // tensor dimension (m,c); loop along m dimension
  int inner_loop_stride = blockDim.y * gridDim.y;
  // offsets along m and c
  int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
  int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
  // BUGFIX: guard the per-channel parameter loads. Threads with
  // c_offset >= stride previously read mean/mean_dy/inv_std/weight/
  // mean_dy_xmu out of bounds; they never write (the store below is guarded),
  // so zero substitutes are safe.
  accscalar_t m_c = accscalar_t(0);
  accscalar_t m_dy_c = accscalar_t(0);
  accscalar_t factor_1_c = accscalar_t(0);
  accscalar_t factor_2_c = accscalar_t(0);
  if (c_offset < stride) {
    m_c = mean[c_offset];
    m_dy_c = mean_dy[c_offset];
    factor_1_c = inv_std[c_offset];
    // factor_2_c = weight * inv_std (weight defaults to 1 when absent)
    factor_2_c = (weight == NULL ? accscalar_t(1.0)
                                 : static_cast<accscalar_t>(weight[c_offset])) * factor_1_c;
    // factor_1_c = inv_std^2 * mean_dy_xmu
    factor_1_c = factor_1_c * factor_1_c * mean_dy_xmu[c_offset];
  }
  int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
  int address_base = m_offset * stride + c_offset;
  int address_increment = inner_loop_stride * stride;
  for (int i = 0; i < loop_count; i++) {
#pragma unroll
    for (int j = 0; j < PARALLEL_LOADS; j++) {
      if (c_offset < stride && m_offset < reduction_size) {
        grad_input[address_base] = static_cast<scalar_t>(
            (static_cast<accscalar_t>(grad_output[address_base]) - m_dy_c -
             (static_cast<accscalar_t>(input[address_base]) - m_c) * factor_1_c)
            * factor_2_c);
      }
      m_offset += inner_loop_stride;
      address_base += address_increment;
    }
  }
}
// Host launcher: per-feature mean / biased variance of an NCHW-style tensor
// (batch, feature, spatial...) via welford_kernel. One block per feature
// (grid = feature_size); block.x tiles the spatial extent, block.y the batch.
// Returns {mean, biased variance}, both in the accumulation dtype chosen by
// promote_scalartype (presumably float for half input — confirm there).
std::vector<at::Tensor> welford_mean_var_CUDA(const at::Tensor input) {
  const auto batch_size = input.size(0);
  const auto feature_size = input.size(1);
  auto space_size = get_tensor_spatial_size(input);
  // Outputs are allocated in the (possibly wider) accumulation dtype.
  auto scalar_type = promote_scalartype(input);
  at::Tensor out_var_biased = at::empty({feature_size}, input.options().dtype(scalar_type));
  at::Tensor out_mean = at::empty({feature_size}, input.options().dtype(scalar_type));
  // Cap block.y so block.x keeps at least a warp's worth of lanes;
  // h_last_pow2 rounds extents down to a power of two.
  int block_y = min(h_last_pow2(batch_size), int(MAX_BLOCK_SIZE / 32));
  int block_x = max(1, min(MAX_BLOCK_SIZE / block_y, h_last_pow2(space_size)));
  const dim3 block(block_x, block_y);
  const dim3 grid(feature_size);
  auto stream = at::cuda::getCurrentCUDAStream();
  {
    using namespace at;
    // Macro dispatches on float/half input; scalar_t_0 is the bound dtype.
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "welford_mean_var_kernel",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      welford_kernel<scalar_t_0, accscalar_t, accscalar_t><<<grid, block, 0, stream>>>(
          input.data<scalar_t_0>(),
          out_mean.data<accscalar_t>(),
          out_var_biased.data<accscalar_t>(),
          batch_size,
          feature_size,
          space_size);
    );
  }
  return {out_mean, out_var_biased};
}
// Host launcher for the batch-norm forward transform on an NCHW-style tensor
// (normalization itself lives in batchnorm_forward_kernel, not visible here).
// weight/shift are optional per-feature parameters. Grid.x carries one slot
// per feature; grid.y/grid.z tile batch and spatial extents.
at::Tensor batchnorm_forward_CUDA(
    const at::Tensor input,
    const at::Tensor mean,
    const at::Tensor inv_std,
    const at::optional<at::Tensor> weight,
    const at::optional<at::Tensor> shift) {
  const auto batch_size = input.size(0);
  const auto feature_size = input.size(1);
  at::Tensor out = at::empty_like(input);
  auto space_size = get_tensor_spatial_size(input);
  // block.x over spatial, block.y over batch; product capped at MAX_BLOCK_SIZE.
  int block_x = max(32, min(MAX_BLOCK_SIZE, h_last_pow2(space_size)/4));
  int block_y = max(1, min(MAX_BLOCK_SIZE/block_x, h_last_pow2(batch_size)/4));
  const dim3 block(block_x, block_y);
  // 65535 is the CUDA limit for grid.y/grid.z.
  int grid_z = max(1, min(65535, h_last_pow2(space_size)/4/block_x));
  int batch_group_size = max(1, min(65535, h_last_pow2(batch_size)/block_y));
  const dim3 grid(feature_size, batch_group_size, grid_z);
  auto stream = at::cuda::getCurrentCUDAStream();
  // Mixed-precision path: half input with float weight/shift dispatches with
  // layerscalar_t == accscalar_t (float).
  if (input.scalar_type() == at::ScalarType::Half
      && weight.has_value() &&
      weight.value().scalar_type() == at::ScalarType::Float) {
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      batchnorm_forward_kernel<scalar_t_0, accscalar_t, accscalar_t><<<grid, block, 0, stream>>>(
          input.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
          shift.has_value() ? shift.value().data<accscalar_t>() : NULL,
          out.data<scalar_t_0>(),
          space_size,
          batch_size);
    );
  } else {
    // Otherwise weight/shift (if present) must share the input's dtype.
    if (weight.has_value()) {
      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
          "input.scalar_type() is not supported with weight.scalar_type()");
    }
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      batchnorm_forward_kernel<scalar_t_0, accscalar_t, scalar_t_0><<<grid, block, 0, stream>>>(
          input.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
          shift.has_value() ? shift.value().data<scalar_t_0>() : NULL,
          out.data<scalar_t_0>(),
          space_size,
          batch_size);
    );
  }
  return out;
}
// Host launcher for the batch-norm backward reduction (NCHW layout):
// reduce_bn_kernel produces per-feature mean_dy / mean_dy_xmu and, when a
// weight tensor is supplied, grad_weight / grad_bias. One block per feature.
// Returns {mean_dy, mean_dy_xmu, grad_weight, grad_bias}; the gradients are
// zero-sized placeholders when weight is absent.
std::vector<at::Tensor> reduce_bn_CUDA(
    const at::Tensor grad_output,
    const at::Tensor input,
    const at::Tensor mean,
    const at::Tensor inv_std,
    const at::optional<at::Tensor> weight)
{
  const auto batch_size = input.size(0);
  const auto feature_size = input.size(1);
  auto scalar_type = promote_scalartype(input);
  at::Tensor mean_dy = at::empty({feature_size}, mean.options());
  at::Tensor mean_dy_xmu = at::empty({feature_size}, mean.options());
  at::Tensor grad_weight;
  at::Tensor grad_bias;
  if (weight.has_value()) {
    grad_weight = at::empty({feature_size}, weight.value().options());
    grad_bias = at::empty({feature_size}, weight.value().options());
  } else {
    // Placeholders — an uninitialized at::Tensor cannot be returned.
    grad_weight = at::empty({0}, mean.options());
    grad_bias = at::empty({0}, mean.options());
  }
  auto space_size = get_tensor_spatial_size(input);
  // block.y over batch (capped to keep >= 32 lanes in x), block.x over spatial.
  int block_y = min(h_last_pow2(batch_size), int(MAX_BLOCK_SIZE/ 32));
  int block_x = max(1, min(MAX_BLOCK_SIZE/ block_y, h_last_pow2(space_size)));
  const dim3 block(block_x, block_y);
  const dim3 grid(feature_size);
  auto stream = at::cuda::getCurrentCUDAStream();
  // Mixed-precision path: half input with float weight -> float gradients.
  if (input.scalar_type() == at::ScalarType::Half
      && weight.has_value() &&
      weight.value().scalar_type() == at::ScalarType::Float) {
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      reduce_bn_kernel<scalar_t_0, accscalar_t, accscalar_t><<<grid, block, 0, stream>>>(
          input.data<scalar_t_0>(),
          grad_output.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          mean_dy.data<accscalar_t>(),
          mean_dy_xmu.data<accscalar_t>(),
          weight.has_value() ? grad_weight.data<accscalar_t>() : NULL,
          weight.has_value() ? grad_bias.data<accscalar_t>() : NULL,
          batch_size,
          feature_size,
          space_size);
    );
  } else {
    if (weight.has_value()) {
      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
          "input.scalar_type() is not supported with weight.scalar_type()");
    }
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      reduce_bn_kernel<scalar_t_0, accscalar_t, scalar_t_0><<<grid, block, 0, stream>>>(
          input.data<scalar_t_0>(),
          grad_output.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          mean_dy.data<accscalar_t>(),
          mean_dy_xmu.data<accscalar_t>(),
          weight.has_value() ? grad_weight.data<scalar_t_0>() : NULL,
          weight.has_value() ? grad_bias.data<scalar_t_0>() : NULL,
          batch_size,
          feature_size,
          space_size);
    );
  }
  return {mean_dy, mean_dy_xmu, grad_weight, grad_bias};
}
// Host launcher for the elementwise batch-norm backward pass (NCHW layout):
// batchnorm_backward_kernel combines the pre-reduced mean_dy / mean_dy_xmu
// with the saved statistics to produce grad_input. Same grid/block scheme as
// batchnorm_forward_CUDA.
at::Tensor batchnorm_backward_CUDA(
    const at::Tensor grad_output,
    const at::Tensor input,
    const at::Tensor mean,
    const at::Tensor inv_std,
    const at::optional<at::Tensor> weight,
    const at::Tensor mean_dy,
    const at::Tensor mean_dy_xmu) {
  const auto batch_size = input.size(0);
  const auto feature_size = input.size(1);
  at::Tensor grad_input = at::empty_like(input);
  auto space_size = get_tensor_spatial_size(input);
  // block.x over spatial, block.y over batch; product capped at MAX_BLOCK_SIZE.
  int block_x = max(32, min(MAX_BLOCK_SIZE, h_last_pow2(space_size)/4));
  int block_y = max(1, min(MAX_BLOCK_SIZE/block_x, h_last_pow2(batch_size)/4));
  const dim3 block(block_x, block_y);
  // 65535 is the CUDA limit for grid.y/grid.z.
  int grid_z = max(1, min(65535, h_last_pow2(space_size)/4/block_x));
  int batch_group_size = max(1, min(65535, h_last_pow2(batch_size)/block_y));
  const dim3 grid(feature_size, batch_group_size, grid_z);
  auto stream = at::cuda::getCurrentCUDAStream();
  // Mixed-precision path: half input with float weight.
  if (input.scalar_type() == at::ScalarType::Half
      && weight.has_value() &&
      weight.value().scalar_type() == at::ScalarType::Float) {
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      batchnorm_backward_kernel<scalar_t_0, accscalar_t, accscalar_t><<<grid, block, 0, stream>>>(
          grad_output.data<scalar_t_0>(),
          input.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
          mean_dy.data<accscalar_t>(),
          mean_dy_xmu.data<accscalar_t>(),
          grad_input.data<scalar_t_0>(),
          space_size,
          batch_size);
    );
  } else {
    // Otherwise weight (if present) must share the input's dtype.
    if (weight.has_value()) {
      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
          "input.scalar_type() is not supported with weight.scalar_type()");
    }
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      batchnorm_backward_kernel<scalar_t_0, accscalar_t, scalar_t_0><<<grid, block, 0, stream>>>(
          grad_output.data<scalar_t_0>(),
          input.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
          mean_dy.data<accscalar_t>(),
          mean_dy_xmu.data<accscalar_t>(),
          grad_input.data<scalar_t_0>(),
          space_size,
          batch_size);
    );
  }
  return grad_input;
}
// Host launcher: merge per-node (world_size x feature_size) means and biased
// variances into global per-feature statistics via welford_kernel_parallel.
// 'numel' is the global element count behind each node's statistics and 'eps'
// the variance epsilon (exact use lives in the kernel, not visible here).
// Returns {mean, var, inv_std}.
std::vector<at::Tensor> welford_parallel_CUDA(const at::Tensor mean_feature_nodes,
                                              const at::Tensor var_biased,
                                              int numel,
                                              const float eps) {
  const auto world_size = mean_feature_nodes.size(0);
  const auto feature_size = mean_feature_nodes.size(1);
  at::Tensor out_var = at::empty({feature_size}, var_biased.options());
  at::Tensor inv_std = at::empty_like(out_var);
  at::Tensor out_mean = at::empty_like(out_var);
  // TODO(jie): tile this for memory coalescing!
  // 1-D launch over features.
  const int block = std::min(h_last_pow2(feature_size), MAX_BLOCK_SIZE);
  const int grid = std::max<int>(1, feature_size / block);
  auto stream = at::cuda::getCurrentCUDAStream();
  {
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(mean_feature_nodes.scalar_type(), 0, "welford_parallel_kernel",
      welford_kernel_parallel<scalar_t_0><<<grid, block, 0, stream>>>(
          mean_feature_nodes.data<scalar_t_0>(),
          var_biased.data<scalar_t_0>(),
          out_mean.data<scalar_t_0>(),
          out_var.data<scalar_t_0>(),
          inv_std.data<scalar_t_0>(),
          world_size,
          feature_size,
          eps,
          numel);
    );
  }
  return {out_mean, out_var, inv_std};
}
// Host launcher: per-channel mean / biased variance for a channels-last
// tensor, viewed as (reduction_size, stride) with stride = innermost (channel)
// extent. When the launch config tiles the reduction over grid.y > 1, the
// kernel needs a global staging buffer plus per-channel-block semaphores for
// the cross-block reduction. Returns {mean, biased variance}.
std::vector<at::Tensor> welford_mean_var_c_last_CUDA(const at::Tensor input) {
  const auto stride = input.size(input.ndimension()-1);
  const auto reduction_size = input.numel() / stride;
  auto scalar_type = promote_scalartype(input);
  auto option = input.options().dtype(scalar_type);
  at::Tensor out_var_biased = at::empty({stride}, option);
  at::Tensor out_mean = at::empty({stride}, option);
  dim3 block;
  dim3 grid;
  // 'true' presumably requests the cooperative/grid-reduction layout — confirm
  // against flexible_launch_configs.
  flexible_launch_configs(reduction_size, stride, block, grid, true);
  at::Tensor staging_data;
  at::Tensor semaphores;
  if (grid.y > 1) {
    // 4 staged quantities per (channel, y-block); exact layout is defined by
    // welford_kernel_c_last (not visible here).
    staging_data = at::empty({4*stride*grid.y}, option);
    semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt));
  }
  auto stream = at::cuda::getCurrentCUDAStream();
  {
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "welford_mean_var_c_last",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr;
      int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr;
      welford_kernel_c_last<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>
          <<<grid, block, 0, stream>>>(
          input.data<scalar_t_0>(),
          out_mean.data<accscalar_t>(),
          out_var_biased.data<accscalar_t>(),
          staging_data_ptr,
          semaphores_ptr,
          reduction_size,
          stride);
    );
  }
  return {out_mean, out_var_biased};
}
// Host launcher for the channels-last batch-norm forward transform
// (batchnorm_forward_c_last_kernel), with an optional residual tensor 'z'
// added before the optional fused ReLU ('fuse_relu') — exact formula lives in
// the kernel. Tensor is viewed as (reduction_size, stride).
at::Tensor batchnorm_forward_c_last_CUDA(
    const at::Tensor input,
    const at::optional<at::Tensor> z,
    const at::Tensor mean,
    const at::Tensor inv_std,
    const at::optional<at::Tensor> weight,
    const at::optional<at::Tensor> shift,
    const bool fuse_relu) {
  const auto stride = input.size(input.ndimension()-1);
  const auto reduction_size = input.numel() / stride;
  at::Tensor out = at::empty_like(input);
  dim3 block;
  dim3 grid;
  flexible_launch_configs(reduction_size, stride, block, grid);
  auto stream = at::cuda::getCurrentCUDAStream();
  // Mixed-precision path: half input with float weight/shift.
  if (input.scalar_type() == at::ScalarType::Half
      && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) {
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      batchnorm_forward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>
          <<<grid, block, 0, stream>>>(
          input.data<scalar_t_0>(),
          z.has_value() ? z.value().data<scalar_t_0>() : NULL,
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
          shift.has_value() ? shift.value().data<accscalar_t>(): NULL,
          out.data<scalar_t_0>(),
          reduction_size,
          stride,
          fuse_relu);
    );
  } else {
    // Otherwise weight/shift (if present) must share the input's dtype.
    if (weight.has_value()) {
      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
          "input.scalar_type() is not supported with weight.scalar_type()");
    }
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      batchnorm_forward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>
          <<<grid, block, 0, stream>>>(
          input.data<scalar_t_0>(),
          z.has_value() ? z.value().data<scalar_t_0>() : NULL,
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
          shift.has_value() ? shift.value().data<scalar_t_0>(): NULL,
          out.data<scalar_t_0>(),
          reduction_size,
          stride,
          fuse_relu);
    );
  }
  return out;
}
// Host launcher for the channels-last backward reduction
// (reduce_bn_c_last_kernel): per-channel mean_dy / mean_dy_xmu plus optional
// grad_weight / grad_bias. When grid.y > 1 the kernel stages its two partial
// sums in a global buffer coordinated by per-channel-block semaphores.
// Returns {mean_dy, mean_dy_xmu, grad_weight, grad_bias}.
std::vector<at::Tensor> reduce_bn_c_last_CUDA(
    const at::Tensor grad_output,
    const at::Tensor input,
    const at::Tensor mean,
    const at::Tensor inv_std,
    const at::optional<at::Tensor> weight) {
  const auto stride = input.size(input.ndimension()-1);
  const auto reduction_size = input.numel() / stride;
  at::Tensor mean_dy = at::empty({stride}, mean.options());
  at::Tensor mean_dy_xmu = at::empty({stride}, mean.options());
  at::Tensor grad_weight;
  at::Tensor grad_bias;
  if (weight.has_value()) {
    grad_weight = at::empty({stride}, weight.value().options());
    grad_bias = at::empty({stride}, weight.value().options());
  } else {
    // because I cannot return an uninitialized at::Tensor
    grad_weight = at::empty({0}, mean.options());
    grad_bias = at::empty({0}, mean.options());
  }
  dim3 block;
  dim3 grid;
  flexible_launch_configs(reduction_size, stride, block, grid, true);
  at::Tensor staging_data;
  at::Tensor semaphores;
  if (grid.y > 1) {
    // Two staged quantities (sum_dy, sum_dy_xmu) per (channel, y-block).
    staging_data = at::empty({2*stride*grid.y}, mean.options());
    semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt));
  }
  auto stream = at::cuda::getCurrentCUDAStream();
  // Mixed-precision path: half input with float weight -> float gradients.
  if (input.scalar_type() == at::ScalarType::Half
      && weight.has_value()
      && weight.value().scalar_type() == at::ScalarType::Float) {
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr;
      int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr;
      reduce_bn_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>
          <<<grid, block, 0, stream>>>(
          input.data<scalar_t_0>(),
          grad_output.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          mean_dy.data<accscalar_t>(),
          mean_dy_xmu.data<accscalar_t>(),
          weight.has_value() ? grad_weight.data<accscalar_t>() : NULL,
          weight.has_value() ?grad_bias.data<accscalar_t>() : NULL,
          staging_data_ptr,
          semaphores_ptr,
          reduction_size,
          stride);
    );
  } else {
    if (weight.has_value()) {
      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
          "input.scalar_type() is not supported with weight.scalar_type()");
    }
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr;
      int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr;
      reduce_bn_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>
          <<<grid, block, 0, stream>>>(
          input.data<scalar_t_0>(),
          grad_output.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          mean_dy.data<accscalar_t>(),
          mean_dy_xmu.data<accscalar_t>(),
          weight.has_value() ? grad_weight.data<scalar_t_0>() : NULL,
          weight.has_value() ?grad_bias.data<scalar_t_0>() : NULL,
          staging_data_ptr,
          semaphores_ptr,
          reduction_size,
          stride);
    );
  }
  return {mean_dy, mean_dy_xmu, grad_weight, grad_bias};
}
// Host launcher for the channels-last elementwise batch-norm backward pass
// (batchnorm_backward_c_last_kernel): combines pre-reduced mean_dy /
// mean_dy_xmu with the saved statistics into grad_input.
at::Tensor batchnorm_backward_c_last_CUDA(
    const at::Tensor grad_output,
    const at::Tensor input,
    const at::Tensor mean,
    const at::Tensor inv_std,
    const at::optional<at::Tensor> weight,
    const at::Tensor mean_dy,
    const at::Tensor mean_dy_xmu) {
  const auto stride = input.size(input.ndimension()-1);
  const auto reduction_size = input.numel() / stride;
  at::Tensor grad_input = at::empty_like(input);
  dim3 block;
  dim3 grid;
  flexible_launch_configs(reduction_size, stride, block, grid);
  auto stream = at::cuda::getCurrentCUDAStream();
  // Mixed-precision path: half input with float weight.
  if (input.scalar_type() == at::ScalarType::Half
      && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) {
    using namespace at;
    // NOTE(review): dispatch tag "batchnorm_forward" is reused from the
    // forward path in the original source.
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      batchnorm_backward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>
          <<<grid, block, 0, stream>>>(
          grad_output.data<scalar_t_0>(),
          input.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
          mean_dy.data<accscalar_t>(),
          mean_dy_xmu.data<accscalar_t>(),
          grad_input.data<scalar_t_0>(),
          reduction_size,
          stride);
    );
  } else {
    // Otherwise weight (if present) must share the input's dtype.
    if (weight.has_value()) {
      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
          "input.scalar_type() is not supported with weight.scalar_type()");
    }
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      batchnorm_backward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>
          <<<grid, block, 0, stream>>>(
          grad_output.data<scalar_t_0>(),
          input.data<scalar_t_0>(),
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
          mean_dy.data<accscalar_t>(),
          mean_dy_xmu.data<accscalar_t>(),
          grad_input.data<scalar_t_0>(),
          reduction_size,
          stride);
    );
  }
  return grad_input;
}
// Host launcher for the ReLU-backward step of the fused BN+ReLU channels-last
// path (relu_backward_c_last_kernel): masks grad_output by whether the
// recomputed pre-ReLU activation (from input/z/mean/inv_std/weight/shift) was
// positive — exact formula lives in the kernel.
at::Tensor relu_backward_c_last_CUDA(
    const at::Tensor grad_output,
    const at::Tensor input,
    const at::optional<at::Tensor> z,
    const at::Tensor mean,
    const at::Tensor inv_std,
    const at::optional<at::Tensor> weight,
    const at::optional<at::Tensor> shift) {
  const auto stride = input.size(input.ndimension()-1);
  const auto reduction_size = input.numel() / stride;
  at::Tensor out = at::empty_like(input);
  dim3 block;
  dim3 grid;
  flexible_launch_configs(reduction_size, stride, block, grid);
  auto stream = at::cuda::getCurrentCUDAStream();
  // Mixed-precision path: half input with float weight/shift.
  if (input.scalar_type() == at::ScalarType::Half
      && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) {
    using namespace at;
    // NOTE(review): dispatch tag "batchnorm_forward" is reused from the
    // forward path in the original source.
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      relu_backward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>
          <<<grid, block, 0, stream>>>(
          grad_output.data<scalar_t_0>(),
          input.data<scalar_t_0>(),
          z.has_value() ? z.value().data<scalar_t_0>() : NULL,
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
          shift.has_value() ? shift.value().data<accscalar_t>(): NULL,
          out.data<scalar_t_0>(),
          reduction_size,
          stride);
    );
  } else {
    // Otherwise weight/shift (if present) must share the input's dtype.
    if (weight.has_value()) {
      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
          "input.scalar_type() is not supported with weight.scalar_type()");
    }
    using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
      using accscalar_t = at::acc_type<scalar_t_0, true>;
      relu_backward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>
          <<<grid, block, 0, stream>>>(
          grad_output.data<scalar_t_0>(),
          input.data<scalar_t_0>(),
          z.has_value() ? z.value().data<scalar_t_0>() : NULL,
          mean.data<accscalar_t>(),
          inv_std.data<accscalar_t>(),
          weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
          shift.has_value() ? shift.value().data<scalar_t_0>(): NULL,
          out.data<scalar_t_0>(),
          reduction_size,
          stride);
    );
  }
  return out;
}
|
c572e98d86422fdcd00a5e1812d093950afacc29.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuSparseMatrix.h"
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#include "cuNDArray_math.h"
using namespace Gadgetron;
// Build a hipsparse dense-vector descriptor over 'vec' and tie its lifetime
// to the returned unique_ptr, whose custom deleter destroys the descriptor.
template<class T>
static auto create_DnVec( cuNDArray<T>& vec){
    hipsparseDnVecDescr_t descriptor;
    hipsparseCreateDnVec(&descriptor, vec.size(), vec.data(), cuda_datatype<T>());
    auto destroy = [](hipsparseDnVecDescr_t d) { hipsparseDestroyDnVec(d); };
    using handle_type = std::decay_t<decltype(*descriptor)>;
    return std::unique_ptr<handle_type, decltype(destroy)>(descriptor, destroy);
}
template<class T> void Gadgetron::sparseMV(T alpha,T beta, const cuCsrMatrix<T> & mat, const cuNDArray<T> & vec_in, cuNDArray<T>& vec_out, bool adjoint){
if (vec_in.get_number_of_elements() != (adjoint ? mat.rows : mat.cols))
throw std::runtime_error("Matrix and input vector have mismatching dimensions");
if (vec_out.get_number_of_elements() != (adjoint ? mat.rows : mat.cols))
throw std::runtime_error("Matrix and output vector have mismatching dimensions");
hipsparseOperation_t trans = adjoint ? HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE;
//hipsparseStatus_t status = sparseCSRMV(cudaDeviceManager::Instance()->lockSparseHandle(),trans,mat.m,mat.n,mat.nnz,&alpha, mat.descr,
// thrust::raw_pointer_cast(&mat.data[0]),thrust::raw_pointer_cast(&mat.csrRow[0]),thrust::raw_pointer_cast(&mat.csrColdnd[0]),vec_in.get_data_ptr(),&beta,vec_out.get_data_ptr());
auto dnvec_in = create_DnVec(const_cast<cuNDArray<T>&>(vec_in));
auto dnvec_out = create_DnVec(vec_out);
size_t bufferSize;
auto handle = cudaDeviceManager::Instance()->lockSparseHandle();
hipsparseSpMV(handle, trans, &alpha,mat.descr,dnvec_in.get(),&beta,dnvec_out.get(),cuda_datatype<T>(),HIPSPARSE_CSRMV_ALG2,&bufferSize);
cuNDArray<char> buffer(bufferSize);
hipsparseStatus_t status = hipsparseSpMV(handle, trans, &alpha,mat.descr,dnvec_in.get(),&beta,dnvec_out.get(),cuda_datatype<T>(),HIPSPARSE_CSRMV_ALG2, buffer.data());
cudaDeviceManager::Instance()->unlockSparseHandle();
if (status != HIPSPARSE_STATUS_SUCCESS){
std::stringstream ss;
ss << "Sparse Matrix Vector multiplication failed. Error: ";
ss << gadgetron_getCusparseErrorString(status);
throw cuda_error(ss.str());
}
}
// Build a hipsparse dense-matrix descriptor over the column-major 2-D array
// 'mat' (leading dimension = first extent) and manage it with a unique_ptr
// whose custom deleter destroys the descriptor.
template<class T>
static auto create_DnMat( cuNDArray<T>& mat){
    hipsparseDnMatDescr_t descriptor;
    hipsparseCreateDnMat(&descriptor, mat.get_size(0), mat.get_size(1), mat.get_size(0),
                         mat.data(), cuda_datatype<T>(), HIPSPARSE_ORDER_COL);
    auto destroy = [](hipsparseDnMatDescr_t d) { hipsparseDestroyDnMat(d); };
    using handle_type = std::decay_t<decltype(*descriptor)>;
    return std::unique_ptr<handle_type, decltype(destroy)>(descriptor, destroy);
}
template<class T> void Gadgetron::sparseMM(T alpha,T beta, const cuCsrMatrix<T> & mat, const cuNDArray<T> & mat_in, cuNDArray<T>& mat_out, bool adjoint) {
if (mat_in.get_size(1) != mat_out.get_size(1)) throw std::runtime_error("In and out dense matrix must have same second dimension");
if (mat_in.get_size(0) != mat.rows) throw std::runtime_error("Input matrix and sparse matrix have mismatched dimensions");
if (mat_out.get_size(0) != mat.cols) throw std::runtime_error("Output matrix and sparse matrix have mismatched dimensions");
hipsparseOperation_t trans = adjoint ? HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE;
auto handle = cudaDeviceManager::Instance()->lockSparseHandle();
auto dnmat_in = create_DnMat(const_cast<cuNDArray<T>&>(mat_in));
auto dnmat_out = create_DnMat(mat_out);
size_t bufferSize;
CUSPARSE_CALL(hipsparseSpMM_bufferSize(handle, trans, HIPSPARSE_OPERATION_NON_TRANSPOSE, &alpha, mat.descr, dnmat_in.get(), &beta, dnmat_out.get(), cuda_datatype<T>(),HIPSPARSE_CSRMM_ALG1, &bufferSize));
cuNDArray<char> buffer(bufferSize);
CUSPARSE_CALL(hipsparseSpMM(handle, trans, HIPSPARSE_OPERATION_NON_TRANSPOSE, &alpha, mat.descr, dnmat_in.get(), &beta, dnmat_out.get(), cuda_datatype<T>(),HIPSPARSE_CSRMM_ALG1, buffer.data()));
cudaDeviceManager::Instance()->unlockSparseHandle();
}
// Explicit template instantiations for the supported element types
// (real and complex, single and double precision).
template void Gadgetron::sparseMV<float>(float alpha,float beta, const cuCsrMatrix<float> & mat, const cuNDArray<float> & vec_in, cuNDArray<float>& vec_out, bool adjoint);
template void Gadgetron::sparseMV<double>(double alpha,double beta, const cuCsrMatrix<double> & mat, const cuNDArray<double> & vec_in, cuNDArray<double>& vec_out, bool adjoint);
template void Gadgetron::sparseMV<complext<float> >(complext<float> alpha,complext<float> beta, const cuCsrMatrix<complext<float> > & mat, const cuNDArray<complext<float> > & vec_in, cuNDArray<complext<float> >& vec_out, bool adjoint);
template void Gadgetron::sparseMV<complext<double> >(complext<double> alpha,complext<double> beta, const cuCsrMatrix<complext<double> > & mat, const cuNDArray<complext<double> > & vec_in, cuNDArray<complext<double> >& vec_out, bool adjoint);
template void Gadgetron::sparseMM<float>(float alpha,float beta, const cuCsrMatrix<float> & mat, const cuNDArray<float> & vec_in, cuNDArray<float>& vec_out, bool adjoint);
template void Gadgetron::sparseMM<double>(double alpha,double beta, const cuCsrMatrix<double> & mat, const cuNDArray<double> & vec_in, cuNDArray<double>& vec_out, bool adjoint);
template void Gadgetron::sparseMM<complext<float> >(complext<float> alpha,complext<float> beta, const cuCsrMatrix<complext<float> > & mat, const cuNDArray<complext<float> > & vec_in, cuNDArray<complext<float> >& vec_out, bool adjoint);
template void Gadgetron::sparseMM<complext<double> >(complext<double> alpha,complext<double> beta, const cuCsrMatrix<complext<double> > & mat, const cuNDArray<complext<double> > & vec_in, cuNDArray<complext<double> >& vec_out, bool adjoint);
| c572e98d86422fdcd00a5e1812d093950afacc29.cu | #include "cuSparseMatrix.h"
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#include "cuNDArray_math.h"
using namespace Gadgetron;
// Build a cusparse dense-vector descriptor over 'vec' and tie its lifetime
// to the returned unique_ptr, whose custom deleter destroys the descriptor.
template<class T>
static auto create_DnVec( cuNDArray<T>& vec){
    cusparseDnVecDescr_t descriptor;
    cusparseCreateDnVec(&descriptor, vec.size(), vec.data(), cuda_datatype<T>());
    auto destroy = [](cusparseDnVecDescr_t d) { cusparseDestroyDnVec(d); };
    using handle_type = std::decay_t<decltype(*descriptor)>;
    return std::unique_ptr<handle_type, decltype(destroy)>(descriptor, destroy);
}
template<class T> void Gadgetron::sparseMV(T alpha,T beta, const cuCsrMatrix<T> & mat, const cuNDArray<T> & vec_in, cuNDArray<T>& vec_out, bool adjoint){
if (vec_in.get_number_of_elements() != (adjoint ? mat.rows : mat.cols))
throw std::runtime_error("Matrix and input vector have mismatching dimensions");
if (vec_out.get_number_of_elements() != (adjoint ? mat.rows : mat.cols))
throw std::runtime_error("Matrix and output vector have mismatching dimensions");
cusparseOperation_t trans = adjoint ? CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;
//cusparseStatus_t status = sparseCSRMV(cudaDeviceManager::Instance()->lockSparseHandle(),trans,mat.m,mat.n,mat.nnz,&alpha, mat.descr,
// thrust::raw_pointer_cast(&mat.data[0]),thrust::raw_pointer_cast(&mat.csrRow[0]),thrust::raw_pointer_cast(&mat.csrColdnd[0]),vec_in.get_data_ptr(),&beta,vec_out.get_data_ptr());
auto dnvec_in = create_DnVec(const_cast<cuNDArray<T>&>(vec_in));
auto dnvec_out = create_DnVec(vec_out);
size_t bufferSize;
auto handle = cudaDeviceManager::Instance()->lockSparseHandle();
cusparseSpMV(handle, trans, &alpha,mat.descr,dnvec_in.get(),&beta,dnvec_out.get(),cuda_datatype<T>(),CUSPARSE_CSRMV_ALG2,&bufferSize);
cuNDArray<char> buffer(bufferSize);
cusparseStatus_t status = cusparseSpMV(handle, trans, &alpha,mat.descr,dnvec_in.get(),&beta,dnvec_out.get(),cuda_datatype<T>(),CUSPARSE_CSRMV_ALG2, buffer.data());
cudaDeviceManager::Instance()->unlockSparseHandle();
if (status != CUSPARSE_STATUS_SUCCESS){
std::stringstream ss;
ss << "Sparse Matrix Vector multiplication failed. Error: ";
ss << gadgetron_getCusparseErrorString(status);
throw cuda_error(ss.str());
}
}
// Build a cusparse dense-matrix descriptor over the column-major 2-D array
// 'mat' (leading dimension = first extent) and manage it with a unique_ptr
// whose custom deleter destroys the descriptor.
template<class T>
static auto create_DnMat( cuNDArray<T>& mat){
    cusparseDnMatDescr_t descriptor;
    cusparseCreateDnMat(&descriptor, mat.get_size(0), mat.get_size(1), mat.get_size(0),
                        mat.data(), cuda_datatype<T>(), CUSPARSE_ORDER_COL);
    auto destroy = [](cusparseDnMatDescr_t d) { cusparseDestroyDnMat(d); };
    using handle_type = std::decay_t<decltype(*descriptor)>;
    return std::unique_ptr<handle_type, decltype(destroy)>(descriptor, destroy);
}
template<class T> void Gadgetron::sparseMM(T alpha,T beta, const cuCsrMatrix<T> & mat, const cuNDArray<T> & mat_in, cuNDArray<T>& mat_out, bool adjoint) {
    // Computes mat_out = alpha*op(mat)*mat_in + beta*mat_out for column-major
    // dense matrices mat_in/mat_out, with op(mat) as in sparseMV (conjugate
    // transpose when adjoint is true). Throws std::runtime_error on dimension
    // mismatch; cuSPARSE failures are handled by CUSPARSE_CALL.
    if (mat_in.get_size(1) != mat_out.get_size(1)) throw std::runtime_error("In and out dense matrix must have same second dimension");
    // BUGFIX: the first-dimension checks previously ignored `adjoint`
    // (mat_in vs mat.rows, mat_out vs mat.cols -- only valid for the adjoint
    // case); make them consistent with sparseMV.
    if (mat_in.get_size(0) != (adjoint ? mat.rows : mat.cols)) throw std::runtime_error("Input matrix and sparse matrix have mismatched dimensions");
    if (mat_out.get_size(0) != (adjoint ? mat.cols : mat.rows)) throw std::runtime_error("Output matrix and sparse matrix have mismatched dimensions");
    cusparseOperation_t trans = adjoint ? CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;
    auto handle = cudaDeviceManager::Instance()->lockSparseHandle();
    auto dnmat_in = create_DnMat(const_cast<cuNDArray<T>&>(mat_in));
    auto dnmat_out = create_DnMat(mat_out);
    size_t bufferSize;
    // NOTE(review): if CUSPARSE_CALL throws, the sparse handle is never
    // unlocked -- confirm whether the macro throws or aborts on failure.
    CUSPARSE_CALL(cusparseSpMM_bufferSize(handle, trans, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, mat.descr, dnmat_in.get(), &beta, dnmat_out.get(), cuda_datatype<T>(),CUSPARSE_CSRMM_ALG1, &bufferSize));
    cuNDArray<char> buffer(bufferSize);
    CUSPARSE_CALL(cusparseSpMM(handle, trans, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, mat.descr, dnmat_in.get(), &beta, dnmat_out.get(), cuda_datatype<T>(),CUSPARSE_CSRMM_ALG1, buffer.data()));
    cudaDeviceManager::Instance()->unlockSparseHandle();
}
// Explicit instantiations of sparseMV/sparseMM for the four element types used
// by Gadgetron (real and complex, single and double precision), keeping the
// template definitions confined to this translation unit.
template void Gadgetron::sparseMV<float>(float alpha,float beta, const cuCsrMatrix<float> & mat, const cuNDArray<float> & vec_in, cuNDArray<float>& vec_out, bool adjoint);
template void Gadgetron::sparseMV<double>(double alpha,double beta, const cuCsrMatrix<double> & mat, const cuNDArray<double> & vec_in, cuNDArray<double>& vec_out, bool adjoint);
template void Gadgetron::sparseMV<complext<float> >(complext<float> alpha,complext<float> beta, const cuCsrMatrix<complext<float> > & mat, const cuNDArray<complext<float> > & vec_in, cuNDArray<complext<float> >& vec_out, bool adjoint);
template void Gadgetron::sparseMV<complext<double> >(complext<double> alpha,complext<double> beta, const cuCsrMatrix<complext<double> > & mat, const cuNDArray<complext<double> > & vec_in, cuNDArray<complext<double> >& vec_out, bool adjoint);
template void Gadgetron::sparseMM<float>(float alpha,float beta, const cuCsrMatrix<float> & mat, const cuNDArray<float> & vec_in, cuNDArray<float>& vec_out, bool adjoint);
template void Gadgetron::sparseMM<double>(double alpha,double beta, const cuCsrMatrix<double> & mat, const cuNDArray<double> & vec_in, cuNDArray<double>& vec_out, bool adjoint);
template void Gadgetron::sparseMM<complext<float> >(complext<float> alpha,complext<float> beta, const cuCsrMatrix<complext<float> > & mat, const cuNDArray<complext<float> > & vec_in, cuNDArray<complext<float> >& vec_out, bool adjoint);
template void Gadgetron::sparseMM<complext<double> >(complext<double> alpha,complext<double> beta, const cuCsrMatrix<complext<double> > & mat, const cuNDArray<complext<double> > & vec_in, cuNDArray<complext<double> >& vec_out, bool adjoint);
|
f08b983b2f0e6996e2d2fb8adfbe444d86aa251a.hip | // !!! This is a file automatically generated by hipify!!!
//#include <stdio.h>
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
//#include <conio.h>
//
//__global__ void cube(float * d_out, float * d_in) {
// int idx = threadIdx.x;
// float f = d_in[idx];
// d_out[idx] = f*f*f;
//}
//
//int main(int argc, char ** argv) {
// const int ARRAY_SIZE = 96;
// const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//
// generate the input array on the host
// float h_in[ARRAY_SIZE];
// for (int i = 0; i < ARRAY_SIZE; i++) {
// h_in[i] = float(i);
// }
// float h_out[ARRAY_SIZE];
//
// declare GPU memory pointers
// float * d_in;
// float * d_out;
//
// allocate GPU memory
// hipMalloc((void**)&d_in, ARRAY_BYTES);
// hipMalloc((void**)&d_out, ARRAY_BYTES);
//
// transfer the array to the GPU
// hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
//
// launch the kernel
// cube <<<1, ARRAY_SIZE >> >(d_out, d_in);
//
// copy back the result array to the CPU
// hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
//
// print out the resulting array
// for (int i = 0; i < ARRAY_SIZE; i++) {
// printf("%f", h_out[i]);
// printf(((i % 4) != 3) ? "\t" : "\n");
// }
//
// hipFree(d_in);
// hipFree(d_out);
// _getch();
// return 0;
//} | f08b983b2f0e6996e2d2fb8adfbe444d86aa251a.cu | //#include <stdio.h>
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//#include <conio.h>
//
//__global__ void cube(float * d_out, float * d_in) {
// int idx = threadIdx.x;
// float f = d_in[idx];
// d_out[idx] = f*f*f;
//}
//
//int main(int argc, char ** argv) {
// const int ARRAY_SIZE = 96;
// const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//
// generate the input array on the host
// float h_in[ARRAY_SIZE];
// for (int i = 0; i < ARRAY_SIZE; i++) {
// h_in[i] = float(i);
// }
// float h_out[ARRAY_SIZE];
//
// declare GPU memory pointers
// float * d_in;
// float * d_out;
//
// allocate GPU memory
// cudaMalloc((void**)&d_in, ARRAY_BYTES);
// cudaMalloc((void**)&d_out, ARRAY_BYTES);
//
// transfer the array to the GPU
// cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
//
// launch the kernel
// cube <<<1, ARRAY_SIZE >> >(d_out, d_in);
//
// copy back the result array to the CPU
// cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
//
// print out the resulting array
// for (int i = 0; i < ARRAY_SIZE; i++) {
// printf("%f", h_out[i]);
// printf(((i % 4) != 3) ? "\t" : "\n");
// }
//
// cudaFree(d_in);
// cudaFree(d_out);
// _getch();
// return 0;
//} |
9ad220b6c8796642bc3f7e29cfee3037e15d956e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*************************************************************************
GPU Version:
Tsinghua University, Aug. 2012.
Written by Yun Fei in collaboration with
W. Wang and B. Wang
Original:
Optimization Technology Center.
Argonne National Laboratory and Northwestern University.
Written by Ciyou Zhu in collaboration with
R.H. Byrd, P. Lu-Chen and J. Nocedal.
Contributors:
* Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to
pseudocode.
This software is freely available, but we expect that all publications
describing work using this software, or all commercial products using it,
quote at least one of the references given below:
* R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for
Bound Constrained Optimization, (1995), SIAM Journal on Scientific
and Statistical Computing , 16, 5, pp. 1190-1208.
* C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization
(1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4,
pp. 550 - 560.
*************************************************************************/
#include "lbfgsbcuda.h"
// Generalized Cauchy point (GCP) computation of L-BFGS-B (subroutine CAUCHY in
// the reference FORTRAN code), HIP port. The kernels below are block-wide tree
// reductions (min or sum) written in the classic warp-synchronous style: a
// volatile __shared__ buffer, __syncthreads() between strides above warp size,
// and a barrier-free tail within a single warp. bx is the power-of-two block
// size selected at launch time via dynamicCall.
namespace lbfgsbcuda {
	namespace cauchy {
		// kernel0: per-variable breakpoint + first min-reduction pass.
		// For each variable i, compute the step length along the steepest
		// descent direction d_i = -g_i to the nearest relevant bound (tl =
		// distance to lower bound, tu = distance to upper bound, selected by
		// the bound type nbd[i] and the sign of d_i -- presumably the
		// standard L-BFGS-B encoding 0:none 1:lower 2:both 3:upper, TODO
		// confirm against lbfgsbcuda.h), then min-reduce over the block into
		// t[blockIdx.x]. Out-of-range threads contribute machinemaximum, the
		// identity element of min.
		template<int bx>
		__global__
		void kernel0(
			int n,
			const real* g,
			const int* nbd,
			real* t,
			const real* x,
			const real* u,
			const real* l
			)
		{
			const int i = blockIdx.x * blockDim.x + threadIdx.x;
			const int tid = threadIdx.x;
			volatile __shared__ real sdata[bx];
			real mySum;
			if(i < n) {
				real neggi = -g[i];
				const int nbdi = nbd[i];
				// Avoid division by zero in the quotient below.
				if(neggi == 0)
					neggi = machineepsilon;
				real tl = 0;
				real tu = 0;
				if( nbdi <= 2 )
				{
					tl = x[i] - l[i];
				}
				if( nbdi >= 2 )
				{
					tu = u[i] - x[i];
				}
				// Moving toward the lower bound (d_i < 0) uses -tl so the
				// quotient tlu/neggi is non-negative; otherwise use tu.
				bool k1 = nbdi <= 2 && nbdi != 0 && neggi < 0;
/*				bool k2 = nbdi >= 2 && neggi > 0;*/
				real tlu = k1 ? -tl : tu;
				mySum = tlu / neggi;
			} else {
				mySum = machinemaximum;
			}
			sdata[tid] = mySum;
			__syncthreads();
			// Shared-memory tree reduction (min), halving the stride each step.
			if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = minr(mySum, sdata[tid + 512]); } __syncthreads();}
			if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = minr(mySum, sdata[tid + 256]); } __syncthreads();}
			if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = minr(mySum, sdata[tid + 128]); } __syncthreads();}
			if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = minr(mySum, sdata[tid + 64]); } __syncthreads();}
			if (tid < __min(bx / 2, 32))
			{
				// now that we are using warp-synchronous programming (below)
				// we need to declare our shared memory volatile so that the compiler
				// doesn't reorder stores to it and induce incorrect behavior.
				volatile real* smem = sdata + tid;
				if(bx > 32) {*smem = mySum = minr(mySum, smem[32]);}
				if(bx > 16) {*smem = mySum = minr(mySum, smem[16]);}
				if(bx > 8) {*smem = mySum = minr(mySum, smem[8]);}
				if(bx > 4) {*smem = mySum = minr(mySum, smem[4]);}
				if(bx > 2) {*smem = mySum = minr(mySum, smem[2]);}
				if(bx > 1) {*smem = mySum = minr(mySum, smem[1]);}
			}
			if (tid == 0)
				t[blockIdx.x] = mySum;
		}
		// kernel01: follow-up min-reduction pass -- reduces the per-block
		// partial minima produced by kernel0 (or a previous kernel01 pass)
		// from buf_in into one value per block in buf_out.
		template<int bx>
		__global__
		void kernel01(
			const int n,
			const real* buf_in,
			real* buf_out)
		{
			const int i = blockIdx.x * blockDim.x + threadIdx.x;
			const int tid = threadIdx.x;
			volatile __shared__ real sdata[bx];
			real mySum;
			if(i < n)
				mySum = buf_in[i];
			else
				mySum = machinemaximum;
			sdata[tid] = mySum;
			__syncthreads();
			if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = minr(mySum, sdata[tid + 512]); } __syncthreads();}
			if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = minr(mySum, sdata[tid + 256]); } __syncthreads();}
			if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = minr(mySum, sdata[tid + 128]); } __syncthreads();}
			if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = minr(mySum, sdata[tid + 64]); } __syncthreads();}
			if (tid < __min(bx / 2, 32))
			{
				// now that we are using warp-synchronous programming (below)
				// we need to declare our shared memory volatile so that the compiler
				// doesn't reorder stores to it and induce incorrect behavior.
				volatile real* smem = sdata + tid;
				if(bx > 32) {*smem = mySum = minr(mySum, smem[32]);}
				if(bx > 16) {*smem = mySum = minr(mySum, smem[16]);}
				if(bx > 8) {*smem = mySum = minr(mySum, smem[8]);}
				if(bx > 4) {*smem = mySum = minr(mySum, smem[4]);}
				if(bx > 2) {*smem = mySum = minr(mySum, smem[2]);}
				if(bx > 1) {*smem = mySum = minr(mySum, smem[1]);}
			}
			if(tid == 0) {
				buf_out[blockIdx.x] = mySum;
			}
		}
		// kernel1: first sum-reduction pass of -g_i^2 (zero gradients replaced
		// by machineepsilon), i.e. partial sums of what prog0 later uses as
		// f1 = -sum_i g_i^2. One partial sum per block into buf_s_r.
		template<int bx>
		__global__
		void kernel1(
			const int n,
			const real* g,
			real* buf_s_r)
		{
			const int i = blockIdx.x * blockDim.x + threadIdx.x;
			const int tid = threadIdx.x;
			volatile __shared__ real sdata[bx];
			real mySum;
			if(i < n) {
				real neggi = g[i];
				if(neggi == 0)
					neggi = machineepsilon;
				mySum = -neggi * neggi;
			} else {
				mySum = 0;
			}
			sdata[tid] = mySum;
			__syncthreads();
			if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();}
			if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();}
			if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();}
			if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();}
			if (tid < __min(bx / 2, 32))
			{
				// now that we are using warp-synchronous programming (below)
				// we need to declare our shared memory volatile so that the compiler
				// doesn't reorder stores to it and induce incorrect behavior.
				volatile real* smem = sdata + tid;
				if(bx > 32) {*smem = mySum = mySum + smem[32];}
				if(bx > 16) {*smem = mySum = mySum + smem[16];}
				if(bx > 8) {*smem = mySum = mySum + smem[8];}
				if(bx > 4) {*smem = mySum = mySum + smem[4];}
				if(bx > 2) {*smem = mySum = mySum + smem[2];}
				if(bx > 1) {*smem = mySum = mySum + smem[1];}
			}
			if (tid == 0)
				buf_s_r[blockIdx.x] = mySum;
		}
		// kernel20: first sum-reduction pass of the dot products between the
		// descent direction d = -g and the 2*col columns of the correction
		// matrices (wy for j < col, ws otherwise; column selected via the
		// circular buffer index Modular(head + ..., m)). blockIdx.y = j picks
		// the column; partials go to buf_array_p[j * oPitch + blockIdx.x].
		template<int bx>
		__global__
		void kernel20(
			const int n,
			const int head,
			const int m,
			const int col,
			const int iPitch,
			const int oPitch,
			const real* g,
			real* buf_array_p,
			const real* wy,
			const real* ws)
		{
			const int i = blockIdx.x * blockDim.x + threadIdx.x;
			const int j = blockIdx.y;
			const int tid = threadIdx.x;
			volatile __shared__ real sdata[bx];
			real mySum;
			if(i < n) {
				real neggi = -g[i];
				if(neggi == 0)
					neggi = machineepsilon;
				real p0;
				if(j < col) {
					int pointr = Modular((head + j), m);
					p0 = wy[i * iPitch + pointr];
				} else {
					int pointr = Modular((head + j - col), m);
					p0 = ws[i * iPitch + pointr];
				}
				mySum = p0 * neggi;
			} else {
				mySum = 0;
			}
			sdata[tid] = mySum;
			__syncthreads();
			if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();}
			if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();}
			if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();}
			if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();}
			if (tid < __min(bx / 2, 32))
			{
				// now that we are using warp-synchronous programming (below)
				// we need to declare our shared memory volatile so that the compiler
				// doesn't reorder stores to it and induce incorrect behavior.
				volatile real* smem = sdata + tid;
				if(bx > 32) {*smem = mySum = mySum + smem[32];}
				if(bx > 16) {*smem = mySum = mySum + smem[16];}
				if(bx > 8) {*smem = mySum = mySum + smem[8];}
				if(bx > 4) {*smem = mySum = mySum + smem[4];}
				if(bx > 2) {*smem = mySum = mySum + smem[2];}
				if(bx > 1) {*smem = mySum = mySum + smem[1];}
			}
			if (tid == 0)
				buf_array_p[j * oPitch + blockIdx.x] = mySum;
		}
		// kernel21: follow-up sum-reduction pass -- reduces row j
		// (blockIdx.y) of the partial-sum buffer buf_in (row stride iPitch)
		// into buf_out[j * oPitch + blockIdx.x].
		template<int bx>
		__global__
		void kernel21(
			const int n,
			const int iPitch,
			const int oPitch,
			const real* buf_in,
			real* buf_out)
		{
			const int i = blockIdx.x * blockDim.x + threadIdx.x;
			const int j = blockIdx.y;
			const int tid = threadIdx.x;
			volatile __shared__ real sdata[bx];
			real mySum;
			if(i < n)
				mySum = buf_in[j * iPitch + i];
			else
				mySum = 0;
			sdata[tid] = mySum;
			__syncthreads();
			if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();}
			if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();}
			if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();}
			if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();}
			if (tid < __min(bx / 2, 32))
			{
				// now that we are using warp-synchronous programming (below)
				// we need to declare our shared memory volatile so that the compiler
				// doesn't reorder stores to it and induce incorrect behavior.
				volatile real* smem = sdata + tid;
				if(bx > 32) {*smem = mySum = mySum + smem[32];}
				if(bx > 16) {*smem = mySum = mySum + smem[16];}
				if(bx > 8) {*smem = mySum = mySum + smem[8];}
				if(bx > 4) {*smem = mySum = mySum + smem[4];}
				if(bx > 2) {*smem = mySum = mySum + smem[2];}
				if(bx > 1) {*smem = mySum = mySum + smem[1];}
			}
			if(tid == 0) {
				buf_out[j * oPitch + blockIdx.x] = mySum;
			}
		}
		// kernel22: in-place scale of p[0..n) by theta. Launched with a single
		// block of n threads; prog0 applies it to the second half (p + col).
		__global__
		void kernel22(
			const int n,
			real* p,
			const real theta
			)
		{
			const int i = threadIdx.x;
			if(i >= n)
				return;
			p[i] *= theta;
		}
		// kernel4: c = p * dtm elementwise over col2 elements (single block).
		__global__
		void kernel4(
			const int col2,
			const real* p,
			real* c,
			const real dtm
			)
		{
			const int i = threadIdx.x;
			if(i >= col2)
				return;
			c[i] = p[i] * dtm;
		}
		// kernel3: write the Cauchy point xcp (and its copy xcpb) as
		// x + dtm * d with d = -g, zero gradient components replaced by
		// machineepsilon to match the direction used in the other kernels.
		__global__
		void kernel3(
			const int n,
			const real* x,
			const real* g,
			real* xcp,
			real* xcpb,
			const real dtm
			)
		{
			const int i = blockIdx.x * blockDim.x + threadIdx.x;
			if(i >= n)
				return;
			real neggi = -g[i];
			if(neggi == 0)
				neggi = machineepsilon;
			real res = x[i] + neggi * dtm;
			xcp[i] = res;
			xcpb[i] = res;
		}
		// prog0: host driver for the GCP computation.
		// Pipeline (on three streams): kernel0 (min breakpoint -> bkmin),
		// kernel1 (f1 = -||g||^2) and kernel20 (p = W^T d) run their first
		// pass concurrently, then kernel01/kernel21 passes fold the partial
		// buffers down to single values; with corrections present (col > 0),
		// the bmv::prog* sequence and a cublas dot produce fd = p^T (M p),
		// from which the step dtm = max(0, max(bkmin, -f1 / f2)) is formed
		// and kernel3/kernel4 write xcp/xcpb and c = p * dtm.
		// Results bkmin/f1/fd travel through zero-copy mapped pinned memory
		// (vec_h/vec_d), read on the host after the stream synchronizations.
		// NOTE(review): vec_h is allocated with hipHostMalloc on every call
		// and never freed -- looks like a pinned-memory leak; confirm whether
		// a caller releases it.
		void prog0
			(const int& n,
			const real* x,
			const real* l,
			const real* u,
			const int* nbd,
			const real* g,
			real* t,
			real* xcp,
			real* xcpb,
			const int& m,
			const real* wy,
			const real* ws,
			const real* sy,
			const int iPitch,
			real* wt,
			const real& theta,
			const int& col,
			const int& head,
			real* p,
			real* c,
			real* v,
			int& nint,
			const real& sbgnrm,
			real* buf_s_r,
			real* buf_array_p,
			const hipStream_t* streamPool
			)
		{
			// Zero projected-gradient norm: already optimal, xcp = x.
			if(sbgnrm <= 0) {
				hipMemcpyAsync(xcp, x, n * sizeof(real), hipMemcpyDeviceToDevice);
				return;
			}
			if(col > 0)
				hipMemsetAsync(p, 0, col * 2 * sizeof(real));
			real* vec_h;
			real* vec_d;
			checkCudaErrors(hipHostMalloc(&vec_h, 3 * sizeof(real), hipHostMallocMapped));
			checkCudaErrors(hipHostGetDevicePointer(&vec_d, vec_h, 0));
			real* bkmin_d = vec_d;
			real* f1_d = vec_d + 1;
			real* bkmin_h = vec_h;
			real* f1_h = vec_h + 1;
			real* fd_h = vec_h + 2;
			int nblock0 = n;
			int mi = log2Up(nblock0);
			int nblock1 = iDivUp2(nblock0, mi);
			// When a single block suffices, write the final value directly
			// into its destination instead of a staging buffer.
			real* output0 = (nblock1 == 1) ? bkmin_d : t;
			real* output1 = (nblock1 == 1) ? f1_d : buf_s_r;
			real* output2 = (nblock1 == 1) ? p : buf_array_p;
			dynamicCall(kernel0, mi, nblock1, 1, streamPool[0], (nblock0, g, nbd, output0, x, u, l));
			dynamicCall(kernel1, mi, nblock1, 1, streamPool[1], (nblock0, g, output1));
			int op20 = (nblock1 == 1) ? 1 : n;
			if(col > 0) {
				dynamicCall(kernel20, mi, nblock1, col * 2, streamPool[2], (nblock0, head, m, col, iPitch, op20, g, output2, wy, ws));
			}
			// Keep folding the partial buffers until one block remains.
			nblock0 = nblock1;
			while(nblock0 > 1) {
				nblock1 = iDivUp2(nblock0, mi);
				real* input0 = output0;
				real* input1 = output1;
				real* input2 = output2;
				output0 = (nblock1 == 1) ? bkmin_d : (output0 + nblock0);
				output1 = (nblock1 == 1) ? f1_d : (output1 + nblock0);
				output2 = (nblock1 == 1) ? p : (output2 + nblock0);
				dynamicCall(kernel01, mi, nblock1, 1, streamPool[0], (nblock0, input0, output0));
				dynamicCall(kernel21, mi, nblock1, 1, streamPool[1], (nblock0, 1, 1, input1, output1));
				int op20 = (nblock1 == 1) ? 1 : n;
				if(col > 0) {
					dynamicCall(kernel21, mi, nblock1, col * 2, streamPool[2], (nblock0, n, op20, input2, output2));
				}
				nblock0 = nblock1;
			}
			// Scale the second half of p by theta (skipped when theta == 1).
			if( col > 0 && theta != 1 )
			{
				CheckBuffer(p, col * 2, col * 2);
				hipLaunchKernelGGL(( kernel22), dim3(dim3(1)), dim3(dim3(col)), 0, streamPool[2],
					col, p + col, theta);
				CheckBuffer(p, col * 2, col * 2);
			}
			*fd_h = 0;
			if(col > 0)
			{
				// v = M * p via the bmv solves, then fd = v . p (cublas dot
				// writes straight into the mapped host value fd_h).
				bmv::prog0(sy, col, iPitch, p, v, streamPool[2]);
				CheckBuffer(v, col * 2, col * 2);
				CheckBuffer(p, col * 2, col * 2);
				bmv::prog1(wt, col, iPitch, p, v, streamPool[2]);
				CheckBuffer(v, col * 2, col * 2);
				CheckBuffer(p, col * 2, col * 2);
				bmv::prog2(sy, wt, col, iPitch, p, v, streamPool[2]);
				CheckBuffer(v, col * 2, col * 2);
				CheckBuffer(p, col * 2, col * 2);
				hipblasSetStream(cublasHd, streamPool[2]);
				cublasRdot(cublasHd, col * 2, v, 1, p, 1, fd_h);
				hipblasSetStream(cublasHd, NULL);
			}
			// Host reads of f1/fd/bkmin require the producing streams done.
			checkCudaErrors(hipStreamSynchronize(streamPool[1]));
			checkCudaErrors(hipStreamSynchronize(streamPool[2]));
			real f2 = -theta * *f1_h - *fd_h;
			real dt = -*f1_h / f2;
			real dtm = __max(*bkmin_h, dt);
			dtm = __max(0, dtm);
			hipLaunchKernelGGL(( kernel3), dim3(dim3(iDivUp(n, 512))), dim3(dim3(512)), 0, streamPool[0],
				n, x, g, xcp, xcpb, dtm);
			if(col > 0) {
				hipLaunchKernelGGL(( kernel4), dim3(dim3(1)), dim3(dim3(col * 2)), 0, streamPool[1],
					col * 2, p, c, dtm);
			}
		}
	};
};
GPU Version:
Tsinghua University, Aug. 2012.
Written by Yun Fei in collaboration with
W. Wang and B. Wang
Original:
Optimization Technology Center.
Argonne National Laboratory and Northwestern University.
Written by Ciyou Zhu in collaboration with
R.H. Byrd, P. Lu-Chen and J. Nocedal.
Contributors:
* Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to
pseudocode.
This software is freely available, but we expect that all publications
describing work using this software, or all commercial products using it,
quote at least one of the references given below:
* R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for
Bound Constrained Optimization, (1995), SIAM Journal on Scientific
and Statistical Computing , 16, 5, pp. 1190-1208.
* C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization
(1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4,
pp. 550 - 560.
*************************************************************************/
#include "lbfgsbcuda.h"
// Generalized Cauchy point (GCP) computation of L-BFGS-B (subroutine CAUCHY in
// the reference FORTRAN code), original CUDA version. The kernels below are
// block-wide tree reductions (min or sum) written in the classic
// warp-synchronous style: a volatile __shared__ buffer, __syncthreads()
// between strides above warp size, and a barrier-free tail within a single
// warp. bx is the power-of-two block size selected at launch via dynamicCall.
namespace lbfgsbcuda {
	namespace cauchy {
		// kernel0: per-variable breakpoint + first min-reduction pass.
		// For each variable i, compute the step length along the steepest
		// descent direction d_i = -g_i to the nearest relevant bound (tl =
		// distance to lower bound, tu = distance to upper bound, selected by
		// the bound type nbd[i] and the sign of d_i -- presumably the
		// standard L-BFGS-B encoding 0:none 1:lower 2:both 3:upper, TODO
		// confirm against lbfgsbcuda.h), then min-reduce over the block into
		// t[blockIdx.x]. Out-of-range threads contribute machinemaximum, the
		// identity element of min.
		template<int bx>
		__global__
		void kernel0(
			int n,
			const real* g,
			const int* nbd,
			real* t,
			const real* x,
			const real* u,
			const real* l
			)
		{
			const int i = blockIdx.x * blockDim.x + threadIdx.x;
			const int tid = threadIdx.x;
			volatile __shared__ real sdata[bx];
			real mySum;
			if(i < n) {
				real neggi = -g[i];
				const int nbdi = nbd[i];
				// Avoid division by zero in the quotient below.
				if(neggi == 0)
					neggi = machineepsilon;
				real tl = 0;
				real tu = 0;
				if( nbdi <= 2 )
				{
					tl = x[i] - l[i];
				}
				if( nbdi >= 2 )
				{
					tu = u[i] - x[i];
				}
				// Moving toward the lower bound (d_i < 0) uses -tl so the
				// quotient tlu/neggi is non-negative; otherwise use tu.
				bool k1 = nbdi <= 2 && nbdi != 0 && neggi < 0;
/*				bool k2 = nbdi >= 2 && neggi > 0;*/
				real tlu = k1 ? -tl : tu;
				mySum = tlu / neggi;
			} else {
				mySum = machinemaximum;
			}
			sdata[tid] = mySum;
			__syncthreads();
			// Shared-memory tree reduction (min), halving the stride each step.
			if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = minr(mySum, sdata[tid + 512]); } __syncthreads();}
			if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = minr(mySum, sdata[tid + 256]); } __syncthreads();}
			if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = minr(mySum, sdata[tid + 128]); } __syncthreads();}
			if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = minr(mySum, sdata[tid + 64]); } __syncthreads();}
			if (tid < __min(bx / 2, 32))
			{
				// now that we are using warp-synchronous programming (below)
				// we need to declare our shared memory volatile so that the compiler
				// doesn't reorder stores to it and induce incorrect behavior.
				volatile real* smem = sdata + tid;
				if(bx > 32) {*smem = mySum = minr(mySum, smem[32]);}
				if(bx > 16) {*smem = mySum = minr(mySum, smem[16]);}
				if(bx > 8) {*smem = mySum = minr(mySum, smem[8]);}
				if(bx > 4) {*smem = mySum = minr(mySum, smem[4]);}
				if(bx > 2) {*smem = mySum = minr(mySum, smem[2]);}
				if(bx > 1) {*smem = mySum = minr(mySum, smem[1]);}
			}
			if (tid == 0)
				t[blockIdx.x] = mySum;
		}
		// kernel01: follow-up min-reduction pass -- reduces the per-block
		// partial minima produced by kernel0 (or a previous kernel01 pass)
		// from buf_in into one value per block in buf_out.
		template<int bx>
		__global__
		void kernel01(
			const int n,
			const real* buf_in,
			real* buf_out)
		{
			const int i = blockIdx.x * blockDim.x + threadIdx.x;
			const int tid = threadIdx.x;
			volatile __shared__ real sdata[bx];
			real mySum;
			if(i < n)
				mySum = buf_in[i];
			else
				mySum = machinemaximum;
			sdata[tid] = mySum;
			__syncthreads();
			if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = minr(mySum, sdata[tid + 512]); } __syncthreads();}
			if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = minr(mySum, sdata[tid + 256]); } __syncthreads();}
			if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = minr(mySum, sdata[tid + 128]); } __syncthreads();}
			if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = minr(mySum, sdata[tid + 64]); } __syncthreads();}
			if (tid < __min(bx / 2, 32))
			{
				// now that we are using warp-synchronous programming (below)
				// we need to declare our shared memory volatile so that the compiler
				// doesn't reorder stores to it and induce incorrect behavior.
				volatile real* smem = sdata + tid;
				if(bx > 32) {*smem = mySum = minr(mySum, smem[32]);}
				if(bx > 16) {*smem = mySum = minr(mySum, smem[16]);}
				if(bx > 8) {*smem = mySum = minr(mySum, smem[8]);}
				if(bx > 4) {*smem = mySum = minr(mySum, smem[4]);}
				if(bx > 2) {*smem = mySum = minr(mySum, smem[2]);}
				if(bx > 1) {*smem = mySum = minr(mySum, smem[1]);}
			}
			if(tid == 0) {
				buf_out[blockIdx.x] = mySum;
			}
		}
		// kernel1: first sum-reduction pass of -g_i^2 (zero gradients replaced
		// by machineepsilon), i.e. partial sums of what prog0 later uses as
		// f1 = -sum_i g_i^2. One partial sum per block into buf_s_r.
		template<int bx>
		__global__
		void kernel1(
			const int n,
			const real* g,
			real* buf_s_r)
		{
			const int i = blockIdx.x * blockDim.x + threadIdx.x;
			const int tid = threadIdx.x;
			volatile __shared__ real sdata[bx];
			real mySum;
			if(i < n) {
				real neggi = g[i];
				if(neggi == 0)
					neggi = machineepsilon;
				mySum = -neggi * neggi;
			} else {
				mySum = 0;
			}
			sdata[tid] = mySum;
			__syncthreads();
			if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();}
			if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();}
			if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();}
			if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();}
			if (tid < __min(bx / 2, 32))
			{
				// now that we are using warp-synchronous programming (below)
				// we need to declare our shared memory volatile so that the compiler
				// doesn't reorder stores to it and induce incorrect behavior.
				volatile real* smem = sdata + tid;
				if(bx > 32) {*smem = mySum = mySum + smem[32];}
				if(bx > 16) {*smem = mySum = mySum + smem[16];}
				if(bx > 8) {*smem = mySum = mySum + smem[8];}
				if(bx > 4) {*smem = mySum = mySum + smem[4];}
				if(bx > 2) {*smem = mySum = mySum + smem[2];}
				if(bx > 1) {*smem = mySum = mySum + smem[1];}
			}
			if (tid == 0)
				buf_s_r[blockIdx.x] = mySum;
		}
		// kernel20: first sum-reduction pass of the dot products between the
		// descent direction d = -g and the 2*col columns of the correction
		// matrices (wy for j < col, ws otherwise; column selected via the
		// circular buffer index Modular(head + ..., m)). blockIdx.y = j picks
		// the column; partials go to buf_array_p[j * oPitch + blockIdx.x].
		template<int bx>
		__global__
		void kernel20(
			const int n,
			const int head,
			const int m,
			const int col,
			const int iPitch,
			const int oPitch,
			const real* g,
			real* buf_array_p,
			const real* wy,
			const real* ws)
		{
			const int i = blockIdx.x * blockDim.x + threadIdx.x;
			const int j = blockIdx.y;
			const int tid = threadIdx.x;
			volatile __shared__ real sdata[bx];
			real mySum;
			if(i < n) {
				real neggi = -g[i];
				if(neggi == 0)
					neggi = machineepsilon;
				real p0;
				if(j < col) {
					int pointr = Modular((head + j), m);
					p0 = wy[i * iPitch + pointr];
				} else {
					int pointr = Modular((head + j - col), m);
					p0 = ws[i * iPitch + pointr];
				}
				mySum = p0 * neggi;
			} else {
				mySum = 0;
			}
			sdata[tid] = mySum;
			__syncthreads();
			if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();}
			if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();}
			if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();}
			if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();}
			if (tid < __min(bx / 2, 32))
			{
				// now that we are using warp-synchronous programming (below)
				// we need to declare our shared memory volatile so that the compiler
				// doesn't reorder stores to it and induce incorrect behavior.
				volatile real* smem = sdata + tid;
				if(bx > 32) {*smem = mySum = mySum + smem[32];}
				if(bx > 16) {*smem = mySum = mySum + smem[16];}
				if(bx > 8) {*smem = mySum = mySum + smem[8];}
				if(bx > 4) {*smem = mySum = mySum + smem[4];}
				if(bx > 2) {*smem = mySum = mySum + smem[2];}
				if(bx > 1) {*smem = mySum = mySum + smem[1];}
			}
			if (tid == 0)
				buf_array_p[j * oPitch + blockIdx.x] = mySum;
		}
		// kernel21: follow-up sum-reduction pass -- reduces row j
		// (blockIdx.y) of the partial-sum buffer buf_in (row stride iPitch)
		// into buf_out[j * oPitch + blockIdx.x].
		template<int bx>
		__global__
		void kernel21(
			const int n,
			const int iPitch,
			const int oPitch,
			const real* buf_in,
			real* buf_out)
		{
			const int i = blockIdx.x * blockDim.x + threadIdx.x;
			const int j = blockIdx.y;
			const int tid = threadIdx.x;
			volatile __shared__ real sdata[bx];
			real mySum;
			if(i < n)
				mySum = buf_in[j * iPitch + i];
			else
				mySum = 0;
			sdata[tid] = mySum;
			__syncthreads();
			if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();}
			if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();}
			if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();}
			if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();}
			if (tid < __min(bx / 2, 32))
			{
				// now that we are using warp-synchronous programming (below)
				// we need to declare our shared memory volatile so that the compiler
				// doesn't reorder stores to it and induce incorrect behavior.
				volatile real* smem = sdata + tid;
				if(bx > 32) {*smem = mySum = mySum + smem[32];}
				if(bx > 16) {*smem = mySum = mySum + smem[16];}
				if(bx > 8) {*smem = mySum = mySum + smem[8];}
				if(bx > 4) {*smem = mySum = mySum + smem[4];}
				if(bx > 2) {*smem = mySum = mySum + smem[2];}
				if(bx > 1) {*smem = mySum = mySum + smem[1];}
			}
			if(tid == 0) {
				buf_out[j * oPitch + blockIdx.x] = mySum;
			}
		}
		// kernel22: in-place scale of p[0..n) by theta. Launched with a single
		// block of n threads; prog0 applies it to the second half (p + col).
		__global__
		void kernel22(
			const int n,
			real* p,
			const real theta
			)
		{
			const int i = threadIdx.x;
			if(i >= n)
				return;
			p[i] *= theta;
		}
		// kernel4: c = p * dtm elementwise over col2 elements (single block).
		__global__
		void kernel4(
			const int col2,
			const real* p,
			real* c,
			const real dtm
			)
		{
			const int i = threadIdx.x;
			if(i >= col2)
				return;
			c[i] = p[i] * dtm;
		}
		// kernel3: write the Cauchy point xcp (and its copy xcpb) as
		// x + dtm * d with d = -g, zero gradient components replaced by
		// machineepsilon to match the direction used in the other kernels.
		__global__
		void kernel3(
			const int n,
			const real* x,
			const real* g,
			real* xcp,
			real* xcpb,
			const real dtm
			)
		{
			const int i = blockIdx.x * blockDim.x + threadIdx.x;
			if(i >= n)
				return;
			real neggi = -g[i];
			if(neggi == 0)
				neggi = machineepsilon;
			real res = x[i] + neggi * dtm;
			xcp[i] = res;
			xcpb[i] = res;
		}
		// prog0: host driver for the GCP computation.
		// Pipeline (on three streams): kernel0 (min breakpoint -> bkmin),
		// kernel1 (f1 = -||g||^2) and kernel20 (p = W^T d) run their first
		// pass concurrently, then kernel01/kernel21 passes fold the partial
		// buffers down to single values; with corrections present (col > 0),
		// the bmv::prog* sequence and a cublas dot produce fd = p^T (M p),
		// from which the step dtm = max(0, max(bkmin, -f1 / f2)) is formed
		// and kernel3/kernel4 write xcp/xcpb and c = p * dtm.
		// Results bkmin/f1/fd travel through zero-copy mapped pinned memory
		// (vec_h/vec_d), read on the host after the stream synchronizations.
		// NOTE(review): vec_h is allocated with cudaHostAlloc on every call
		// and never freed -- looks like a pinned-memory leak; confirm whether
		// a caller releases it.
		void prog0
			(const int& n,
			const real* x,
			const real* l,
			const real* u,
			const int* nbd,
			const real* g,
			real* t,
			real* xcp,
			real* xcpb,
			const int& m,
			const real* wy,
			const real* ws,
			const real* sy,
			const int iPitch,
			real* wt,
			const real& theta,
			const int& col,
			const int& head,
			real* p,
			real* c,
			real* v,
			int& nint,
			const real& sbgnrm,
			real* buf_s_r,
			real* buf_array_p,
			const cudaStream_t* streamPool
			)
		{
			// Zero projected-gradient norm: already optimal, xcp = x.
			if(sbgnrm <= 0) {
				cudaMemcpyAsync(xcp, x, n * sizeof(real), cudaMemcpyDeviceToDevice);
				return;
			}
			if(col > 0)
				cudaMemsetAsync(p, 0, col * 2 * sizeof(real));
			real* vec_h;
			real* vec_d;
			checkCudaErrors(cudaHostAlloc(&vec_h, 3 * sizeof(real), cudaHostAllocMapped));
			checkCudaErrors(cudaHostGetDevicePointer(&vec_d, vec_h, 0));
			real* bkmin_d = vec_d;
			real* f1_d = vec_d + 1;
			real* bkmin_h = vec_h;
			real* f1_h = vec_h + 1;
			real* fd_h = vec_h + 2;
			int nblock0 = n;
			int mi = log2Up(nblock0);
			int nblock1 = iDivUp2(nblock0, mi);
			// When a single block suffices, write the final value directly
			// into its destination instead of a staging buffer.
			real* output0 = (nblock1 == 1) ? bkmin_d : t;
			real* output1 = (nblock1 == 1) ? f1_d : buf_s_r;
			real* output2 = (nblock1 == 1) ? p : buf_array_p;
			dynamicCall(kernel0, mi, nblock1, 1, streamPool[0], (nblock0, g, nbd, output0, x, u, l));
			dynamicCall(kernel1, mi, nblock1, 1, streamPool[1], (nblock0, g, output1));
			int op20 = (nblock1 == 1) ? 1 : n;
			if(col > 0) {
				dynamicCall(kernel20, mi, nblock1, col * 2, streamPool[2], (nblock0, head, m, col, iPitch, op20, g, output2, wy, ws));
			}
			// Keep folding the partial buffers until one block remains.
			nblock0 = nblock1;
			while(nblock0 > 1) {
				nblock1 = iDivUp2(nblock0, mi);
				real* input0 = output0;
				real* input1 = output1;
				real* input2 = output2;
				output0 = (nblock1 == 1) ? bkmin_d : (output0 + nblock0);
				output1 = (nblock1 == 1) ? f1_d : (output1 + nblock0);
				output2 = (nblock1 == 1) ? p : (output2 + nblock0);
				dynamicCall(kernel01, mi, nblock1, 1, streamPool[0], (nblock0, input0, output0));
				dynamicCall(kernel21, mi, nblock1, 1, streamPool[1], (nblock0, 1, 1, input1, output1));
				int op20 = (nblock1 == 1) ? 1 : n;
				if(col > 0) {
					dynamicCall(kernel21, mi, nblock1, col * 2, streamPool[2], (nblock0, n, op20, input2, output2));
				}
				nblock0 = nblock1;
			}
			// Scale the second half of p by theta (skipped when theta == 1).
			if( col > 0 && theta != 1 )
			{
				CheckBuffer(p, col * 2, col * 2);
				kernel22<<<dim3(1), dim3(col), 0, streamPool[2]>>>
					(col, p + col, theta);
				CheckBuffer(p, col * 2, col * 2);
			}
			*fd_h = 0;
			if(col > 0)
			{
				// v = M * p via the bmv solves, then fd = v . p (cublas dot
				// writes straight into the mapped host value fd_h).
				bmv::prog0(sy, col, iPitch, p, v, streamPool[2]);
				CheckBuffer(v, col * 2, col * 2);
				CheckBuffer(p, col * 2, col * 2);
				bmv::prog1(wt, col, iPitch, p, v, streamPool[2]);
				CheckBuffer(v, col * 2, col * 2);
				CheckBuffer(p, col * 2, col * 2);
				bmv::prog2(sy, wt, col, iPitch, p, v, streamPool[2]);
				CheckBuffer(v, col * 2, col * 2);
				CheckBuffer(p, col * 2, col * 2);
				cublasSetStream(cublasHd, streamPool[2]);
				cublasRdot(cublasHd, col * 2, v, 1, p, 1, fd_h);
				cublasSetStream(cublasHd, NULL);
			}
			// Host reads of f1/fd/bkmin require the producing streams done.
			checkCudaErrors(cudaStreamSynchronize(streamPool[1]));
			checkCudaErrors(cudaStreamSynchronize(streamPool[2]));
			real f2 = -theta * *f1_h - *fd_h;
			real dt = -*f1_h / f2;
			real dtm = __max(*bkmin_h, dt);
			dtm = __max(0, dtm);
			kernel3<<<dim3(iDivUp(n, 512)), dim3(512), 0, streamPool[0]>>>
				(n, x, g, xcp, xcpb, dtm);
			if(col > 0) {
				kernel4<<<dim3(1), dim3(col * 2), 0, streamPool[1]>>>
					(col * 2, p, c, dtm);
			}
		}
	};
};
6e42ef7e0957405ca55f3274b9037bde4a5252b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "init.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated (hipify) launch-configuration benchmark harness: for each of
// the first argv[1] matrix sizes and all 20 block shapes, round the problem
// size up to a multiple of the block, warm up the `init` kernel (1 + 10
// launches), time 1000 launches, and print
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
int main(int argc, char **argv) {
hipSetDevice(0);
// NOTE(review): argv[1] is parsed without checking argc -- running the binary
// with no argument is undefined behavior; confirm the harness always passes it.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// Round each dimension up to the next multiple of the block extent so the
// grid covers the whole problem.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces context creation; one launch + sync to warm up.
hipFree(0);hipLaunchKernelGGL((
init), dim3(gridBlock),dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
init), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
// Timed section: 1000 launches. NOTE(review): launches are asynchronous and
// no sync precedes steady_clock::now(), so this measures launch overhead
// plus whatever happens to complete -- confirm that is the intent.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
init), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "init.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
init<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
init<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
init<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
97aff8e96436ba522fb7d451d4a6e9d1df7c5852.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "channels_first.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *rinput = NULL;
hipMalloc(&rinput, XSIZE*YSIZE);
int channels = 1;
int height = YSIZE;
int width = XSIZE;
int pad_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
channels_first), dim3(gridBlock),dim3(threadBlock), 0, 0, input,rinput,channels,height,width,pad_size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
channels_first), dim3(gridBlock),dim3(threadBlock), 0, 0, input,rinput,channels,height,width,pad_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
channels_first), dim3(gridBlock),dim3(threadBlock), 0, 0, input,rinput,channels,height,width,pad_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 97aff8e96436ba522fb7d451d4a6e9d1df7c5852.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "channels_first.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *rinput = NULL;
cudaMalloc(&rinput, XSIZE*YSIZE);
int channels = 1;
int height = YSIZE;
int width = XSIZE;
int pad_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
channels_first<<<gridBlock,threadBlock>>>(input,rinput,channels,height,width,pad_size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
channels_first<<<gridBlock,threadBlock>>>(input,rinput,channels,height,width,pad_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
channels_first<<<gridBlock,threadBlock>>>(input,rinput,channels,height,width,pad_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
8148d836b78563290635ad456c92c32f3ad0e45e.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime.h>
#include <paddle/fluid/platform/device_context.h>
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/detail/safe_ref.h"
#include "paddle/fluid/operators/math/blas.h"
namespace paddle {
namespace operators {
#define FINAL_MASK 0xffffffff
#define HALF_WARP 16
#define WARP_SIZE 32
template <typename T>
__inline__ __device__ T warpReduceSum(T val, unsigned lane_mask) {
for (int mask = HALF_WARP; mask > 0; mask >>= 1)
#if __CUDA_ARCH__ >= 350 && TORCH_HIP_VERSION >= 9000
val += __shfl_xor_sync(lane_mask, val, mask, warpSize);
#else
val += __shfl_xor(val, mask, warpSize);
#endif
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__ T blockReduceSum(T val, unsigned mask) {
static __shared__ T shared[WARP_SIZE];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val, mask);
if (lane == 0) shared[wid] = val;
__syncthreads();
// align block_span to warpSize
int block_span = (blockDim.x + warpSize - 1) >> 5;
val = (threadIdx.x < block_span) ? shared[lane] : static_cast<T>(0.0f);
val = warpReduceSum<T>(val, mask);
return val;
}
template <typename T>
__inline__ __device__ T warpReduceMax(T val, unsigned lane_mask) {
for (int mask = HALF_WARP; mask > 0; mask >>= 1)
#if __CUDA_ARCH__ >= 350 && TORCH_HIP_VERSION >= 9000
val = max(val, __shfl_xor_sync(lane_mask, val, mask, warpSize));
#else
val = max(val, __shfl_xor(val, mask, warpSize));
#endif
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__ T blockReduceMax(T val, unsigned mask) {
static __shared__ T shared[WARP_SIZE];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceMax(val, mask);
if (lane == 0) shared[wid] = val;
__syncthreads();
// align block_span to warpSize
int block_span = (blockDim.x + warpSize - 1) >> 5;
val = (threadIdx.x < block_span) ? shared[lane] : -1e10f;
val = warpReduceMax(val, mask);
return val;
}
template <typename T>
__global__ void add_QKV(const T *Q, const T *K, const T *V, T *q_buf_,
T *k_buf_, T *v_buf_, const T *bias_q, const T *bias_k,
const T *bias_v, int batch_size, int seq_len,
int head_num, int size_per_head) {
const T *data_ptr_q, *data_ptr_k, *data_ptr_v;
const T *bias_ptr_q, *bias_ptr_k, *bias_ptr_v;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int row_offset = (blockIdx.x % m) * n;
data_ptr_q = Q + row_offset;
data_ptr_k = K + row_offset;
data_ptr_v = V + row_offset;
// bias ptr
bias_ptr_q = bias_q;
bias_ptr_k = bias_k;
bias_ptr_v = bias_v;
int batch_id = (blockIdx.x % m) / seq_len;
int head_id = threadIdx.x / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
int word_start_id = (blockIdx.x) % seq_len;
#if __CUDA_ARCH__ >= 350
T tmp_q = __ldg(&data_ptr_q[threadIdx.x]) + __ldg(&bias_ptr_q[threadIdx.x]);
T tmp_k = __ldg(&data_ptr_k[threadIdx.x]) + __ldg(&bias_ptr_k[threadIdx.x]);
T tmp_v = __ldg(&data_ptr_v[threadIdx.x]) + __ldg(&bias_ptr_v[threadIdx.x]);
#else
T tmp_q = data_ptr_q[threadIdx.x] + bias_ptr_q[threadIdx.x];
T tmp_k = data_ptr_k[threadIdx.x] + bias_ptr_k[threadIdx.x];
T tmp_v = data_ptr_v[threadIdx.x] + bias_ptr_v[threadIdx.x];
#endif
int target_id = batch_id * (seq_len * head_num * size_per_head) +
head_id * seq_len * size_per_head +
word_start_id * size_per_head + id_in_head;
q_buf_[target_id] = tmp_q;
k_buf_[target_id] = tmp_k;
v_buf_[target_id] = tmp_v;
}
// Keep to compare performance
template <typename T>
__global__ void add_QKV_V2(const T *Q, const T *K, const T *V, T *q_buf_,
T *k_buf_, T *v_buf_, const T *bias_Q,
const T *bias_K, const T *bias_V, int batch_size,
int seq_len, int head_num, int size_per_head,
const int word_per_block) {
const T *data_ptr;
T *buf_ptr;
const T *bias_ptr;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int qkv_id = blockIdx.x * word_per_block / m;
int row_offset = (blockIdx.x * word_per_block % m) * n;
if (qkv_id == 0) {
data_ptr = Q + row_offset;
buf_ptr = q_buf_;
bias_ptr = bias_Q;
} else if (qkv_id == 1) {
data_ptr = K + row_offset;
buf_ptr = k_buf_;
bias_ptr = bias_K;
} else {
data_ptr = V + row_offset;
buf_ptr = v_buf_;
bias_ptr = bias_V;
}
int batch_id = (blockIdx.x * word_per_block % m) / seq_len;
int head_id = threadIdx.x / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
int word_start_id = (blockIdx.x * word_per_block) % seq_len;
#if __CUDA_ARCH__ >= 350
T bias = __ldg(&bias_ptr[threadIdx.x]);
#else
T bias = bias_ptr[threadIdx.x];
#endif
for (int i = word_start_id; i < word_start_id + word_per_block; ++i) {
T tmp = data_ptr[threadIdx.x] + bias;
int target_id = batch_id * (seq_len * head_num * size_per_head) +
head_id * seq_len * size_per_head + i * size_per_head +
id_in_head;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
}
template <typename T>
__global__ void softmax_kernel_with_eltadd(T *qk_buf_, const T *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
__shared__ float s_sum, s_max;
float qk = threadIdx.x < seq_len
? static_cast<float>((qk_buf_[threadIdx.x + qk_offset] +
bias_qk_[threadIdx.x + qk_offset]))
: 0.0f;
float tmp = threadIdx.x < seq_len ? static_cast<float>(qk) : -1e20f;
float max_val = blockReduceMax<float>(tmp, mask);
if (threadIdx.x == 0) s_max = max_val;
__syncthreads();
float qk_tmp =
threadIdx.x < seq_len ? __expf(static_cast<float>(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp, mask);
if (threadIdx.x == 0) {
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if (threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum);
}
// For verify result
template <typename T>
__global__ void elt_qk_add(const T *bias_qk, T *qk_buf, int head_num,
int seq_len, int size_per_head, int batch_size) {
int m = batch_size * head_num * seq_len;
int row_id = blockIdx.x % m;
int dst_id = row_id * seq_len + threadIdx.x;
const T *bias_ptr = bias_qk;
#if __CUDA_ARCH__ >= 350
int tmp_bias = __ldg(&bias_ptr[dst_id]);
#else
int tmp_bias = bias_ptr[dst_id];
#endif
qk_buf[dst_id] += tmp_bias;
}
// Compute Q*K->softmax->eltadd
template <typename T>
void MatMulWithHeadQK(const platform::CUDADeviceContext &context, int head_num,
int seq_len, int size_per_head, int batch_size,
bool q_trans, bool k_trans, T *q_buf_, T *k_buf_,
T *qk_buf_, const T *bias_qk, T alpha, T beta) {
CBLAS_TRANSPOSE transA = !q_trans ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = !k_trans ? CblasNoTrans : CblasTrans;
auto blas = math::GetBlas<platform::CUDADeviceContext, T>(context);
auto stream = context.stream();
blas.BatchedGEMM(transA, transB, seq_len, seq_len, size_per_head, alpha,
q_buf_, k_buf_, beta, qk_buf_, batch_size * head_num,
seq_len * size_per_head, seq_len * size_per_head);
int grid = batch_size * head_num * seq_len;
int block = seq_len;
// Align block to 32, also limit seq_len to max block size.
PADDLE_ENFORCE_LE(seq_len, 1024, platform::errors::InvalidArgument(
"seq_len should <= 1024, "
"but received seq_len is:%d",
seq_len));
if (seq_len <= 32)
block = 32;
else if (seq_len > 32 && seq_len <= 64)
block = 64;
else if (seq_len > 64 && seq_len <= 128)
block = 128;
else if (seq_len > 128 && seq_len <= 256)
block = 256;
else if (seq_len > 256 && seq_len <= 512)
block = 512;
else
block = 1024;
hipLaunchKernelGGL(( softmax_kernel_with_eltadd<T>), dim3(grid), dim3(block), 0, stream,
qk_buf_, bias_qk, batch_size, head_num, seq_len, FINAL_MASK);
}
template <typename T>
__global__ void transpose(T *src, T *dst, const int batch_size,
const int seq_len, const int head_num,
const int size_per_head) {
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) +
seq_id * head_num * size_per_head + head_id * size_per_head +
threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
// Compute QK*V->transpose
template <typename T>
void MatMulWithHeadQKV(const platform::CUDADeviceContext &context, int head_num,
int seq_len, int size_per_head, int batch_size,
bool qk_trans, bool v_trans, T *v_buf_, const T *qk_buf_,
T *dst, T *out, T alpha, T beta) {
int m = batch_size * seq_len;
int k = head_num * size_per_head;
auto blas = math::GetBlas<platform::CUDADeviceContext, T>(context);
auto stream = context.stream();
CBLAS_TRANSPOSE transA = !qk_trans ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = !v_trans ? CblasNoTrans : CblasTrans;
blas.BatchedGEMM(transA, transB, seq_len, size_per_head, seq_len, alpha,
qk_buf_, v_buf_, beta, dst, batch_size * head_num,
seq_len * seq_len, seq_len * size_per_head);
int grid = batch_size * head_num * seq_len;
int block = size_per_head;
hipLaunchKernelGGL(( transpose<T>), dim3(grid), dim3(block), 0, stream, dst, out, batch_size, seq_len,
head_num, size_per_head);
}
template <typename T>
void MultiHeadGPUCompute(const platform::CUDADeviceContext &dev_ctx,
int head_num, const framework::DDim &mat_q,
const framework::DDim &mat_k,
const framework::DDim &mat_v, const T *Q, const T *K,
const T *V, const T *bias_q, const T *bias_k,
const T *bias_v, const T *bias_qk, T *out, T alpha,
T beta, bool trans_q, bool trans_k, bool trans_v) {
int seq_len = mat_q[1];
int size_per_head = (mat_q[2] / head_num);
int batch_size = mat_q[0];
int buf_size = batch_size * head_num * seq_len * size_per_head;
int qk_buf_size = batch_size * head_num * seq_len * seq_len;
auto alloc_buf =
memory::Alloc(dev_ctx, (buf_size * 4 + qk_buf_size) * sizeof(T));
T *buf = reinterpret_cast<T *>(alloc_buf->ptr());
T *q_buf = buf;
T *k_buf = buf + buf_size;
T *v_buf = buf + 2 * buf_size;
T *qk_buf = buf + 3 * buf_size;
T *dst_buf = buf + 3 * buf_size + qk_buf_size;
int m = batch_size * seq_len;
int k = head_num * size_per_head;
// Each block process head*size-per_head element,
// have m lines. bias is m lines
auto blas = math::GetBlas<platform::CUDADeviceContext, T>(dev_ctx);
auto stream = dev_ctx.stream();
int grid = m;
PADDLE_ENFORCE_LE(k, 1024,
"Input head_number * size_per_head should <= 1024");
int block = k <= 1024 ? k : 1024;
hipLaunchKernelGGL(( add_QKV<T>), dim3(grid), dim3(block), 0, stream, Q, K, V, q_buf, k_buf, v_buf, bias_q,
bias_k, bias_v, batch_size, seq_len,
head_num, size_per_head);
MatMulWithHeadQK<T>(dev_ctx, head_num, seq_len, size_per_head, batch_size,
trans_q, trans_k, q_buf, k_buf, qk_buf, bias_qk, alpha,
beta);
MatMulWithHeadQKV<T>(dev_ctx, head_num, seq_len, size_per_head, batch_size,
false, trans_v, v_buf, qk_buf, dst_buf, out, T(1.0),
beta);
}
template <typename DeviceContext, typename T>
class MultiHeadMatMulKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *q = context.Input<framework::Tensor>("Q");
auto *k = context.Input<framework::Tensor>("K");
auto *v = context.Input<framework::Tensor>("V");
auto &bias_q = detail::Ref(context.Input<framework::Tensor>("BiasQ"),
"Cannot find BiasQ");
auto &bias_k = detail::Ref(context.Input<framework::Tensor>("BiasK"),
"Cannot find BiasK");
auto &bias_v = detail::Ref(context.Input<framework::Tensor>("BiasV"),
"Cannot find BiasV");
auto &bias_qk = detail::Ref(context.Input<framework::Tensor>("BiasQK"),
"Cannot find QK");
auto *out = context.Output<framework::Tensor>("Out");
out->mutable_data<T>(context.GetPlace());
T scale = static_cast<T>(context.Attr<float>("alpha"));
bool transpose_q = context.Attr<bool>("transpose_Q");
bool transpose_k = context.Attr<bool>("transpose_K");
bool transpose_v = context.Attr<bool>("transpose_V");
int head_number = context.Attr<int>("head_number");
// compute q*k with eltadd
auto &device_ctx = context.template device_context<DeviceContext>();
MultiHeadGPUCompute<T>(device_ctx, head_number, q->dims(), k->dims(),
v->dims(), q->data<T>(), k->data<T>(), v->data<T>(),
bias_q.data<T>(), bias_k.data<T>(), bias_v.data<T>(),
bias_qk.data<T>(), out->data<T>(), scale, T(0.0),
transpose_q, transpose_k, transpose_v);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
multihead_matmul,
ops::MultiHeadMatMulKernel<paddle::platform::CUDADeviceContext, float>,
ops::MultiHeadMatMulKernel<paddle::platform::CUDADeviceContext, double>);
| 8148d836b78563290635ad456c92c32f3ad0e45e.cu | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_runtime.h>
#include <paddle/fluid/platform/device_context.h>
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/detail/safe_ref.h"
#include "paddle/fluid/operators/math/blas.h"
namespace paddle {
namespace operators {
#define FINAL_MASK 0xffffffff
#define HALF_WARP 16
#define WARP_SIZE 32
template <typename T>
__inline__ __device__ T warpReduceSum(T val, unsigned lane_mask) {
for (int mask = HALF_WARP; mask > 0; mask >>= 1)
#if __CUDA_ARCH__ >= 350 && CUDA_VERSION >= 9000
val += __shfl_xor_sync(lane_mask, val, mask, warpSize);
#else
val += __shfl_xor(val, mask, warpSize);
#endif
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__ T blockReduceSum(T val, unsigned mask) {
static __shared__ T shared[WARP_SIZE];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val, mask);
if (lane == 0) shared[wid] = val;
__syncthreads();
// align block_span to warpSize
int block_span = (blockDim.x + warpSize - 1) >> 5;
val = (threadIdx.x < block_span) ? shared[lane] : static_cast<T>(0.0f);
val = warpReduceSum<T>(val, mask);
return val;
}
template <typename T>
__inline__ __device__ T warpReduceMax(T val, unsigned lane_mask) {
for (int mask = HALF_WARP; mask > 0; mask >>= 1)
#if __CUDA_ARCH__ >= 350 && CUDA_VERSION >= 9000
val = max(val, __shfl_xor_sync(lane_mask, val, mask, warpSize));
#else
val = max(val, __shfl_xor(val, mask, warpSize));
#endif
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__ T blockReduceMax(T val, unsigned mask) {
static __shared__ T shared[WARP_SIZE];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceMax(val, mask);
if (lane == 0) shared[wid] = val;
__syncthreads();
// align block_span to warpSize
int block_span = (blockDim.x + warpSize - 1) >> 5;
val = (threadIdx.x < block_span) ? shared[lane] : -1e10f;
val = warpReduceMax(val, mask);
return val;
}
template <typename T>
__global__ void add_QKV(const T *Q, const T *K, const T *V, T *q_buf_,
T *k_buf_, T *v_buf_, const T *bias_q, const T *bias_k,
const T *bias_v, int batch_size, int seq_len,
int head_num, int size_per_head) {
const T *data_ptr_q, *data_ptr_k, *data_ptr_v;
const T *bias_ptr_q, *bias_ptr_k, *bias_ptr_v;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int row_offset = (blockIdx.x % m) * n;
data_ptr_q = Q + row_offset;
data_ptr_k = K + row_offset;
data_ptr_v = V + row_offset;
// bias ptr
bias_ptr_q = bias_q;
bias_ptr_k = bias_k;
bias_ptr_v = bias_v;
int batch_id = (blockIdx.x % m) / seq_len;
int head_id = threadIdx.x / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
int word_start_id = (blockIdx.x) % seq_len;
#if __CUDA_ARCH__ >= 350
T tmp_q = __ldg(&data_ptr_q[threadIdx.x]) + __ldg(&bias_ptr_q[threadIdx.x]);
T tmp_k = __ldg(&data_ptr_k[threadIdx.x]) + __ldg(&bias_ptr_k[threadIdx.x]);
T tmp_v = __ldg(&data_ptr_v[threadIdx.x]) + __ldg(&bias_ptr_v[threadIdx.x]);
#else
T tmp_q = data_ptr_q[threadIdx.x] + bias_ptr_q[threadIdx.x];
T tmp_k = data_ptr_k[threadIdx.x] + bias_ptr_k[threadIdx.x];
T tmp_v = data_ptr_v[threadIdx.x] + bias_ptr_v[threadIdx.x];
#endif
int target_id = batch_id * (seq_len * head_num * size_per_head) +
head_id * seq_len * size_per_head +
word_start_id * size_per_head + id_in_head;
q_buf_[target_id] = tmp_q;
k_buf_[target_id] = tmp_k;
v_buf_[target_id] = tmp_v;
}
// Keep to compare performance
template <typename T>
__global__ void add_QKV_V2(const T *Q, const T *K, const T *V, T *q_buf_,
T *k_buf_, T *v_buf_, const T *bias_Q,
const T *bias_K, const T *bias_V, int batch_size,
int seq_len, int head_num, int size_per_head,
const int word_per_block) {
const T *data_ptr;
T *buf_ptr;
const T *bias_ptr;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int qkv_id = blockIdx.x * word_per_block / m;
int row_offset = (blockIdx.x * word_per_block % m) * n;
if (qkv_id == 0) {
data_ptr = Q + row_offset;
buf_ptr = q_buf_;
bias_ptr = bias_Q;
} else if (qkv_id == 1) {
data_ptr = K + row_offset;
buf_ptr = k_buf_;
bias_ptr = bias_K;
} else {
data_ptr = V + row_offset;
buf_ptr = v_buf_;
bias_ptr = bias_V;
}
int batch_id = (blockIdx.x * word_per_block % m) / seq_len;
int head_id = threadIdx.x / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
int word_start_id = (blockIdx.x * word_per_block) % seq_len;
#if __CUDA_ARCH__ >= 350
T bias = __ldg(&bias_ptr[threadIdx.x]);
#else
T bias = bias_ptr[threadIdx.x];
#endif
for (int i = word_start_id; i < word_start_id + word_per_block; ++i) {
T tmp = data_ptr[threadIdx.x] + bias;
int target_id = batch_id * (seq_len * head_num * size_per_head) +
head_id * seq_len * size_per_head + i * size_per_head +
id_in_head;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
}
template <typename T>
__global__ void softmax_kernel_with_eltadd(T *qk_buf_, const T *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
__shared__ float s_sum, s_max;
float qk = threadIdx.x < seq_len
? static_cast<float>((qk_buf_[threadIdx.x + qk_offset] +
bias_qk_[threadIdx.x + qk_offset]))
: 0.0f;
float tmp = threadIdx.x < seq_len ? static_cast<float>(qk) : -1e20f;
float max_val = blockReduceMax<float>(tmp, mask);
if (threadIdx.x == 0) s_max = max_val;
__syncthreads();
float qk_tmp =
threadIdx.x < seq_len ? __expf(static_cast<float>(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp, mask);
if (threadIdx.x == 0) {
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if (threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum);
}
// For verify result
template <typename T>
__global__ void elt_qk_add(const T *bias_qk, T *qk_buf, int head_num,
int seq_len, int size_per_head, int batch_size) {
int m = batch_size * head_num * seq_len;
int row_id = blockIdx.x % m;
int dst_id = row_id * seq_len + threadIdx.x;
const T *bias_ptr = bias_qk;
#if __CUDA_ARCH__ >= 350
int tmp_bias = __ldg(&bias_ptr[dst_id]);
#else
int tmp_bias = bias_ptr[dst_id];
#endif
qk_buf[dst_id] += tmp_bias;
}
// Compute Q*K->softmax->eltadd
template <typename T>
void MatMulWithHeadQK(const platform::CUDADeviceContext &context, int head_num,
int seq_len, int size_per_head, int batch_size,
bool q_trans, bool k_trans, T *q_buf_, T *k_buf_,
T *qk_buf_, const T *bias_qk, T alpha, T beta) {
CBLAS_TRANSPOSE transA = !q_trans ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = !k_trans ? CblasNoTrans : CblasTrans;
auto blas = math::GetBlas<platform::CUDADeviceContext, T>(context);
auto stream = context.stream();
blas.BatchedGEMM(transA, transB, seq_len, seq_len, size_per_head, alpha,
q_buf_, k_buf_, beta, qk_buf_, batch_size * head_num,
seq_len * size_per_head, seq_len * size_per_head);
int grid = batch_size * head_num * seq_len;
int block = seq_len;
// Align block to 32, also limit seq_len to max block size.
PADDLE_ENFORCE_LE(seq_len, 1024, platform::errors::InvalidArgument(
"seq_len should <= 1024, "
"but received seq_len is:%d",
seq_len));
if (seq_len <= 32)
block = 32;
else if (seq_len > 32 && seq_len <= 64)
block = 64;
else if (seq_len > 64 && seq_len <= 128)
block = 128;
else if (seq_len > 128 && seq_len <= 256)
block = 256;
else if (seq_len > 256 && seq_len <= 512)
block = 512;
else
block = 1024;
softmax_kernel_with_eltadd<T><<<grid, block, 0, stream>>>(
qk_buf_, bias_qk, batch_size, head_num, seq_len, FINAL_MASK);
}
template <typename T>
__global__ void transpose(T *src, T *dst, const int batch_size,
const int seq_len, const int head_num,
const int size_per_head) {
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) +
seq_id * head_num * size_per_head + head_id * size_per_head +
threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
// Compute QK*V->transpose
template <typename T>
void MatMulWithHeadQKV(const platform::CUDADeviceContext &context, int head_num,
int seq_len, int size_per_head, int batch_size,
bool qk_trans, bool v_trans, T *v_buf_, const T *qk_buf_,
T *dst, T *out, T alpha, T beta) {
int m = batch_size * seq_len;
int k = head_num * size_per_head;
auto blas = math::GetBlas<platform::CUDADeviceContext, T>(context);
auto stream = context.stream();
CBLAS_TRANSPOSE transA = !qk_trans ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = !v_trans ? CblasNoTrans : CblasTrans;
blas.BatchedGEMM(transA, transB, seq_len, size_per_head, seq_len, alpha,
qk_buf_, v_buf_, beta, dst, batch_size * head_num,
seq_len * seq_len, seq_len * size_per_head);
int grid = batch_size * head_num * seq_len;
int block = size_per_head;
transpose<T><<<grid, block, 0, stream>>>(dst, out, batch_size, seq_len,
head_num, size_per_head);
}
template <typename T>
void MultiHeadGPUCompute(const platform::CUDADeviceContext &dev_ctx,
int head_num, const framework::DDim &mat_q,
const framework::DDim &mat_k,
const framework::DDim &mat_v, const T *Q, const T *K,
const T *V, const T *bias_q, const T *bias_k,
const T *bias_v, const T *bias_qk, T *out, T alpha,
T beta, bool trans_q, bool trans_k, bool trans_v) {
int seq_len = mat_q[1];
int size_per_head = (mat_q[2] / head_num);
int batch_size = mat_q[0];
int buf_size = batch_size * head_num * seq_len * size_per_head;
int qk_buf_size = batch_size * head_num * seq_len * seq_len;
auto alloc_buf =
memory::Alloc(dev_ctx, (buf_size * 4 + qk_buf_size) * sizeof(T));
T *buf = reinterpret_cast<T *>(alloc_buf->ptr());
T *q_buf = buf;
T *k_buf = buf + buf_size;
T *v_buf = buf + 2 * buf_size;
T *qk_buf = buf + 3 * buf_size;
T *dst_buf = buf + 3 * buf_size + qk_buf_size;
int m = batch_size * seq_len;
int k = head_num * size_per_head;
// Each block process head*size-per_head element,
// have m lines. bias is m lines
auto blas = math::GetBlas<platform::CUDADeviceContext, T>(dev_ctx);
auto stream = dev_ctx.stream();
int grid = m;
PADDLE_ENFORCE_LE(k, 1024,
"Input head_number * size_per_head should <= 1024");
int block = k <= 1024 ? k : 1024;
add_QKV<T><<<grid, block, 0, stream>>>(Q, K, V, q_buf, k_buf, v_buf, bias_q,
bias_k, bias_v, batch_size, seq_len,
head_num, size_per_head);
MatMulWithHeadQK<T>(dev_ctx, head_num, seq_len, size_per_head, batch_size,
trans_q, trans_k, q_buf, k_buf, qk_buf, bias_qk, alpha,
beta);
MatMulWithHeadQKV<T>(dev_ctx, head_num, seq_len, size_per_head, batch_size,
false, trans_v, v_buf, qk_buf, dst_buf, out, T(1.0),
beta);
}
template <typename DeviceContext, typename T>
class MultiHeadMatMulKernel : public framework::OpKernel<T> {
 public:
  // Reads the Q/K/V tensors, their element-wise biases and the attention
  // mask bias (BiasQK) from the execution context, then dispatches the
  // fused multi-head attention computation into the "Out" tensor.
  void Compute(const framework::ExecutionContext &context) const override {
    auto *tensor_q = context.Input<framework::Tensor>("Q");
    auto *tensor_k = context.Input<framework::Tensor>("K");
    auto *tensor_v = context.Input<framework::Tensor>("V");
    auto &bias_q = detail::Ref(context.Input<framework::Tensor>("BiasQ"),
                               "Cannot find BiasQ");
    auto &bias_k = detail::Ref(context.Input<framework::Tensor>("BiasK"),
                               "Cannot find BiasK");
    auto &bias_v = detail::Ref(context.Input<framework::Tensor>("BiasV"),
                               "Cannot find BiasV");
    auto &bias_qk = detail::Ref(context.Input<framework::Tensor>("BiasQK"),
                                "Cannot find QK");
    auto *out = context.Output<framework::Tensor>("Out");
    out->mutable_data<T>(context.GetPlace());

    // Operator attributes controlling the fused kernel.
    const T alpha = static_cast<T>(context.Attr<float>("alpha"));
    const bool trans_q = context.Attr<bool>("transpose_Q");
    const bool trans_k = context.Attr<bool>("transpose_K");
    const bool trans_v = context.Attr<bool>("transpose_V");
    const int num_heads = context.Attr<int>("head_number");

    auto &dev_ctx = context.template device_context<DeviceContext>();
    MultiHeadGPUCompute<T>(
        dev_ctx, num_heads, tensor_q->dims(), tensor_k->dims(),
        tensor_v->dims(), tensor_q->data<T>(), tensor_k->data<T>(),
        tensor_v->data<T>(), bias_q.data<T>(), bias_k.data<T>(),
        bias_v.data<T>(), bias_qk.data<T>(), out->data<T>(), alpha, T(0.0),
        trans_q, trans_k, trans_v);
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
// Register the fused multi-head matmul op for float and double inputs on CUDA.
REGISTER_OP_CUDA_KERNEL(
    multihead_matmul,
    ops::MultiHeadMatMulKernel<paddle::platform::CUDADeviceContext, float>,
    ops::MultiHeadMatMulKernel<paddle::platform::CUDADeviceContext, double>);
|
9960a98d1f01e2193bea5ad5b40ab658482d7d60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../inc/yyfnutil.h"
/*************************************************************************************************************************/
/*
nx-1
z[i]= sum x[j]*y[i+j] ; i=0,1,2,3,4...;
j=0
z: length nz
x: length nx
y: length (nx+nz)
*/
// Cross-correlation kernel:
//   z[b] = sum_{i=0}^{nx-1} x[i] * y[b + i]   for each block index b < nz.
// Launch layout: one block per output element (grid = nz); each thread
// accumulates a strided partial sum which is then tree-reduced in shared
// memory. Dynamic shared memory: blockDim.x * sizeof(T) bytes.
// NOTE(review): the reduction loop assumes blockDim.x is a power of two
// (the launcher uses 256) -- confirm before changing the block size.
template<typename T>
__global__ void static kernel(int nx, int nz, T* x, T* y, T* z){
    extern T __shared__ s[];
    if(blockIdx.x < nz){
        // Strided partial sum: thread t covers elements t, t+blockDim.x, ...
        T ret = (T) 0;
        for(int i = threadIdx.x; i < nx; i += blockDim.x){
            ret += x[i]*y[blockIdx.x + i];
        }
        s[threadIdx.x] = ret;
        __syncthreads();
        // Pairwise tree reduction over the block's partial sums.
        for(int i = blockDim.x/2; i > 0; i /=2){
            if(threadIdx.x < i){
                s[threadIdx.x] += s[threadIdx.x+i];
            }
            __syncthreads();
        }
        // Thread 0 publishes this block's result.
        if(0 == threadIdx.x)
            z[blockIdx.x] = s[0];
    }
}
// Host-side launcher: one block per output sample, a fixed 256-thread
// block, and one T per thread of dynamic shared memory for the reduction.
template<typename T>
void crossRelated(int nx, int nz, T* x, T* y, T* z){
    const int threadsPerBlock = 256;
    hipLaunchKernelGGL((kernel<T>), dim3(nz), dim3(threadsPerBlock),
                       threadsPerBlock*sizeof(T), 0, nx, nz, x, y, z);
}
// Copies the inputs to the device, runs the cross-correlation kernel and
// copies the nz results back into z.
//   x: host input of length nx
//   y: host input of length nx + nz
//   z: host output of length nz (fully overwritten by the kernel)
template<typename T>
void runGPU(int nx, int nz, const T* x, const T* y, T* z){
    T* d_x;
    CudaMalloc((void**)&d_x, nx*sizeof(T));
    CudaMemcpy(d_x, x, nx*sizeof(T), hipMemcpyHostToDevice);
    T* d_y;
    CudaMalloc((void**)&d_y, (nx+nz)*sizeof(T));
    CudaMemcpy(d_y, y, (nx+nz)*sizeof(T), hipMemcpyHostToDevice);
    T* d_z;
    CudaMalloc((void**)&d_z, nz*sizeof(T));
    // d_z is an output-only buffer: the kernel writes every one of its nz
    // elements (one per block), so the former host-to-device upload of z
    // was redundant and has been dropped to save a transfer.
    crossRelated<T>(nx, nz, d_x, d_y, d_z);
    CudaMemcpy(z, d_z, nz*sizeof(T), hipMemcpyDeviceToHost);
    CudaFree(d_x);
    CudaFree(d_y);
    CudaFree(d_z);
}
// Fill d[0..size) with the constant 1.0f. (True randomisation is kept
// disabled, matching the original behaviour.)
template<typename T>
static void randomize(T *d, int size){
    for(T* p = d; p != d + size; ++p){
        *p = 1.0f;
        //*p = 1.0f*rand()/RAND_MAX;
    }
}
// Driver: builds constant test inputs, runs the GPU cross-correlation of
// x (length nx) against y (length nx + nz), and prints the nz results.
int main(int argc, char *argv[]){
    const int nx = 6000;
    const int nz = 200;
    float *x = (float*) Malloc(nx*sizeof(float));
    randomize(x, nx);
    float *y = (float*) Malloc((nx+nz)*sizeof(float));
    randomize(y, nx+nz);
    float *z = (float*) Malloc(nz*sizeof(float));
    runGPU<float>(nx, nz, x, y, z);
    for(int k = 0; k < nz; ++k)
        printf("%.5f\n", z[k]);
    free(x);
    free(y);
    free(z);
    return 0;
}
| 9960a98d1f01e2193bea5ad5b40ab658482d7d60.cu | #include "../inc/yyfnutil.h"
/*************************************************************************************************************************/
/*
nx-1
z[i]= sum x[j]*y[i+j] ; i=0,1,2,3,4...;
j=0
z: length nz
x: length nx
y: length (nx+nz)
*/
// Cross-correlation kernel:
//   z[b] = sum_{i=0}^{nx-1} x[i] * y[b + i]   for each block index b < nz.
// Launch layout: one block per output element (grid = nz); each thread
// accumulates a strided partial sum which is then tree-reduced in shared
// memory. Dynamic shared memory: blockDim.x * sizeof(T) bytes.
// NOTE(review): the reduction loop assumes blockDim.x is a power of two
// (the launcher uses 256) -- confirm before changing the block size.
template<typename T>
__global__ void static kernel(int nx, int nz, T* x, T* y, T* z){
    extern T __shared__ s[];
    if(blockIdx.x < nz){
        // Strided partial sum: thread t covers elements t, t+blockDim.x, ...
        T ret = (T) 0;
        for(int i = threadIdx.x; i < nx; i += blockDim.x){
            ret += x[i]*y[blockIdx.x + i];
        }
        s[threadIdx.x] = ret;
        __syncthreads();
        // Pairwise tree reduction over the block's partial sums.
        for(int i = blockDim.x/2; i > 0; i /=2){
            if(threadIdx.x < i){
                s[threadIdx.x] += s[threadIdx.x+i];
            }
            __syncthreads();
        }
        // Thread 0 publishes this block's result.
        if(0 == threadIdx.x)
            z[blockIdx.x] = s[0];
    }
}
// Host-side launcher: one block per output sample, a fixed 256-thread
// block, and one T per thread of dynamic shared memory for the reduction.
template<typename T>
void crossRelated(int nx, int nz, T* x, T* y, T* z){
    const int threadsPerBlock = 256;
    kernel<T><<<nz, threadsPerBlock, threadsPerBlock*sizeof(T)>>>(nx, nz, x, y, z);
}
// Copies the inputs to the device, runs the cross-correlation kernel and
// copies the nz results back into z.
//   x: host input of length nx
//   y: host input of length nx + nz
//   z: host output of length nz (fully overwritten by the kernel)
template<typename T>
void runGPU(int nx, int nz, const T* x, const T* y, T* z){
    T* d_x;
    CudaMalloc((void**)&d_x, nx*sizeof(T));
    CudaMemcpy(d_x, x, nx*sizeof(T), cudaMemcpyHostToDevice);
    T* d_y;
    CudaMalloc((void**)&d_y, (nx+nz)*sizeof(T));
    CudaMemcpy(d_y, y, (nx+nz)*sizeof(T), cudaMemcpyHostToDevice);
    T* d_z;
    CudaMalloc((void**)&d_z, nz*sizeof(T));
    // d_z is an output-only buffer: the kernel writes every one of its nz
    // elements (one per block), so the former host-to-device upload of z
    // was redundant and has been dropped to save a transfer.
    crossRelated<T>(nx, nz, d_x, d_y, d_z);
    CudaMemcpy(z, d_z, nz*sizeof(T), cudaMemcpyDeviceToHost);
    CudaFree(d_x);
    CudaFree(d_y);
    CudaFree(d_z);
}
// Fill d[0..size) with the constant 1.0f. (True randomisation is kept
// disabled, matching the original behaviour.)
template<typename T>
static void randomize(T *d, int size){
    for(T* p = d; p != d + size; ++p){
        *p = 1.0f;
        //*p = 1.0f*rand()/RAND_MAX;
    }
}
// Driver: builds constant test inputs, runs the GPU cross-correlation of
// x (length nx) against y (length nx + nz), and prints the nz results.
int main(int argc, char *argv[]){
    const int nx = 6000;
    const int nz = 200;
    float *x = (float*) Malloc(nx*sizeof(float));
    randomize(x, nx);
    float *y = (float*) Malloc((nx+nz)*sizeof(float));
    randomize(y, nx+nz);
    float *z = (float*) Malloc(nz*sizeof(float));
    runGPU<float>(nx, nz, x, y, z);
    for(int k = 0; k < nz; ++k)
        printf("%.5f\n", z[k]);
    free(x);
    free(y);
    free(z);
    return 0;
}
|
934287003e2c26494961f1cb7d9faf107d5b4383.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <float.h>
#include <math.h>
#include <thrust/tuple.h>
#include <cstdio>
#include <tuple>
#include "rasterize_points/bitmask.cuh"
#include "rasterize_points/rasterization_utils.cuh"
#include "utils/float_math.cuh"
#include "utils/geometry_utils.cuh"
namespace {
// A structure for holding details about a pixel.
struct Pixel {
  float z; // interpolated depth of the face at this pixel
  int64_t idx; // index of the face in the packed (F, 3, 3) face_verts
  float dist; // signed pixel-to-face distance (negative when inside the face)
  float3 bary; // (possibly clipped) barycentric coordinates of the pixel
};
// Order pixels by depth so that "smaller" means closer to the camera.
__device__ bool operator<(const Pixel& a, const Pixel& b) {
  return a.z < b.z;
}
// Minimum of three floats.
__device__ float FloatMin3(const float p1, const float p2, const float p3) {
  return fminf(fminf(p1, p2), p3);
}
// Maximum of three floats.
__device__ float FloatMax3(const float p1, const float p2, const float p3) {
  return fmaxf(fmaxf(p1, p2), p3);
}
// Load the three xyz vertices of face `face_idx` from the packed
// (F, 3, 3) face_verts buffer (9 consecutive floats per face).
__device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts(
    const float* face_verts,
    int face_idx) {
  const float* f = face_verts + face_idx * 9;
  const float3 v0xyz = make_float3(f[0], f[1], f[2]);
  const float3 v1xyz = make_float3(f[3], f[4], f[5]);
  const float3 v2xyz = make_float3(f[6], f[7], f[8]);
  return thrust::make_tuple(v0xyz, v1xyz, v2xyz);
}
// Axis-aligned bounds of the triangle (v0, v1, v2), returned as
// ((xmin, xmax), (ymin, ymax), (zmin, zmax)).
__device__ thrust::tuple<float2, float2, float2>
GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) {
  const float2 xlims =
      make_float2(FloatMin3(v0.x, v1.x, v2.x), FloatMax3(v0.x, v1.x, v2.x));
  const float2 ylims =
      make_float2(FloatMin3(v0.y, v1.y, v2.y), FloatMax3(v0.y, v1.y, v2.y));
  const float2 zlims =
      make_float2(FloatMin3(v0.z, v1.z, v2.z), FloatMax3(v0.z, v1.z, v2.z));
  return thrust::make_tuple(xlims, ylims, zlims);
}
// Return true when pixel pxy lies outside the blur-expanded xy bounding
// box of triangle (v0, v1, v2), or when the triangle has a vertex at or
// behind the camera (z < kEpsilon) and must therefore not be rasterized.
__device__ bool CheckPointOutsideBoundingBox(
    float3 v0,
    float3 v1,
    float3 v2,
    float blur_radius,
    float2 pxy) {
  const auto bbox = GetFaceBoundingBox(v0, v1, v2);
  const float2 xlims = thrust::get<0>(bbox);
  const float2 ylims = thrust::get<1>(bbox);
  const float2 zlims = thrust::get<2>(bbox);
  // Faces with at least one vertex behind the camera won't render correctly
  // and should be removed or clipped before calling the rasterizer.
  const bool z_invalid = zlims.x < kEpsilon;
  // Outside when the pixel misses the box grown by blur_radius on each side.
  const bool x_outside =
      pxy.x > xlims.y + blur_radius || pxy.x < xlims.x - blur_radius;
  const bool y_outside =
      pxy.y > ylims.y + blur_radius || pxy.y < ylims.x - blur_radius;
  return x_outside || y_outside || z_invalid;
}
// This function checks if a pixel given by xy location pxy lies within the
// face with index face_idx in face_verts. One of the inputs is a list (q)
// which contains Pixel structs with the indices of the faces which intersect
// with this pixel sorted by closest z distance. If the point pxy lies in the
// face, the list (q) is updated and re-orderered in place. In addition
// the auxillary variables q_size, q_max_z and q_max_idx are also modified.
// This code is shared between RasterizeMeshesNaiveCudaKernel and
// RasterizeMeshesFineCudaKernel.
template <typename FaceQ>
__device__ void CheckPixelInsideFace(
    const float* face_verts, // (F, 3, 3)
    const int face_idx,
    int& q_size, // number of valid entries currently in q (<= K)
    float& q_max_z, // depth of the farthest entry in q
    int& q_max_idx, // index of that farthest entry within q
    FaceQ& q,
    const float blur_radius,
    const float2 pxy, // Coordinates of the pixel
    const int K,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  const auto v012 = GetSingleFaceVerts(face_verts, face_idx);
  const float3 v0 = thrust::get<0>(v012);
  const float3 v1 = thrust::get<1>(v012);
  const float3 v2 = thrust::get<2>(v012);
  // Only need xy for barycentric coordinates and distance calculations.
  const float2 v0xy = make_float2(v0.x, v0.y);
  const float2 v1xy = make_float2(v1.x, v1.y);
  const float2 v2xy = make_float2(v2.x, v2.y);
  // Perform checks and skip if:
  // 1. the face is behind the camera
  // 2. the face is facing away from the camera
  // 3. the face has very small face area
  // 4. the pixel is outside the face bbox
  const float zmax = FloatMax3(v0.z, v1.z, v2.z);
  const bool outside_bbox = CheckPointOutsideBoundingBox(
      v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox
  const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy);
  // Check if the face is visible to the camera.
  const bool back_face = face_area < 0.0;
  const bool zero_face_area =
      (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon);
  // NOTE: `cull_backfaces && back_face` binds tighter than the surrounding
  // `||`s, i.e. back faces are only rejected when culling is enabled.
  if (zmax < 0 || cull_backfaces && back_face || outside_bbox ||
      zero_face_area) {
    return;
  }
  // Calculate barycentric coords and euclidean dist to triangle.
  const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
  const float3 p_bary = !perspective_correct
      ? p_bary0
      : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z);
  const float3 p_bary_clip =
      !clip_barycentric_coords ? p_bary : BarycentricClipForward(p_bary);
  // Interpolated depth of the pixel on the face plane.
  const float pz =
      p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z;
  if (pz < 0) {
    return; // Face is behind the image plane.
  }
  // Get abs squared distance
  const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy);
  // Use the unclipped bary coordinates to determine if the point is inside the
  // face.
  const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f;
  const float signed_dist = inside ? -dist : dist;
  // Check if pixel is outside blur region
  if (!inside && dist >= blur_radius) {
    return;
  }
  if (q_size < K) {
    // Just insert it.
    q[q_size] = {pz, face_idx, signed_dist, p_bary_clip};
    if (pz > q_max_z) {
      q_max_z = pz;
      q_max_idx = q_size;
    }
    q_size++;
  } else if (pz < q_max_z) {
    // Overwrite the old max, and find the new max.
    q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip};
    q_max_z = pz;
    // Linear re-scan to recover the new farthest entry (K is small).
    for (int i = 0; i < K; i++) {
      if (q[i].z > q_max_z) {
        q_max_z = q[i].z;
        q_max_idx = i;
      }
    }
  }
}
} // namespace
// ****************************************************************************
// * NAIVE RASTERIZATION *
// ****************************************************************************
// Naive rasterization: 1D grid-stride loop with one thread per output
// pixel (N * H * W work items); no shared memory. Each thread scans every
// face of its mesh and keeps the K nearest faces covering the pixel,
// writing them out sorted by increasing depth.
__global__ void RasterizeMeshesNaiveCudaKernel(
    const float* face_verts, // (F, 3, 3) packed face vertices
    const int64_t* mesh_to_face_first_idx, // (N,) first face of each mesh
    const int64_t* num_faces_per_mesh, // (N,)
    const float blur_radius,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces,
    const int N,
    const int H,
    const int W,
    const int K,
    int64_t* face_idxs, // (N, H, W, K) output face indices (-1 = empty)
    float* zbuf, // (N, H, W, K) output depths
    float* pix_dists, // (N, H, W, K) output signed distances
    float* bary) { // (N, H, W, K, 3) output barycentric coords
  // Simple version: One thread per output pixel
  int num_threads = gridDim.x * blockDim.x;
  int tid = blockDim.x * blockIdx.x + threadIdx.x;
  for (int i = tid; i < N * H * W; i += num_threads) {
    // Convert linear index to 3D index
    const int n = i / (H * W); // batch index.
    const int pix_idx = i % (H * W);
    // Reverse ordering of X and Y axes
    const int yi = H - 1 - pix_idx / W;
    const int xi = W - 1 - pix_idx % W;
    // screen coordinates to ndc coordinates of pixel.
    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);
    // For keeping track of the K closest points we want a data structure
    // that (1) gives O(1) access to the closest point for easy comparisons,
    // and (2) allows insertion of new elements. In the CPU version we use
    // std::priority_queue; then (2) is O(log K). We can't use STL
    // containers in CUDA; we could roll our own max heap in an array, but
    // that would likely have a lot of warp divergence so we do something
    // simpler instead: keep the elements in an unsorted array, but keep
    // track of the max value and the index of the max value. Then (1) is
    // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8
    // this should be fast enough for our purposes.
    Pixel q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;
    // Using the batch index of the thread get the start and stop
    // indices for the faces.
    const int64_t face_start_idx = mesh_to_face_first_idx[n];
    const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n];
    // Loop through the faces in the mesh.
    for (int f = face_start_idx; f < face_stop_idx; ++f) {
      // Check if the pixel pxy is inside the face bounding box and if it is,
      // update q, q_size, q_max_z and q_max_idx in place.
      CheckPixelInsideFace(
          face_verts,
          f,
          q_size,
          q_max_z,
          q_max_idx,
          q,
          blur_radius,
          pxy,
          K,
          perspective_correct,
          clip_barycentric_coords,
          cull_backfaces);
    }
    // TODO: make sorting an option as only top k is needed, not sorted values.
    BubbleSort(q, q_size);
    // Flatten (n, yi, xi) into the K-strided output index and emit the
    // q_size nearest faces for this pixel.
    int idx = n * H * W * K + pix_idx * K;
    for (int k = 0; k < q_size; ++k) {
      face_idxs[idx + k] = q[k].idx;
      zbuf[idx + k] = q[k].z;
      pix_dists[idx + k] = q[k].dist;
      bary[(idx + k) * 3 + 0] = q[k].bary.x;
      bary[(idx + k) * 3 + 1] = q[k].bary.y;
      bary[(idx + k) * 3 + 2] = q[k].bary.z;
    }
  }
}
// Host entry point for naive rasterization.
// Validates inputs, allocates the (N, H, W, K) outputs (filled with -1 so
// empty slots are recognisable) and launches RasterizeMeshesNaiveCudaKernel
// on the current stream of the inputs' device.
// Returns (face_idxs, zbuf, bary, pix_dists).
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesNaiveCuda(
    const at::Tensor& face_verts,
    const at::Tensor& mesh_to_faces_packed_first_idx,
    const at::Tensor& num_faces_per_mesh,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int num_closest,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  TORCH_CHECK(
      face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
          face_verts.size(2) == 3,
      "face_verts must have dimensions (num_faces, 3, 3)");
  TORCH_CHECK(
      num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0),
      "num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx");
  // K is bounded by the fixed-size per-thread array in the kernel.
  if (num_closest > kMaxPointsPerPixel) {
    std::stringstream ss;
    ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
    AT_ERROR(ss.str());
  }
  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      mesh_to_faces_packed_first_idx_t{
          mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2},
      num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3};
  at::CheckedFrom c = "RasterizeMeshesNaiveCuda";
  at::checkAllSameGPU(
      c,
      {face_verts_t, mesh_to_faces_packed_first_idx_t, num_faces_per_mesh_t});
  // Set the device for the kernel launch based on the device of the input
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  const int N = num_faces_per_mesh.size(0); // batch size.
  const int H = std::get<0>(image_size);
  const int W = std::get<1>(image_size);
  const int K = num_closest;
  auto long_opts = num_faces_per_mesh.options().dtype(at::kLong);
  auto float_opts = face_verts.options().dtype(at::kFloat);
  at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts);
  at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts);
  // Nothing to rasterize; skip the launch but still surface pending errors.
  if (face_idxs.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
  }
  const size_t blocks = 1024;
  const size_t threads = 64;
  hipLaunchKernelGGL(( RasterizeMeshesNaiveCudaKernel), dim3(blocks), dim3(threads), 0, stream,
      face_verts.contiguous().data_ptr<float>(),
      mesh_to_faces_packed_first_idx.contiguous().data_ptr<int64_t>(),
      num_faces_per_mesh.contiguous().data_ptr<int64_t>(),
      blur_radius,
      perspective_correct,
      clip_barycentric_coords,
      cull_backfaces,
      N,
      H,
      W,
      K,
      face_idxs.data_ptr<int64_t>(),
      zbuf.data_ptr<float>(),
      pix_dists.data_ptr<float>(),
      bary.data_ptr<float>());
  AT_CUDA_CHECK(hipGetLastError());
  return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
// ****************************************************************************
// * BACKWARD PASS *
// ****************************************************************************
// TODO: benchmark parallelizing over faces_verts instead of over pixels.
// Backward pass: 1D grid-stride loop with one thread per pixel
// (N * H * W work items). Each thread accumulates gradients for the K
// faces recorded at its pixel; since faces are shared between pixels,
// vertex gradients are combined with atomicAdd into grad_face_verts.
__global__ void RasterizeMeshesBackwardCudaKernel(
    const float* face_verts, // (F, 3, 3)
    const int64_t* pix_to_face, // (N, H, W, K)
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const int N,
    const int H,
    const int W,
    const int K,
    const float* grad_zbuf, // (N, H, W, K)
    const float* grad_bary, // (N, H, W, K, 3)
    const float* grad_dists, // (N, H, W, K)
    float* grad_face_verts) { // (F, 3, 3)
  // Parallelize over each pixel in images of
  // size H * W, for each image in the batch of size N.
  const int num_threads = gridDim.x * blockDim.x;
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  for (int t_i = tid; t_i < N * H * W; t_i += num_threads) {
    // Convert linear index to 3D index
    const int n = t_i / (H * W); // batch index.
    const int pix_idx = t_i % (H * W);
    // Reverse ordering of X and Y axes.
    const int yi = H - 1 - pix_idx / W;
    const int xi = W - 1 - pix_idx % W;
    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);
    // Loop over all the faces for this pixel.
    for (int k = 0; k < K; k++) {
      // Index into (N, H, W, K, :) grad tensors
      // pixel index + top k index
      int i = n * H * W * K + pix_idx * K + k;
      const int f = pix_to_face[i];
      if (f < 0) {
        continue; // padded face.
      }
      // Get xyz coordinates of the three face vertices.
      const auto v012 = GetSingleFaceVerts(face_verts, f);
      const float3 v0 = thrust::get<0>(v012);
      const float3 v1 = thrust::get<1>(v012);
      const float3 v2 = thrust::get<2>(v012);
      // Only need xy for barycentric coordinate and distance calculations.
      const float2 v0xy = make_float2(v0.x, v0.y);
      const float2 v1xy = make_float2(v1.x, v1.y);
      const float2 v2xy = make_float2(v2.x, v2.y);
      // Get upstream gradients for the face.
      const float grad_dist_upstream = grad_dists[i];
      const float grad_zbuf_upstream = grad_zbuf[i];
      const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0];
      const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1];
      const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2];
      const float3 grad_bary_upstream = make_float3(
          grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2);
      // Recompute the forward barycentric pipeline (raw -> perspective
      // corrected -> clipped) so the chain rule can be applied per stage.
      const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
      const float3 b_pp = !perspective_correct
          ? b_w
          : BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z);
      const float3 b_w_clip =
          !clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp);
      const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f;
      // Forward used signed_dist = inside ? -dist : dist, so the sign flows
      // into the upstream distance gradient here.
      const float sign = inside ? -1.0f : 1.0f;
      auto grad_dist_f = PointTriangleDistanceBackward(
          pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream);
      const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f);
      const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f);
      const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f);
      // Upstream gradient for barycentric coords from zbuf calculation:
      // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2
      // Therefore
      // d_zbuf/d_bary_w0 = z0
      // d_zbuf/d_bary_w1 = z1
      // d_zbuf/d_bary_w2 = z2
      const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z);
      // Total upstream barycentric gradients are the sum of
      // external upstream gradients and contribution from zbuf.
      const float3 grad_bary_f_sum =
          (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip);
      float3 grad_bary0 = grad_bary_f_sum;
      if (clip_barycentric_coords) {
        grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum);
      }
      float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f;
      if (perspective_correct) {
        auto perspective_grads = BarycentricPerspectiveCorrectionBackward(
            b_w, v0.z, v1.z, v2.z, grad_bary0);
        grad_bary0 = thrust::get<0>(perspective_grads);
        dz0_persp = thrust::get<1>(perspective_grads);
        dz1_persp = thrust::get<2>(perspective_grads);
        dz2_persp = thrust::get<3>(perspective_grads);
      }
      auto grad_bary_f =
          BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0);
      const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f);
      const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f);
      const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f);
      // Scatter the per-vertex gradients; faces are shared across pixels,
      // hence the atomics. Layout matches face_verts: 9 floats per face.
      atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x);
      atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y);
      atomicAdd(
          grad_face_verts + f * 9 + 2,
          grad_zbuf_upstream * b_w_clip.x + dz0_persp);
      atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x);
      atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y);
      atomicAdd(
          grad_face_verts + f * 9 + 5,
          grad_zbuf_upstream * b_w_clip.y + dz1_persp);
      atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x);
      atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y);
      atomicAdd(
          grad_face_verts + f * 9 + 8,
          grad_zbuf_upstream * b_w_clip.z + dz2_persp);
    }
  }
}
// Host entry point for the rasterization backward pass.
// Validates device/dtype consistency, allocates a zero-initialised
// (F, 3, 3) gradient tensor and launches RasterizeMeshesBackwardCudaKernel.
at::Tensor RasterizeMeshesBackwardCuda(
    const at::Tensor& face_verts, // (F, 3, 3)
    const at::Tensor& pix_to_face, // (N, H, W, K)
    const at::Tensor& grad_zbuf, // (N, H, W, K)
    const at::Tensor& grad_bary, // (N, H, W, K, 3)
    const at::Tensor& grad_dists, // (N, H, W, K)
    const bool perspective_correct,
    const bool clip_barycentric_coords) {
  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      pix_to_face_t{pix_to_face, "pix_to_face", 2},
      grad_zbuf_t{grad_zbuf, "grad_zbuf", 3},
      grad_bary_t{grad_bary, "grad_bary", 4},
      grad_dists_t{grad_dists, "grad_dists", 5};
  at::CheckedFrom c = "RasterizeMeshesBackwardCuda";
  at::checkAllSameGPU(
      c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t});
  at::checkAllSameType(
      c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t});
  // Set the device for the kernel launch based on the device of the input
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  const int F = face_verts.size(0);
  const int N = pix_to_face.size(0);
  const int H = pix_to_face.size(1);
  const int W = pix_to_face.size(2);
  const int K = pix_to_face.size(3);
  at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options());
  // No faces => nothing to accumulate; skip the launch.
  if (grad_face_verts.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return grad_face_verts;
  }
  const size_t blocks = 1024;
  const size_t threads = 64;
  hipLaunchKernelGGL(( RasterizeMeshesBackwardCudaKernel), dim3(blocks), dim3(threads), 0, stream,
      face_verts.contiguous().data_ptr<float>(),
      pix_to_face.contiguous().data_ptr<int64_t>(),
      perspective_correct,
      clip_barycentric_coords,
      N,
      H,
      W,
      K,
      grad_zbuf.contiguous().data_ptr<float>(),
      grad_bary.contiguous().data_ptr<float>(),
      grad_dists.contiguous().data_ptr<float>(),
      grad_face_verts.data_ptr<float>());
  AT_CUDA_CHECK(hipGetLastError());
  return grad_face_verts;
}
// ****************************************************************************
// * COARSE RASTERIZATION *
// ****************************************************************************
// Coarse (binning) pass: each block iterates over chunks of faces; a
// shared-memory bitmask (one bit per face/bin pair) records which image
// bins a face's blur-expanded bounding box overlaps. Per-bin counts and
// face indices are then flushed to global memory with atomicAdd.
// Dynamic shared memory: num_bins_y * num_bins_x * chunk_size / 8 bytes.
__global__ void RasterizeMeshesCoarseCudaKernel(
    const float* face_verts,
    const int64_t* mesh_to_face_first_idx,
    const int64_t* num_faces_per_mesh,
    const float blur_radius,
    const int N,
    const int F,
    const int H,
    const int W,
    const int bin_size,
    const int chunk_size,
    const int max_faces_per_bin,
    int* faces_per_bin,
    int* bin_faces) {
  extern __shared__ char sbuf[];
  const int M = max_faces_per_bin;
  // Integer divide round up
  const int num_bins_x = 1 + (W - 1) / bin_size;
  const int num_bins_y = 1 + (H - 1) / bin_size;
  // NDC range depends on the ratio of W/H
  // The shorter side from (H, W) is given an NDC range of 2.0 and
  // the other side is scaled by the ratio of H:W.
  const float NDC_x_half_range = NonSquareNdcRange(W, H) / 2.0f;
  const float NDC_y_half_range = NonSquareNdcRange(H, W) / 2.0f;
  // Size of half a pixel in NDC units is the NDC half range
  // divided by the corresponding image dimension
  const float half_pix_x = NDC_x_half_range / W;
  const float half_pix_y = NDC_y_half_range / H;
  // This is a boolean array of shape (num_bins_y, num_bins_x, chunk_size)
  // stored in shared memory that will track whether each point in the chunk
  // falls into each bin of the image.
  BitMask binmask((unsigned int*)sbuf, num_bins_y, num_bins_x, chunk_size);
  // Have each block handle a chunk of faces
  const int chunks_per_batch = 1 + (F - 1) / chunk_size;
  const int num_chunks = N * chunks_per_batch;
  for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) {
    const int batch_idx = chunk / chunks_per_batch; // batch index
    const int chunk_idx = chunk % chunks_per_batch;
    const int face_start_idx = chunk_idx * chunk_size;
    binmask.block_clear();
    const int64_t mesh_face_start_idx = mesh_to_face_first_idx[batch_idx];
    const int64_t mesh_face_stop_idx =
        mesh_face_start_idx + num_faces_per_mesh[batch_idx];
    // Have each thread handle a different face within the chunk
    for (int f = threadIdx.x; f < chunk_size; f += blockDim.x) {
      const int f_idx = face_start_idx + f;
      // Check if face index corresponds to the mesh in the batch given by
      // batch_idx
      if (f_idx >= mesh_face_stop_idx || f_idx < mesh_face_start_idx) {
        continue;
      }
      // Get xyz coordinates of the three face vertices.
      const auto v012 = GetSingleFaceVerts(face_verts, f_idx);
      const float3 v0 = thrust::get<0>(v012);
      const float3 v1 = thrust::get<1>(v012);
      const float3 v2 = thrust::get<2>(v012);
      // Compute screen-space bbox for the triangle expanded by blur.
      float xmin = FloatMin3(v0.x, v1.x, v2.x) - sqrt(blur_radius);
      float ymin = FloatMin3(v0.y, v1.y, v2.y) - sqrt(blur_radius);
      float xmax = FloatMax3(v0.x, v1.x, v2.x) + sqrt(blur_radius);
      float ymax = FloatMax3(v0.y, v1.y, v2.y) + sqrt(blur_radius);
      float zmin = FloatMin3(v0.z, v1.z, v2.z);
      // Faces with at least one vertex behind the camera won't render
      // correctly and should be removed or clipped before calling the
      // rasterizer
      if (zmin < kEpsilon) {
        continue;
      }
      // Brute-force search over all bins; TODO(T54294966) something smarter.
      for (int by = 0; by < num_bins_y; ++by) {
        // Y coordinate of the top and bottom of the bin.
        // PixToNdc gives the location of the center of each pixel, so we
        // need to add/subtract a half pixel to get the true extent of the bin.
        // Reverse ordering of Y axis so that +Y is upwards in the image.
        const float bin_y_min =
            PixToNonSquareNdc(by * bin_size, H, W) - half_pix_y;
        const float bin_y_max =
            PixToNonSquareNdc((by + 1) * bin_size - 1, H, W) + half_pix_y;
        const bool y_overlap = (ymin <= bin_y_max) && (bin_y_min < ymax);
        for (int bx = 0; bx < num_bins_x; ++bx) {
          // X coordinate of the left and right of the bin.
          // Reverse ordering of x axis so that +X is left.
          const float bin_x_max =
              PixToNonSquareNdc((bx + 1) * bin_size - 1, W, H) + half_pix_x;
          const float bin_x_min =
              PixToNonSquareNdc(bx * bin_size, W, H) - half_pix_x;
          const bool x_overlap = (xmin <= bin_x_max) && (bin_x_min < xmax);
          if (y_overlap && x_overlap) {
            binmask.set(by, bx, f);
          }
        }
      }
    }
    // All threads must finish marking bits before the counting phase reads.
    __syncthreads();
    // Now we have processed every face in the current chunk. We need to
    // count the number of faces in each bin so we can write the indices
    // out to global memory. We have each thread handle a different bin.
    for (int byx = threadIdx.x; byx < num_bins_y * num_bins_x;
         byx += blockDim.x) {
      const int by = byx / num_bins_x;
      const int bx = byx % num_bins_x;
      const int count = binmask.count(by, bx);
      const int faces_per_bin_idx =
          batch_idx * num_bins_y * num_bins_x + by * num_bins_x + bx;
      // This atomically increments the (global) number of faces found
      // in the current bin, and gets the previous value of the counter;
      // this effectively allocates space in the bin_faces array for the
      // faces in the current chunk that fall into this bin.
      const int start = atomicAdd(faces_per_bin + faces_per_bin_idx, count);
      // Now loop over the binmask and write the active bits for this bin
      // out to bin_faces.
      int next_idx = batch_idx * num_bins_y * num_bins_x * M +
          by * num_bins_x * M + bx * M + start;
      for (int f = 0; f < chunk_size; ++f) {
        if (binmask.get(by, bx, f)) {
          // TODO(T54296346) find the correct method for handling errors in
          // CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin.
          // Either decrease bin size or increase max_faces_per_bin
          bin_faces[next_idx] = face_start_idx + f;
          next_idx++;
        }
      }
    }
    // Barrier before the next chunk iteration clears the shared bitmask.
    __syncthreads();
  }
}
// Host entry point for the coarse (binning) pass.
// Validates inputs, sizes the bin grid, allocates the per-bin counters and
// the (N, BH, BW, M) bin_faces tensor (filled with -1), and launches
// RasterizeMeshesCoarseCudaKernel with the required dynamic shared memory.
at::Tensor RasterizeMeshesCoarseCuda(
    const at::Tensor& face_verts,
    const at::Tensor& mesh_to_face_first_idx,
    const at::Tensor& num_faces_per_mesh,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int bin_size,
    const int max_faces_per_bin) {
  TORCH_CHECK(
      face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
          face_verts.size(2) == 3,
      "face_verts must have dimensions (num_faces, 3, 3)");
  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      mesh_to_face_first_idx_t{
          mesh_to_face_first_idx, "mesh_to_face_first_idx", 2},
      num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3};
  at::CheckedFrom c = "RasterizeMeshesCoarseCuda";
  at::checkAllSameGPU(
      c, {face_verts_t, mesh_to_face_first_idx_t, num_faces_per_mesh_t});
  // Set the device for the kernel launch based on the device of the input
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  const int H = std::get<0>(image_size);
  const int W = std::get<1>(image_size);
  const int F = face_verts.size(0);
  const int N = num_faces_per_mesh.size(0);
  const int M = max_faces_per_bin;
  // Integer divide round up.
  const int num_bins_y = 1 + (H - 1) / bin_size;
  const int num_bins_x = 1 + (W - 1) / bin_size;
  // The shared-memory bitmask bounds the bin grid size.
  if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) {
    std::stringstream ss;
    ss << "In Coarse Rasterizer got num_bins_y: " << num_bins_y
       << ", num_bins_x: " << num_bins_x << ", "
       << "; that's too many!";
    AT_ERROR(ss.str());
  }
  auto opts = num_faces_per_mesh.options().dtype(at::kInt);
  at::Tensor faces_per_bin = at::zeros({N, num_bins_y, num_bins_x}, opts);
  at::Tensor bin_faces = at::full({N, num_bins_y, num_bins_x, M}, -1, opts);
  // Nothing to bin; skip the launch but still surface pending errors.
  if (bin_faces.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return bin_faces;
  }
  const int chunk_size = 512;
  // One bit per (bin, face-in-chunk) pair.
  const size_t shared_size = num_bins_y * num_bins_x * chunk_size / 8;
  const size_t blocks = 64;
  const size_t threads = 512;
  hipLaunchKernelGGL(( RasterizeMeshesCoarseCudaKernel), dim3(blocks), dim3(threads), shared_size, stream,
      face_verts.contiguous().data_ptr<float>(),
      mesh_to_face_first_idx.contiguous().data_ptr<int64_t>(),
      num_faces_per_mesh.contiguous().data_ptr<int64_t>(),
      blur_radius,
      N,
      F,
      H,
      W,
      bin_size,
      chunk_size,
      M,
      faces_per_bin.data_ptr<int32_t>(),
      bin_faces.data_ptr<int32_t>());
  AT_CUDA_CHECK(hipGetLastError());
  return bin_faces;
}
// ****************************************************************************
// * FINE RASTERIZATION *
// ****************************************************************************
// Fine rasterization kernel: one thread per candidate output pixel, with a
// grid-stride loop over all (batch, bin, within-bin pixel) positions. Each
// pixel tests only the faces the coarse pass assigned to its bin, keeps the
// K closest hits, sorts them by depth and writes them to the output buffers.
__global__ void RasterizeMeshesFineCudaKernel(
    const float* face_verts, // (F, 3, 3)
    const int32_t* bin_faces, // (N, BH, BW, T)
    const float blur_radius,
    const int bin_size,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces,
    const int N,
    const int BH, // number of bins along y
    const int BW, // number of bins along x
    const int M, // max faces per bin
    const int H,
    const int W,
    const int K, // faces recorded per pixel
    int64_t* face_idxs, // (N, H, W, K)
    float* zbuf, // (N, H, W, K)
    float* pix_dists, // (N, H, W, K)
    float* bary // (N, H, W, K, 3)
) {
  // This can be more than H * W if H or W are not divisible by bin_size.
  int num_pixels = N * BH * BW * bin_size * bin_size;
  int num_threads = gridDim.x * blockDim.x;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  for (int pid = tid; pid < num_pixels; pid += num_threads) {
    // Convert linear index into bin and pixel indices. We make the within
    // block pixel ids move the fastest, so that adjacent threads will fall
    // into the same bin; this should give them coalesced memory reads when
    // they read from faces and bin_faces.
    int i = pid;
    const int n = i / (BH * BW * bin_size * bin_size);
    i %= BH * BW * bin_size * bin_size;
    // bin index y
    const int by = i / (BW * bin_size * bin_size);
    i %= BW * bin_size * bin_size;
    // bin index x
    const int bx = i / (bin_size * bin_size);
    // pixel within the bin
    i %= bin_size * bin_size;
    // Pixel x, y indices
    const int yi = i / bin_size + by * bin_size;
    const int xi = i % bin_size + bx * bin_size;
    // Skip the padding pixels of the last row/column of bins when H or W is
    // not a multiple of bin_size.
    if (yi >= H || xi >= W)
      continue;
    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);
    // This part looks like the naive rasterization kernel, except we use
    // bin_faces to only look at a subset of faces already known to fall
    // in this bin. TODO abstract out this logic into some data structure
    // that is shared by both kernels?
    Pixel q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;
    for (int m = 0; m < M; m++) {
      const int f = bin_faces[n * BH * BW * M + by * BW * M + bx * M + m];
      if (f < 0) {
        continue; // bin_faces uses -1 as a sentinel value.
      }
      // Check if the pixel pxy is inside the face bounding box and if it is,
      // update q, q_size, q_max_z and q_max_idx in place.
      CheckPixelInsideFace(
          face_verts,
          f,
          q_size,
          q_max_z,
          q_max_idx,
          q,
          blur_radius,
          pxy,
          K,
          perspective_correct,
          clip_barycentric_coords,
          cull_backfaces);
    }
    // Now we've looked at all the faces for this bin, so we can write
    // output for the current pixel.
    // TODO: make sorting an option as only top k is needed, not sorted values.
    BubbleSort(q, q_size);
    // Reverse ordering of the X and Y axis so that
    // in the image +Y is pointing up and +X is pointing left.
    const int yidx = H - 1 - yi;
    const int xidx = W - 1 - xi;
    const int pix_idx = n * H * W * K + yidx * W * K + xidx * K;
    // Slots beyond q_size keep the fill value written by the host wrapper.
    for (int k = 0; k < q_size; k++) {
      face_idxs[pix_idx + k] = q[k].idx;
      zbuf[pix_idx + k] = q[k].z;
      pix_dists[pix_idx + k] = q[k].dist;
      bary[(pix_idx + k) * 3 + 0] = q[k].bary.x;
      bary[(pix_idx + k) * 3 + 1] = q[k].bary.y;
      bary[(pix_idx + k) * 3 + 2] = q[k].bary.z;
    }
  }
}
// Host wrapper for the fine rasterization kernel (HIP build).
//
// Args:
//   face_verts: (F, 3, 3) float tensor of packed face vertices in NDC.
//   bin_faces: (N, BH, BW, M) int32 tensor from the coarse pass; entries are
//     face indices padded with -1.
//   image_size: (H, W) of the output image.
//   blur_radius: NDC radius within which faces still influence a pixel.
//   bin_size: side length (in pixels) of each coarse bin.
//   faces_per_pixel: K, number of closest faces recorded per pixel; must be
//     <= kMaxPointsPerPixel.
//   perspective_correct / clip_barycentric_coords / cull_backfaces: flags
//     forwarded unchanged to the kernel.
//
// Returns (face_idxs, zbuf, bary, pix_dists) with shapes (N, H, W, K),
// (N, H, W, K), (N, H, W, K, 3), (N, H, W, K); slots not covered by a face
// keep the fill value -1.
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesFineCuda(
    const at::Tensor& face_verts,
    const at::Tensor& bin_faces,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int bin_size,
    const int faces_per_pixel,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  TORCH_CHECK(
      face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
          face_verts.size(2) == 3,
      "face_verts must have dimensions (num_faces, 3, 3)");
  TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions");
  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      bin_faces_t{bin_faces, "bin_faces", 2};
  at::CheckedFrom c = "RasterizeMeshesFineCuda";
  at::checkAllSameGPU(c, {face_verts_t, bin_faces_t});
  // Set the device for the kernel launch based on the device of the input
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  // bin_faces shape (N, BH, BW, M)
  const int N = bin_faces.size(0);
  const int BH = bin_faces.size(1);
  const int BW = bin_faces.size(2);
  const int M = bin_faces.size(3);
  const int K = faces_per_pixel;
  const int H = std::get<0>(image_size);
  const int W = std::get<1>(image_size);
  if (K > kMaxPointsPerPixel) {
    // Report the actual compile-time limit instead of a stale hard-coded
    // number (the message previously claimed "<= 150"), matching the style
    // used by RasterizeMeshesNaiveCuda.
    std::stringstream ss;
    ss << "Must have num_closest <= " << kMaxPointsPerPixel;
    AT_ERROR(ss.str());
  }
  auto long_opts = bin_faces.options().dtype(at::kLong);
  auto float_opts = face_verts.options().dtype(at::kFloat);
  at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts);
  at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts);
  if (face_idxs.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
  }
  const size_t blocks = 1024;
  const size_t threads = 64;
  hipLaunchKernelGGL(( RasterizeMeshesFineCudaKernel), dim3(blocks), dim3(threads), 0, stream,
      face_verts.contiguous().data_ptr<float>(),
      bin_faces.contiguous().data_ptr<int32_t>(),
      blur_radius,
      bin_size,
      perspective_correct,
      clip_barycentric_coords,
      cull_backfaces,
      N,
      BH,
      BW,
      M,
      H,
      W,
      K,
      face_idxs.data_ptr<int64_t>(),
      zbuf.data_ptr<float>(),
      pix_dists.data_ptr<float>(),
      bary.data_ptr<float>());
  // Surface launch errors here, like every other launcher in this file;
  // without this a failed launch would only be reported much later.
  AT_CUDA_CHECK(hipGetLastError());
  return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
| 934287003e2c26494961f1cb7d9faf107d5b4383.cu | // Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <float.h>
#include <math.h>
#include <thrust/tuple.h>
#include <cstdio>
#include <tuple>
#include "rasterize_points/bitmask.cuh"
#include "rasterize_points/rasterization_utils.cuh"
#include "utils/float_math.cuh"
#include "utils/geometry_utils.cuh"
namespace {
// A structure for holding details about one candidate face covering a pixel.
// An array of these (at most kMaxPointsPerPixel entries) tracks the K
// closest faces per pixel; see CheckPixelInsideFace for how it is updated.
struct Pixel {
  float z; // depth of the face at this pixel, interpolated barycentrically
  int64_t idx; // index of the face in the packed face_verts tensor
  float dist; // squared NDC distance to the face; negative when inside it
  float3 bary; // (possibly clipped) barycentric coords of the pixel
};
// Order Pixels by depth: "smaller" means closer to the camera, so sorting
// ascending yields nearest-first ordering.
__device__ bool operator<(const Pixel& a, const Pixel& b) {
  const bool a_is_closer = a.z < b.z;
  return a_is_closer;
}
// Minimum of three floats.
__device__ float FloatMin3(const float p1, const float p2, const float p3) {
  return fminf(fminf(p1, p2), p3);
}
// Maximum of three floats.
__device__ float FloatMax3(const float p1, const float p2, const float p3) {
  return fmaxf(fmaxf(p1, p2), p3);
}
// Fetch the xyz coordinates of the three vertices of face face_idx from the
// packed (F, 3, 3) face_verts buffer.
__device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts(
    const float* face_verts,
    int face_idx) {
  // Each face occupies 9 consecutive floats: v0.xyz, v1.xyz, v2.xyz.
  const float* v = face_verts + face_idx * 9;
  const float3 v0xyz = make_float3(v[0], v[1], v[2]);
  const float3 v1xyz = make_float3(v[3], v[4], v[5]);
  const float3 v2xyz = make_float3(v[6], v[7], v[8]);
  return thrust::make_tuple(v0xyz, v1xyz, v2xyz);
}
// Axis-aligned bounds of the triangle (v0, v1, v2): returns per-axis
// (min, max) pairs for x, y and z, in that order.
__device__ thrust::tuple<float2, float2, float2>
GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) {
  const float2 xlims =
      make_float2(FloatMin3(v0.x, v1.x, v2.x), FloatMax3(v0.x, v1.x, v2.x));
  const float2 ylims =
      make_float2(FloatMin3(v0.y, v1.y, v2.y), FloatMax3(v0.y, v1.y, v2.y));
  const float2 zlims =
      make_float2(FloatMin3(v0.z, v1.z, v2.z), FloatMax3(v0.z, v1.z, v2.z));
  return thrust::make_tuple(xlims, ylims, zlims);
}
// Return true if pxy lies outside the blur-expanded 2D bounding box of the
// triangle (v0, v1, v2), or if the triangle has any vertex with z < kEpsilon.
// Faces with at least one vertex behind the camera won't render correctly
// and should be removed or clipped before calling the rasterizer.
__device__ bool CheckPointOutsideBoundingBox(
    float3 v0,
    float3 v1,
    float3 v2,
    float blur_radius,
    float2 pxy) {
  const auto bbox = GetFaceBoundingBox(v0, v1, v2);
  const float2 xlims = thrust::get<0>(bbox);
  const float2 ylims = thrust::get<1>(bbox);
  const float2 zlims = thrust::get<2>(bbox);
  // Grow the xy box by the blur radius on every side before testing.
  const bool x_outside =
      pxy.x > xlims.y + blur_radius || pxy.x < xlims.x - blur_radius;
  const bool y_outside =
      pxy.y > ylims.y + blur_radius || pxy.y < ylims.x - blur_radius;
  const bool z_invalid = zlims.x < kEpsilon;
  return x_outside || y_outside || z_invalid;
}
// This function checks if a pixel given by xy location pxy lies within the
// face with index face_idx in face_verts. One of the inputs is a list (q)
// which contains Pixel structs with the indices of the faces which intersect
// with this pixel sorted by closest z distance. If the point pxy lies in the
// face, the list (q) is updated and re-orderered in place. In addition
// the auxiliary variables q_size, q_max_z and q_max_idx are also modified.
// This code is shared between RasterizeMeshesNaiveCudaKernel and
// RasterizeMeshesFineCudaKernel.
template <typename FaceQ>
__device__ void CheckPixelInsideFace(
    const float* face_verts, // (F, 3, 3)
    const int face_idx,
    int& q_size, // in/out: number of valid entries in q
    float& q_max_z, // in/out: largest z currently stored in q
    int& q_max_idx, // in/out: index in q of the entry holding q_max_z
    FaceQ& q, // in/out: unsorted array of the closest faces so far
    const float blur_radius,
    const float2 pxy, // Coordinates of the pixel
    const int K, // capacity of q actually used
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  const auto v012 = GetSingleFaceVerts(face_verts, face_idx);
  const float3 v0 = thrust::get<0>(v012);
  const float3 v1 = thrust::get<1>(v012);
  const float3 v2 = thrust::get<2>(v012);
  // Only need xy for barycentric coordinates and distance calculations.
  const float2 v0xy = make_float2(v0.x, v0.y);
  const float2 v1xy = make_float2(v1.x, v1.y);
  const float2 v2xy = make_float2(v2.x, v2.y);
  // Perform checks and skip if:
  // 1. the face is behind the camera
  // 2. the face is facing away from the camera
  // 3. the face has very small face area
  // 4. the pixel is outside the face bbox
  const float zmax = FloatMax3(v0.z, v1.z, v2.z);
  const bool outside_bbox = CheckPointOutsideBoundingBox(
      v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox
  const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy);
  // Check if the face is visible to the camera.
  const bool back_face = face_area < 0.0;
  const bool zero_face_area =
      (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon);
  // Note: && binds tighter than ||, so back faces are only skipped when
  // cull_backfaces is set.
  if (zmax < 0 || cull_backfaces && back_face || outside_bbox ||
      zero_face_area) {
    return;
  }
  // Calculate barycentric coords and euclidean dist to triangle.
  const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
  const float3 p_bary = !perspective_correct
      ? p_bary0
      : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z);
  const float3 p_bary_clip =
      !clip_barycentric_coords ? p_bary : BarycentricClipForward(p_bary);
  // Interpolated depth of the face at this pixel.
  const float pz =
      p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z;
  if (pz < 0) {
    return; // Face is behind the image plane.
  }
  // Get abs squared distance
  const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy);
  // Use the unclipped bary coordinates to determine if the point is inside the
  // face.
  const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f;
  const float signed_dist = inside ? -dist : dist;
  // Check if pixel is outside blur region
  if (!inside && dist >= blur_radius) {
    return;
  }
  // Insert into the unsorted K-closest buffer, tracking the current max so
  // the farthest entry can be evicted in O(K) when the buffer is full.
  if (q_size < K) {
    // Just insert it.
    q[q_size] = {pz, face_idx, signed_dist, p_bary_clip};
    if (pz > q_max_z) {
      q_max_z = pz;
      q_max_idx = q_size;
    }
    q_size++;
  } else if (pz < q_max_z) {
    // Overwrite the old max, and find the new max.
    q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip};
    q_max_z = pz;
    for (int i = 0; i < K; i++) {
      if (q[i].z > q_max_z) {
        q_max_z = q[i].z;
        q_max_idx = i;
      }
    }
  }
}
} // namespace
// ****************************************************************************
// * NAIVE RASTERIZATION *
// ****************************************************************************
// Naive rasterization kernel: one thread per output pixel, grid-striding
// over all N * H * W pixels. Each thread brute-force tests every face of its
// mesh, keeps the K closest hits, sorts them by depth, and writes them out.
__global__ void RasterizeMeshesNaiveCudaKernel(
    const float* face_verts, // (F, 3, 3) packed face vertices
    const int64_t* mesh_to_face_first_idx, // (N,) first face index per mesh
    const int64_t* num_faces_per_mesh, // (N,) face count per mesh
    const float blur_radius,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces,
    const int N,
    const int H,
    const int W,
    const int K,
    int64_t* face_idxs, // (N, H, W, K)
    float* zbuf, // (N, H, W, K)
    float* pix_dists, // (N, H, W, K)
    float* bary) { // (N, H, W, K, 3)
  // Simple version: One thread per output pixel
  int num_threads = gridDim.x * blockDim.x;
  int tid = blockDim.x * blockIdx.x + threadIdx.x;
  for (int i = tid; i < N * H * W; i += num_threads) {
    // Convert linear index to 3D index
    const int n = i / (H * W); // batch index.
    const int pix_idx = i % (H * W);
    // Reverse ordering of X and Y axes
    const int yi = H - 1 - pix_idx / W;
    const int xi = W - 1 - pix_idx % W;
    // screen coordinates to ndc coordinates of pixel.
    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);
    // For keeping track of the K closest points we want a data structure
    // that (1) gives O(1) access to the closest point for easy comparisons,
    // and (2) allows insertion of new elements. In the CPU version we use
    // std::priority_queue; then (2) is O(log K). We can't use STL
    // containers in CUDA; we could roll our own max heap in an array, but
    // that would likely have a lot of warp divergence so we do something
    // simpler instead: keep the elements in an unsorted array, but keep
    // track of the max value and the index of the max value. Then (1) is
    // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8
    // this should be fast enough for our purposes.
    Pixel q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;
    // Using the batch index of the thread get the start and stop
    // indices for the faces.
    const int64_t face_start_idx = mesh_to_face_first_idx[n];
    const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n];
    // Loop through the faces in the mesh.
    for (int f = face_start_idx; f < face_stop_idx; ++f) {
      // Check if the pixel pxy is inside the face bounding box and if it is,
      // update q, q_size, q_max_z and q_max_idx in place.
      CheckPixelInsideFace(
          face_verts,
          f,
          q_size,
          q_max_z,
          q_max_idx,
          q,
          blur_radius,
          pxy,
          K,
          perspective_correct,
          clip_barycentric_coords,
          cull_backfaces);
    }
    // TODO: make sorting an option as only top k is needed, not sorted values.
    BubbleSort(q, q_size);
    int idx = n * H * W * K + pix_idx * K;
    // Slots beyond q_size keep the fill value written by the host wrapper.
    for (int k = 0; k < q_size; ++k) {
      face_idxs[idx + k] = q[k].idx;
      zbuf[idx + k] = q[k].z;
      pix_dists[idx + k] = q[k].dist;
      bary[(idx + k) * 3 + 0] = q[k].bary.x;
      bary[(idx + k) * 3 + 1] = q[k].bary.y;
      bary[(idx + k) * 3 + 2] = q[k].bary.z;
    }
  }
}
// Host wrapper for RasterizeMeshesNaiveCudaKernel: validates inputs,
// allocates the four (N, H, W, K[, 3]) output tensors filled with -1, and
// launches the kernel on the current stream of the inputs' device.
// Returns (face_idxs, zbuf, bary, pix_dists).
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesNaiveCuda(
    const at::Tensor& face_verts,
    const at::Tensor& mesh_to_faces_packed_first_idx,
    const at::Tensor& num_faces_per_mesh,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int num_closest,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  // Shape validation.
  TORCH_CHECK(
      face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
          face_verts.size(2) == 3,
      "face_verts must have dimensions (num_faces, 3, 3)");
  TORCH_CHECK(
      num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0),
      "num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx");
  // The per-pixel buffer in the kernel has a fixed compile-time capacity.
  if (num_closest > kMaxPointsPerPixel) {
    std::stringstream ss;
    ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
    AT_ERROR(ss.str());
  }
  // Check inputs are on the same device.
  at::TensorArg face_verts_t{face_verts, "face_verts", 1};
  at::TensorArg mesh_to_faces_packed_first_idx_t{
      mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2};
  at::TensorArg num_faces_per_mesh_t{
      num_faces_per_mesh, "num_faces_per_mesh", 3};
  at::CheckedFrom c = "RasterizeMeshesNaiveCuda";
  at::checkAllSameGPU(
      c,
      {face_verts_t, mesh_to_faces_packed_first_idx_t, num_faces_per_mesh_t});
  // Run on the device / stream associated with the input tensors.
  at::cuda::CUDAGuard device_guard(face_verts.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  const int N = num_faces_per_mesh.size(0); // batch size.
  const int H = std::get<0>(image_size);
  const int W = std::get<1>(image_size);
  const int K = num_closest;
  auto opts_long = num_faces_per_mesh.options().dtype(at::kLong);
  auto opts_float = face_verts.options().dtype(at::kFloat);
  // Outputs are pre-filled with -1; the kernel only overwrites covered slots.
  at::Tensor face_idxs = at::full({N, H, W, K}, -1, opts_long);
  at::Tensor zbuf = at::full({N, H, W, K}, -1, opts_float);
  at::Tensor pix_dists = at::full({N, H, W, K}, -1, opts_float);
  at::Tensor bary = at::full({N, H, W, K, 3}, -1, opts_float);
  // Nothing to rasterize; still surface any pending CUDA error.
  if (face_idxs.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
  }
  const size_t blocks = 1024;
  const size_t threads = 64;
  RasterizeMeshesNaiveCudaKernel<<<blocks, threads, 0, stream>>>(
      face_verts.contiguous().data_ptr<float>(),
      mesh_to_faces_packed_first_idx.contiguous().data_ptr<int64_t>(),
      num_faces_per_mesh.contiguous().data_ptr<int64_t>(),
      blur_radius,
      perspective_correct,
      clip_barycentric_coords,
      cull_backfaces,
      N,
      H,
      W,
      K,
      face_idxs.data_ptr<int64_t>(),
      zbuf.data_ptr<float>(),
      pix_dists.data_ptr<float>(),
      bary.data_ptr<float>());
  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
// ****************************************************************************
// * BACKWARD PASS *
// ****************************************************************************
// TODO: benchmark parallelizing over faces_verts instead of over pixels.
// Backward kernel: one thread per pixel, grid-striding over N * H * W
// pixels. For each of the K faces recorded at a pixel, chains the upstream
// gradients (w.r.t. zbuf, barycentrics and distances) back to the (x, y, z)
// coordinates of the three face vertices and accumulates them into
// grad_face_verts with atomicAdd (many pixels may touch the same face).
__global__ void RasterizeMeshesBackwardCudaKernel(
    const float* face_verts, // (F, 3, 3)
    const int64_t* pix_to_face, // (N, H, W, K)
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const int N,
    const int H,
    const int W,
    const int K,
    const float* grad_zbuf, // (N, H, W, K)
    const float* grad_bary, // (N, H, W, K, 3)
    const float* grad_dists, // (N, H, W, K)
    float* grad_face_verts) { // (F, 3, 3)
  // Parallelize over each pixel in images of
  // size H * W, for each image in the batch of size N.
  const int num_threads = gridDim.x * blockDim.x;
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  for (int t_i = tid; t_i < N * H * W; t_i += num_threads) {
    // Convert linear index to 3D index
    const int n = t_i / (H * W); // batch index.
    const int pix_idx = t_i % (H * W);
    // Reverse ordering of X and Y axes.
    const int yi = H - 1 - pix_idx / W;
    const int xi = W - 1 - pix_idx % W;
    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);
    // Loop over all the faces for this pixel.
    for (int k = 0; k < K; k++) {
      // Index into (N, H, W, K, :) grad tensors
      // pixel index + top k index
      int i = n * H * W * K + pix_idx * K + k;
      const int f = pix_to_face[i];
      if (f < 0) {
        continue; // padded face.
      }
      // Get xyz coordinates of the three face vertices.
      const auto v012 = GetSingleFaceVerts(face_verts, f);
      const float3 v0 = thrust::get<0>(v012);
      const float3 v1 = thrust::get<1>(v012);
      const float3 v2 = thrust::get<2>(v012);
      // Only need xy for barycentric coordinate and distance calculations.
      const float2 v0xy = make_float2(v0.x, v0.y);
      const float2 v1xy = make_float2(v1.x, v1.y);
      const float2 v2xy = make_float2(v2.x, v2.y);
      // Get upstream gradients for the face.
      const float grad_dist_upstream = grad_dists[i];
      const float grad_zbuf_upstream = grad_zbuf[i];
      const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0];
      const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1];
      const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2];
      const float3 grad_bary_upstream = make_float3(
          grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2);
      // Recompute the forward-pass barycentric quantities needed below.
      const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
      const float3 b_pp = !perspective_correct
          ? b_w
          : BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z);
      const float3 b_w_clip =
          !clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp);
      const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f;
      // The forward pass stored the distance with a negative sign for pixels
      // inside the face (see CheckPixelInsideFace), so flip the gradient.
      const float sign = inside ? -1.0f : 1.0f;
      auto grad_dist_f = PointTriangleDistanceBackward(
          pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream);
      const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f);
      const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f);
      const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f);
      // Upstream gradient for barycentric coords from zbuf calculation:
      // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2
      // Therefore
      // d_zbuf/d_bary_w0 = z0
      // d_zbuf/d_bary_w1 = z1
      // d_zbuf/d_bary_w2 = z2
      const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z);
      // Total upstream barycentric gradients are the sum of
      // external upstream gradients and contribution from zbuf.
      const float3 grad_bary_f_sum =
          (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip);
      float3 grad_bary0 = grad_bary_f_sum;
      if (clip_barycentric_coords) {
        grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum);
      }
      float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f;
      if (perspective_correct) {
        auto perspective_grads = BarycentricPerspectiveCorrectionBackward(
            b_w, v0.z, v1.z, v2.z, grad_bary0);
        grad_bary0 = thrust::get<0>(perspective_grads);
        dz0_persp = thrust::get<1>(perspective_grads);
        dz1_persp = thrust::get<2>(perspective_grads);
        dz2_persp = thrust::get<3>(perspective_grads);
      }
      auto grad_bary_f =
          BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0);
      const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f);
      const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f);
      const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f);
      // Accumulate (x, y) gradients from the barycentric and distance paths,
      // and z gradients from the zbuf (plus perspective-correction) path.
      // atomicAdd is required since many pixels can map to the same face.
      atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x);
      atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y);
      atomicAdd(
          grad_face_verts + f * 9 + 2,
          grad_zbuf_upstream * b_w_clip.x + dz0_persp);
      atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x);
      atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y);
      atomicAdd(
          grad_face_verts + f * 9 + 5,
          grad_zbuf_upstream * b_w_clip.y + dz1_persp);
      atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x);
      atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y);
      atomicAdd(
          grad_face_verts + f * 9 + 8,
          grad_zbuf_upstream * b_w_clip.z + dz2_persp);
    }
  }
}
// Host wrapper for RasterizeMeshesBackwardCudaKernel: validates device and
// dtype consistency, allocates the zero-initialized (F, 3, 3) gradient
// buffer, and launches the kernel on the current stream of the input device.
at::Tensor RasterizeMeshesBackwardCuda(
    const at::Tensor& face_verts, // (F, 3, 3)
    const at::Tensor& pix_to_face, // (N, H, W, K)
    const at::Tensor& grad_zbuf, // (N, H, W, K)
    const at::Tensor& grad_bary, // (N, H, W, K, 3)
    const at::Tensor& grad_dists, // (N, H, W, K)
    const bool perspective_correct,
    const bool clip_barycentric_coords) {
  // All inputs must live on the same GPU; float inputs must share a dtype.
  at::TensorArg face_verts_t{face_verts, "face_verts", 1};
  at::TensorArg pix_to_face_t{pix_to_face, "pix_to_face", 2};
  at::TensorArg grad_zbuf_t{grad_zbuf, "grad_zbuf", 3};
  at::TensorArg grad_bary_t{grad_bary, "grad_bary", 4};
  at::TensorArg grad_dists_t{grad_dists, "grad_dists", 5};
  at::CheckedFrom c = "RasterizeMeshesBackwardCuda";
  at::checkAllSameGPU(
      c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t});
  at::checkAllSameType(
      c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t});
  // Run on the device / stream associated with the input tensors.
  at::cuda::CUDAGuard device_guard(face_verts.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  const int F = face_verts.size(0);
  const int N = pix_to_face.size(0);
  const int H = pix_to_face.size(1);
  const int W = pix_to_face.size(2);
  const int K = pix_to_face.size(3);
  // Gradients are accumulated with atomicAdd, so start from zeros.
  at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options());
  // Nothing to do for empty meshes; still surface any pending CUDA error.
  if (grad_face_verts.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return grad_face_verts;
  }
  const size_t blocks = 1024;
  const size_t threads = 64;
  RasterizeMeshesBackwardCudaKernel<<<blocks, threads, 0, stream>>>(
      face_verts.contiguous().data_ptr<float>(),
      pix_to_face.contiguous().data_ptr<int64_t>(),
      perspective_correct,
      clip_barycentric_coords,
      N,
      H,
      W,
      K,
      grad_zbuf.contiguous().data_ptr<float>(),
      grad_bary.contiguous().data_ptr<float>(),
      grad_dists.contiguous().data_ptr<float>(),
      grad_face_verts.data_ptr<float>());
  AT_CUDA_CHECK(cudaGetLastError());
  return grad_face_verts;
}
// ****************************************************************************
// * COARSE RASTERIZATION *
// ****************************************************************************
// Coarse rasterization kernel. Each block processes chunks of up to
// chunk_size faces: threads cooperatively mark, in a shared-memory bitmask
// of shape (num_bins_y, num_bins_x, chunk_size), which faces overlap which
// image bin, then count the bits per bin and append the face indices to the
// global bin_faces output. faces_per_bin holds global per-bin counters used
// with atomicAdd to allocate output slots across chunks/blocks.
__global__ void RasterizeMeshesCoarseCudaKernel(
    const float* face_verts,
    const int64_t* mesh_to_face_first_idx,
    const int64_t* num_faces_per_mesh,
    const float blur_radius,
    const int N,
    const int F,
    const int H,
    const int W,
    const int bin_size,
    const int chunk_size,
    const int max_faces_per_bin,
    int* faces_per_bin,
    int* bin_faces) {
  // Dynamic shared memory backing the per-chunk bitmask.
  extern __shared__ char sbuf[];
  const int M = max_faces_per_bin;
  // Integer divide round up
  const int num_bins_x = 1 + (W - 1) / bin_size;
  const int num_bins_y = 1 + (H - 1) / bin_size;
  // NDC range depends on the ratio of W/H
  // The shorter side from (H, W) is given an NDC range of 2.0 and
  // the other side is scaled by the ratio of H:W.
  const float NDC_x_half_range = NonSquareNdcRange(W, H) / 2.0f;
  const float NDC_y_half_range = NonSquareNdcRange(H, W) / 2.0f;
  // Size of half a pixel in NDC units is the NDC half range
  // divided by the corresponding image dimension
  const float half_pix_x = NDC_x_half_range / W;
  const float half_pix_y = NDC_y_half_range / H;
  // This is a boolean array of shape (num_bins_y, num_bins_x, chunk_size)
  // stored in shared memory that will track whether each point in the chunk
  // falls into each bin of the image.
  BitMask binmask((unsigned int*)sbuf, num_bins_y, num_bins_x, chunk_size);
  // Have each block handle a chunk of faces
  const int chunks_per_batch = 1 + (F - 1) / chunk_size;
  const int num_chunks = N * chunks_per_batch;
  for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) {
    const int batch_idx = chunk / chunks_per_batch; // batch index
    const int chunk_idx = chunk % chunks_per_batch;
    const int face_start_idx = chunk_idx * chunk_size;
    binmask.block_clear();
    const int64_t mesh_face_start_idx = mesh_to_face_first_idx[batch_idx];
    const int64_t mesh_face_stop_idx =
        mesh_face_start_idx + num_faces_per_mesh[batch_idx];
    // Have each thread handle a different face within the chunk
    for (int f = threadIdx.x; f < chunk_size; f += blockDim.x) {
      const int f_idx = face_start_idx + f;
      // Check if face index corresponds to the mesh in the batch given by
      // batch_idx
      if (f_idx >= mesh_face_stop_idx || f_idx < mesh_face_start_idx) {
        continue;
      }
      // Get xyz coordinates of the three face vertices.
      const auto v012 = GetSingleFaceVerts(face_verts, f_idx);
      const float3 v0 = thrust::get<0>(v012);
      const float3 v1 = thrust::get<1>(v012);
      const float3 v2 = thrust::get<2>(v012);
      // Compute screen-space bbox for the triangle expanded by blur.
      float xmin = FloatMin3(v0.x, v1.x, v2.x) - sqrt(blur_radius);
      float ymin = FloatMin3(v0.y, v1.y, v2.y) - sqrt(blur_radius);
      float xmax = FloatMax3(v0.x, v1.x, v2.x) + sqrt(blur_radius);
      float ymax = FloatMax3(v0.y, v1.y, v2.y) + sqrt(blur_radius);
      float zmin = FloatMin3(v0.z, v1.z, v2.z);
      // Faces with at least one vertex behind the camera won't render
      // correctly and should be removed or clipped before calling the
      // rasterizer
      if (zmin < kEpsilon) {
        continue;
      }
      // Brute-force search over all bins; TODO(T54294966) something smarter.
      for (int by = 0; by < num_bins_y; ++by) {
        // Y coordinate of the top and bottom of the bin.
        // PixToNdc gives the location of the center of each pixel, so we
        // need to add/subtract a half pixel to get the true extent of the bin.
        // Reverse ordering of Y axis so that +Y is upwards in the image.
        const float bin_y_min =
            PixToNonSquareNdc(by * bin_size, H, W) - half_pix_y;
        const float bin_y_max =
            PixToNonSquareNdc((by + 1) * bin_size - 1, H, W) + half_pix_y;
        const bool y_overlap = (ymin <= bin_y_max) && (bin_y_min < ymax);
        for (int bx = 0; bx < num_bins_x; ++bx) {
          // X coordinate of the left and right of the bin.
          // Reverse ordering of x axis so that +X is left.
          const float bin_x_max =
              PixToNonSquareNdc((bx + 1) * bin_size - 1, W, H) + half_pix_x;
          const float bin_x_min =
              PixToNonSquareNdc(bx * bin_size, W, H) - half_pix_x;
          const bool x_overlap = (xmin <= bin_x_max) && (bin_x_min < xmax);
          if (y_overlap && x_overlap) {
            binmask.set(by, bx, f);
          }
        }
      }
    }
    // All threads must finish writing the bitmask before any thread reads it.
    __syncthreads();
    // Now we have processed every face in the current chunk. We need to
    // count the number of faces in each bin so we can write the indices
    // out to global memory. We have each thread handle a different bin.
    for (int byx = threadIdx.x; byx < num_bins_y * num_bins_x;
         byx += blockDim.x) {
      const int by = byx / num_bins_x;
      const int bx = byx % num_bins_x;
      const int count = binmask.count(by, bx);
      const int faces_per_bin_idx =
          batch_idx * num_bins_y * num_bins_x + by * num_bins_x + bx;
      // This atomically increments the (global) number of faces found
      // in the current bin, and gets the previous value of the counter;
      // this effectively allocates space in the bin_faces array for the
      // faces in the current chunk that fall into this bin.
      const int start = atomicAdd(faces_per_bin + faces_per_bin_idx, count);
      // Now loop over the binmask and write the active bits for this bin
      // out to bin_faces.
      int next_idx = batch_idx * num_bins_y * num_bins_x * M +
          by * num_bins_x * M + bx * M + start;
      for (int f = 0; f < chunk_size; ++f) {
        if (binmask.get(by, bx, f)) {
          // TODO(T54296346) find the correct method for handling errors in
          // CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin.
          // Either decrease bin size or increase max_faces_per_bin
          bin_faces[next_idx] = face_start_idx + f;
          next_idx++;
        }
      }
    }
    // Keep the bitmask intact until every bin has been flushed before the
    // next chunk iteration clears it.
    __syncthreads();
  }
}
// Host wrapper for RasterizeMeshesCoarseCudaKernel. Bins the faces of each
// mesh into a (num_bins_y, num_bins_x) grid of image tiles and returns a
// (N, num_bins_y, num_bins_x, max_faces_per_bin) int32 tensor of face
// indices per bin, padded with -1.
at::Tensor RasterizeMeshesCoarseCuda(
    const at::Tensor& face_verts,
    const at::Tensor& mesh_to_face_first_idx,
    const at::Tensor& num_faces_per_mesh,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int bin_size,
    const int max_faces_per_bin) {
  TORCH_CHECK(
      face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
          face_verts.size(2) == 3,
      "face_verts must have dimensions (num_faces, 3, 3)");
  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      mesh_to_face_first_idx_t{
          mesh_to_face_first_idx, "mesh_to_face_first_idx", 2},
      num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3};
  at::CheckedFrom c = "RasterizeMeshesCoarseCuda";
  at::checkAllSameGPU(
      c, {face_verts_t, mesh_to_face_first_idx_t, num_faces_per_mesh_t});
  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(face_verts.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  const int H = std::get<0>(image_size);
  const int W = std::get<1>(image_size);
  const int F = face_verts.size(0);
  const int N = num_faces_per_mesh.size(0);
  const int M = max_faces_per_bin;
  // Integer divide round up.
  const int num_bins_y = 1 + (H - 1) / bin_size;
  const int num_bins_x = 1 + (W - 1) / bin_size;
  if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) {
    std::stringstream ss;
    ss << "In Coarse Rasterizer got num_bins_y: " << num_bins_y
       << ", num_bins_x: " << num_bins_x << "; that's too many!";
    AT_ERROR(ss.str());
  }
  auto opts = num_faces_per_mesh.options().dtype(at::kInt);
  at::Tensor faces_per_bin = at::zeros({N, num_bins_y, num_bins_x}, opts);
  at::Tensor bin_faces = at::full({N, num_bins_y, num_bins_x, M}, -1, opts);
  // Nothing to bin; still surface any pending CUDA error.
  if (bin_faces.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return bin_faces;
  }
  const int chunk_size = 512;
  // One bit per (bin, face-in-chunk) pair in the shared-memory bitmask.
  const size_t shared_size = num_bins_y * num_bins_x * chunk_size / 8;
  const size_t blocks = 64;
  const size_t threads = 512;
  RasterizeMeshesCoarseCudaKernel<<<blocks, threads, shared_size, stream>>>(
      face_verts.contiguous().data_ptr<float>(),
      mesh_to_face_first_idx.contiguous().data_ptr<int64_t>(),
      num_faces_per_mesh.contiguous().data_ptr<int64_t>(),
      blur_radius,
      N,
      F,
      H,
      W,
      bin_size,
      chunk_size,
      M,
      faces_per_bin.data_ptr<int32_t>(),
      bin_faces.data_ptr<int32_t>());
  AT_CUDA_CHECK(cudaGetLastError());
  return bin_faces;
}
// ****************************************************************************
// * FINE RASTERIZATION *
// ****************************************************************************
// Fine rasterization kernel: for every output pixel, scan only the faces the
// coarse pass placed into the pixel's bin, collect up to K hits per pixel,
// sort them by depth, and write the per-pixel outputs.
// Launch: 1-D grid; a grid-stride loop covers all N * BH * BW * bin_size^2
// virtual pixels.
__global__ void RasterizeMeshesFineCudaKernel(
    const float* face_verts, // (F, 3, 3)
    const int32_t* bin_faces, // (N, BH, BW, T)
    const float blur_radius,
    const int bin_size,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces,
    const int N, // batch size
    const int BH, // number of bins along y
    const int BW, // number of bins along x
    const int M, // max faces per bin
    const int H,
    const int W,
    const int K, // faces kept per pixel
    int64_t* face_idxs, // (N, H, W, K)
    float* zbuf, // (N, H, W, K)
    float* pix_dists, // (N, H, W, K)
    float* bary // (N, H, W, K, 3)
    ) {
  // This can be more than H * W if H or W are not divisible by bin_size.
  int num_pixels = N * BH * BW * bin_size * bin_size;
  int num_threads = gridDim.x * blockDim.x;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  for (int pid = tid; pid < num_pixels; pid += num_threads) {
    // Convert linear index into bin and pixel indices. We make the within
    // block pixel ids move the fastest, so that adjacent threads will fall
    // into the same bin; this should give them coalesced memory reads when
    // they read from faces and bin_faces.
    int i = pid;
    const int n = i / (BH * BW * bin_size * bin_size);
    i %= BH * BW * bin_size * bin_size;
    // bin index y
    const int by = i / (BW * bin_size * bin_size);
    i %= BW * bin_size * bin_size;
    // bin index x (fixed: comment previously said "y")
    const int bx = i / (bin_size * bin_size);
    // pixel within the bin
    i %= bin_size * bin_size;
    // Pixel x, y indices
    const int yi = i / bin_size + by * bin_size;
    const int xi = i % bin_size + bx * bin_size;
    // Edge bins may extend past the image when H or W is not a multiple of
    // bin_size; skip those out-of-range virtual pixels.
    if (yi >= H || xi >= W)
      continue;
    // Pixel center in NDC coordinates.
    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);
    // This part looks like the naive rasterization kernel, except we use
    // bin_faces to only look at a subset of faces already known to fall
    // in this bin. TODO abstract out this logic into some data structure
    // that is shared by both kernels?
    // q holds up to K candidate hits; q_max_z/q_max_idx presumably track the
    // farthest entry so CheckPixelInsideFace can evict it — TODO confirm
    // against CheckPixelInsideFace's definition.
    Pixel q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;
    for (int m = 0; m < M; m++) {
      // Row-major (n, by, bx, m) lookup into bin_faces.
      const int f = bin_faces[n * BH * BW * M + by * BW * M + bx * M + m];
      if (f < 0) {
        continue; // bin_faces uses -1 as a sentinel value.
      }
      // Check if the pixel pxy is inside the face bounding box and if it is,
      // update q, q_size, q_max_z and q_max_idx in place.
      CheckPixelInsideFace(
          face_verts,
          f,
          q_size,
          q_max_z,
          q_max_idx,
          q,
          blur_radius,
          pxy,
          K,
          perspective_correct,
          clip_barycentric_coords,
          cull_backfaces);
    }
    // Now we've looked at all the faces for this bin, so we can write
    // output for the current pixel.
    // TODO: make sorting an option as only top k is needed, not sorted values.
    BubbleSort(q, q_size);
    // Reverse ordering of the X and Y axis so that
    // in the image +Y is pointing up and +X is pointing left.
    const int yidx = H - 1 - yi;
    const int xidx = W - 1 - xi;
    const int pix_idx = n * H * W * K + yidx * W * K + xidx * K;
    for (int k = 0; k < q_size; k++) {
      face_idxs[pix_idx + k] = q[k].idx;
      zbuf[pix_idx + k] = q[k].z;
      pix_dists[pix_idx + k] = q[k].dist;
      bary[(pix_idx + k) * 3 + 0] = q[k].bary.x;
      bary[(pix_idx + k) * 3 + 1] = q[k].bary.y;
      bary[(pix_idx + k) * 3 + 2] = q[k].bary.z;
    }
  }
}
// Fine rasterization host entry point: launches RasterizeMeshesFineCudaKernel
// over the bins produced by the coarse pass.
//
// Args:
//   face_verts: (F, 3, 3) float tensor of packed face vertices.
//   bin_faces: (N, BH, BW, M) int32 tensor from the coarse pass (-1 padded).
//   image_size: (H, W) output image size in pixels.
//   faces_per_pixel: K, number of nearest faces kept per pixel.
//
// Returns:
//   Tuple (face_idxs, zbuf, bary, pix_dists), all (N, H, W, K[,3]) and
//   initialized to -1 for empty entries.
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesFineCuda(
    const at::Tensor& face_verts,
    const at::Tensor& bin_faces,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int bin_size,
    const int faces_per_pixel,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  TORCH_CHECK(
      face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
          face_verts.size(2) == 3,
      "face_verts must have dimensions (num_faces, 3, 3)");
  TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions");

  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      bin_faces_t{bin_faces, "bin_faces", 2};
  at::CheckedFrom c = "RasterizeMeshesFineCuda";
  at::checkAllSameGPU(c, {face_verts_t, bin_faces_t});

  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(face_verts.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  // bin_faces shape (N, BH, BW, M)
  const int N = bin_faces.size(0);
  const int BH = bin_faces.size(1);
  const int BW = bin_faces.size(2);
  const int M = bin_faces.size(3);
  const int K = faces_per_pixel;
  const int H = std::get<0>(image_size);
  const int W = std::get<1>(image_size);

  if (K > kMaxPointsPerPixel) {
    // NOTE(review): the message hardcodes 150 while the check uses
    // kMaxPointsPerPixel — confirm the constant's value before relying on it.
    AT_ERROR("Must have num_closest <= 150");
  }

  auto long_opts = bin_faces.options().dtype(at::kLong);
  auto float_opts = face_verts.options().dtype(at::kFloat);
  at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts);
  at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts);

  if (face_idxs.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
  }

  const size_t blocks = 1024;
  const size_t threads = 64;

  RasterizeMeshesFineCudaKernel<<<blocks, threads, 0, stream>>>(
      face_verts.contiguous().data_ptr<float>(),
      bin_faces.contiguous().data_ptr<int32_t>(),
      blur_radius,
      bin_size,
      perspective_correct,
      clip_barycentric_coords,
      cull_backfaces,
      N,
      BH,
      BW,
      M,
      H,
      W,
      K,
      face_idxs.data_ptr<int64_t>(),
      zbuf.data_ptr<float>(),
      pix_dists.data_ptr<float>(),
      bary.data_ptr<float>());

  // Fixed: the launch-error check was missing here, unlike the other
  // rasterization entry points in this file.
  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
|
097f402dece640ff4b538213169a7ce78102e6e6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "CalculateFixed.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *background = NULL;
hipMalloc(&background, XSIZE*YSIZE);
const float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE);
const float *mask = NULL;
hipMalloc(&mask, XSIZE*YSIZE);
float *fixed = NULL;
hipMalloc(&fixed, XSIZE*YSIZE);
const int wb = 1;
const int hb = 1;
const int wt = 1;
const int ht = 1;
const int oy = 1;
const int ox = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
CalculateFixed), dim3(gridBlock),dim3(threadBlock), 0, 0, background,target,mask,fixed,wb,hb,wt,ht,oy,ox);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
CalculateFixed), dim3(gridBlock),dim3(threadBlock), 0, 0, background,target,mask,fixed,wb,hb,wt,ht,oy,ox);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
CalculateFixed), dim3(gridBlock),dim3(threadBlock), 0, 0, background,target,mask,fixed,wb,hb,wt,ht,oy,ox);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 097f402dece640ff4b538213169a7ce78102e6e6.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "CalculateFixed.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness for the CalculateFixed kernel: sweeps the
// matrix sizes in matrices_ against the 20 block shapes in blocks_, timing
// 1000 launches of each configuration.
// argv[1] = number of rows of matrices_ to sweep (no argc validation here —
// the harness assumes it is invoked with one numeric argument).
// NOTE(review): the timed loop has no synchronization, so it measures launch
// overhead rather than kernel runtime; preserved as the harness's design.
int main(int argc, char **argv) {
  cudaSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      // Fixed: buffers were declared `const float*` (cudaMalloc needs a
      // non-const pointer-to-pointer) and sized in elements, not bytes.
      size_t bytes = (size_t)XSIZE * YSIZE * sizeof(float);
      float *background = NULL;
      cudaMalloc(&background, bytes);
      float *target = NULL;
      cudaMalloc(&target, bytes);
      float *mask = NULL;
      cudaMalloc(&mask, bytes);
      float *fixed = NULL;
      cudaMalloc(&fixed, bytes);
      const int wb = 1;
      const int hb = 1;
      const int wt = 1;
      const int ht = 1;
      const int oy = 1;
      const int ox = 1;
      // Round the launch domain up to the next multiple of the block shape.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0) {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);  // no-op free: forces context creation before timing
      CalculateFixed<<<gridBlock, threadBlock>>>(background, target, mask, fixed, wb, hb, wt, ht, oy, ox);
      cudaDeviceSynchronize();
      // Warm-up launches.
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        CalculateFixed<<<gridBlock, threadBlock>>>(background, target, mask, fixed, wb, hb, wt, ht, oy, ox);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        CalculateFixed<<<gridBlock, threadBlock>>>(background, target, mask, fixed, wb, hb, wt, ht, oy, ox);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
      // Fixed: the four device buffers were leaked on every sweep iteration.
      cudaFree(background);
      cudaFree(target);
      cudaFree(mask);
      cudaFree(fixed);
    }
  }
}
3a877fab0d9c93b42427f153c12a17ef6ff53b42.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
int main()
{
    /*
     * Query the currently active GPU and report the properties the exercise
     * asks for: SM count, compute capability, and warp size.
     */
    int deviceId;
    hipDeviceProp_t props;

    hipGetDevice(&deviceId);
    hipGetDeviceProperties(&props, deviceId);

    const int ccMajor = props.major;
    const int ccMinor = props.minor;
    const int smCount = props.multiProcessorCount;
    const int lanesPerWarp = props.warpSize;

    /*
     * There should be no need to modify the output string below.
     */
    printf("Device ID: %d\nNumber of SMs: %d\nCompute Capability Major: %d\nCompute Capability Minor: %d\nWarp Size: %d\n", deviceId, smCount, ccMajor, ccMinor, lanesPerWarp);
}
| 3a877fab0d9c93b42427f153c12a17ef6ff53b42.cu | #include <stdio.h>
int main()
{
    /*
     * Query the currently active GPU and report the properties the exercise
     * asks for: SM count, compute capability, and warp size.
     */
    int deviceId;
    cudaDeviceProp props;

    cudaGetDevice(&deviceId);
    cudaGetDeviceProperties(&props, deviceId);

    const int ccMajor = props.major;
    const int ccMinor = props.minor;
    const int smCount = props.multiProcessorCount;
    const int lanesPerWarp = props.warpSize;

    /*
     * There should be no need to modify the output string below.
     */
    printf("Device ID: %d\nNumber of SMs: %d\nCompute Capability Major: %d\nCompute Capability Minor: %d\nWarp Size: %d\n", deviceId, smCount, ccMajor, ccMinor, lanesPerWarp);
}
|
142a9ce4fb1a1540d2060f8a892440ea83ff609b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <hip/hip_runtime.h>
//#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
extern "C" void add_vector_gpu( float* a, float* b, float *c, int size );
// Element-wise vector addition: output[i] = arrayA[i] + arrayB[i].
// The host launches exactly one block of `size` threads, so threadIdx.x
// alone indexes the arrays; no bounds guard is required for that launch.
__global__ void VectorAdd( float* arrayA, float* arrayB, float* output )
{
    const int i = threadIdx.x;
    output[i] = arrayA[i] + arrayB[i];
}
extern "C" void add_vector_gpu( float* a, float* b, float *c, int size );
// Adds two host float arrays of length `size` on the GPU: c = a + b.
// Allocates device buffers, copies inputs, runs VectorAdd with one block of
// `size` threads, and copies the result back.
// Fixed: HIP API return codes were previously ignored; on any failure we now
// skip the remaining steps (leaving `c` untouched) and still free whatever
// was allocated. `size` must not exceed the device's max threads per block.
void add_vector_gpu( float* a, float* b, float *c, int size )
{
    // size_t avoids int overflow for large element counts.
    size_t data_size = (size_t)size * sizeof(float);

    float *dev_A = NULL, *dev_B = NULL, *dev_C = NULL;
    hipError_t err = hipSuccess;

    // part1, allocate data on device
    if (err == hipSuccess) err = hipMalloc( (void**)&dev_A, data_size );
    if (err == hipSuccess) err = hipMalloc( (void**)&dev_B, data_size );
    if (err == hipSuccess) err = hipMalloc( (void**)&dev_C, data_size );

    // part2, copy memory to device
    if (err == hipSuccess) err = hipMemcpy( dev_A, a, data_size, hipMemcpyHostToDevice );
    if (err == hipSuccess) err = hipMemcpy( dev_B, b, data_size, hipMemcpyHostToDevice );

    // part3, run kernel
    if (err == hipSuccess) {
        hipLaunchKernelGGL(( VectorAdd), dim3(1), dim3(size) , 0, 0, dev_A, dev_B, dev_C );
        err = hipGetLastError();  // catches launch-configuration errors
    }

    // part4, copy data from device
    if (err == hipSuccess) err = hipMemcpy( c, dev_C, data_size, hipMemcpyDeviceToHost );

    // part5, release data (hipFree(NULL) is a harmless no-op)
    hipFree(dev_A);
    hipFree(dev_B);
    hipFree(dev_C);
}
| 142a9ce4fb1a1540d2060f8a892440ea83ff609b.cu | #include <stdlib.h>
#include <cuda.h>
//#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
extern "C" void add_vector_gpu( float* a, float* b, float *c, int size );
// Element-wise vector addition: output[i] = arrayA[i] + arrayB[i].
// The host launches exactly one block of `size` threads, so threadIdx.x
// alone indexes the arrays; no bounds guard is required for that launch.
__global__ void VectorAdd( float* arrayA, float* arrayB, float* output )
{
    const int i = threadIdx.x;
    output[i] = arrayA[i] + arrayB[i];
}
extern "C" void add_vector_gpu( float* a, float* b, float *c, int size );
// Adds two host float arrays of length `size` on the GPU: c = a + b.
// Allocates device buffers, copies inputs, runs VectorAdd with one block of
// `size` threads, and copies the result back.
// Fixed: CUDA API return codes were previously ignored; on any failure we now
// skip the remaining steps (leaving `c` untouched) and still free whatever
// was allocated. `size` must not exceed the device's max threads per block.
void add_vector_gpu( float* a, float* b, float *c, int size )
{
    // size_t avoids int overflow for large element counts.
    size_t data_size = (size_t)size * sizeof(float);

    float *dev_A = NULL, *dev_B = NULL, *dev_C = NULL;
    cudaError_t err = cudaSuccess;

    // part1, allocate data on device
    if (err == cudaSuccess) err = cudaMalloc( (void**)&dev_A, data_size );
    if (err == cudaSuccess) err = cudaMalloc( (void**)&dev_B, data_size );
    if (err == cudaSuccess) err = cudaMalloc( (void**)&dev_C, data_size );

    // part2, copy memory to device
    if (err == cudaSuccess) err = cudaMemcpy( dev_A, a, data_size, cudaMemcpyHostToDevice );
    if (err == cudaSuccess) err = cudaMemcpy( dev_B, b, data_size, cudaMemcpyHostToDevice );

    // part3, run kernel
    if (err == cudaSuccess) {
        VectorAdd<<< 1, size >>>( dev_A, dev_B, dev_C );
        err = cudaGetLastError();  // catches launch-configuration errors
    }

    // part4, copy data from device
    if (err == cudaSuccess) err = cudaMemcpy( c, dev_C, data_size, cudaMemcpyDeviceToHost );

    // part5, release data (cudaFree(NULL) is a harmless no-op)
    cudaFree(dev_A);
    cudaFree(dev_B);
    cudaFree(dev_C);
}
|
773a512e31b31f6722c45a816a4e7abdb69bfb41.hip | // !!! This is a file automatically generated by hipify!!!
//
// ConvCutlassExecution.cpp
// MNN
//
// Created by MNN on 2020/08/22.
// Copyright 2018, Alibaba Group Holding Limited
//
#include "ConvCutlassExecution.hpp"
#include "Raster.cuh"
#include "ConvBaseKernel.cuh"
//#define DEBUG
namespace MNN {
namespace CUDA {
// Shared per-op resource: uploads and reorders the convolution weights into
// a device tensor padded to multiples of 8 in both dimensions, and uploads
// the bias (converted to fp16 when the backend runs in fp16).
ConvCutlassExecution::Resource::Resource(Backend* bn, const MNN::Op* op) {
    mBackend = bn;
    auto runtime = static_cast<CUDABackend*>(bn)->getCUDARuntime();
    auto conv = op->main_as_Convolution2D();
    auto common = conv->common();
    // weight host->device
    const float* filterDataPtr = nullptr;
    int weightSize = 0;
    std::shared_ptr<ConvolutionCommon::Int8Common> quanCommon;
    ConvolutionCommon::getConvParameters(&quanCommon, conv, &filterDataPtr, &weightSize);
    auto oc = common->outputCount();
    // Weight is treated as an (h = oc) x (l = weightSize / oc) matrix,
    // padded up to multiples of 8 (lp, hp).
    int l = weightSize / oc;
    int h = oc;
    int lp = UP_DIV(l, 8) * 8;
    int hp = UP_DIV(h, 8) * 8;
    // Reorder weight
    {
        // Stage the raw weights in a temporary device buffer first.
        auto tempCacheBuffer = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(weightSize * sizeof(float));
        float* cacheWeight = (float*)((uint8_t*)tempCacheBuffer.first + tempCacheBuffer.second);
        runtime->memcpy(cacheWeight, filterDataPtr, weightSize * sizeof(float), MNNMemcpyHostToDevice);
        // Precision 1 (fp32) keeps 4-byte weights; otherwise 2-byte (fp16).
        if(static_cast<CUDABackend*>(bn)->getPrecision() == 1) {
            weightTensor.reset(Tensor::createDevice<int32_t>({lp * hp}));
        } else {
            weightTensor.reset(Tensor::createDevice<int16_t>({lp * hp}));
        }
        bn->onAcquireBuffer(weightTensor.get(), Backend::STATIC);
        mFilter = (void *)weightTensor.get()->buffer().device;
        // Fixed: the original declared `int precision = ...getPrecision();`
        // then wrote `precision == 0;` — a comparison whose result was
        // discarded (almost certainly meant `precision = 0;`). The variable
        // was never read afterwards, so the dead code is removed with no
        // behavior change.
        callWeightFill((const void *)cacheWeight, (void *)mFilter, l, h, lp, hp, static_cast<CUDABackend*>(bn)->getPrecision() == 1, runtime);
        static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(tempCacheBuffer);
    }
    // Copy Bias
    {
        if(static_cast<CUDABackend*>(bn)->useFp16()) {
            // Stage fp32 bias on device, then convert to fp16 into mBias.
            auto tempBiasStorage = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(conv->bias()->size()*sizeof(float));
            auto biasTemp = (float*)((uint8_t*)tempBiasStorage.first + tempBiasStorage.second);
            cuda_check(hipMemcpy(biasTemp, conv->bias()->data(), conv->bias()->size()*sizeof(float), hipMemcpyHostToDevice));
            int biasSize = conv->bias()->size();
            int hp = UP_DIV(biasSize, 8) * 8;
            biasTensor.reset(Tensor::createDevice<int16_t>({hp}));
            bn->onAcquireBuffer(biasTensor.get(), Backend::STATIC);
            mBias = (void *)biasTensor.get()->buffer().device;
            runtime->memset(mBias, 0, hp * sizeof(int16_t));
            callFloat2Half((const void*)biasTemp, (void*)mBias, hp, runtime);
            static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(tempBiasStorage);
        } else {
            // fp32 path: copy the bias directly into a zero-padded buffer.
            int biasSize = conv->bias()->size();
            int hp = UP_DIV(biasSize, 8) * 8;
            biasTensor.reset(Tensor::createDevice<int32_t>({hp}));
            bn->onAcquireBuffer(biasTensor.get(), Backend::STATIC);
            mBias = (void *)biasTensor.get()->buffer().device;
            runtime->memset(mBias, 0, hp * sizeof(int32_t));
            cuda_check(hipMemcpy(mBias, conv->bias()->data(), conv->bias()->size()*sizeof(float), hipMemcpyHostToDevice));
        }
    }
}
ConvCutlassExecution::Resource::~Resource() {
    // Do nothing
    // weightTensor / biasTensor are smart-pointer members; their device
    // buffers are presumably released by the Tensor destructors — nothing
    // to free explicitly here.
}
// Per-instance execution object; shares the uploaded weights/bias via `res`
// and caches the backend precision mode as three mutually exclusive flags.
ConvCutlassExecution::ConvCutlassExecution(Backend* backend, const MNN::Op* op, std::shared_ptr<Resource> res) : CutlassCommonExecution(backend) {
    mOp = op;
    mResource = res;
    // Fixed: removed an unused local (`auto runtime = ...getCUDARuntime();`)
    // whose value was never read.
    mPrecisonLevel = static_cast<CUDABackend*>(backend)->getPrecision();
    mFp16Infer = (mPrecisonLevel == 2);    // full fp16 inference
    mFp32Infer = (mPrecisonLevel == 1);    // full fp32 inference
    mFp16Fp32MixInfer = (mPrecisonLevel == 0);  // fp32 I/O, fp16 compute
}
ConvCutlassExecution::~ConvCutlassExecution() {
    // Nothing to release: the shared weights/bias live in mResource
    // (a shared_ptr) and are cleaned up by its own destructor.
}
// Clone this execution onto (possibly) another backend, sharing mResource.
// A null `dst` is a capability query: report cloneability without building.
bool ConvCutlassExecution::onClone(Backend* bn, const Op* op, Execution** dst) {
    if (!mValid) {
        return false;
    }
    if (nullptr != dst) {
        *dst = new ConvCutlassExecution(bn, op, mResource);
    }
    return true;
}
// Prepare the execution for the given tensor shapes: fills the im2col
// parameters, derives the implicit-GEMM problem size (e, l, h), reserves the
// im2col scratch buffer, and selects a cutlass GEMM path based on precision
// and GPU compute capability.
ErrorCode ConvCutlassExecution::onResize(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) {
    auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
    auto input = inputs[0], output = outputs[0];
    const int UNIT = PACK_NUMBER;
    auto convCommon = mOp->main_as_Convolution2D()->common();
    auto pads = ConvolutionCommon::convolutionPadFull(input, output, mOp->main_as_Convolution2D()->common());
    int ic = input->channel();
    auto icDiv = UP_DIV(ic, UNIT);
    // Im2col geometry: strides/dilations/kernel from the op, pads computed
    // above, spatial sizes from the resized tensors.
    mIm2ColParamter.dilateX = convCommon->dilateX();
    mIm2ColParamter.dilateY = convCommon->dilateY();
    mIm2ColParamter.strideX = convCommon->strideX();
    mIm2ColParamter.strideY = convCommon->strideY();
    mIm2ColParamter.icDiv4 = icDiv;
    mIm2ColParamter.kernelX = convCommon->kernelX();
    mIm2ColParamter.kernelY = convCommon->kernelY();
    mIm2ColParamter.padX = std::get<0>(pads);
    mIm2ColParamter.padY = std::get<1>(pads);
    mIm2ColParamter.ih = input->height();
    mIm2ColParamter.iw = input->width();
    mIm2ColParamter.oh = output->height();
    mIm2ColParamter.ow = output->width();
    mIm2ColParamter.srcZStep = input->height() * input->width() * UNIT * input->batch();
    mIm2ColParamter.srcYStep = input->width() * UNIT;
    mIm2ColParamter.packCUnit = UNIT;
    // 0 = none, 1 = relu, 2 = relu6.
    mActivationType = convCommon->relu() ? 1 : convCommon->relu6() ? 2 : 0;
    //MNN_PRINT("conv size:%d-%d, %d-%d-%d, %d-%d-%d\n", mIm2ColParamter.kernelX, mIm2ColParamter.strideX, input->height(), input->width(), input->channel(), output->height(), output->width(), output->channel());
    // Implicit-GEMM problem size: e = output pixels, l = reduction length,
    // h = output channels; elhPad rounds each up to a multiple of 8.
    int e = output->height() * output->width() * output->batch();
    int l = ic * mIm2ColParamter.kernelX * mIm2ColParamter.kernelY;
    int h = output->channel();
    mGemmInfo.elh[0] = e;
    mGemmInfo.elh[1] = l;
    mGemmInfo.elh[2] = h;
    mGemmInfo.elhPad[0] = UP_DIV(e, 8) * 8;
    mGemmInfo.elhPad[1] = UP_DIV(l, 8) * 8;
    mGemmInfo.elhPad[2] = UP_DIV(h, 8) * 8;
    //MNN_PRINT("Activate:%d \n", mActivationType);
    //MNN_PRINT("Im2Col%d-%d-%d temp size:%zu!!!\n\n",output->width(), ic, mIm2ColParamter.kernelX, (size_t)sizeof(__half) * mMatMulParam.elhPack[0] * mMatMulParam.elhPack[1] * MATMULPACK * MATMULPACK);
    // When Im2Col memory size big than 2GB
    // NOTE(review): this blocked-im2col path is disabled by the if(0); the
    // original size-threshold condition is kept in the comment below.
    if(0){//(size_t)mGemmInfo.elh[0] * (size_t)mGemmInfo.elh[1] > 1024*1024*1024 && mIm2ColParamter.kernelX > 1 && mIm2ColParamter.kernelY > 1) {
        //printf("need im2col in block\n");
        mIsBlock = true;
        mBlockNum = 16;
        mGemmInfo.elh[0] = UP_DIV(mGemmInfo.elh[0], mBlockNum);
    }
    // 1x1 stride-1 dilation-1 pad-0 convolutions can skip im2col entirely
    // when the input is already in the GEMM dtype (pure fp16 or pure fp32).
    mIsConv1x1S1D1P0 = (mIm2ColParamter.kernelX == 1 && mIm2ColParamter.kernelY == 1 && \
                        mIm2ColParamter.strideX == 1 && mIm2ColParamter.strideY == 1 && \
                        mIm2ColParamter.dilateX == 1 && mIm2ColParamter.dilateY == 1 && \
                        mIm2ColParamter.padX == 0 && mIm2ColParamter.padY == 0);
    mNeedIm2Col = !(mIsConv1x1S1D1P0 && (mFp16Infer || mFp32Infer));
    auto pool = static_cast<CUDABackend*>(backend())->getBufferPool();
    if(mNeedIm2Col) {
        size_t im2colBytes = 2;
        // Only when fp32 Im2Col convert to fp32, Fp16Fp32Mix Im2Col convert to fp16
        if(mFp32Infer) {
            im2colBytes = 4;
        }
        auto buffer = pool->alloc(im2colBytes * (size_t)mGemmInfo.elh[0] * (size_t)mGemmInfo.elhPad[1]);
        mIm2ColBuffer = (void*)((uint8_t*)buffer.first + buffer.second);
        // NOTE(review): the buffer is freed back to the pool immediately but
        // mIm2ColBuffer is still used in onExecute — presumably the pool's
        // reserve-then-release idiom keeps the region valid; confirm against
        // the BufferPool semantics.
        pool->free(buffer);
    }
    mFilterAddr = mResource->mFilter;
    mBiasAddr = mResource->mBias;
    mBackendPtr = mResource->mBackend;
    // Call from different function
    if(mFp32Infer){
        return callCutlassGemmCudaCoreFloat32(inputs, outputs);
    }
    // Pick the tensor-core variant matching the GPU architecture.
    mGpuComputeCap = runtime->compute_capability();
    //MNN_PRINT("Gpu smArch is sm_%d\n", mGpuComputeCap);
    if(mGpuComputeCap < 70) {
        return callCutlassGemmCudaCoreFloat16(inputs, outputs);
    } else if(mGpuComputeCap < 75) {
        return callCutlassGemmTensorCore884(inputs, outputs);
    }
    return callCutlassGemmTensorCore(inputs, outputs);
}
// Run the convolution: optionally rewrite the input into im2col layout (or
// just convert fp32->fp16 for the 1x1 mixed-precision fast path), then invoke
// the cutlass GEMM configured in onResize.
ErrorCode ConvCutlassExecution::onExecute(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) {
    //MNN_PRINT("cuda convSingleInput onExecute in, inputsize:%d %d\n", (int)inputs.size(), workspace_size_);
    MNN_ASSERT(inputs.size() == 1);
    MNN_ASSERT(outputs.size() == 1);
    auto input = inputs[0];
    auto output = outputs[0];
    //printf("convcutlass:%p %p\n", input->deviceId(), output->deviceId());
    //MNN_PRINT("cutlass hw:%d-%d\n", input->height(), input->width());
    auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
    const void *input_addr = (const void*)inputs[0]->deviceId();
    // NOTE(review): filter_addr/bias_addr/bn/output_addr and the stride/pad
    // locals below are unused here (the GEMM call reads the member fields set
    // in onResize); left in place as they have no side effects.
    const void *filter_addr = mResource->mFilter;
    const void *bias_addr = mResource->mBias;
    auto bn = backend();
    void *output_addr = (void*)outputs[0]->deviceId();
    const int sw = mIm2ColParamter.strideX;
    const int sh = mIm2ColParamter.strideY;
    const int dw = mIm2ColParamter.dilateX;
    const int dh = mIm2ColParamter.dilateY;
    const int pw = mIm2ColParamter.padX;
    const int ph = mIm2ColParamter.padY;
    const int icDiv4 = mIm2ColParamter.icDiv4;
    const int iw = mIm2ColParamter.iw;
    const int ih = mIm2ColParamter.ih;
    //printf("%d-%d-%d-%d-%d, %d-%d\n", cpuIm2Col->icDiv4, cpuIm2Col->ih, cpuIm2Col->iw, cpuIm2Col->oh, cpuIm2Col->ow, eAlign, lAlign);
    // Im2col in Block
    // mBlockNum is 1 unless the (currently disabled) blocked path enabled it.
    for(int block_idx = 0; block_idx < mBlockNum; block_idx++) {
        if(mIsConv1x1S1D1P0 && mFp16Fp32MixInfer) {
            // 1x1 fast path: no layout change needed, only fp32 -> fp16.
            size_t maxCount = mGemmInfo.elh[0] * mGemmInfo.elhPad[1];
            callFloat2Half(input_addr, mIm2ColBuffer, maxCount, runtime);
        } else if (mNeedIm2Col) {
            // General path: expand input patches into the (e, lPad) matrix.
            callIm2ColPack((const void *)input_addr, (void *)mIm2ColBuffer, &mIm2ColParamter, mGemmInfo.elh[0], mGemmInfo.elh[1], \
                mGemmInfo.elhPad[0], mGemmInfo.elhPad[1], mPrecisonLevel, runtime);
        }
    }
    // Run cutlass gemm forward
    return runCutlassGemmFunc();
}
}// namespace CUDA
}// namespace MNN | 773a512e31b31f6722c45a816a4e7abdb69bfb41.cu | //
// ConvCutlassExecution.cpp
// MNN
//
// Created by MNN on 2020/08/22.
// Copyright © 2018, Alibaba Group Holding Limited
//
#include "ConvCutlassExecution.hpp"
#include "Raster.cuh"
#include "ConvBaseKernel.cuh"
//#define DEBUG
namespace MNN {
namespace CUDA {
// Shared per-op resource: uploads and reorders the convolution weights into
// a device tensor padded to multiples of 8 in both dimensions, and uploads
// the bias (converted to fp16 when the backend runs in fp16).
ConvCutlassExecution::Resource::Resource(Backend* bn, const MNN::Op* op) {
    mBackend = bn;
    auto runtime = static_cast<CUDABackend*>(bn)->getCUDARuntime();
    auto conv = op->main_as_Convolution2D();
    auto common = conv->common();
    // weight host->device
    const float* filterDataPtr = nullptr;
    int weightSize = 0;
    std::shared_ptr<ConvolutionCommon::Int8Common> quanCommon;
    ConvolutionCommon::getConvParameters(&quanCommon, conv, &filterDataPtr, &weightSize);
    auto oc = common->outputCount();
    // Weight is treated as an (h = oc) x (l = weightSize / oc) matrix,
    // padded up to multiples of 8 (lp, hp).
    int l = weightSize / oc;
    int h = oc;
    int lp = UP_DIV(l, 8) * 8;
    int hp = UP_DIV(h, 8) * 8;
    // Reorder weight
    {
        // Stage the raw weights in a temporary device buffer first.
        auto tempCacheBuffer = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(weightSize * sizeof(float));
        float* cacheWeight = (float*)((uint8_t*)tempCacheBuffer.first + tempCacheBuffer.second);
        runtime->memcpy(cacheWeight, filterDataPtr, weightSize * sizeof(float), MNNMemcpyHostToDevice);
        // Precision 1 (fp32) keeps 4-byte weights; otherwise 2-byte (fp16).
        if(static_cast<CUDABackend*>(bn)->getPrecision() == 1) {
            weightTensor.reset(Tensor::createDevice<int32_t>({lp * hp}));
        } else {
            weightTensor.reset(Tensor::createDevice<int16_t>({lp * hp}));
        }
        bn->onAcquireBuffer(weightTensor.get(), Backend::STATIC);
        mFilter = (void *)weightTensor.get()->buffer().device;
        // Fixed: the original declared `int precision = ...getPrecision();`
        // then wrote `precision == 0;` — a comparison whose result was
        // discarded (almost certainly meant `precision = 0;`). The variable
        // was never read afterwards, so the dead code is removed with no
        // behavior change.
        callWeightFill((const void *)cacheWeight, (void *)mFilter, l, h, lp, hp, static_cast<CUDABackend*>(bn)->getPrecision() == 1, runtime);
        static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(tempCacheBuffer);
    }
    // Copy Bias
    {
        if(static_cast<CUDABackend*>(bn)->useFp16()) {
            // Stage fp32 bias on device, then convert to fp16 into mBias.
            auto tempBiasStorage = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(conv->bias()->size()*sizeof(float));
            auto biasTemp = (float*)((uint8_t*)tempBiasStorage.first + tempBiasStorage.second);
            cuda_check(cudaMemcpy(biasTemp, conv->bias()->data(), conv->bias()->size()*sizeof(float), cudaMemcpyHostToDevice));
            int biasSize = conv->bias()->size();
            int hp = UP_DIV(biasSize, 8) * 8;
            biasTensor.reset(Tensor::createDevice<int16_t>({hp}));
            bn->onAcquireBuffer(biasTensor.get(), Backend::STATIC);
            mBias = (void *)biasTensor.get()->buffer().device;
            runtime->memset(mBias, 0, hp * sizeof(int16_t));
            callFloat2Half((const void*)biasTemp, (void*)mBias, hp, runtime);
            static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(tempBiasStorage);
        } else {
            // fp32 path: copy the bias directly into a zero-padded buffer.
            int biasSize = conv->bias()->size();
            int hp = UP_DIV(biasSize, 8) * 8;
            biasTensor.reset(Tensor::createDevice<int32_t>({hp}));
            bn->onAcquireBuffer(biasTensor.get(), Backend::STATIC);
            mBias = (void *)biasTensor.get()->buffer().device;
            runtime->memset(mBias, 0, hp * sizeof(int32_t));
            cuda_check(cudaMemcpy(mBias, conv->bias()->data(), conv->bias()->size()*sizeof(float), cudaMemcpyHostToDevice));
        }
    }
}
ConvCutlassExecution::Resource::~Resource() {
    // Do nothing
    // weightTensor / biasTensor are smart-pointer members; their device
    // buffers are presumably released by the Tensor destructors — nothing
    // to free explicitly here.
}
// Per-instance execution object; shares the uploaded weights/bias via `res`
// and caches the backend precision mode as three mutually exclusive flags.
ConvCutlassExecution::ConvCutlassExecution(Backend* backend, const MNN::Op* op, std::shared_ptr<Resource> res) : CutlassCommonExecution(backend) {
    mOp = op;
    mResource = res;
    // Fixed: removed an unused local (`auto runtime = ...getCUDARuntime();`)
    // whose value was never read.
    mPrecisonLevel = static_cast<CUDABackend*>(backend)->getPrecision();
    mFp16Infer = (mPrecisonLevel == 2);    // full fp16 inference
    mFp32Infer = (mPrecisonLevel == 1);    // full fp32 inference
    mFp16Fp32MixInfer = (mPrecisonLevel == 0);  // fp32 I/O, fp16 compute
}
ConvCutlassExecution::~ConvCutlassExecution() {
    // Nothing to release: the shared weights/bias live in mResource
    // (a shared_ptr) and are cleaned up by its own destructor.
}
// Clone this execution onto (possibly) another backend, sharing mResource.
// A null `dst` is a capability query: report cloneability without building.
bool ConvCutlassExecution::onClone(Backend* bn, const Op* op, Execution** dst) {
    if (!mValid) {
        return false;
    }
    if (nullptr != dst) {
        *dst = new ConvCutlassExecution(bn, op, mResource);
    }
    return true;
}
// Prepare the execution for the given tensor shapes: fills the im2col
// parameters, derives the implicit-GEMM problem size (e, l, h), reserves the
// im2col scratch buffer, and selects a cutlass GEMM path based on precision
// and GPU compute capability.
ErrorCode ConvCutlassExecution::onResize(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) {
    auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
    auto input = inputs[0], output = outputs[0];
    const int UNIT = PACK_NUMBER;
    auto convCommon = mOp->main_as_Convolution2D()->common();
    auto pads = ConvolutionCommon::convolutionPadFull(input, output, mOp->main_as_Convolution2D()->common());
    int ic = input->channel();
    auto icDiv = UP_DIV(ic, UNIT);
    // Im2col geometry: strides/dilations/kernel from the op, pads computed
    // above, spatial sizes from the resized tensors.
    mIm2ColParamter.dilateX = convCommon->dilateX();
    mIm2ColParamter.dilateY = convCommon->dilateY();
    mIm2ColParamter.strideX = convCommon->strideX();
    mIm2ColParamter.strideY = convCommon->strideY();
    mIm2ColParamter.icDiv4 = icDiv;
    mIm2ColParamter.kernelX = convCommon->kernelX();
    mIm2ColParamter.kernelY = convCommon->kernelY();
    mIm2ColParamter.padX = std::get<0>(pads);
    mIm2ColParamter.padY = std::get<1>(pads);
    mIm2ColParamter.ih = input->height();
    mIm2ColParamter.iw = input->width();
    mIm2ColParamter.oh = output->height();
    mIm2ColParamter.ow = output->width();
    mIm2ColParamter.srcZStep = input->height() * input->width() * UNIT * input->batch();
    mIm2ColParamter.srcYStep = input->width() * UNIT;
    mIm2ColParamter.packCUnit = UNIT;
    // 0 = none, 1 = relu, 2 = relu6.
    mActivationType = convCommon->relu() ? 1 : convCommon->relu6() ? 2 : 0;
    //MNN_PRINT("conv size:%d-%d, %d-%d-%d, %d-%d-%d\n", mIm2ColParamter.kernelX, mIm2ColParamter.strideX, input->height(), input->width(), input->channel(), output->height(), output->width(), output->channel());
    // Implicit-GEMM problem size: e = output pixels, l = reduction length,
    // h = output channels; elhPad rounds each up to a multiple of 8.
    int e = output->height() * output->width() * output->batch();
    int l = ic * mIm2ColParamter.kernelX * mIm2ColParamter.kernelY;
    int h = output->channel();
    mGemmInfo.elh[0] = e;
    mGemmInfo.elh[1] = l;
    mGemmInfo.elh[2] = h;
    mGemmInfo.elhPad[0] = UP_DIV(e, 8) * 8;
    mGemmInfo.elhPad[1] = UP_DIV(l, 8) * 8;
    mGemmInfo.elhPad[2] = UP_DIV(h, 8) * 8;
    //MNN_PRINT("Activate:%d \n", mActivationType);
    //MNN_PRINT("Im2Col:%d-%d-%d temp size:%zu!!!\n\n",output->width(), ic, mIm2ColParamter.kernelX, (size_t)sizeof(__half) * mMatMulParam.elhPack[0] * mMatMulParam.elhPack[1] * MATMULPACK * MATMULPACK);
    // When Im2Col memory size big than 2GB
    // NOTE(review): this blocked-im2col path is disabled by the if(0); the
    // original size-threshold condition is kept in the comment below.
    if(0){//(size_t)mGemmInfo.elh[0] * (size_t)mGemmInfo.elh[1] > 1024*1024*1024 && mIm2ColParamter.kernelX > 1 && mIm2ColParamter.kernelY > 1) {
        //printf("need im2col in block\n");
        mIsBlock = true;
        mBlockNum = 16;
        mGemmInfo.elh[0] = UP_DIV(mGemmInfo.elh[0], mBlockNum);
    }
    // 1x1 stride-1 dilation-1 pad-0 convolutions can skip im2col entirely
    // when the input is already in the GEMM dtype (pure fp16 or pure fp32).
    mIsConv1x1S1D1P0 = (mIm2ColParamter.kernelX == 1 && mIm2ColParamter.kernelY == 1 && \
                        mIm2ColParamter.strideX == 1 && mIm2ColParamter.strideY == 1 && \
                        mIm2ColParamter.dilateX == 1 && mIm2ColParamter.dilateY == 1 && \
                        mIm2ColParamter.padX == 0 && mIm2ColParamter.padY == 0);
    mNeedIm2Col = !(mIsConv1x1S1D1P0 && (mFp16Infer || mFp32Infer));
    auto pool = static_cast<CUDABackend*>(backend())->getBufferPool();
    if(mNeedIm2Col) {
        size_t im2colBytes = 2;
        // Only when fp32 Im2Col convert to fp32, Fp16Fp32Mix Im2Col convert to fp16
        if(mFp32Infer) {
            im2colBytes = 4;
        }
        auto buffer = pool->alloc(im2colBytes * (size_t)mGemmInfo.elh[0] * (size_t)mGemmInfo.elhPad[1]);
        mIm2ColBuffer = (void*)((uint8_t*)buffer.first + buffer.second);
        // NOTE(review): the buffer is freed back to the pool immediately but
        // mIm2ColBuffer is still used in onExecute — presumably the pool's
        // reserve-then-release idiom keeps the region valid; confirm against
        // the BufferPool semantics.
        pool->free(buffer);
    }
    mFilterAddr = mResource->mFilter;
    mBiasAddr = mResource->mBias;
    mBackendPtr = mResource->mBackend;
    // Call from different function
    if(mFp32Infer){
        return callCutlassGemmCudaCoreFloat32(inputs, outputs);
    }
    // Pick the tensor-core variant matching the GPU architecture.
    mGpuComputeCap = runtime->compute_capability();
    //MNN_PRINT("Gpu smArch is sm_%d\n", mGpuComputeCap);
    if(mGpuComputeCap < 70) {
        return callCutlassGemmCudaCoreFloat16(inputs, outputs);
    } else if(mGpuComputeCap < 75) {
        return callCutlassGemmTensorCore884(inputs, outputs);
    }
    return callCutlassGemmTensorCore(inputs, outputs);
}
// Run the convolution: optionally rewrite the input into im2col layout (or
// just convert fp32->fp16 for the 1x1 mixed-precision fast path), then invoke
// the cutlass GEMM configured in onResize.
ErrorCode ConvCutlassExecution::onExecute(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) {
    //MNN_PRINT("cuda convSingleInput onExecute in, inputsize:%d %d\n", (int)inputs.size(), workspace_size_);
    MNN_ASSERT(inputs.size() == 1);
    MNN_ASSERT(outputs.size() == 1);
    auto input = inputs[0];
    auto output = outputs[0];
    //printf("convcutlass:%p %p\n", input->deviceId(), output->deviceId());
    //MNN_PRINT("cutlass hw:%d-%d\n", input->height(), input->width());
    auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
    const void *input_addr = (const void*)inputs[0]->deviceId();
    // NOTE(review): filter_addr/bias_addr/bn/output_addr and the stride/pad
    // locals below are unused here (the GEMM call reads the member fields set
    // in onResize); left in place as they have no side effects.
    const void *filter_addr = mResource->mFilter;
    const void *bias_addr = mResource->mBias;
    auto bn = backend();
    void *output_addr = (void*)outputs[0]->deviceId();
    const int sw = mIm2ColParamter.strideX;
    const int sh = mIm2ColParamter.strideY;
    const int dw = mIm2ColParamter.dilateX;
    const int dh = mIm2ColParamter.dilateY;
    const int pw = mIm2ColParamter.padX;
    const int ph = mIm2ColParamter.padY;
    const int icDiv4 = mIm2ColParamter.icDiv4;
    const int iw = mIm2ColParamter.iw;
    const int ih = mIm2ColParamter.ih;
    //printf("%d-%d-%d-%d-%d, %d-%d\n", cpuIm2Col->icDiv4, cpuIm2Col->ih, cpuIm2Col->iw, cpuIm2Col->oh, cpuIm2Col->ow, eAlign, lAlign);
    // Im2col in Block
    // mBlockNum is 1 unless the (currently disabled) blocked path enabled it.
    for(int block_idx = 0; block_idx < mBlockNum; block_idx++) {
        if(mIsConv1x1S1D1P0 && mFp16Fp32MixInfer) {
            // 1x1 fast path: no layout change needed, only fp32 -> fp16.
            size_t maxCount = mGemmInfo.elh[0] * mGemmInfo.elhPad[1];
            callFloat2Half(input_addr, mIm2ColBuffer, maxCount, runtime);
        } else if (mNeedIm2Col) {
            // General path: expand input patches into the (e, lPad) matrix.
            callIm2ColPack((const void *)input_addr, (void *)mIm2ColBuffer, &mIm2ColParamter, mGemmInfo.elh[0], mGemmInfo.elh[1], \
                mGemmInfo.elhPad[0], mGemmInfo.elhPad[1], mPrecisonLevel, runtime);
        }
    }
    // Run cutlass gemm forward
    return runCutlassGemmFunc();
}
}// namespace CUDA
}// namespace MNN |
72540fd7bc82515e587e4fac59c2c9caec527d71.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "sorting/tasks/sort_values_nccl.h"
#include "column/column.h"
#include "cudf_util/bitmask.h"
#include "cudf_util/column.h"
#include "nccl/util.h"
#include "nccl/shuffle.h"
#include "util/cuda_helper.h"
#include "util/gpu_task_context.h"
#include "util/zip_for_each.h"
#include "deserializer.h"
#include <rccl.h>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <algorithm>
namespace legate {
namespace pandas {
namespace sorting {
using namespace Legion;
namespace detail {
// Task arguments
// Task arguments deserialized from the Legion task payload (see deserialize()).
struct SortValuesTaskArgs {
// Asserts that all input columns share the shape of the first one.
void sanity_check(void);
// Total number of rows across all pieces.
int64_t volume;
// Number of parallel pieces (tasks) participating in the sort.
int32_t num_pieces;
// Controls where nulls land relative to each key's sort direction.
bool put_null_first;
// Per-key-column sort direction (true = ascending).
std::vector<bool> ascending;
// Indices of the key columns within `input`.
std::vector<int32_t> key_indices;
// Input columns and the matching output columns (same count).
std::vector<Column<true>> input;
std::vector<OutputColumn> output;
// NCCL communicator used for the inter-task exchanges.
ncclComm_t *comm;
friend void deserialize(Deserializer &ctx, SortValuesTaskArgs &args);
};
void SortValuesTaskArgs::sanity_check(void)
{
  // Every input column must have the same shape as the first one.
  // (No-op in NDEBUG builds, same as the assert-based original.)
  auto &reference = input[0];
  for (auto &col : input) assert(reference.shape() == col.shape());
}
// Reconstructs SortValuesTaskArgs from the serialized task payload.
// The read order is the wire format and must match the launching side:
// volume (future), num_pieces, put_null_first, #key columns, ascending[],
// key_indices[], #columns, input[], output[], NCCL communicator (future).
void deserialize(Deserializer &ctx, SortValuesTaskArgs &args)
{
deserialize_from_future(ctx, args.volume);
deserialize(ctx, args.num_pieces);
deserialize(ctx, args.put_null_first);
uint32_t num_key_columns = 0;
deserialize(ctx, num_key_columns);
// Vectors are resized first, then filled in place; the `false` flag
// presumably tells deserialize not to resize again — TODO confirm.
args.ascending.resize(num_key_columns);
deserialize(ctx, args.ascending, false);
args.key_indices.resize(num_key_columns);
deserialize(ctx, args.key_indices, false);
uint32_t num_columns = 0;
deserialize(ctx, num_columns);
args.input.resize(num_columns);
args.output.resize(num_columns);
deserialize(ctx, args.input, false);
deserialize(ctx, args.output, false);
deserialize_from_future(ctx, args.comm);
}
// Gathers the rows of `input` at the given host-side `indices` into a new
// table. Indices are staged through a Z_COPY (host-visible) buffer so the
// device-side cudf gather can read them without an explicit memcpy.
std::unique_ptr<cudf::table> gather(const cudf::table_view &input,
const std::vector<int32_t> &indices,
hipStream_t stream,
rmm::mr::device_memory_resource *mr)
{
// NOTE(review): Rect<1>{0, indices.size() - 1} underflows when `indices`
// is empty — callers must pass a non-empty index list (here: num_pieces > 1).
DeferredBuffer<int32_t, 1> device_indices_buf{Memory::Z_COPY_MEM, Rect<1>{0, indices.size() - 1}};
auto device_indices = device_indices_buf.ptr(0);
for (auto idx = 0; idx < indices.size(); ++idx) device_indices[idx] = indices[idx];
// Non-owning cudf view over the staged gather map.
cudf::column_view gather_map(cudf::data_type(cudf::type_id::INT32),
static_cast<cudf::size_type>(indices.size()),
device_indices);
// Callers guarantee in-bounds, non-negative indices (DONT_CHECK).
return cudf::detail::gather(input,
gather_map,
cudf::out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
}
// Arguments to the sorting functions
// Arguments shared by the sorting strategies, derived from the task args.
struct SortArgs {
SortArgs(const Task *task, SortValuesTaskArgs &args);
~SortArgs() = default;
// Stream owned by the GPU task context; all work is enqueued on it.
constexpr hipStream_t stream() const { return gpu_ctx.stream(); }
GPUTaskContext gpu_ctx;
// Input table with dictionary columns stripped out into `dictionaries`.
cudf::table_view input;
std::unordered_map<uint32_t, cudf::column_view> dictionaries;
std::vector<int32_t> key_indices;
std::vector<cudf::order> column_order;
std::vector<cudf::null_order> null_precedence;
// This task's index within the launch, plus global problem sizes.
int64_t task_id;
int64_t volume;
int32_t num_pieces;
ncclComm_t *comm;
// Temporaries use the current RMM resource; the final result is allocated
// from the deferred-buffer allocator (presumably so it outlives the task
// under Legion's ownership — confirm).
rmm::mr::device_memory_resource *temp_mr;
std::unique_ptr<DeferredBufferAllocator> output_mr;
};
SortArgs::SortArgs(const Task *task, SortValuesTaskArgs &args)
: key_indices(std::move(args.key_indices)),
task_id(task->index_point[0]),
volume(args.volume),
num_pieces(args.num_pieces),
comm(args.comm),
output_mr(new DeferredBufferAllocator())
{
temp_mr = rmm::mr::get_current_device_resource();
// Convert legate columns to a cudf table and split out dictionary columns.
auto input_table = to_cudf_table(args.input, stream());
auto converted = comm::extract_dictionaries(input_table);
std::tie(input, dictionaries) = std::move(converted);
// Translate per-key flags into cudf sort orders. A key places nulls
// BEFORE only when its direction flag matches put_null_first.
for (auto asc : args.ascending) {
column_order.push_back(asc ? cudf::order::ASCENDING : cudf::order::DESCENDING);
null_precedence.push_back(asc == args.put_null_first ? cudf::null_order::BEFORE
: cudf::null_order::AFTER);
}
}
// Heuristic strategy selector: sample sort for large per-piece row counts,
// all-gather sort otherwise.
bool use_sample_sort(SortArgs &args)
{
#ifdef FORCE_SAMPLE_SORT
  return true;
#else
  // Sample sort pays off only when the 32 samples drawn per piece would
  // cover under 25% of the piece, i.e. at least 128 rows per piece.
  // TODO: Make this magic numbers configurable
  const auto rows_per_piece = args.volume / args.num_pieces;
  return rows_per_piece / 32 >= 4;
#endif
}
// Sorts by gathering the whole table on every task, sorting it locally,
// and keeping only this task's slice of the globally sorted result.
std::unique_ptr<cudf::table> all_gather_sort(SortArgs &args)
{
// Gather all rows and sort them
auto all_rows = comm::all_gather(
args.input, args.task_id, args.num_pieces, args.comm, args.stream(), args.temp_mr);
auto all_rows_view = all_rows->view();
auto all_rows_keys = all_rows_view.select(args.key_indices);
auto sorted = cudf::detail::sort_by_key(all_rows_view,
all_rows_keys,
args.column_order,
args.null_precedence,
args.stream(),
args.temp_mr);
// Keep rows [volume*id/pieces, volume*(id+1)/pieces): an even split of the
// sorted table — every task computed the same global order.
auto start_idx = static_cast<cudf::size_type>(args.volume * args.task_id / args.num_pieces);
auto stop_idx = static_cast<cudf::size_type>(args.volume * (args.task_id + 1) / args.num_pieces);
std::vector<cudf::size_type> indices{start_idx, stop_idx};
auto sliced = cudf::slice(sorted->view(), indices);
// Materialize the slice with the output allocator so the result persists.
return std::make_unique<cudf::table>(sliced[0], args.stream(), args.output_mr.get());
}
/**
 * Distributed sample sort:
 *  1. sort the local piece,
 *  2. draw up to 32 random key samples and all-gather them across tasks,
 *  3. sort the samples and pick num_pieces-1 evenly strided splitters,
 *  4. binary-search (lower_bound) the splitters in the local sorted keys,
 *  5. shuffle the resulting row ranges to their destination tasks.
 * Returns this task's partition of the globally sorted table.
 */
std::unique_ptr<cudf::table> sample_sort(SortArgs &args)
{
  // Sort the table locally
  auto input_keys     = args.input.select(args.key_indices);
  auto locally_sorted = cudf::detail::sort_by_key(
    args.input, input_keys, args.column_order, args.null_precedence, args.stream(), args.temp_mr);
  auto locally_sorted_keys = locally_sorted->view().select(args.key_indices);
  // Randomly sample keys.
  // Fix: qualified std::min — the bare `::min` left behind by hipify does
  // not resolve in host code (the original CUDA source used std::min).
  auto num_samples = std::min(32, locally_sorted_keys.num_rows());
  auto samples     = cudf::detail::sample(locally_sorted_keys,
                                          num_samples,
                                          cudf::sample_with_replacement::FALSE,
                                          Realm::Clock::current_time_in_nanoseconds(),
                                          args.stream(),
                                          args.temp_mr);
  // Gather all samples and sort them
  auto all_samples = comm::all_gather(
    samples->view(), args.task_id, args.num_pieces, args.comm, args.stream(), args.temp_mr);
  auto sorted_samples = cudf::detail::sort_by_key(all_samples->view(),
                                                  all_samples->view(),
                                                  args.column_order,
                                                  args.null_precedence,
                                                  args.stream(),
                                                  args.temp_mr);
  // Sample again, but deterministically this time so that tasks agree on the
  // split points they choose.
  auto stride = sorted_samples->num_rows() / args.num_pieces;
  std::vector<int32_t> boundary_indices;
  for (auto idx = 0; idx < args.num_pieces - 1; ++idx)
    boundary_indices.push_back((idx + 1) * stride);
  auto dividers =
    detail::gather(sorted_samples->view(), boundary_indices, args.stream(), args.temp_mr);
  // Find split points using the samples
  auto device_splits = cudf::detail::lower_bound(locally_sorted_keys,
                                                 dividers->view(),
                                                 args.column_order,
                                                 args.null_precedence,
                                                 args.stream(),
                                                 args.temp_mr);
  // Copy the split offsets back to the host for the shuffle.
  std::vector<int32_t> host_splits(device_splits->size());
  hipMemcpyAsync(host_splits.data(),
                 device_splits->view().data<int32_t>(),
                 sizeof(int32_t) * device_splits->size(),
                 hipMemcpyDeviceToHost,
                 args.stream());
  // We should wait until the splits are copied
  SYNC_AND_CHECK_STREAM(args.stream());
  // All-to-all exchange: row range [splits[i-1], splits[i]) goes to task i.
  return comm::shuffle(locally_sorted->view(),
                       host_splits,
                       args.task_id,
                       args.comm,
                       args.stream(),
                       args.output_mr.get());
}
// Dispatches to the sorting strategy chosen by the heuristic.
std::unique_ptr<cudf::table> sort(SortArgs &args)
{
  return use_sample_sort(args) ? sample_sort(args) : all_gather_sort(args);
}
} // namespace detail
// GPU task entry point: deserializes the arguments, runs the distributed
// sort, re-embeds the dictionary columns stripped in SortArgs, and writes
// the result into the output columns. Returns this task's row count.
/*static*/ int64_t SortValuesNCCLTask::gpu_variant(const Task *task,
const std::vector<PhysicalRegion> &regions,
Context context,
Runtime *runtime)
{
Deserializer ctx{task, regions};
detail::SortValuesTaskArgs task_args;
detail::deserialize(ctx, task_args);
detail::SortArgs args(task, task_args);
auto result = detail::sort(args);
// Capture the row count before the table is moved out.
auto result_size = result->num_rows();
auto converted = comm::embed_dictionaries(std::move(result), args.dictionaries);
from_cudf_table(task_args.output, std::move(converted), args.stream(), *args.output_mr);
return result_size;
}
// Registers the task variants at library-load time (GCC constructor attribute).
static void __attribute__((constructor)) register_tasks(void)
{
SortValuesNCCLTask::register_variants_with_return<int64_t, int64_t>();
}
} // namespace sorting
} // namespace pandas
} // namespace legate
| 72540fd7bc82515e587e4fac59c2c9caec527d71.cu | /* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "sorting/tasks/sort_values_nccl.h"
#include "column/column.h"
#include "cudf_util/bitmask.h"
#include "cudf_util/column.h"
#include "nccl/util.h"
#include "nccl/shuffle.h"
#include "util/cuda_helper.h"
#include "util/gpu_task_context.h"
#include "util/zip_for_each.h"
#include "deserializer.h"
#include <nccl.h>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
namespace legate {
namespace pandas {
namespace sorting {
using namespace Legion;
namespace detail {
// Task arguments
// Task arguments deserialized from the Legion task payload (see deserialize()).
struct SortValuesTaskArgs {
// Asserts that all input columns share the shape of the first one.
void sanity_check(void);
int64_t volume;        // total rows across all pieces
int32_t num_pieces;    // number of parallel pieces (tasks)
bool put_null_first;   // controls null placement relative to sort direction
std::vector<bool> ascending;        // per-key sort direction
std::vector<int32_t> key_indices;   // key column indices within `input`
std::vector<Column<true>> input;    // input columns
std::vector<OutputColumn> output;   // matching output columns
ncclComm_t *comm;                   // NCCL communicator for exchanges
friend void deserialize(Deserializer &ctx, SortValuesTaskArgs &args);
};
void SortValuesTaskArgs::sanity_check(void)
{
  // All input columns must match the first column's shape (no-op in NDEBUG).
  auto &reference = input[0];
  for (auto &col : input) assert(reference.shape() == col.shape());
}
// Reconstructs SortValuesTaskArgs from the serialized task payload; the
// read order below is the wire format and must match the launching side.
void deserialize(Deserializer &ctx, SortValuesTaskArgs &args)
{
deserialize_from_future(ctx, args.volume);
deserialize(ctx, args.num_pieces);
deserialize(ctx, args.put_null_first);
uint32_t num_key_columns = 0;
deserialize(ctx, num_key_columns);
// Vectors are resized first, then filled in place; the `false` flag
// presumably suppresses a second resize inside deserialize — TODO confirm.
args.ascending.resize(num_key_columns);
deserialize(ctx, args.ascending, false);
args.key_indices.resize(num_key_columns);
deserialize(ctx, args.key_indices, false);
uint32_t num_columns = 0;
deserialize(ctx, num_columns);
args.input.resize(num_columns);
args.output.resize(num_columns);
deserialize(ctx, args.input, false);
deserialize(ctx, args.output, false);
deserialize_from_future(ctx, args.comm);
}
// Gathers the rows of `input` at the host-side `indices` into a new table,
// staging the indices through a Z_COPY (host-visible) buffer.
std::unique_ptr<cudf::table> gather(const cudf::table_view &input,
const std::vector<int32_t> &indices,
cudaStream_t stream,
rmm::mr::device_memory_resource *mr)
{
// NOTE(review): Rect<1>{0, indices.size() - 1} underflows for empty
// `indices` — callers must pass a non-empty list (here: num_pieces > 1).
DeferredBuffer<int32_t, 1> device_indices_buf{Memory::Z_COPY_MEM, Rect<1>{0, indices.size() - 1}};
auto device_indices = device_indices_buf.ptr(0);
for (auto idx = 0; idx < indices.size(); ++idx) device_indices[idx] = indices[idx];
// Non-owning cudf view over the staged gather map.
cudf::column_view gather_map(cudf::data_type(cudf::type_id::INT32),
static_cast<cudf::size_type>(indices.size()),
device_indices);
// Callers guarantee in-bounds, non-negative indices (DONT_CHECK).
return cudf::detail::gather(input,
gather_map,
cudf::out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
}
// Arguments to the sorting functions
// Arguments shared by the sorting strategies, derived from the task args.
struct SortArgs {
SortArgs(const Task *task, SortValuesTaskArgs &args);
~SortArgs() = default;
// Stream owned by the GPU task context; all work is enqueued on it.
constexpr cudaStream_t stream() const { return gpu_ctx.stream(); }
GPUTaskContext gpu_ctx;
// Input table with dictionary columns stripped out into `dictionaries`.
cudf::table_view input;
std::unordered_map<uint32_t, cudf::column_view> dictionaries;
std::vector<int32_t> key_indices;
std::vector<cudf::order> column_order;
std::vector<cudf::null_order> null_precedence;
// This task's index within the launch, plus global problem sizes.
int64_t task_id;
int64_t volume;
int32_t num_pieces;
ncclComm_t *comm;
// Temporaries use the current RMM resource; the final result is allocated
// from the deferred-buffer allocator (presumably so it outlives the task —
// confirm).
rmm::mr::device_memory_resource *temp_mr;
std::unique_ptr<DeferredBufferAllocator> output_mr;
};
SortArgs::SortArgs(const Task *task, SortValuesTaskArgs &args)
: key_indices(std::move(args.key_indices)),
task_id(task->index_point[0]),
volume(args.volume),
num_pieces(args.num_pieces),
comm(args.comm),
output_mr(new DeferredBufferAllocator())
{
temp_mr = rmm::mr::get_current_device_resource();
// Convert legate columns to a cudf table and split out dictionary columns.
auto input_table = to_cudf_table(args.input, stream());
auto converted = comm::extract_dictionaries(input_table);
std::tie(input, dictionaries) = std::move(converted);
// Translate per-key flags into cudf sort orders. A key places nulls
// BEFORE only when its direction flag matches put_null_first.
for (auto asc : args.ascending) {
column_order.push_back(asc ? cudf::order::ASCENDING : cudf::order::DESCENDING);
null_precedence.push_back(asc == args.put_null_first ? cudf::null_order::BEFORE
: cudf::null_order::AFTER);
}
}
// Heuristic strategy selector: sample sort for large per-piece row counts,
// all-gather sort otherwise.
bool use_sample_sort(SortArgs &args)
{
#ifdef FORCE_SAMPLE_SORT
  return true;
#else
  // Sample sort pays off only when the 32 samples drawn per piece would
  // cover under 25% of the piece, i.e. at least 128 rows per piece.
  // TODO: Make this magic numbers configurable
  const auto rows_per_piece = args.volume / args.num_pieces;
  return rows_per_piece / 32 >= 4;
#endif
}
// Sorts by gathering the whole table on every task, sorting it locally,
// and keeping only this task's slice of the globally sorted result.
std::unique_ptr<cudf::table> all_gather_sort(SortArgs &args)
{
// Gather all rows and sort them
auto all_rows = comm::all_gather(
args.input, args.task_id, args.num_pieces, args.comm, args.stream(), args.temp_mr);
auto all_rows_view = all_rows->view();
auto all_rows_keys = all_rows_view.select(args.key_indices);
auto sorted = cudf::detail::sort_by_key(all_rows_view,
all_rows_keys,
args.column_order,
args.null_precedence,
args.stream(),
args.temp_mr);
// Keep rows [volume*id/pieces, volume*(id+1)/pieces): an even split of the
// sorted table — every task computed the same global order.
auto start_idx = static_cast<cudf::size_type>(args.volume * args.task_id / args.num_pieces);
auto stop_idx = static_cast<cudf::size_type>(args.volume * (args.task_id + 1) / args.num_pieces);
std::vector<cudf::size_type> indices{start_idx, stop_idx};
auto sliced = cudf::slice(sorted->view(), indices);
// Materialize the slice with the output allocator so the result persists.
return std::make_unique<cudf::table>(sliced[0], args.stream(), args.output_mr.get());
}
// Distributed sample sort: sort locally, all-gather up to 32 random key
// samples per task, derive num_pieces-1 splitters from the sorted samples,
// lower_bound them in the local keys, and shuffle row ranges to their
// destination tasks. Returns this task's partition of the sorted table.
std::unique_ptr<cudf::table> sample_sort(SortArgs &args)
{
// Sort the table locally
auto input_keys = args.input.select(args.key_indices);
auto locally_sorted = cudf::detail::sort_by_key(
args.input, input_keys, args.column_order, args.null_precedence, args.stream(), args.temp_mr);
auto locally_sorted_keys = locally_sorted->view().select(args.key_indices);
// Randomly sample keys (at most 32, fewer if the piece is small)
auto num_samples = std::min(32, locally_sorted_keys.num_rows());
auto samples = cudf::detail::sample(locally_sorted_keys,
num_samples,
cudf::sample_with_replacement::FALSE,
Realm::Clock::current_time_in_nanoseconds(),
args.stream(),
args.temp_mr);
// Gather all samples and sort them
auto all_samples = comm::all_gather(
samples->view(), args.task_id, args.num_pieces, args.comm, args.stream(), args.temp_mr);
auto sorted_samples = cudf::detail::sort_by_key(all_samples->view(),
all_samples->view(),
args.column_order,
args.null_precedence,
args.stream(),
args.temp_mr);
// Sample again, but deterministically this time so that tasks agree on the split points they
// choose
auto stride = sorted_samples->num_rows() / args.num_pieces;
std::vector<int32_t> boundary_indices;
for (auto idx = 0; idx < args.num_pieces - 1; ++idx)
boundary_indices.push_back((idx + 1) * stride);
auto dividers =
detail::gather(sorted_samples->view(), boundary_indices, args.stream(), args.temp_mr);
// Find split points using the samples
auto device_splits = cudf::detail::lower_bound(locally_sorted_keys,
dividers->view(),
args.column_order,
args.null_precedence,
args.stream(),
args.temp_mr);
// Copy the split offsets back to the host for the shuffle.
std::vector<int32_t> host_splits(device_splits->size());
cudaMemcpyAsync(host_splits.data(),
device_splits->view().data<int32_t>(),
sizeof(int32_t) * device_splits->size(),
cudaMemcpyDeviceToHost,
args.stream());
// We should wait until the splits are copied
SYNC_AND_CHECK_STREAM(args.stream());
// All-to-all exchange: row range [splits[i-1], splits[i]) goes to task i.
return comm::shuffle(locally_sorted->view(),
host_splits,
args.task_id,
args.comm,
args.stream(),
args.output_mr.get());
}
// Dispatches to the sorting strategy chosen by the heuristic.
std::unique_ptr<cudf::table> sort(SortArgs &args)
{
  return use_sample_sort(args) ? sample_sort(args) : all_gather_sort(args);
}
} // namespace detail
// GPU task entry point: deserializes the arguments, runs the distributed
// sort, re-embeds the dictionary columns stripped in SortArgs, and writes
// the result into the output columns. Returns this task's row count.
/*static*/ int64_t SortValuesNCCLTask::gpu_variant(const Task *task,
const std::vector<PhysicalRegion> &regions,
Context context,
Runtime *runtime)
{
Deserializer ctx{task, regions};
detail::SortValuesTaskArgs task_args;
detail::deserialize(ctx, task_args);
detail::SortArgs args(task, task_args);
auto result = detail::sort(args);
// Capture the row count before the table is moved out.
auto result_size = result->num_rows();
auto converted = comm::embed_dictionaries(std::move(result), args.dictionaries);
from_cudf_table(task_args.output, std::move(converted), args.stream(), *args.output_mr);
return result_size;
}
// Registers the task variants at library-load time (GCC constructor attribute).
static void __attribute__((constructor)) register_tasks(void)
{
SortValuesNCCLTask::register_variants_with_return<int64_t, int64_t>();
}
} // namespace sorting
} // namespace pandas
} // namespace legate
|
4d59a786dac47046e6418e146f03c4ab32b8921d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cudaDRectifier_propagate_kernel(double* x, double* y, unsigned int size, double leakSlope, double clipping)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride) {
double value = x[i];
if (clipping > 0.0)
y[i] = (value > 0.0) ? min(value, clipping) : leakSlope * value;
else
y[i] = (value > 0.0) ? value : leakSlope * value;
}
} | 4d59a786dac47046e6418e146f03c4ab32b8921d.cu | #include "includes.h"
// Leaky-rectifier forward pass (double precision), elementwise over `size`
// values: y = min(x, clipping) for x > 0 when clipping > 0 (no clip
// otherwise), and y = leakSlope * x for x <= 0. Uses a grid-stride loop,
// so any launch configuration covers the whole array.
__global__ void cudaDRectifier_propagate_kernel(double* x, double* y, unsigned int size, double leakSlope, double clipping)
{
    for (unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < size;
         idx += blockDim.x * gridDim.x) {
        const double v = x[idx];
        if (v > 0.0) {
            // Positive side: clip only when a positive ceiling is configured.
            y[idx] = (clipping > 0.0) ? min(v, clipping) : v;
        } else {
            // Negative side: scale by the leak slope.
            y[idx] = leakSlope * v;
        }
    }
}
2b98486503aa39f884ac4a5313f17d6011fe26a1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* @Name: ps.cu
* @Description: CUDA kernels and wrappers for Prefix-Sum.
*
* @Author: Giacomo Marciani <gmarciani@acm.org>
* Gabriele Santi <gsanti@acm.org>
*
* @Institution: University of Rome Tor Vergata
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "common/error.h"
#include "ps.cuh"
#include "psutil.cuh"
/*------------------------------------------------------------------------------
WRAPPERS
*******************************************************************************/
/*------------------------------------------------------------------------------
@description Execute the Prefix-Sum operation (Host Wrapper).
@param input The input data.
@param output The output data.
@param dim_data The number of elements in data.
@return void.
----------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
  @description Execute the Prefix-Sum operation (Host Wrapper).
               Copies `input` to the device, runs the __prefix_sum kernel,
               and copies the inclusive scan back into `output`.
  @param input    The input data (host, dim_data ints).
  @param output   The output data (host, dim_data ints).
  @param dim_data The number of elements in data. Precondition: a positive
                  multiple of BLOCK_SIZE — the grid launches exactly
                  dim_data / BLOCK_SIZE blocks with no tail handling.
  @return void.
------------------------------------------------------------------------------*/
__host__ void prefix_sum(int *input, int *output, int dim_data) {
  // Device buffers: the data itself and one partial sum per block.
  int *dev_data = NULL;
  int *dev_partialSums = NULL;

  // Sizes (dim_data assumed divisible by BLOCK_SIZE, see precondition).
  const int dim_partialSums = dim_data / BLOCK_SIZE;
  const size_t size_data = sizeof(int) * dim_data;
  const size_t size_partialSums = sizeof(int) * dim_partialSums;

  // Allocate and initialize device memory.
  HANDLE_ERROR(hipMalloc((void **)&dev_data, size_data));
  HANDLE_ERROR(hipMalloc((void **)&dev_partialSums, size_partialSums));
  HANDLE_ERROR(hipMemcpy(dev_data, input, size_data, hipMemcpyHostToDevice));
  HANDLE_ERROR(hipMemset(dev_partialSums, 0, size_partialSums));

  // One block per BLOCK_SIZE elements; one shared-memory int per warp.
  const dim3 grid(dim_data / BLOCK_SIZE, 1, 1);
  const dim3 block(BLOCK_SIZE, 1, 1);
  const int dim_sharedMemory = BLOCK_SIZE / WARP_SIZE;
  const size_t size_sharedMemory = sizeof(int) * dim_sharedMemory;

  // If kernel profiling is active, register events for elapsed time calculation.
#ifdef PROFILE_KERNEL
  hipEvent_t event_start, event_stop;
  float elapsed = 0;
  HANDLE_ERROR(hipEventCreate(&event_start));
  HANDLE_ERROR(hipEventCreate(&event_stop));
  HANDLE_ERROR(hipEventRecord(event_start));
#endif

  // Launch kernel for Prefix-Sum calculation.
  hipLaunchKernelGGL(( __prefix_sum), dim3(grid), dim3(block), size_sharedMemory , 0, dev_data, dev_partialSums, WARP_SIZE);
  // Fix: surface launch-configuration errors immediately.
  HANDLE_ERROR(hipGetLastError());

#ifdef PROFILE_KERNEL
  HANDLE_ERROR(hipEventRecord(event_stop));
  HANDLE_ERROR(hipEventSynchronize(event_stop));
  HANDLE_ERROR(hipEventElapsedTime(&elapsed, event_start, event_stop));
  printf("Elapsed Time (ms): %f\n", elapsed);
  printf("%d elements scanned in %f ms -> %f MegaElements/s\n",
         dim_data, elapsed, dim_data/(elapsed/1000.0f)/1000000.0f);
  // Fix: the events were created but never destroyed (resource leak).
  HANDLE_ERROR(hipEventDestroy(event_start));
  HANDLE_ERROR(hipEventDestroy(event_stop));
#endif

  // Blocking copy back — also synchronizes with the kernel on this stream.
  HANDLE_ERROR(hipMemcpy(output, dev_data, size_data, hipMemcpyDeviceToHost));

  // Free device resources.
  HANDLE_ERROR(hipFree(dev_data));
  HANDLE_ERROR(hipFree(dev_partialSums));
}
/*------------------------------------------------------------------------------
* KERNELS
******************************************************************************/
/*------------------------------------------------------------------------------
@description Computes the Prefix-Sum on the input array.
@param data Input/Outut data (output is overwritten).
@param partialSums Partial sums, shared between blocks.
@param warpSize The warp size.
@return void.
----------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
  @description Computes the Prefix-Sum on the input array (inclusive scan).
               Requires the *_sync shuffle intrinsics (CUDA 9+ semantics).
               Assumes the grid covers exactly one element per thread.
  @param data Input/Outut data (output is overwritten).
  @param partialSums Partial sums, shared between blocks.
  @param warpSize The warp size.
  @return void.

  NOTE(review): Step 3 reads partialSums[i] written by *other* blocks, but
  __syncthreads() is only a block-local barrier — there is no grid-wide
  synchronization, so this relies on lower-indexed blocks completing their
  Step 2 writes before higher-indexed blocks read, which the programming
  model does not guarantee. A two-kernel scan (or cooperative-grid sync)
  would make this race-free — confirm intent.
------------------------------------------------------------------------------*/
__global__ void __prefix_sum(int *data, int *partialSums, int warpSize) {
// Shared memory: shmem[i] contains sum for warp i
extern __shared__ int shmem[];
// Declaration of: CellId, ThreadId and WarpId.
const int id = (blockIdx.x * blockDim.x) + threadIdx.x;
const int tid = threadIdx.x;
const int warpId = tid / warpSize;
int d, i; // Indices for loops
// Initialize value with the input at cell i
int value = data[id];
/*----------------------------------------------------------------------------
STEP 1 - WARP REDUCE
*****************************************************************************/
// Classic shuffle-based inclusive scan within each warp: after log2(warpSize)
// steps, lane k holds the sum of lanes 0..k.
for ( d = 1; d < warpSize; d *= 2 ) {
int temp = __shfl_up_sync(0xFFFFFFFF, value, d, warpSize); // CUDA 9
if ( tid % warpSize >= d ) {
value += temp;
}
}
// The last thread whitin a warp writes its value on shared memory
if ( tid % warpSize == (warpSize-1) ) {
shmem[warpId] = value;
}
__syncthreads();
/*----------------------------------------------------------------------------
STEP 2 - BLOCK REDUCE
*****************************************************************************/
// Each warp (not the first) of a block updates value with sums of
// previous warps
if ( warpId > 0 ) {
for ( i = 0; i < warpId; i++ ) {
value += shmem[i];
}
}
// The last thread of each block stores the partial sum of its block into
// the vector of partial sums
if ( threadIdx.x == (blockDim.x-1) ) {
partialSums[blockIdx.x] = value;
}
__syncthreads();
/*----------------------------------------------------------------------------
STEP 3 - FINAL REDUCE
*****************************************************************************/
// Each thread of each block (not the first block) updates value with sums of
// previous blocks (O(gridDim.x) per thread; see race NOTE above).
if ( blockIdx.x > 0 ) {
for ( i = 0; i < blockIdx.x; i++ ) {
value += partialSums[i];
}
}
// Update cell i with value
data[id] = value;
}
| 2b98486503aa39f884ac4a5313f17d6011fe26a1.cu | /*
* @Name: ps.cu
* @Description: CUDA kernels and wrappers for Prefix-Sum.
*
* @Author: Giacomo Marciani <gmarciani@acm.org>
* Gabriele Santi <gsanti@acm.org>
*
* @Institution: University of Rome Tor Vergata
*/
#include <stdio.h>
#include <cuda_runtime.h>
#include "common/error.h"
#include "ps.cuh"
#include "psutil.cuh"
/*------------------------------------------------------------------------------
WRAPPERS
*******************************************************************************/
/*------------------------------------------------------------------------------
@description Execute the Prefix-Sum operation (Host Wrapper).
@param input The input data.
@param output The output data.
@param dim_data The number of elements in data.
@return void.
----------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
  @description Execute the Prefix-Sum operation (Host Wrapper).
               Copies `input` to the device, runs the __prefix_sum kernel,
               and copies the inclusive scan back into `output`.
  @param input    The input data (host, dim_data ints).
  @param output   The output data (host, dim_data ints).
  @param dim_data The number of elements in data. Precondition: a positive
                  multiple of BLOCK_SIZE — the grid launches exactly
                  dim_data / BLOCK_SIZE blocks with no tail handling.
  @return void.
------------------------------------------------------------------------------*/
__host__ void prefix_sum(int *input, int *output, int dim_data) {
  // Device buffers: the data itself and one partial sum per block.
  int *dev_data = NULL;
  int *dev_partialSums = NULL;

  // Sizes (dim_data assumed divisible by BLOCK_SIZE, see precondition).
  const int dim_partialSums = dim_data / BLOCK_SIZE;
  const size_t size_data = sizeof(int) * dim_data;
  const size_t size_partialSums = sizeof(int) * dim_partialSums;

  // Allocate and initialize device memory.
  HANDLE_ERROR(cudaMalloc((void **)&dev_data, size_data));
  HANDLE_ERROR(cudaMalloc((void **)&dev_partialSums, size_partialSums));
  HANDLE_ERROR(cudaMemcpy(dev_data, input, size_data, cudaMemcpyHostToDevice));
  HANDLE_ERROR(cudaMemset(dev_partialSums, 0, size_partialSums));

  // One block per BLOCK_SIZE elements; one shared-memory int per warp.
  const dim3 grid(dim_data / BLOCK_SIZE, 1, 1);
  const dim3 block(BLOCK_SIZE, 1, 1);
  const int dim_sharedMemory = BLOCK_SIZE / WARP_SIZE;
  const size_t size_sharedMemory = sizeof(int) * dim_sharedMemory;

  // If kernel profiling is active, register events for elapsed time calculation.
#ifdef PROFILE_KERNEL
  cudaEvent_t event_start, event_stop;
  float elapsed = 0;
  HANDLE_ERROR(cudaEventCreate(&event_start));
  HANDLE_ERROR(cudaEventCreate(&event_stop));
  HANDLE_ERROR(cudaEventRecord(event_start));
#endif

  // Launch kernel for Prefix-Sum calculation.
  __prefix_sum<<< grid, block, size_sharedMemory >>>(dev_data, dev_partialSums, WARP_SIZE);
  // Fix: surface launch-configuration errors immediately.
  HANDLE_ERROR(cudaGetLastError());

#ifdef PROFILE_KERNEL
  HANDLE_ERROR(cudaEventRecord(event_stop));
  HANDLE_ERROR(cudaEventSynchronize(event_stop));
  HANDLE_ERROR(cudaEventElapsedTime(&elapsed, event_start, event_stop));
  printf("Elapsed Time (ms): %f\n", elapsed);
  printf("%d elements scanned in %f ms -> %f MegaElements/s\n",
         dim_data, elapsed, dim_data/(elapsed/1000.0f)/1000000.0f);
  // Fix: the events were created but never destroyed (resource leak).
  HANDLE_ERROR(cudaEventDestroy(event_start));
  HANDLE_ERROR(cudaEventDestroy(event_stop));
#endif

  // Blocking copy back — also synchronizes with the kernel on this stream.
  HANDLE_ERROR(cudaMemcpy(output, dev_data, size_data, cudaMemcpyDeviceToHost));

  // Free device resources.
  HANDLE_ERROR(cudaFree(dev_data));
  HANDLE_ERROR(cudaFree(dev_partialSums));
}
/*------------------------------------------------------------------------------
* KERNELS
******************************************************************************/
/*------------------------------------------------------------------------------
@description Computes the Prefix-Sum on the input array.
@param data Input/Outut data (output is overwritten).
@param partialSums Partial sums, shared between blocks.
@param warpSize The warp size.
@return void.
----------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
  @description Computes the Prefix-Sum on the input array (inclusive scan).
               Requires the *_sync shuffle intrinsics (CUDA 9+ semantics).
               Assumes the grid covers exactly one element per thread.
  @param data Input/Outut data (output is overwritten).
  @param partialSums Partial sums, shared between blocks.
  @param warpSize The warp size.
  @return void.

  NOTE(review): Step 3 reads partialSums[i] written by *other* blocks, but
  __syncthreads() is only a block-local barrier — there is no grid-wide
  synchronization, so this relies on lower-indexed blocks completing their
  Step 2 writes before higher-indexed blocks read, which CUDA does not
  guarantee. A two-kernel scan (or cooperative-grid sync) would make this
  race-free — confirm intent.
------------------------------------------------------------------------------*/
__global__ void __prefix_sum(int *data, int *partialSums, int warpSize) {
// Shared memory: shmem[i] contains sum for warp i
extern __shared__ int shmem[];
// Declaration of: CellId, ThreadId and WarpId.
const int id = (blockIdx.x * blockDim.x) + threadIdx.x;
const int tid = threadIdx.x;
const int warpId = tid / warpSize;
int d, i; // Indices for loops
// Initialize value with the input at cell i
int value = data[id];
/*----------------------------------------------------------------------------
STEP 1 - WARP REDUCE
*****************************************************************************/
// Classic shuffle-based inclusive scan within each warp: after log2(warpSize)
// steps, lane k holds the sum of lanes 0..k.
for ( d = 1; d < warpSize; d *= 2 ) {
int temp = __shfl_up_sync(0xFFFFFFFF, value, d, warpSize); // CUDA 9
if ( tid % warpSize >= d ) {
value += temp;
}
}
// The last thread whitin a warp writes its value on shared memory
if ( tid % warpSize == (warpSize-1) ) {
shmem[warpId] = value;
}
__syncthreads();
/*----------------------------------------------------------------------------
STEP 2 - BLOCK REDUCE
*****************************************************************************/
// Each warp (not the first) of a block updates value with sums of
// previous warps
if ( warpId > 0 ) {
for ( i = 0; i < warpId; i++ ) {
value += shmem[i];
}
}
// The last thread of each block stores the partial sum of its block into
// the vector of partial sums
if ( threadIdx.x == (blockDim.x-1) ) {
partialSums[blockIdx.x] = value;
}
__syncthreads();
/*----------------------------------------------------------------------------
STEP 3 - FINAL REDUCE
*****************************************************************************/
// Each thread of each block (not the first block) updates value with sums of
// previous blocks (O(gridDim.x) per thread; see race NOTE above).
if ( blockIdx.x > 0 ) {
for ( i = 0; i < blockIdx.x; i++ ) {
value += partialSums[i];
}
}
// Update cell i with value
data[id] = value;
}
|
e872885ddccea6e0e88a33c1f186d9309c1f7a5e.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @internal
* @author Federico Busato <br>
* Univerity of Verona, Dept. of Computer Science <br>
* federico.busato@univr.it
* @date August, 2017
* @version v2
*
* @copyright Copyright 2017 Hornet. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*
* @file
*/
#include "Device/CubWrapper.cuh"
#include "Device/SafeCudaAPI.cuh"
#include "Device/VectorUtil.cuh"
#include "Host/Numeric.hpp"
#include "Core/DataLayout/DataLayout.cuh" //<-- !!!!
#include <cub.cuh>
namespace xlib {
// Base wrapper: stores the item count and owns the CUB temporary-storage
// buffer (_d_temp_storage), which is released in the destructor.
CubWrapper::CubWrapper(int num_items) noexcept : _num_items(num_items) {}
// Re-targets the wrapper to a new item count (temp storage is not resized).
void CubWrapper::initialize(int num_items) noexcept {
_num_items = num_items;
}
CubWrapper::~CubWrapper() noexcept {
cuFree(_d_temp_storage);
}
//==============================================================================
//==============================================================================
// Sum-reduction over a device array via hipcub::DeviceReduce::Sum.
// The constructor allocates the single-cell output buffer and sizes the
// temporary storage; run() performs the reduction and copies the result back.
template<typename T>
CubReduce<T>::CubReduce(const T* d_in, size_t num_items) noexcept :
CubWrapper(num_items), _d_in(d_in) {
cuMalloc(_d_out, 1);
// Sizing pass: with a null temp-storage pointer CUB only fills
// _temp_storage_bytes (assumes _d_temp_storage starts out nullptr).
hipcub::DeviceReduce::Sum(_d_temp_storage, _temp_storage_bytes,
_d_in, _d_out, _num_items);
SAFE_CALL( hipMalloc(&_d_temp_storage, _temp_storage_bytes) )
}
// Runs the reduction and returns the sum of the bound input array.
template<typename T>
T CubReduce<T>::run() noexcept {
hipcub::DeviceReduce::Sum(_d_temp_storage, _temp_storage_bytes,
_d_in, _d_out, _num_items);
// FIX: the host result buffer was declared `int` regardless of T, which
// truncated/garbled results for non-int element types; it must be T.
T h_result;
// NOTE(review): the value is returned right after an async copy; assumes
// cuMemcpyToHostAsync synchronizes before returning -- confirm.
cuMemcpyToHostAsync(_d_out, h_result);
return h_result;
}
template<typename T>
CubReduce<T>::~CubReduce() noexcept {
cuFree(_d_out);
}
//------------------------------------------------------------------------------
// Segmented sum via hipcub::DeviceSegmentedReduce::Sum.
// d_offsets must hold num_segments + 1 entries; d_out is allocated here
// (one sum per segment) and handed back to the caller by reference.
template<typename T>
CubSegmentedReduce<T>::CubSegmentedReduce(int* d_offsets, const T* d_in,
int num_segments, T*& d_out) :
CubWrapper(num_segments), _d_in(d_in),
_d_out(d_out), _d_offsets(d_offsets) {
// Sizing pass: CUB only computes _temp_storage_bytes here, the data
// pointers are not dereferenced.
hipcub::DeviceSegmentedReduce::Sum(_d_temp_storage, _temp_storage_bytes,
_d_in, _d_out, num_segments,
_d_offsets, _d_offsets + 1);
SAFE_CALL( hipMalloc(&_d_temp_storage, _temp_storage_bytes) )
cuMalloc(d_out, num_segments);
// FIX: _d_out was initialized from d_out *before* the allocation just
// above, so (unless the member is itself a reference) it retained the
// caller's stale pointer value and run() would write through garbage;
// rebind it to the freshly allocated buffer (no-op if it is a reference).
_d_out = d_out;
}
template<typename T>
CubSegmentedReduce<T>::~CubSegmentedReduce() noexcept {
// Owns the per-segment output buffer allocated in the constructor.
cuFree(_d_out);
}
// Runs the segmented sum; one result per segment is written to _d_out.
template<typename T>
void CubSegmentedReduce<T>::run() noexcept {
hipcub::DeviceSegmentedReduce::Sum(_d_temp_storage, _temp_storage_bytes,
_d_in, _d_out, _num_items,
_d_offsets, _d_offsets + 1);
}
//------------------------------------------------------------------------------
// Sparse matrix-vector product (CSR layout) wrapper.
// NOTE(review): uses cub::DeviceSpmv while the rest of this hipified file
// uses the hipcub:: namespace -- likely a hipify leftover; verify that
// this symbol resolves under HIP.
template<typename T>
CubSpMV<T>::CubSpMV(T* d_value, int* d_row_offsets, int* d_column_indices,
T* d_vector_x, T* d_vector_y,
int num_rows, int num_cols, int num_nonzeros) :
CubWrapper(0),
_d_row_offsets(d_row_offsets),
_d_column_indices(d_column_indices),
_d_values(d_value),
_d_vector_x(d_vector_x), _d_vector_y(d_vector_y),
_num_rows(num_rows), _num_cols(num_cols),
_num_nonzeros(num_nonzeros) {
// Sizing pass: only computes _temp_storage_bytes.
cub::DeviceSpmv::CsrMV(_d_temp_storage, _temp_storage_bytes,
_d_values, _d_row_offsets, _d_column_indices,
_d_vector_x, _d_vector_y,
_num_rows, _num_cols, _num_nonzeros);
SAFE_CALL( hipMalloc(&_d_temp_storage, _temp_storage_bytes) )
}
/*
template<typename T>
CubSpMV<T>::~CubSpMV() noexcept {
cuFree(_d_out);
}*/
// Computes y = A * x for the CSR matrix bound at construction.
template<typename T>
void CubSpMV<T>::run() noexcept {
cub::DeviceSpmv::CsrMV(_d_temp_storage, _temp_storage_bytes, _d_values,
_d_row_offsets, _d_column_indices,
_d_vector_x, _d_vector_y,
_num_rows, _num_cols, _num_nonzeros);
}
//------------------------------------------------------------------------------
// ArgMax reduction: finds the (index, value) pair of the maximum element.
template<typename T>
CubArgMax<T>::CubArgMax(const T* d_in, size_t num_items) noexcept :
CubWrapper(num_items), _d_in(d_in) {
hipcub::KeyValuePair<int, T>* d_tmp;
cuMalloc(d_tmp, 1);
// FIX: bind the output buffer *before* it is passed to the sizing call;
// previously _d_out was still uninitialized at that point (it was only
// assigned after the SAFE_CALL). Also list the base class first in the
// initializer list to match actual initialization order.
_d_out = d_tmp;
hipcub::DeviceReduce::ArgMax(_d_temp_storage, _temp_storage_bytes, _d_in,
static_cast<hipcub::KeyValuePair<int, T>*>(_d_out),
_num_items);
SAFE_CALL( hipMalloc(&_d_temp_storage, _temp_storage_bytes) )
}
// Runs the reduction and returns {index, value} of the maximum.
template<typename T>
typename std::pair<int, T>
CubArgMax<T>::run() noexcept {
hipcub::DeviceReduce::ArgMax(_d_temp_storage, _temp_storage_bytes, _d_in,
static_cast<hipcub::KeyValuePair<int, T>*>(_d_out),
_num_items);
hipcub::KeyValuePair<int, T> h_out;
cuMemcpyToHost(static_cast<hipcub::KeyValuePair<int, T>*>(_d_out), h_out);
return std::pair<int, T>(h_out.key, h_out.value);
}
//==============================================================================
//==============================================================================
/////////////////
// SortByValue //
/////////////////
// Radix sort of a key array (no attached values).
template<typename T>
CubSortByValue<T>::CubSortByValue(int max_items) noexcept {
initialize(max_items);
}
// Sizes and allocates temp storage for sorts of up to max_items elements
// over the full bit range of T (worst case for later, narrower sorts).
template<typename T>
void CubSortByValue<T>::initialize(int max_items) noexcept {
size_t temp_storage_bytes;
T* d_in = nullptr, *d_sorted = nullptr;
// Sizing pass only: null temp-storage pointer, data pointers unused.
hipcub::DeviceRadixSort::SortKeys(nullptr, temp_storage_bytes,
d_in, d_sorted, max_items,
0, sizeof(T) * 8);
SAFE_CALL( hipMalloc(&_d_temp_storage, temp_storage_bytes) )
}
//------------------------------------------------------------------------------
// Sorts d_in into d_sorted. d_value_max bounds the key range so the radix
// sort can restrict itself to ceil(log2(max)) bits for integral keys.
// NOTE(review): assumes num_items <= the max_items used at initialize();
// confirm callers respect that.
template<typename T>
void CubSortByValue<T>::run(const T* d_in, int num_items, T* d_sorted,
T d_value_max) noexcept {
size_t temp_storage_bytes;
int num_bits = std::is_floating_point<T>::value ? sizeof(T) * 8 :
xlib::ceil_log2(d_value_max);
hipcub::DeviceRadixSort::SortKeys(nullptr, temp_storage_bytes,
d_in, d_sorted, num_items, 0, num_bits);
hipcub::DeviceRadixSort::SortKeys(_d_temp_storage, temp_storage_bytes,
d_in, d_sorted, num_items, 0, num_bits);
}
//------------------------------------------------------------------------------
// One-shot static variant: build a scratch instance, sort, tear down.
template<typename T>
void CubSortByValue<T>::srun(const T* d_in, int num_items, T* d_sorted,
T d_in_max) noexcept {
CubSortByValue<T> cub_instance(num_items);
cub_instance.run(d_in, num_items, d_sorted, d_in_max);
}
//==============================================================================
//==============================================================================
///////////////
// SortByKey //
///////////////
// Radix key-value sort: keys of type T carry values of type R.
template<typename T, typename R>
CubSortByKey<T, R>::CubSortByKey(int max_items) noexcept {
initialize(max_items);
}
// Sizes and allocates temp storage for pair-sorts of up to max_items
// elements over the full bit range of T.
template<typename T, typename R>
void CubSortByKey<T, R>::initialize(int max_items) noexcept {
size_t temp_storage_bytes;
T* d_key = nullptr, *d_key_sorted = nullptr;
R* d_data_in = nullptr, *d_data_out = nullptr;
// Sizing pass only: null temp-storage pointer.
hipcub::DeviceRadixSort::SortPairs(nullptr, temp_storage_bytes,
d_key, d_key_sorted,
d_data_in, d_data_out,
max_items, 0, sizeof(T) * 8);
SAFE_CALL( hipMalloc(&_d_temp_storage, temp_storage_bytes) )
}
//------------------------------------------------------------------------------
// Sorts (d_key, d_data_in) into (d_key_sorted, d_data_out). d_key_max bounds
// the key range so integral keys sort over only ceil(log2(max)) bits.
// NOTE(review): unlike initialize(), this writes the sizing result into the
// member _temp_storage_bytes; assumes num_items <= max_items so the
// pre-allocated buffer is large enough -- confirm callers respect that.
template<typename T, typename R>
void CubSortByKey<T, R>::run(const T* d_key, const R* d_data_in, int num_items,
T* d_key_sorted, R* d_data_out, T d_key_max)
noexcept {
// Floats keep the full bit width; integral keys use the value bound
// (cast through int for the floating-point branch of the conditional).
using U = typename std::conditional<std::is_floating_point<T>::value,
int, T>::type;
int num_bits = std::is_floating_point<T>::value ? sizeof(T) * 8 :
xlib::ceil_log2(static_cast<U>(d_key_max));
hipcub::DeviceRadixSort::SortPairs(nullptr, _temp_storage_bytes,
d_key, d_key_sorted,
d_data_in, d_data_out,
num_items, 0, num_bits);
hipcub::DeviceRadixSort::SortPairs(_d_temp_storage, _temp_storage_bytes,
d_key, d_key_sorted,
d_data_in, d_data_out,
num_items, 0, num_bits);
}
//------------------------------------------------------------------------------
// One-shot static variant: build a scratch instance, sort, tear down.
template<typename T, typename R>
void CubSortByKey<T, R>::srun(const T* d_key, const R* d_data_in,
int num_items, T* d_key_sorted,
R* d_data_out, T d_key_max) noexcept {
CubSortByKey<T, R> cub_instance(num_items);
// FIX: forward d_key_max -- it was silently dropped, so the call either
// failed to compile (run() has no default for it) or sorted with a wrong
// bit range.
cub_instance.run(d_key, d_data_in, num_items, d_key_sorted, d_data_out,
d_key_max);
}
//==============================================================================
//==============================================================================
////////////////
// SortPairs2 //
////////////////
// Pair sort keyed on both columns: run() sorts by R then (stably) by T so
// the result is ordered by T with ties broken by R.
template<typename T, typename R>
CubSortPairs2<T, R>::CubSortPairs2(int max_items, bool internal_allocation)
noexcept {
initialize(max_items, internal_allocation);
}
// Allocates the double-buffers (when requested) and temp storage sized for
// the wider of the two key types (worst case of the two passes in run()).
template<typename T, typename R>
void CubSortPairs2<T, R>::initialize(int max_items, bool internal_allocation)
noexcept {
if (internal_allocation) {
cuMalloc(_d_in1_tmp, max_items);
cuMalloc(_d_in2_tmp, max_items);
}
size_t temp_storage_bytes;
T* d_in1 = nullptr;
R* d_in2 = nullptr;
// Sizing pass only: null temp-storage pointer, data pointers unused.
if (sizeof(T) > sizeof(R)) {
hipcub::DeviceRadixSort::SortPairs(nullptr, temp_storage_bytes,
d_in1, _d_in1_tmp, d_in2, _d_in2_tmp,
max_items, 0, sizeof(T) * 8);
}
else {
hipcub::DeviceRadixSort::SortPairs(nullptr, temp_storage_bytes,
d_in2, _d_in2_tmp, d_in1, _d_in1_tmp,
max_items, 0, sizeof(R) * 8);
}
SAFE_CALL( hipMalloc(&_d_temp_storage, temp_storage_bytes) )
}
template<typename T, typename R>
CubSortPairs2<T, R>::~CubSortPairs2() noexcept {
// NOTE(review): frees _d_in1_tmp/_d_in2_tmp even when initialize() ran
// with internal_allocation == false -- assumes the members default to
// nullptr in that case; verify against the class declaration.
cuFree(_d_in1_tmp, _d_in2_tmp);
}
//------------------------------------------------------------------------------
// Sorts the pair (d_in1, d_in2) by R first, then stably by T, so the final
// order is by d_in1 with ties broken by d_in2. The *_tmp buffers are the
// radix-sort double buffers; d_in1_max/d_in2_max bound the key ranges so
// integral keys sort over only ceil(log2(max)) bits.
template<typename T, typename R>
void CubSortPairs2<T, R>::run(T* d_in1, R* d_in2, int num_items,
T* d_in1_tmp, R* d_in2_tmp,
T d_in1_max, R d_in2_max) noexcept {
int num_bits1 = std::is_floating_point<T>::value ? sizeof(T) * 8 :
xlib::ceil_log2(d_in1_max);
// FIX: the bit count for the R keys must be based on sizeof(R); it was
// computed from sizeof(T), giving a wrong bit range whenever the two key
// types differ in width.
int num_bits2 = std::is_floating_point<R>::value ? sizeof(R) * 8 :
xlib::ceil_log2(d_in2_max);
size_t temp_storage_bytes;
// Pass 1: sort by the secondary key (R).
hipcub::DeviceRadixSort::SortPairs(nullptr, temp_storage_bytes,
d_in2, d_in2_tmp, d_in1, d_in1_tmp,
num_items, 0, num_bits2);
hipcub::DeviceRadixSort::SortPairs(_d_temp_storage, temp_storage_bytes,
d_in2, d_in2_tmp, d_in1, d_in1_tmp,
num_items, 0, num_bits2);
// Pass 2: stable sort by the primary key (T), restoring into d_in1/d_in2.
hipcub::DeviceRadixSort::SortPairs(nullptr, temp_storage_bytes,
d_in1_tmp, d_in1, d_in2_tmp, d_in2,
num_items, 0, num_bits1);
hipcub::DeviceRadixSort::SortPairs(_d_temp_storage, temp_storage_bytes,
d_in1_tmp, d_in1, d_in2_tmp, d_in2,
num_items, 0, num_bits1);
}
// In-place convenience overload using the internally allocated buffers.
template<typename T, typename R>
void CubSortPairs2<T, R>::run(T* d_in1, R* d_in2, int num_items,
T d_in1_max, R d_in2_max) noexcept {
run(d_in1, d_in2, num_items, _d_in1_tmp, _d_in2_tmp, d_in1_max, d_in2_max);
}
//------------------------------------------------------------------------------
// One-shot static variant with internally allocated temp buffers.
template<typename T, typename R>
void CubSortPairs2<T, R>::srun(T* d_in1, R* d_in2, int num_items,
T d_in1_max, R d_in2_max) noexcept {
CubSortPairs2<T, R> cub_instance(num_items, true);
cub_instance.run(d_in1, d_in2, num_items, d_in1_max, d_in2_max);
}
// One-shot static variant with caller-provided temp buffers.
template<typename T, typename R>
void CubSortPairs2<T, R>::srun(T* d_in1, R* d_in2, int num_items,
T* d_in1_tmp, R* d_in2_tmp,
T d_in1_max, R d_in2_max) noexcept {
CubSortPairs2<T, R> cub_instance(num_items, false);
cub_instance.run(d_in1, d_in2, num_items, d_in1_tmp, d_in2_tmp,
d_in1_max, d_in2_max);
}
//==============================================================================
//==============================================================================
/////////////////////
// RunLengthEncode //
/////////////////////
// Run-length encoding: collapses runs of equal values into (value, count).
template<typename T>
CubRunLengthEncode<T>::CubRunLengthEncode(int max_items) noexcept {
initialize(max_items);
}
template<typename T>
CubRunLengthEncode<T>::~CubRunLengthEncode() noexcept {
cuFree(_d_num_runs_out);
}
// Allocates the device run counter and temp storage for up to max_items.
template<typename T>
void CubRunLengthEncode<T>::initialize(int max_items) noexcept {
cuMalloc(_d_num_runs_out, 1);
T* d_in = nullptr, *d_unique_out = nullptr;
int* d_counts_out = nullptr;
size_t temp_storage_bytes;
// Sizing pass only (null temp-storage pointer).
hipcub::DeviceRunLengthEncode::Encode(nullptr, temp_storage_bytes,
d_in, d_unique_out, d_counts_out,
_d_num_runs_out, max_items);
SAFE_CALL( hipMalloc(&_d_temp_storage, temp_storage_bytes) )
}
//------------------------------------------------------------------------------
// Encodes d_in; writes unique values and run lengths, returns the number of
// runs. NOTE(review): assumes num_items <= the max_items from initialize().
template<typename T>
int CubRunLengthEncode<T>::run(const T* d_in, int num_items,
T* d_unique_out, int* d_counts_out) noexcept {
size_t temp_storage_bytes;
hipcub::DeviceRunLengthEncode::Encode(nullptr, temp_storage_bytes,
d_in, d_unique_out, d_counts_out,
_d_num_runs_out, num_items);
hipcub::DeviceRunLengthEncode::Encode(_d_temp_storage, temp_storage_bytes,
d_in, d_unique_out, d_counts_out,
_d_num_runs_out, num_items);
int h_num_runs_out;
// NOTE(review): value is returned right after an async copy; assumes
// cuMemcpyToHostAsync synchronizes before returning -- confirm.
cuMemcpyToHostAsync(_d_num_runs_out, h_num_runs_out);
return h_num_runs_out;
}
//------------------------------------------------------------------------------
// One-shot static variant: build a scratch instance, encode, tear down.
template<typename T>
int CubRunLengthEncode<T>::srun(const T* d_in, int num_items, T* d_unique_out,
int* d_counts_out) noexcept {
CubRunLengthEncode<T> cub_instance(num_items);
return cub_instance.run(d_in, num_items, d_unique_out, d_counts_out);
}
//==============================================================================
//==============================================================================
//////////////////
// ExclusiveSum //
//////////////////
// Exclusive prefix scan, backed by hipcub::DeviceScan::ExclusiveSum.
template<typename T>
CubExclusiveSum<T>::CubExclusiveSum(int max_items) noexcept {
    initialize(max_items);
}
// Pre-allocates temporary storage large enough for scans of up to
// `max_items` elements.
template<typename T>
void CubExclusiveSum<T>::initialize(int max_items) noexcept {
    T* null_in  = nullptr;
    T* null_out = nullptr;
    size_t storage_bytes;
    // With a null temp-storage pointer CUB only reports the required size.
    hipcub::DeviceScan::ExclusiveSum(nullptr, storage_bytes,
                                     null_in, null_out, max_items);
    SAFE_CALL( hipMalloc(&_d_temp_storage, storage_bytes) )
}
//------------------------------------------------------------------------------
// Out-of-place scan of num_items elements from d_in into d_out.
template<typename T>
void CubExclusiveSum<T>::run(const T* d_in, int num_items, T* d_out)
                             const noexcept {
    size_t storage_bytes;
    hipcub::DeviceScan::ExclusiveSum(nullptr, storage_bytes,
                                     d_in, d_out, num_items);
    hipcub::DeviceScan::ExclusiveSum(_d_temp_storage, storage_bytes,
                                     d_in, d_out, num_items);
}
// In-place overload.
template<typename T>
void CubExclusiveSum<T>::run(T* d_in_out, int num_items) const noexcept {
    run(d_in_out, num_items, d_in_out);
}
//------------------------------------------------------------------------------
// One-shot static variants: build a scratch instance, scan, tear down.
template<typename T>
void CubExclusiveSum<T>::srun(const T* d_in, int num_items, T* d_out) noexcept {
    CubExclusiveSum<T> scratch(num_items);
    scratch.run(d_in, num_items, d_out);
}
template<typename T>
void CubExclusiveSum<T>::srun(T* d_in_out, int num_items) noexcept {
    CubExclusiveSum::srun(d_in_out, num_items, d_in_out);
}
//==============================================================================
//==============================================================================
///////////////////
// SelectFlagged //
///////////////////
// Stream compaction: copies the elements whose boolean flag is set.
template<typename T>
CubSelectFlagged<T>::CubSelectFlagged(int max_items) noexcept {
initialize(max_items);
}
template<typename T>
CubSelectFlagged<T>::~CubSelectFlagged() noexcept {
cuFree(_d_num_selected_out);
}
// Allocates the device selection counter and temp storage for max_items.
template<typename T>
void CubSelectFlagged<T>::initialize(int max_items) noexcept {
cuMalloc(_d_num_selected_out, 1);
size_t temp_storage_bytes;
T* d_in = nullptr, *d_out = nullptr;
bool* d_flags = nullptr;
// Sizing pass only (null temp-storage pointer).
hipcub::DeviceSelect::Flagged(nullptr, temp_storage_bytes, d_in,
d_flags, d_out, _d_num_selected_out,
max_items);
SAFE_CALL( hipMalloc(&_d_temp_storage, temp_storage_bytes) )
}
//------------------------------------------------------------------------------
// Compacts d_in into d_out using d_flags; returns the selected count.
template<typename T>
int CubSelectFlagged<T>::run(const T* d_in, int num_items,
const bool* d_flags, T* d_out) noexcept {
size_t temp_storage_bytes;
hipcub::DeviceSelect::Flagged(nullptr, temp_storage_bytes, d_in,
d_flags, d_out, _d_num_selected_out,
num_items);
hipcub::DeviceSelect::Flagged(_d_temp_storage, temp_storage_bytes, d_in,
d_flags, d_out, _d_num_selected_out,
num_items);
int h_num_selected_out;
// NOTE(review): value is returned right after an async copy; assumes
// cuMemcpyToHostAsync synchronizes before returning -- confirm.
cuMemcpyToHostAsync(_d_num_selected_out, h_num_selected_out);
return h_num_selected_out;
}
// In-place overload.
template<typename T>
int CubSelectFlagged<T>::run(T* d_in_out, int num_items, const bool* d_flags)
noexcept {
return run(d_in_out, num_items, d_flags, d_in_out);
}
//------------------------------------------------------------------------------
// One-shot static variants: build a scratch instance, compact, tear down.
template<typename T>
int CubSelectFlagged<T>::srun(const T* d_in, int num_items, const bool* d_flags,
T* d_out) noexcept {
CubSelectFlagged cub_instance(num_items);
return cub_instance.run(d_in, num_items, d_flags, d_out);
}
template<typename T>
int CubSelectFlagged<T>::srun(T* d_in_out, int num_items, const bool* d_flags)
noexcept {
return CubSelectFlagged::srun(d_in_out, num_items, d_flags, d_in_out);
};
//==============================================================================
//==============================================================================
template class CubArgMax<int>;
template class CubSortByValue<int>;
template class CubSortByKey<int, int>;
template class CubSortByKey<double, int>;
template class CubSortPairs2<int, int>;
template class CubRunLengthEncode<int>;
template class CubExclusiveSum<int>;
template class CubSelectFlagged<int>;
template class CubSelectFlagged<hornets_nest::AoSData<int>>;
} //namespace xlib
| e872885ddccea6e0e88a33c1f186d9309c1f7a5e.cu | /**
* @internal
* @author Federico Busato <br>
* Univerity of Verona, Dept. of Computer Science <br>
* federico.busato@univr.it
* @date August, 2017
* @version v2
*
* @copyright Copyright © 2017 Hornet. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*
* @file
*/
#include "Device/CubWrapper.cuh"
#include "Device/SafeCudaAPI.cuh"
#include "Device/VectorUtil.cuh"
#include "Host/Numeric.hpp"
#include "Core/DataLayout/DataLayout.cuh" //<-- !!!!
#include <cub.cuh>
namespace xlib {
CubWrapper::CubWrapper(int num_items) noexcept : _num_items(num_items) {}
void CubWrapper::initialize(int num_items) noexcept {
_num_items = num_items;
}
CubWrapper::~CubWrapper() noexcept {
cuFree(_d_temp_storage);
}
//==============================================================================
//==============================================================================
// Sum-reduction over a device array via cub::DeviceReduce::Sum.
// The constructor allocates the single-cell output buffer and sizes the
// temporary storage; run() performs the reduction and copies the result back.
template<typename T>
CubReduce<T>::CubReduce(const T* d_in, size_t num_items) noexcept :
CubWrapper(num_items), _d_in(d_in) {
cuMalloc(_d_out, 1);
// Sizing pass: with a null temp-storage pointer CUB only fills
// _temp_storage_bytes (assumes _d_temp_storage starts out nullptr).
cub::DeviceReduce::Sum(_d_temp_storage, _temp_storage_bytes,
_d_in, _d_out, _num_items);
SAFE_CALL( cudaMalloc(&_d_temp_storage, _temp_storage_bytes) )
}
// Runs the reduction and returns the sum of the bound input array.
template<typename T>
T CubReduce<T>::run() noexcept {
cub::DeviceReduce::Sum(_d_temp_storage, _temp_storage_bytes,
_d_in, _d_out, _num_items);
// FIX: the host result buffer was declared `int` regardless of T, which
// truncated/garbled results for non-int element types; it must be T.
T h_result;
// NOTE(review): the value is returned right after an async copy; assumes
// cuMemcpyToHostAsync synchronizes before returning -- confirm.
cuMemcpyToHostAsync(_d_out, h_result);
return h_result;
}
template<typename T>
CubReduce<T>::~CubReduce() noexcept {
cuFree(_d_out);
}
//------------------------------------------------------------------------------
template<typename T>
CubSegmentedReduce<T>::CubSegmentedReduce(int* d_offsets, const T* d_in,
int num_segments, T*& d_out) :
CubWrapper(num_segments), _d_in(d_in),
_d_out(d_out), _d_offsets(d_offsets) {
cub::DeviceSegmentedReduce::Sum(_d_temp_storage, _temp_storage_bytes,
_d_in, _d_out, num_segments,
_d_offsets, _d_offsets + 1);
SAFE_CALL( cudaMalloc(&_d_temp_storage, _temp_storage_bytes) )
cuMalloc(d_out, num_segments);
}
template<typename T>
CubSegmentedReduce<T>::~CubSegmentedReduce() noexcept {
cuFree(_d_out);
}
template<typename T>
void CubSegmentedReduce<T>::run() noexcept {
cub::DeviceSegmentedReduce::Sum(_d_temp_storage, _temp_storage_bytes,
_d_in, _d_out, _num_items,
_d_offsets, _d_offsets + 1);
}
//------------------------------------------------------------------------------
template<typename T>
CubSpMV<T>::CubSpMV(T* d_value, int* d_row_offsets, int* d_column_indices,
T* d_vector_x, T* d_vector_y,
int num_rows, int num_cols, int num_nonzeros) :
CubWrapper(0),
_d_row_offsets(d_row_offsets),
_d_column_indices(d_column_indices),
_d_values(d_value),
_d_vector_x(d_vector_x), _d_vector_y(d_vector_y),
_num_rows(num_rows), _num_cols(num_cols),
_num_nonzeros(num_nonzeros) {
cub::DeviceSpmv::CsrMV(_d_temp_storage, _temp_storage_bytes,
_d_values, _d_row_offsets, _d_column_indices,
_d_vector_x, _d_vector_y,
_num_rows, _num_cols, _num_nonzeros);
SAFE_CALL( cudaMalloc(&_d_temp_storage, _temp_storage_bytes) )
}
/*
template<typename T>
CubSpMV<T>::~CubSpMV() noexcept {
cuFree(_d_out);
}*/
template<typename T>
void CubSpMV<T>::run() noexcept {
cub::DeviceSpmv::CsrMV(_d_temp_storage, _temp_storage_bytes, _d_values,
_d_row_offsets, _d_column_indices,
_d_vector_x, _d_vector_y,
_num_rows, _num_cols, _num_nonzeros);
}
//------------------------------------------------------------------------------
// ArgMax reduction: finds the (index, value) pair of the maximum element.
template<typename T>
CubArgMax<T>::CubArgMax(const T* d_in, size_t num_items) noexcept :
CubWrapper(num_items), _d_in(d_in) {
cub::KeyValuePair<int, T>* d_tmp;
cuMalloc(d_tmp, 1);
// FIX: bind the output buffer *before* it is passed to the sizing call;
// previously _d_out was still uninitialized at that point (it was only
// assigned after the SAFE_CALL). Also list the base class first in the
// initializer list to match actual initialization order.
_d_out = d_tmp;
cub::DeviceReduce::ArgMax(_d_temp_storage, _temp_storage_bytes, _d_in,
static_cast<cub::KeyValuePair<int, T>*>(_d_out),
_num_items);
SAFE_CALL( cudaMalloc(&_d_temp_storage, _temp_storage_bytes) )
}
// Runs the reduction and returns {index, value} of the maximum.
template<typename T>
typename std::pair<int, T>
CubArgMax<T>::run() noexcept {
cub::DeviceReduce::ArgMax(_d_temp_storage, _temp_storage_bytes, _d_in,
static_cast<cub::KeyValuePair<int, T>*>(_d_out),
_num_items);
cub::KeyValuePair<int, T> h_out;
cuMemcpyToHost(static_cast<cub::KeyValuePair<int, T>*>(_d_out), h_out);
return std::pair<int, T>(h_out.key, h_out.value);
}
//==============================================================================
//==============================================================================
/////////////////
// SortByValue //
/////////////////
template<typename T>
CubSortByValue<T>::CubSortByValue(int max_items) noexcept {
initialize(max_items);
}
template<typename T>
void CubSortByValue<T>::initialize(int max_items) noexcept {
size_t temp_storage_bytes;
T* d_in = nullptr, *d_sorted = nullptr;
cub::DeviceRadixSort::SortKeys(nullptr, temp_storage_bytes,
d_in, d_sorted, max_items,
0, sizeof(T) * 8);
SAFE_CALL( cudaMalloc(&_d_temp_storage, temp_storage_bytes) )
}
//------------------------------------------------------------------------------
template<typename T>
void CubSortByValue<T>::run(const T* d_in, int num_items, T* d_sorted,
T d_value_max) noexcept {
size_t temp_storage_bytes;
int num_bits = std::is_floating_point<T>::value ? sizeof(T) * 8 :
xlib::ceil_log2(d_value_max);
cub::DeviceRadixSort::SortKeys(nullptr, temp_storage_bytes,
d_in, d_sorted, num_items, 0, num_bits);
cub::DeviceRadixSort::SortKeys(_d_temp_storage, temp_storage_bytes,
d_in, d_sorted, num_items, 0, num_bits);
}
//------------------------------------------------------------------------------
template<typename T>
void CubSortByValue<T>::srun(const T* d_in, int num_items, T* d_sorted,
T d_in_max) noexcept {
CubSortByValue<T> cub_instance(num_items);
cub_instance.run(d_in, num_items, d_sorted, d_in_max);
}
//==============================================================================
//==============================================================================
///////////////
// SortByKey //
///////////////
template<typename T, typename R>
CubSortByKey<T, R>::CubSortByKey(int max_items) noexcept {
initialize(max_items);
}
template<typename T, typename R>
void CubSortByKey<T, R>::initialize(int max_items) noexcept {
size_t temp_storage_bytes;
T* d_key = nullptr, *d_key_sorted = nullptr;
R* d_data_in = nullptr, *d_data_out = nullptr;
cub::DeviceRadixSort::SortPairs(nullptr, temp_storage_bytes,
d_key, d_key_sorted,
d_data_in, d_data_out,
max_items, 0, sizeof(T) * 8);
SAFE_CALL( cudaMalloc(&_d_temp_storage, temp_storage_bytes) )
}
//------------------------------------------------------------------------------
template<typename T, typename R>
void CubSortByKey<T, R>::run(const T* d_key, const R* d_data_in, int num_items,
T* d_key_sorted, R* d_data_out, T d_key_max)
noexcept {
using U = typename std::conditional<std::is_floating_point<T>::value,
int, T>::type;
int num_bits = std::is_floating_point<T>::value ? sizeof(T) * 8 :
xlib::ceil_log2(static_cast<U>(d_key_max));
cub::DeviceRadixSort::SortPairs(nullptr, _temp_storage_bytes,
d_key, d_key_sorted,
d_data_in, d_data_out,
num_items, 0, num_bits);
cub::DeviceRadixSort::SortPairs(_d_temp_storage, _temp_storage_bytes,
d_key, d_key_sorted,
d_data_in, d_data_out,
num_items, 0, num_bits);
}
//------------------------------------------------------------------------------
// One-shot static variant: build a scratch instance, sort, tear down.
template<typename T, typename R>
void CubSortByKey<T, R>::srun(const T* d_key, const R* d_data_in,
int num_items, T* d_key_sorted,
R* d_data_out, T d_key_max) noexcept {
CubSortByKey<T, R> cub_instance(num_items);
// FIX: forward d_key_max -- it was silently dropped, so the call either
// failed to compile (run() has no default for it) or sorted with a wrong
// bit range.
cub_instance.run(d_key, d_data_in, num_items, d_key_sorted, d_data_out,
d_key_max);
}
//==============================================================================
//==============================================================================
////////////////
// SortPairs2 //
////////////////
template<typename T, typename R>
CubSortPairs2<T, R>::CubSortPairs2(int max_items, bool internal_allocation)
noexcept {
initialize(max_items, internal_allocation);
}
template<typename T, typename R>
void CubSortPairs2<T, R>::initialize(int max_items, bool internal_allocation)
noexcept {
if (internal_allocation) {
cuMalloc(_d_in1_tmp, max_items);
cuMalloc(_d_in2_tmp, max_items);
}
size_t temp_storage_bytes;
T* d_in1 = nullptr;
R* d_in2 = nullptr;
if (sizeof(T) > sizeof(R)) {
cub::DeviceRadixSort::SortPairs(nullptr, temp_storage_bytes,
d_in1, _d_in1_tmp, d_in2, _d_in2_tmp,
max_items, 0, sizeof(T) * 8);
}
else {
cub::DeviceRadixSort::SortPairs(nullptr, temp_storage_bytes,
d_in2, _d_in2_tmp, d_in1, _d_in1_tmp,
max_items, 0, sizeof(R) * 8);
}
SAFE_CALL( cudaMalloc(&_d_temp_storage, temp_storage_bytes) )
}
template<typename T, typename R>
CubSortPairs2<T, R>::~CubSortPairs2() noexcept {
cuFree(_d_in1_tmp, _d_in2_tmp);
}
//------------------------------------------------------------------------------
// Sorts the pair (d_in1, d_in2) by R first, then stably by T, so the final
// order is by d_in1 with ties broken by d_in2. The *_tmp buffers are the
// radix-sort double buffers; d_in1_max/d_in2_max bound the key ranges so
// integral keys sort over only ceil(log2(max)) bits.
template<typename T, typename R>
void CubSortPairs2<T, R>::run(T* d_in1, R* d_in2, int num_items,
T* d_in1_tmp, R* d_in2_tmp,
T d_in1_max, R d_in2_max) noexcept {
int num_bits1 = std::is_floating_point<T>::value ? sizeof(T) * 8 :
xlib::ceil_log2(d_in1_max);
// FIX: the bit count for the R keys must be based on sizeof(R); it was
// computed from sizeof(T), giving a wrong bit range whenever the two key
// types differ in width.
int num_bits2 = std::is_floating_point<R>::value ? sizeof(R) * 8 :
xlib::ceil_log2(d_in2_max);
size_t temp_storage_bytes;
// Pass 1: sort by the secondary key (R).
cub::DeviceRadixSort::SortPairs(nullptr, temp_storage_bytes,
d_in2, d_in2_tmp, d_in1, d_in1_tmp,
num_items, 0, num_bits2);
cub::DeviceRadixSort::SortPairs(_d_temp_storage, temp_storage_bytes,
d_in2, d_in2_tmp, d_in1, d_in1_tmp,
num_items, 0, num_bits2);
// Pass 2: stable sort by the primary key (T), restoring into d_in1/d_in2.
cub::DeviceRadixSort::SortPairs(nullptr, temp_storage_bytes,
d_in1_tmp, d_in1, d_in2_tmp, d_in2,
num_items, 0, num_bits1);
cub::DeviceRadixSort::SortPairs(_d_temp_storage, temp_storage_bytes,
d_in1_tmp, d_in1, d_in2_tmp, d_in2,
num_items, 0, num_bits1);
}
template<typename T, typename R>
void CubSortPairs2<T, R>::run(T* d_in1, R* d_in2, int num_items,
T d_in1_max, R d_in2_max) noexcept {
run(d_in1, d_in2, num_items, _d_in1_tmp, _d_in2_tmp, d_in1_max, d_in2_max);
}
//------------------------------------------------------------------------------
template<typename T, typename R>
void CubSortPairs2<T, R>::srun(T* d_in1, R* d_in2, int num_items,
T d_in1_max, R d_in2_max) noexcept {
CubSortPairs2<T, R> cub_instance(num_items, true);
cub_instance.run(d_in1, d_in2, num_items, d_in1_max, d_in2_max);
}
template<typename T, typename R>
void CubSortPairs2<T, R>::srun(T* d_in1, R* d_in2, int num_items,
T* d_in1_tmp, R* d_in2_tmp,
T d_in1_max, R d_in2_max) noexcept {
CubSortPairs2<T, R> cub_instance(num_items, false);
cub_instance.run(d_in1, d_in2, num_items, d_in1_tmp, d_in2_tmp,
d_in1_max, d_in2_max);
}
//==============================================================================
//==============================================================================
/////////////////////
// RunLengthEncode //
/////////////////////
template<typename T>
CubRunLengthEncode<T>::CubRunLengthEncode(int max_items) noexcept {
initialize(max_items);
}
template<typename T>
CubRunLengthEncode<T>::~CubRunLengthEncode() noexcept {
cuFree(_d_num_runs_out);
}
template<typename T>
void CubRunLengthEncode<T>::initialize(int max_items) noexcept {
cuMalloc(_d_num_runs_out, 1);
T* d_in = nullptr, *d_unique_out = nullptr;
int* d_counts_out = nullptr;
size_t temp_storage_bytes;
cub::DeviceRunLengthEncode::Encode(nullptr, temp_storage_bytes,
d_in, d_unique_out, d_counts_out,
_d_num_runs_out, max_items);
SAFE_CALL( cudaMalloc(&_d_temp_storage, temp_storage_bytes) )
}
//------------------------------------------------------------------------------
template<typename T>
int CubRunLengthEncode<T>::run(const T* d_in, int num_items,
T* d_unique_out, int* d_counts_out) noexcept {
size_t temp_storage_bytes;
cub::DeviceRunLengthEncode::Encode(nullptr, temp_storage_bytes,
d_in, d_unique_out, d_counts_out,
_d_num_runs_out, num_items);
cub::DeviceRunLengthEncode::Encode(_d_temp_storage, temp_storage_bytes,
d_in, d_unique_out, d_counts_out,
_d_num_runs_out, num_items);
int h_num_runs_out;
cuMemcpyToHostAsync(_d_num_runs_out, h_num_runs_out);
return h_num_runs_out;
}
//------------------------------------------------------------------------------
template<typename T>
int CubRunLengthEncode<T>::srun(const T* d_in, int num_items, T* d_unique_out,
int* d_counts_out) noexcept {
CubRunLengthEncode<T> cub_instance(num_items);
return cub_instance.run(d_in, num_items, d_unique_out, d_counts_out);
}
//==============================================================================
//==============================================================================
//////////////////
// ExclusiveSum //
//////////////////
//! Constructs the scanner and pre-allocates the CUB temporary storage needed
//! for exclusive prefix sums over up to `max_items` elements.
template<typename T>
CubExclusiveSum<T>::CubExclusiveSum(int max_items) noexcept {
    initialize(max_items);
}
//! Allocates the CUB temporary storage sized for scans of up to `max_items`
//! elements.
template<typename T>
void CubExclusiveSum<T>::initialize(int max_items) noexcept {
    // Null data pointers: this call is a size query only -- CUB writes the
    // required temporary-storage size into storage_bytes.
    T* dummy_in  = nullptr;
    T* dummy_out = nullptr;
    size_t storage_bytes;
    cub::DeviceScan::ExclusiveSum(nullptr, storage_bytes,
                                  dummy_in, dummy_out, max_items);
    SAFE_CALL( cudaMalloc(&_d_temp_storage, storage_bytes) )
}
//------------------------------------------------------------------------------
//! Computes the exclusive prefix sum of `d_in` into `d_out`.
template<typename T>
void CubExclusiveSum<T>::run(const T* d_in, int num_items, T* d_out)
    const noexcept {
    // First call (null storage pointer) re-queries the required size; the
    // second performs the scan using the pre-allocated buffer.
    size_t storage_bytes;
    cub::DeviceScan::ExclusiveSum(nullptr, storage_bytes, d_in, d_out, num_items);
    cub::DeviceScan::ExclusiveSum(_d_temp_storage, storage_bytes,
                                  d_in, d_out, num_items);
}
//! In-place overload: scans `d_in_out` into itself.
template<typename T>
void CubExclusiveSum<T>::run(T* d_in_out, int num_items) const noexcept {
    run(d_in_out, num_items, d_in_out);
}
//------------------------------------------------------------------------------
//! One-shot convenience wrapper: builds a temporary scanner sized for
//! `num_items` and runs it once.
template<typename T>
void CubExclusiveSum<T>::srun(const T* d_in, int num_items, T* d_out) noexcept {
    CubExclusiveSum<T>(num_items).run(d_in, num_items, d_out);
}
//! In-place one-shot overload: forwards with d_out == d_in_out.
template<typename T>
void CubExclusiveSum<T>::srun(T* d_in_out, int num_items) noexcept {
    CubExclusiveSum::srun(d_in_out, num_items, d_in_out);
}
//==============================================================================
//==============================================================================
///////////////////
// SelectFlagged //
///////////////////
//! Constructs the selector and pre-allocates the CUB temporary storage needed
//! for selecting from up to `max_items` elements (delegates to initialize()).
template<typename T>
CubSelectFlagged<T>::CubSelectFlagged(int max_items) noexcept {
    initialize(max_items);
}
//! Releases the device counter allocated in initialize().
//! NOTE(review): _d_temp_storage is not freed here -- confirm it is released
//! elsewhere, otherwise it leaks.
template<typename T>
CubSelectFlagged<T>::~CubSelectFlagged() noexcept {
    cuFree(_d_num_selected_out);
}
//! Allocates the device counter and the CUB temporary storage sized for
//! selecting up to `max_items` flagged elements.
template<typename T>
void CubSelectFlagged<T>::initialize(int max_items) noexcept {
    cuMalloc(_d_num_selected_out, 1);
    // Null data pointers: this call is a size query only -- CUB writes the
    // required temporary-storage size into storage_bytes.
    T*    dummy_in    = nullptr;
    T*    dummy_out   = nullptr;
    bool* dummy_flags = nullptr;
    size_t storage_bytes;
    cub::DeviceSelect::Flagged(nullptr, storage_bytes, dummy_in,
                               dummy_flags, dummy_out, _d_num_selected_out,
                               max_items);
    SAFE_CALL( cudaMalloc(&_d_temp_storage, storage_bytes) )
}
//------------------------------------------------------------------------------
//! Copies the elements of `d_in` whose corresponding `d_flags` entry is true
//! into `d_out`.
//! \return the number of elements selected.
template<typename T>
int CubSelectFlagged<T>::run(const T* d_in, int num_items,
                             const bool* d_flags, T* d_out) noexcept {
    size_t temp_storage_bytes;
    // First call (null storage pointer) only computes temp_storage_bytes;
    // the second call performs the selection with the pre-allocated buffer.
    cub::DeviceSelect::Flagged(nullptr, temp_storage_bytes, d_in,
                               d_flags, d_out, _d_num_selected_out,
                               num_items);
    cub::DeviceSelect::Flagged(_d_temp_storage, temp_storage_bytes, d_in,
                               d_flags, d_out, _d_num_selected_out,
                               num_items);
    int h_num_selected_out;
    // NOTE(review): returned right after an *Async* copy; presumably
    // cuMemcpyToHostAsync synchronizes before returning -- confirm.
    cuMemcpyToHostAsync(_d_num_selected_out, h_num_selected_out);
    return h_num_selected_out;
}
//! In-place overload: compacts `d_in_out` into itself.
template<typename T>
int CubSelectFlagged<T>::run(T* d_in_out, int num_items, const bool* d_flags)
    noexcept {
    return run(d_in_out, num_items, d_flags, d_in_out);
}
//------------------------------------------------------------------------------
//! One-shot convenience wrapper: builds a temporary selector sized for
//! `num_items`, runs it once, and returns the number of selected elements.
template<typename T>
int CubSelectFlagged<T>::srun(const T* d_in, int num_items, const bool* d_flags,
                              T* d_out) noexcept {
    return CubSelectFlagged<T>(num_items).run(d_in, num_items, d_flags, d_out);
}
//! In-place one-shot overload: forwards with d_out == d_in_out.
//! (Fixed: removed a stray ';' after the function's closing brace.)
template<typename T>
int CubSelectFlagged<T>::srun(T* d_in_out, int num_items, const bool* d_flags)
    noexcept {
    return CubSelectFlagged::srun(d_in_out, num_items, d_flags, d_in_out);
}
//==============================================================================
//==============================================================================
// Explicit template instantiations: the member definitions above live in this
// translation unit, so every (key, value) combination used by clients must be
// instantiated here.
template class CubArgMax<int>;
template class CubSortByValue<int>;
template class CubSortByKey<int, int>;
template class CubSortByKey<double, int>;
template class CubSortPairs2<int, int>;
template class CubRunLengthEncode<int>;
template class CubExclusiveSum<int>;
template class CubSelectFlagged<int>;
template class CubSelectFlagged<hornets_nest::AoSData<int>>;
} //namespace xlib
|
e4fc43e41cb985158ea7fce0450b94a05dd6c927.hip | // !!! This is a file automatically generated by hipify!!!
// Solve the Laplace equation on a 2D lattice with boundary conditions.
//
// compile with the following command:
//
// (for GTX970)
// nvcc -arch=compute_52 -code=sm_52,sm_52 -O3 -m64 -o laplace laplace.cu
//
// (for GTX1060)
// nvcc -arch=compute_61 -code=sm_61,sm_61 -O3 -m64 -o laplace laplace.cu
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// field variables
float* h_new; // host field vectors
float* h_old;
float* h_C; // result of diff*diff of each block (copied back from d_C)
float* g_new; // host copy of the GPU solution (filled from d_new)
float* d_new; // device field vectors
float* d_old;
float* d_C; // per-block partial sums of diff*diff computed by the kernel
int MAX=1000000; // maximum iterations
double eps=1.0e-10; // stopping criterion
//! One Jacobi sweep of the 3D Laplace solver plus a block-level reduction of
//! the squared residual.
//! \param phi_old  field at the previous half-iteration
//! \param phi_new  field at the current half-iteration
//! \param C        per-block output: sum of diff*diff over the block
//! \param flag     true: read phi_old, write phi_new; false: the reverse
//! Launch notes: the grid must cover the lattice exactly (one thread per
//! site); dynamic shared memory of blockDim.x*y*z floats is required, and the
//! reduction below assumes that product is a power of two.
__global__ void laplacian(float* phi_old, float* phi_new, float* C, bool flag)
{
    extern __shared__ float cache[];
    float t, l, r, b, u, d; // top, left, right, bottom, up, down
    // BUG FIX: diff must be zero-initialized. Boundary threads skip the
    // update below, and previously contributed an *uninitialized* value to
    // the error reduction, corrupting the convergence test.
    float diff = 0.0f;
    int site, ym1, xm1, zm1, xp1, yp1, zp1;
    int Nx = blockDim.x*gridDim.x; // number of sites in x direction
    int Ny = blockDim.y*gridDim.y; // number of sites in y direction
    int Nz = blockDim.z*gridDim.z; // number of sites in z direction
    int x = blockDim.x*blockIdx.x + threadIdx.x;
    int y = blockDim.y*blockIdx.y + threadIdx.y;
    int z = blockDim.z*blockIdx.z + threadIdx.z;
    int cacheIndex = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.x*blockDim.y;
    site = x + y*Nx + z*Nx*Ny;
    if((x == 0) || (x == Nx-1) || (y == 0) || (y == Ny-1) || (z == 0) || (z == Nz - 1)) {
        // Boundary sites are fixed (Dirichlet); diff stays 0.
    }
    else {
        xm1 = site - 1;      // x-1
        xp1 = site + 1;      // x+1
        ym1 = site - Nx;     // y-1
        yp1 = site + Nx;     // y+1
        zm1 = site - Nx*Ny;  // z-1
        zp1 = site + Nx*Ny;  // z+1
        if(flag) {
            b = phi_old[ym1];
            l = phi_old[xm1];
            d = phi_old[zm1];
            r = phi_old[xp1];
            t = phi_old[yp1];
            u = phi_old[zp1];
            phi_new[site] = (1.0/6.0)*(b + l + r + t + u + d);
        }
        else {
            b = phi_new[ym1];
            l = phi_new[xm1];
            d = phi_new[zm1];
            r = phi_new[xp1];
            t = phi_new[yp1];
            u = phi_new[zp1];
            phi_old[site] = (1.0/6.0)*(b + l + r + t + u + d);
        }
        diff = phi_new[site]-phi_old[site];
    }
    cache[cacheIndex]=diff*diff;
    __syncthreads();
    // Tree reduction over the block; assumes the thread count per block is a
    // power of two (odd counts would drop elements).
    int ib = (blockDim.x * blockDim.y * blockDim.z)/ 2;
    while (ib != 0) {
        if(cacheIndex < ib)
            cache[cacheIndex] += cache[cacheIndex + ib];
        __syncthreads();
        ib /=2;
    }
    int blockIndex = blockIdx.x + gridDim.x*blockIdx.y + gridDim.x*gridDim.y*blockIdx.z;
    if(cacheIndex == 0) C[blockIndex] = cache[0];
}
//! Interactive driver: reads GPU id, lattice size, block shape, and the
//! CPU/GPU/both switch from stdin; solves the 3D Laplace equation by Jacobi
//! iteration on the GPU and/or CPU, timing each phase with HIP events and
//! writing the fields to phi_*_3D.dat files.
int main(void)
{
    int gid;               // GPU_ID
    int iter;
    volatile bool flag;    // to toggle between *_new and *_old
    float cputime;
    float gputime = 0.0;
    // FIX: gputime_tot must be initialized -- it is read in the CPU branch
    // (speed-up printout) even when the GPU branch is skipped (CPU == 0);
    // previously that read was of an uninitialized variable.
    float gputime_tot = 0.0;
    double error;
    printf("Enter the GPU ID (0/1): ");
    scanf("%d",&gid);
    printf("%d\n",gid);
    // Error code to check return values for CUDA calls
    hipError_t err = hipSuccess;
    err = hipSetDevice(gid);
    if (err != hipSuccess) {
        printf("!!! Cannot select GPU with device ID = %d\n", gid);
        exit(1);
    }
    printf("Select GPU with device ID = %d\n", gid);
    hipSetDevice(gid);
    printf("Solve Laplace equation on a 3D lattice with boundary conditions\n");
    int Nx, Ny, Nz;        // lattice size
    printf("Enter the size (Nx, Ny, Nz) of the 3D lattice: ");
    scanf("%d %d %d", &Nx, &Ny, &Nz);
    printf("%d %d %d\n", Nx, Ny, Nz);
    // Set the number of threads (tx,ty,tz) per block
    int tx, ty, tz;
    printf("Enter the number of threads (tx, ty, tz) per block: ");
    scanf("%d %d %d", &tx, &ty, &tz);
    printf("%d %d %d\n", tx, ty, tz);
    if( tx > 1024 ) {
        printf("Max dimension size of a thread block (x,y,z): (1024, 1024, 64)\n");
        exit(0);
    }
    if( ty > 1024 ) {
        printf("Max dimension size of a thread block (x,y,z): (1024, 1024, 64)\n");
        exit(0);
    }
    if( tz > 64 ) {
        printf("Max dimension size of a thread block (x,y,z): (1024, 1024, 64)\n");
        exit(0);
    }
    if( tx * ty * tz > 1024 ) {
        printf("The number of threads per block must be less than 1024 ! \n");
        exit(0);
    }
    dim3 threads(tx, ty, tz);
    // The total number of threads in the grid is equal to the total number of lattice sites
    int bx = Nx / tx;
    if(bx * tx != Nx) {
        printf("The block size in x is incorrect\n");
        exit(0);
    }
    int by = Ny / ty;
    if(by * ty != Ny) {
        printf("The block size in y is incorrect\n");
        exit(0);
    }
    int bz = Nz / tz;
    if(bz * tz != Nz) {
        printf("The block size in z is incorrect\n");
        exit(0);
    }
    if((bx > 2147483647) || (by > 65535) || (bz > 65535)) {
        printf("The grid size exceeds the limit ! \n");
        exit(0);
    }
    dim3 blocks(bx, by, bz);
    printf("The dimension of the grid is (%d, %d, %d)\n", bx, by, bz);
    int CPU;
    printf("To compute the solution vector with CPU/GPU/both (0/1/2) ? ");
    scanf("%d",&CPU);
    printf("%d\n",CPU);
    fflush(stdout);
    // Allocate field vector h_phi in host memory.
    // NOTE(review): N * sizeof(float) is evaluated in 'int'; this overflows
    // for very large lattices -- confirm intended size limits.
    int N = Nx * Ny * Nz;
    int size = N * sizeof(float);
    int sb = bx * by * bz * sizeof(float);
    h_old = (float*)malloc(size);
    h_new = (float*)malloc(size);
    g_new = (float*)malloc(size);
    h_C = (float*)malloc(sb);
    // Initialize the array to 0
    memset(h_old, 0, size);
    memset(h_new, 0, size);
    // Initialize the field vector with boundary conditions:
    // the z = Nz-1 face is held at 1.0, all other boundaries at 0.
    for(int x = 0; x < Nx; x = x+1) {
        for(int y = 0; y < Ny; y = y+1){
            h_new[x + Nx*y + Nx*Ny*(Nz-1)]=1.0;
            h_old[x + Nx*y + Nx*Ny*(Nz-1)]=1.0;
        }
    }
    FILE *out1; // save initial configuration in phi_initial.dat
    out1 = fopen("phi_initial_3D.dat","w");
    for(int k = Nz-1; k > -1; k = k-1){
        fprintf(out1, "z = %d\n", k);
        for(int j = Ny-1; j > -1; j = j-1){
            for(int i = 0; i < Nx; i = i+1){
                fprintf(out1,"%.2e ",h_new[i + j*Nx + k*Nx*Ny]);
            }
            fprintf(out1,"\n");
        }
    }
    fclose(out1);
    printf("\n");
    // create the timer
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    if(CPU>0) {
        // start the timer
        hipEventRecord(start,0);
        // Allocate vectors in device memory
        hipMalloc((void**)&d_new, size);
        hipMalloc((void**)&d_old, size);
        hipMalloc((void**)&d_C, sb);
        // Copy vectors from host memory to device memory
        hipMemcpy(d_new, h_new, size, hipMemcpyHostToDevice);
        hipMemcpy(d_old, h_old, size, hipMemcpyHostToDevice);
        // stop the timer
        hipEventRecord(stop,0);
        hipEventSynchronize(stop);
        float Intime;
        hipEventElapsedTime( &Intime, start, stop);
        printf("Input time for GPU: %f (ms) \n",Intime);
        // start the timer
        hipEventRecord(start,0);
        error = 10*eps;  // any value bigger than eps is OK
        iter = 0;        // counter for iterations
        flag = true;
        int sm = tx * ty * tz * sizeof(float); // size of the shared memory in each block
        // Jacobi iteration: each sweep alternates the roles of d_old/d_new
        // (selected by 'flag'); the per-block residuals are summed on the
        // host to form the global error.
        while ( (error > eps) && (iter < MAX) ) {
            hipLaunchKernelGGL(( laplacian), dim3(blocks), dim3(threads), sm, 0, d_old, d_new, d_C, flag);
            hipMemcpy(h_C, d_C, sb, hipMemcpyDeviceToHost);
            error = 0.0;
            for(int i = 0; i < bx * by * bz; i = i+1) {
                error = error + h_C[i];
            }
            error = sqrt(error);
            iter++;
            flag = !flag;
        }
        printf("error (GPU) = %.15e\n",error);
        printf("total iterations (GPU) = %d\n",iter);
        // stop the timer
        hipEventRecord(stop,0);
        hipEventSynchronize(stop);
        hipEventElapsedTime( &gputime, start, stop);
        printf("Processing time for GPU: %f (ms) \n",gputime);
        // Copy result from device memory to host memory
        // start the timer
        hipEventRecord(start,0);
        // Because after the iteration, d_new and d_old are basically the same.
        hipMemcpy(g_new, d_new, size, hipMemcpyDeviceToHost);
        hipFree(d_new);
        hipFree(d_old);
        hipFree(d_C);
        // stop the timer
        hipEventRecord(stop,0);
        hipEventSynchronize(stop);
        float Outime;
        hipEventElapsedTime( &Outime, start, stop);
        printf("Output time for GPU: %f (ms) \n",Outime);
        gputime_tot = Intime + gputime + Outime;
        printf("Total time for GPU: %f (ms) \n",gputime_tot);
        fflush(stdout);
        FILE *outg; // save GPU solution in phi_GPU.dat
        outg = fopen("phi_GPU_3D.dat","w");
        for(int k = Nz-1; k > -1; k = k-1){
            fprintf(outg, "z = %d\n", k);
            for(int j = Ny-1; j > -1; j = j-1){
                for(int i = 0; i < Nx; i = i+1){
                    fprintf(outg, "%.2e ",g_new[i + j*Nx + k*Nx*Ny]);
                }
                fprintf(outg,"\n");
            }
        }
        fclose(outg);
        printf("\n");
    }
    if(CPU==1) { // not to compute the CPU solution
        free(h_new);
        free(h_old);
        free(g_new);
        free(h_C);
        hipDeviceReset();
        exit(0);
    }
    if((CPU==0)||(CPU==2)) { // to compute the CPU solution
        // start the timer
        hipEventRecord(start,0);
        // to compute the reference solution
        error = 10*eps; // any value bigger than eps
        iter = 0;       // counter for iterations
        flag = true;
        double diff;
        float t, l, r, b, u, d; // top, left, right, bottom, up, down
        int site, ym1, xm1, zm1, xp1, yp1, zp1;
        while ( (error > eps) && (iter < MAX) ) {
            if(flag) {
                error = 0.0;
                for(int z = 0; z < Nz; z = z+1){
                    for(int y = 0; y < Ny; y = y+1) {
                        for(int x = 0; x < Nx; x = x+1) {
                            if(x==0 || x==Nx-1 || y==0 || y==Ny-1 || z==0 || z==Nz-1) {
                                // Do nothing on the boundary
                            }
                            else {
                                site = x + y*Nx + z*Nx*Ny;
                                xm1 = site - 1;      // x-1
                                xp1 = site + 1;      // x+1
                                ym1 = site - Nx;     // y-1
                                yp1 = site + Nx;     // y+1
                                zm1 = site - Nx*Ny;  // z-1
                                zp1 = site + Nx*Ny;  // z+1
                                b = h_old[ym1];
                                l = h_old[xm1];
                                d = h_old[zm1];
                                r = h_old[xp1];
                                t = h_old[yp1];
                                u = h_old[zp1];
                                h_new[site] = (1.0/6.0)*(b + l + r + t + u + d);
                                diff = h_new[site]-h_old[site];
                                error = error + diff*diff;
                            }
                        }
                    }
                }
            }
            else{
                error = 0.0;
                for(int z = 0; z < Nz; z = z+1){
                    for(int y = 0; y < Ny; y = y+1) {
                        for(int x = 0; x < Nx; x = x+1) {
                            if(x==0 || x==Nx-1 || y==0 || y==Ny-1 || z==0 || z==Nz-1) {
                                // Do nothing on the boundary
                            }
                            else {
                                site = x + y*Nx + z*Nx*Ny;
                                xm1 = site - 1;      // x-1
                                xp1 = site + 1;      // x+1
                                ym1 = site - Nx;     // y-1
                                yp1 = site + Nx;     // y+1
                                zm1 = site - Nx*Ny;  // z-1
                                zp1 = site + Nx*Ny;  // z+1
                                b = h_new[ym1];
                                l = h_new[xm1];
                                d = h_new[zm1];
                                r = h_new[xp1];
                                t = h_new[yp1];
                                u = h_new[zp1];
                                h_old[site] = (1.0/6.0)*(b + l + r + t + u + d);
                                diff = h_new[site]-h_old[site];
                                error = error + diff*diff;
                            }
                        }
                    }
                }
            }
            flag = !flag;
            iter = iter + 1;
            error = sqrt(error);
        } // exit if error < eps
        printf("error (CPU) = %.15e\n",error);
        printf("total iterations (CPU) = %d\n",iter);
        // stop the timer
        hipEventRecord(stop,0);
        hipEventSynchronize(stop);
        hipEventElapsedTime( &cputime, start, stop);
        printf("Processing time for CPU: %f (ms) \n",cputime);
        // When CPU==0 the GPU branch was skipped and gputime_tot is 0, so
        // the speed-up below prints 'inf' rather than reading garbage.
        printf("Speed up of GPU = %f\n", cputime/(gputime_tot));
        fflush(stdout);
        // destroy the timer
        hipEventDestroy(start);
        hipEventDestroy(stop);
        FILE *outc; // save CPU solution in phi_CPU.dat
        outc = fopen("phi_CPU_3D.dat","w");
        for(int k = Nz-1; k > -1; k = k-1){
            fprintf(outc, "z = %d\n", k);
            for(int j = Ny-1; j > -1; j = j-1){
                for(int i = 0; i < Nx; i = i+1){
                    fprintf(outc, "%.2e ",h_new[i + j*Nx + k*Nx*Ny]);
                }
                fprintf(outc,"\n");
            }
        }
        fclose(outc);
        printf("\n");
        free(h_new);
        free(h_old);
        free(g_new);
        free(h_C);
    }
    hipDeviceReset();
}
| e4fc43e41cb985158ea7fce0450b94a05dd6c927.cu | // Solve the Laplace equation on a 2D lattice with boundary conditions.
//
// compile with the following command:
//
// (for GTX970)
// nvcc -arch=compute_52 -code=sm_52,sm_52 -O3 -m64 -o laplace laplace.cu
//
// (for GTX1060)
// nvcc -arch=compute_61 -code=sm_61,sm_61 -O3 -m64 -o laplace laplace.cu
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// field variables
float* h_new; // host field vectors
float* h_old;
float* h_C; // result of diff*diff of each block (copied back from d_C)
float* g_new; // host copy of the GPU solution (filled from d_new)
float* d_new; // device field vectors
float* d_old;
float* d_C; // per-block partial sums of diff*diff computed by the kernel
int MAX=1000000; // maximum iterations
double eps=1.0e-10; // stopping criterion
//! One Jacobi sweep of the 3D Laplace solver plus a block-level reduction of
//! the squared residual.
//! \param phi_old  field at the previous half-iteration
//! \param phi_new  field at the current half-iteration
//! \param C        per-block output: sum of diff*diff over the block
//! \param flag     true: read phi_old, write phi_new; false: the reverse
//! Launch notes: the grid must cover the lattice exactly (one thread per
//! site); dynamic shared memory of blockDim.x*y*z floats is required, and the
//! reduction below assumes that product is a power of two.
__global__ void laplacian(float* phi_old, float* phi_new, float* C, bool flag)
{
    extern __shared__ float cache[];
    float t, l, r, b, u, d; // top, left, right, bottom, up, down
    // BUG FIX: diff must be zero-initialized. Boundary threads skip the
    // update below, and previously contributed an *uninitialized* value to
    // the error reduction, corrupting the convergence test.
    float diff = 0.0f;
    int site, ym1, xm1, zm1, xp1, yp1, zp1;
    int Nx = blockDim.x*gridDim.x; // number of sites in x direction
    int Ny = blockDim.y*gridDim.y; // number of sites in y direction
    int Nz = blockDim.z*gridDim.z; // number of sites in z direction
    int x = blockDim.x*blockIdx.x + threadIdx.x;
    int y = blockDim.y*blockIdx.y + threadIdx.y;
    int z = blockDim.z*blockIdx.z + threadIdx.z;
    int cacheIndex = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.x*blockDim.y;
    site = x + y*Nx + z*Nx*Ny;
    if((x == 0) || (x == Nx-1) || (y == 0) || (y == Ny-1) || (z == 0) || (z == Nz - 1)) {
        // Boundary sites are fixed (Dirichlet); diff stays 0.
    }
    else {
        xm1 = site - 1;      // x-1
        xp1 = site + 1;      // x+1
        ym1 = site - Nx;     // y-1
        yp1 = site + Nx;     // y+1
        zm1 = site - Nx*Ny;  // z-1
        zp1 = site + Nx*Ny;  // z+1
        if(flag) {
            b = phi_old[ym1];
            l = phi_old[xm1];
            d = phi_old[zm1];
            r = phi_old[xp1];
            t = phi_old[yp1];
            u = phi_old[zp1];
            phi_new[site] = (1.0/6.0)*(b + l + r + t + u + d);
        }
        else {
            b = phi_new[ym1];
            l = phi_new[xm1];
            d = phi_new[zm1];
            r = phi_new[xp1];
            t = phi_new[yp1];
            u = phi_new[zp1];
            phi_old[site] = (1.0/6.0)*(b + l + r + t + u + d);
        }
        diff = phi_new[site]-phi_old[site];
    }
    cache[cacheIndex]=diff*diff;
    __syncthreads();
    // Tree reduction over the block; assumes the thread count per block is a
    // power of two (odd counts would drop elements).
    int ib = (blockDim.x * blockDim.y * blockDim.z)/ 2;
    while (ib != 0) {
        if(cacheIndex < ib)
            cache[cacheIndex] += cache[cacheIndex + ib];
        __syncthreads();
        ib /=2;
    }
    int blockIndex = blockIdx.x + gridDim.x*blockIdx.y + gridDim.x*gridDim.y*blockIdx.z;
    if(cacheIndex == 0) C[blockIndex] = cache[0];
}
//! Interactive driver: reads GPU id, lattice size, block shape, and the
//! CPU/GPU/both switch from stdin; solves the 3D Laplace equation by Jacobi
//! iteration on the GPU and/or CPU, timing each phase with CUDA events and
//! writing the fields to phi_*_3D.dat files.
int main(void)
{
    int gid;               // GPU_ID
    int iter;
    volatile bool flag;    // to toggle between *_new and *_old
    float cputime;
    float gputime = 0.0;
    // FIX: gputime_tot must be initialized -- it is read in the CPU branch
    // (speed-up printout) even when the GPU branch is skipped (CPU == 0);
    // previously that read was of an uninitialized variable.
    float gputime_tot = 0.0;
    double error;
    printf("Enter the GPU ID (0/1): ");
    scanf("%d",&gid);
    printf("%d\n",gid);
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    err = cudaSetDevice(gid);
    if (err != cudaSuccess) {
        printf("!!! Cannot select GPU with device ID = %d\n", gid);
        exit(1);
    }
    printf("Select GPU with device ID = %d\n", gid);
    cudaSetDevice(gid);
    printf("Solve Laplace equation on a 3D lattice with boundary conditions\n");
    int Nx, Ny, Nz;        // lattice size
    printf("Enter the size (Nx, Ny, Nz) of the 3D lattice: ");
    scanf("%d %d %d", &Nx, &Ny, &Nz);
    printf("%d %d %d\n", Nx, Ny, Nz);
    // Set the number of threads (tx,ty,tz) per block
    int tx, ty, tz;
    printf("Enter the number of threads (tx, ty, tz) per block: ");
    scanf("%d %d %d", &tx, &ty, &tz);
    printf("%d %d %d\n", tx, ty, tz);
    if( tx > 1024 ) {
        printf("Max dimension size of a thread block (x,y,z): (1024, 1024, 64)\n");
        exit(0);
    }
    if( ty > 1024 ) {
        printf("Max dimension size of a thread block (x,y,z): (1024, 1024, 64)\n");
        exit(0);
    }
    if( tz > 64 ) {
        printf("Max dimension size of a thread block (x,y,z): (1024, 1024, 64)\n");
        exit(0);
    }
    if( tx * ty * tz > 1024 ) {
        printf("The number of threads per block must be less than 1024 ! \n");
        exit(0);
    }
    dim3 threads(tx, ty, tz);
    // The total number of threads in the grid is equal to the total number of lattice sites
    int bx = Nx / tx;
    if(bx * tx != Nx) {
        printf("The block size in x is incorrect\n");
        exit(0);
    }
    int by = Ny / ty;
    if(by * ty != Ny) {
        printf("The block size in y is incorrect\n");
        exit(0);
    }
    int bz = Nz / tz;
    if(bz * tz != Nz) {
        printf("The block size in z is incorrect\n");
        exit(0);
    }
    if((bx > 2147483647) || (by > 65535) || (bz > 65535)) {
        printf("The grid size exceeds the limit ! \n");
        exit(0);
    }
    dim3 blocks(bx, by, bz);
    printf("The dimension of the grid is (%d, %d, %d)\n", bx, by, bz);
    int CPU;
    printf("To compute the solution vector with CPU/GPU/both (0/1/2) ? ");
    scanf("%d",&CPU);
    printf("%d\n",CPU);
    fflush(stdout);
    // Allocate field vector h_phi in host memory.
    // NOTE(review): N * sizeof(float) is evaluated in 'int'; this overflows
    // for very large lattices -- confirm intended size limits.
    int N = Nx * Ny * Nz;
    int size = N * sizeof(float);
    int sb = bx * by * bz * sizeof(float);
    h_old = (float*)malloc(size);
    h_new = (float*)malloc(size);
    g_new = (float*)malloc(size);
    h_C = (float*)malloc(sb);
    // Initialize the array to 0
    memset(h_old, 0, size);
    memset(h_new, 0, size);
    // Initialize the field vector with boundary conditions:
    // the z = Nz-1 face is held at 1.0, all other boundaries at 0.
    for(int x = 0; x < Nx; x = x+1) {
        for(int y = 0; y < Ny; y = y+1){
            h_new[x + Nx*y + Nx*Ny*(Nz-1)]=1.0;
            h_old[x + Nx*y + Nx*Ny*(Nz-1)]=1.0;
        }
    }
    FILE *out1; // save initial configuration in phi_initial.dat
    out1 = fopen("phi_initial_3D.dat","w");
    for(int k = Nz-1; k > -1; k = k-1){
        fprintf(out1, "z = %d\n", k);
        for(int j = Ny-1; j > -1; j = j-1){
            for(int i = 0; i < Nx; i = i+1){
                fprintf(out1,"%.2e ",h_new[i + j*Nx + k*Nx*Ny]);
            }
            fprintf(out1,"\n");
        }
    }
    fclose(out1);
    printf("\n");
    // create the timer
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    if(CPU>0) {
        // start the timer
        cudaEventRecord(start,0);
        // Allocate vectors in device memory
        cudaMalloc((void**)&d_new, size);
        cudaMalloc((void**)&d_old, size);
        cudaMalloc((void**)&d_C, sb);
        // Copy vectors from host memory to device memory
        cudaMemcpy(d_new, h_new, size, cudaMemcpyHostToDevice);
        cudaMemcpy(d_old, h_old, size, cudaMemcpyHostToDevice);
        // stop the timer
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        float Intime;
        cudaEventElapsedTime( &Intime, start, stop);
        printf("Input time for GPU: %f (ms) \n",Intime);
        // start the timer
        cudaEventRecord(start,0);
        error = 10*eps;  // any value bigger than eps is OK
        iter = 0;        // counter for iterations
        flag = true;
        int sm = tx * ty * tz * sizeof(float); // size of the shared memory in each block
        // Jacobi iteration: each sweep alternates the roles of d_old/d_new
        // (selected by 'flag'); the per-block residuals are summed on the
        // host to form the global error.
        while ( (error > eps) && (iter < MAX) ) {
            laplacian<<<blocks, threads, sm>>>(d_old, d_new, d_C, flag);
            cudaMemcpy(h_C, d_C, sb, cudaMemcpyDeviceToHost);
            error = 0.0;
            for(int i = 0; i < bx * by * bz; i = i+1) {
                error = error + h_C[i];
            }
            error = sqrt(error);
            iter++;
            flag = !flag;
        }
        printf("error (GPU) = %.15e\n",error);
        printf("total iterations (GPU) = %d\n",iter);
        // stop the timer
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime( &gputime, start, stop);
        printf("Processing time for GPU: %f (ms) \n",gputime);
        // Copy result from device memory to host memory
        // start the timer
        cudaEventRecord(start,0);
        // Because after the iteration, d_new and d_old are basically the same.
        cudaMemcpy(g_new, d_new, size, cudaMemcpyDeviceToHost);
        cudaFree(d_new);
        cudaFree(d_old);
        cudaFree(d_C);
        // stop the timer
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        float Outime;
        cudaEventElapsedTime( &Outime, start, stop);
        printf("Output time for GPU: %f (ms) \n",Outime);
        gputime_tot = Intime + gputime + Outime;
        printf("Total time for GPU: %f (ms) \n",gputime_tot);
        fflush(stdout);
        FILE *outg; // save GPU solution in phi_GPU.dat
        outg = fopen("phi_GPU_3D.dat","w");
        for(int k = Nz-1; k > -1; k = k-1){
            fprintf(outg, "z = %d\n", k);
            for(int j = Ny-1; j > -1; j = j-1){
                for(int i = 0; i < Nx; i = i+1){
                    fprintf(outg, "%.2e ",g_new[i + j*Nx + k*Nx*Ny]);
                }
                fprintf(outg,"\n");
            }
        }
        fclose(outg);
        printf("\n");
    }
    if(CPU==1) { // not to compute the CPU solution
        free(h_new);
        free(h_old);
        free(g_new);
        free(h_C);
        cudaDeviceReset();
        exit(0);
    }
    if((CPU==0)||(CPU==2)) { // to compute the CPU solution
        // start the timer
        cudaEventRecord(start,0);
        // to compute the reference solution
        error = 10*eps; // any value bigger than eps
        iter = 0;       // counter for iterations
        flag = true;
        double diff;
        float t, l, r, b, u, d; // top, left, right, bottom, up, down
        int site, ym1, xm1, zm1, xp1, yp1, zp1;
        while ( (error > eps) && (iter < MAX) ) {
            if(flag) {
                error = 0.0;
                for(int z = 0; z < Nz; z = z+1){
                    for(int y = 0; y < Ny; y = y+1) {
                        for(int x = 0; x < Nx; x = x+1) {
                            if(x==0 || x==Nx-1 || y==0 || y==Ny-1 || z==0 || z==Nz-1) {
                                // Do nothing on the boundary
                            }
                            else {
                                site = x + y*Nx + z*Nx*Ny;
                                xm1 = site - 1;      // x-1
                                xp1 = site + 1;      // x+1
                                ym1 = site - Nx;     // y-1
                                yp1 = site + Nx;     // y+1
                                zm1 = site - Nx*Ny;  // z-1
                                zp1 = site + Nx*Ny;  // z+1
                                b = h_old[ym1];
                                l = h_old[xm1];
                                d = h_old[zm1];
                                r = h_old[xp1];
                                t = h_old[yp1];
                                u = h_old[zp1];
                                h_new[site] = (1.0/6.0)*(b + l + r + t + u + d);
                                diff = h_new[site]-h_old[site];
                                error = error + diff*diff;
                            }
                        }
                    }
                }
            }
            else{
                error = 0.0;
                for(int z = 0; z < Nz; z = z+1){
                    for(int y = 0; y < Ny; y = y+1) {
                        for(int x = 0; x < Nx; x = x+1) {
                            if(x==0 || x==Nx-1 || y==0 || y==Ny-1 || z==0 || z==Nz-1) {
                                // Do nothing on the boundary
                            }
                            else {
                                site = x + y*Nx + z*Nx*Ny;
                                xm1 = site - 1;      // x-1
                                xp1 = site + 1;      // x+1
                                ym1 = site - Nx;     // y-1
                                yp1 = site + Nx;     // y+1
                                zm1 = site - Nx*Ny;  // z-1
                                zp1 = site + Nx*Ny;  // z+1
                                b = h_new[ym1];
                                l = h_new[xm1];
                                d = h_new[zm1];
                                r = h_new[xp1];
                                t = h_new[yp1];
                                u = h_new[zp1];
                                h_old[site] = (1.0/6.0)*(b + l + r + t + u + d);
                                diff = h_new[site]-h_old[site];
                                error = error + diff*diff;
                            }
                        }
                    }
                }
            }
            flag = !flag;
            iter = iter + 1;
            error = sqrt(error);
        } // exit if error < eps
        printf("error (CPU) = %.15e\n",error);
        printf("total iterations (CPU) = %d\n",iter);
        // stop the timer
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime( &cputime, start, stop);
        printf("Processing time for CPU: %f (ms) \n",cputime);
        // When CPU==0 the GPU branch was skipped and gputime_tot is 0, so
        // the speed-up below prints 'inf' rather than reading garbage.
        printf("Speed up of GPU = %f\n", cputime/(gputime_tot));
        fflush(stdout);
        // destroy the timer
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        FILE *outc; // save CPU solution in phi_CPU.dat
        outc = fopen("phi_CPU_3D.dat","w");
        for(int k = Nz-1; k > -1; k = k-1){
            fprintf(outc, "z = %d\n", k);
            for(int j = Ny-1; j > -1; j = j-1){
                for(int i = 0; i < Nx; i = i+1){
                    fprintf(outc, "%.2e ",h_new[i + j*Nx + k*Nx*Ny]);
                }
                fprintf(outc,"\n");
            }
        }
        fclose(outc);
        printf("\n");
        free(h_new);
        free(h_old);
        free(g_new);
        free(h_C);
    }
    cudaDeviceReset();
}
|
29c5507fdd97b0483994e07346e3089a0f45ef26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "TwoStepLangevinGPU.cuh"
#include "hoomd/extern/saruprngCUDA.h"
#include <assert.h>
/*! \file TwoStepLangevinGPU.cu
\brief Defines GPU kernel code for Langevin integration on the GPU. Used by TwoStepLangevinGPU.
*/
//! Shared memory array for gpu_langevin_step_two_kernel()
// NOTE: in CUDA all extern __shared__ declarations in a translation unit
// alias the *same* dynamic shared-memory allocation; the kernels below rely
// on explicit __syncthreads() before reusing the region for a new purpose.
extern __shared__ Scalar s_gammas[];
//! Shared memory array for gpu_langevin_angular_step_two_kernel()
extern __shared__ Scalar s_gammas_r[];
//! Shared memory used in reducing sums for bd energy tally
extern __shared__ Scalar bdtally_sdata[];
//! Takes the second half-step forward in the Langevin integration on a group of particles with
/*! \param d_pos array of particle positions and types
\param d_vel array of particle positions and masses
\param d_accel array of particle accelerations
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma List of per-type gammas
\param n_types Number of particle types in the simulation
\param use_lambda If true, gamma = lambda * diameter
\param lambda Scale factor to convert diameter to lambda (when use_lambda is true)
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
\param tally Boolean indicating whether energy tally is performed or not
\param d_partial_sum_bdenergy Placeholder for the partial sum
This kernel is implemented in a very similar manner to gpu_nve_step_two_kernel(), see it for design details.
This kernel will tally the energy transfer from the bd thermal reservoir and the particle system
Random number generation is done per thread with Saru's 3-seed constructor. The seeds are, the time step,
the particle tag, and the user-defined seed.
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma
*/
extern "C" __global__
void gpu_langevin_step_two_kernel(const Scalar4 *d_pos,
                                  Scalar4 *d_vel,
                                  Scalar3 *d_accel,
                                  const Scalar *d_diameter,
                                  const unsigned int *d_tag,
                                  unsigned int *d_group_members,
                                  unsigned int group_size,
                                  Scalar4 *d_net_force,
                                  Scalar *d_gamma,
                                  unsigned int n_types,
                                  bool use_lambda,
                                  Scalar lambda,
                                  unsigned int timestep,
                                  unsigned int seed,
                                  Scalar T,
                                  bool noiseless_t,
                                  Scalar deltaT,
                                  unsigned int D,
                                  bool tally,
                                  Scalar *d_partial_sum_bdenergy)
    {
    if (!use_lambda)
        {
        // read in the gammas (1 dimensional array), cooperatively staged into
        // shared memory by the whole block
        for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
            {
            if (cur_offset + threadIdx.x < n_types)
                s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x];
            }
        __syncthreads();
        }
    // determine which particle this thread works on (MEM TRANSFER: 4 bytes)
    int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
    // zero-initialized so out-of-range threads contribute nothing to the
    // energy-tally reduction below (which all threads participate in)
    Scalar bd_energy_transfer = 0;
    if (group_idx < group_size)
        {
        unsigned int idx = d_group_members[group_idx];
        // ******** first, calculate the additional BD force
        // read the current particle velocity (MEM TRANSFER: 16 bytes)
        Scalar4 vel = d_vel[idx];
        // read in the tag of our particle.
        // (MEM TRANSFER: 4 bytes)
        unsigned int ptag = d_tag[idx];
        // calculate the magnitude of the random force
        Scalar gamma;
        if (use_lambda)
            {
            // read in the tag of our particle.
            // (MEM TRANSFER: 4 bytes)
            gamma = lambda*d_diameter[idx];
            }
        else
            {
            // read in the type of our particle. A texture read of only the fourth part of the position Scalar4
            // (where type is stored) is used.
            unsigned int typ = __scalar_as_int(d_pos[idx].w);
            gamma = s_gammas[typ];
            }
        // coefficient scaling the uniform [-1,1] random numbers drawn below
        Scalar coeff = sqrtf(Scalar(6.0) * gamma * T / deltaT);
        Scalar3 bd_force = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
        if (noiseless_t)
            coeff = Scalar(0.0);
        //Initialize the Random Number Generator and generate the 3 random numbers
        SaruGPU s(ptag, timestep + seed); // 2 dimensional seeding
        Scalar randomx=s.s<Scalar>(-1.0, 1.0);
        Scalar randomy=s.s<Scalar>(-1.0, 1.0);
        Scalar randomz=s.s<Scalar>(-1.0, 1.0);
        // random kick plus viscous drag -gamma*v; z component only in 3D
        bd_force.x = randomx*coeff - gamma*vel.x;
        bd_force.y = randomy*coeff - gamma*vel.y;
        if (D > 2)
            bd_force.z = randomz*coeff - gamma*vel.z;
        // read in the net force and calculate the acceleration MEM TRANSFER: 16 bytes
        Scalar4 net_force = d_net_force[idx];
        Scalar3 accel = make_scalar3(net_force.x,net_force.y,net_force.z);
        // MEM TRANSFER: 4 bytes   FLOPS: 3
        Scalar mass = vel.w;
        Scalar minv = Scalar(1.0) / mass;
        accel.x = (accel.x + bd_force.x) * minv;
        accel.y = (accel.y + bd_force.y) * minv;
        accel.z = (accel.z + bd_force.z) * minv;
        // v(t+deltaT) = v(t+deltaT/2) + 1/2 * a(t+deltaT)*deltaT
        // update the velocity (FLOPS: 6)
        vel.x += (Scalar(1.0)/Scalar(2.0)) * accel.x * deltaT;
        vel.y += (Scalar(1.0)/Scalar(2.0)) * accel.y * deltaT;
        vel.z += (Scalar(1.0)/Scalar(2.0)) * accel.z * deltaT;
        // tally the energy transfer from the bd thermal reservor to the particles (FLOPS: 6)
        bd_energy_transfer =  bd_force.x *vel.x +  bd_force.y * vel.y +  bd_force.z * vel.z;
        // write out data (MEM TRANSFER: 32 bytes)
        d_vel[idx] = vel;
        // since we calculate the acceleration, we need to write it for the next step
        d_accel[idx] = accel;
        }
    if (tally)
        {
        // don't ovewrite values in the s_gammas array with bd_energy transfer
        // (bdtally_sdata aliases the same dynamic shared-memory region)
        __syncthreads();
        bdtally_sdata[threadIdx.x] = bd_energy_transfer;
        __syncthreads();
        // reduce the sum in parallel
        // NOTE(review): tree reduction assumes blockDim.x is a power of two
        int offs = blockDim.x >> 1;
        while (offs > 0)
            {
            if (threadIdx.x < offs)
                bdtally_sdata[threadIdx.x] += bdtally_sdata[threadIdx.x + offs];
            offs >>= 1;
            __syncthreads();
            }
        // write out our partial sum
        if (threadIdx.x == 0)
            {
            d_partial_sum_bdenergy[blockIdx.x] = bdtally_sdata[0];
            }
        }
    }
//! Kernel function for reducing a partial sum to a full sum (one value)
/*! \param d_sum Placeholder for the sum
\param d_partial_sum Array containing the parial sum
\param num_blocks Number of blocks to execute
*/
extern "C" __global__
void gpu_bdtally_reduce_partial_sum_kernel(Scalar *d_sum,
                                           Scalar* d_partial_sum,
                                           unsigned int num_blocks)
    {
    Scalar sum = Scalar(0.0);
    // sum up the values in the partial sum via a sliding window: each pass
    // stages up to blockDim.x values into shared memory, reduces them, and
    // accumulates the per-pass result into 'sum'
    for (int start = 0; start < num_blocks; start += blockDim.x)
        {
        __syncthreads();
        if (start + threadIdx.x < num_blocks)
            bdtally_sdata[threadIdx.x] = d_partial_sum[start + threadIdx.x];
        else
            bdtally_sdata[threadIdx.x] = Scalar(0.0);
        __syncthreads();
        // reduce the sum in parallel
        // NOTE(review): tree reduction assumes blockDim.x is a power of two
        int offs = blockDim.x >> 1;
        while (offs > 0)
            {
            if (threadIdx.x < offs)
                bdtally_sdata[threadIdx.x] += bdtally_sdata[threadIdx.x + offs];
            offs >>= 1;
            __syncthreads();
            }
        // everybody sums up sum2K
        sum += bdtally_sdata[0];
        }
    // only thread 0 publishes the final total
    if (threadIdx.x == 0)
        *d_sum = sum;
    }
//! NO_SQUISH angular part of the second half step
/*!
\param d_pos array of particle positions (4th dimension is particle type)
\param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param d_gamma_r List of per-type gamma_rs (rotational drag coeff.)
\param d_tag array of particle tags
\param group_size Number of members in the group
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param d_noiseless_r If set true, there will be no rotational noise (random torque)
\param deltaT integration time step size
\param D dimensionality of the system
*/
__global__ void gpu_langevin_angular_step_two_kernel(
const Scalar4 *d_pos,
Scalar4 *d_orientation,
Scalar4 *d_angmom,
const Scalar3 *d_inertia,
Scalar4 *d_net_torque,
const unsigned int *d_group_members,
const Scalar *d_gamma_r,
const unsigned int *d_tag,
unsigned int n_types,
unsigned int group_size,
unsigned int timestep,
unsigned int seed,
Scalar T,
bool noiseless_r,
Scalar deltaT,
unsigned int D,
Scalar scale
)
{
// One thread per group member. Dynamic shared memory backs s_gammas_r; the host
// driver sizes it as max(n_types, block_size) Scalars.
// read in the gamma_r, stored in s_gammas_r[0: n_type] (Pythonic convention)
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas_r[cur_offset + threadIdx.x] = d_gamma_r[cur_offset + threadIdx.x];
}
__syncthreads();
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
unsigned int ptag = d_tag[idx];
// torque update with rotational drag and noise
unsigned int type_r = __scalar_as_int(d_pos[idx].w);
Scalar gamma_r = s_gammas_r[type_r];
if (gamma_r > 0)
{
quat<Scalar> q(d_orientation[idx]);
quat<Scalar> p(d_angmom[idx]);
vec3<Scalar> t(d_net_torque[idx]);
vec3<Scalar> I(d_inertia[idx]);
// s = (1/2) conj(q) * p, the body-frame angular momentum in the
// conjugate-quaternion formalism
vec3<Scalar> s;
s = (Scalar(1./2.) * conj(q) * p).v;
// first calculate in the body frame random and damping torque imposed by the dynamics
vec3<Scalar> bf_torque;
// original Gaussian random torque
// for future reference: if gamma_r is different for xyz, then we need to generate 3 sigma_r
Scalar sigma_r = fast::sqrt(Scalar(2.0)*gamma_r*T/deltaT);
if (noiseless_r) sigma_r = Scalar(0.0);
SaruGPU saru(ptag, timestep + seed); // 2 dimensional seeding
Scalar rand_x = gaussian_rng(saru, sigma_r);
Scalar rand_y = gaussian_rng(saru, sigma_r);
Scalar rand_z = gaussian_rng(saru, sigma_r);
// check for zero moment of inertia
bool x_zero, y_zero, z_zero;
x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON));
// NOTE: when a component of I is ~0 the division below yields inf/nan, but that
// component is zeroed immediately afterwards, so the value is never used
bf_torque.x = rand_x - gamma_r * (s.x / I.x);
bf_torque.y = rand_y - gamma_r * (s.y / I.y);
bf_torque.z = rand_z - gamma_r * (s.z / I.z);
// ignore torque component along an axis for which the moment of inertia zero
if (x_zero) bf_torque.x = 0;
if (y_zero) bf_torque.y = 0;
if (z_zero) bf_torque.z = 0;
// change to lab frame and update the net torque
bf_torque = rotate(q, bf_torque);
d_net_torque[idx].x += bf_torque.x;
d_net_torque[idx].y += bf_torque.y;
d_net_torque[idx].z += bf_torque.z;
// with the wishful mind that compiler may use conditional move to avoid branching
// (in 2D only rotation about z survives, so the in-plane torque components are zeroed)
if (D < 3) d_net_torque[idx].x = 0;
if (D < 3) d_net_torque[idx].y = 0;
}
//////////////////////////////
// read the particle's orientation, conjugate quaternion, moment of inertia and net torque
quat<Scalar> q(d_orientation[idx]);
quat<Scalar> p(d_angmom[idx]);
vec3<Scalar> t(d_net_torque[idx]);
vec3<Scalar> I(d_inertia[idx]);
// rotate torque into principal frame
t = rotate(conj(q),t);
// check for zero moment of inertia
bool x_zero, y_zero, z_zero;
x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON));
// ignore torque component along an axis for which the moment of inertia zero
if (x_zero) t.x = Scalar(0.0);
if (y_zero) t.y = Scalar(0.0);
if (z_zero) t.z = Scalar(0.0);
// rescale the angular momentum (thermostat rescale factor supplied by the caller)
p = p*scale;
// advance p(t)->p(t+deltaT/2), q(t)->q(t+deltaT)
p += deltaT*q*t;
d_angmom[idx] = quat_to_scalar4(p);
}
}
/*! \param d_pos array of particle positions (4th dimension is particle type)
\param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param d_gamma_r List of per-type gamma_rs (rotational drag coeff.)
\param d_tag array of particle tags
\param group_size Number of members in the group
\param langevin_args Collected arguments for gpu_langevin_step_two_kernel() and gpu_langevin_angular_step_two()
\param deltaT timestep
\param D dimensionality of the system
This is just a driver for gpu_langevin_angular_step_two_kernel(), see it for details.
*/
hipError_t gpu_langevin_angular_step_two(const Scalar4 *d_pos,
                                         Scalar4 *d_orientation,
                                         Scalar4 *d_angmom,
                                         const Scalar3 *d_inertia,
                                         Scalar4 *d_net_torque,
                                         const unsigned int *d_group_members,
                                         const Scalar *d_gamma_r,
                                         const unsigned int *d_tag,
                                         unsigned int group_size,
                                         const langevin_step_two_args& langevin_args,
                                         Scalar deltaT,
                                         unsigned int D,
                                         Scalar scale)
    {
    // nothing to integrate for an empty group; a zero-sized grid launch is an error
    if (group_size == 0)
        return hipSuccess;

    // setup the grid to run the kernel: ceiling division avoids launching a
    // superfluous trailing block when group_size is an exact multiple of block_size
    const unsigned int block_size = 256;
    dim3 grid((group_size + block_size - 1) / block_size, 1, 1);
    dim3 threads(block_size, 1, 1);

    // dynamic shared memory holds either the per-type gamma_r table or one Scalar
    // per thread (the extern shared arrays are reused across the kernels in this
    // file), so allocate the larger of the two
    size_t shared_bytes = max((unsigned int)(sizeof(Scalar)*langevin_args.n_types),
                              (unsigned int)(langevin_args.block_size*sizeof(Scalar)));

    // run the kernel
    hipLaunchKernelGGL((gpu_langevin_angular_step_two_kernel), dim3(grid), dim3(threads), shared_bytes, 0,
                       d_pos, d_orientation, d_angmom, d_inertia, d_net_torque,
                       d_group_members, d_gamma_r, d_tag,
                       langevin_args.n_types, group_size,
                       langevin_args.timestep, langevin_args.seed, langevin_args.T,
                       langevin_args.noiseless_r, deltaT, D, scale);

    // surface launch-configuration errors instead of unconditionally reporting success
    return hipGetLastError();
    }
/*! \param d_pos array of particle positions and types
\param d_vel array of particle positions and masses
\param d_accel array of particle accelerations
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param langevin_args Collected arguments for gpu_langevin_step_two_kernel() and gpu_langevin_angular_step_two()
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
This is just a driver for gpu_langevin_step_two_kernel(), see it for details.
*/
hipError_t gpu_langevin_step_two(const Scalar4 *d_pos,
                                 Scalar4 *d_vel,
                                 Scalar3 *d_accel,
                                 const Scalar *d_diameter,
                                 const unsigned int *d_tag,
                                 unsigned int *d_group_members,
                                 unsigned int group_size,
                                 Scalar4 *d_net_force,
                                 const langevin_step_two_args& langevin_args,
                                 Scalar deltaT,
                                 unsigned int D)
    {
    // setup the grid to run the kernel
    dim3 grid(langevin_args.num_blocks, 1, 1);
    dim3 threads(langevin_args.block_size, 1, 1);

    // the kernel caches the per-type gamma table and later reuses the same dynamic
    // shared memory for the per-thread energy tally, so size it for the larger use
    size_t shared_bytes = max((unsigned int)(sizeof(Scalar)*langevin_args.n_types),
                              (unsigned int)(langevin_args.block_size*sizeof(Scalar)));

    // run the kernel
    hipLaunchKernelGGL((gpu_langevin_step_two_kernel), dim3(grid), dim3(threads), shared_bytes, 0,
                       d_pos, d_vel, d_accel, d_diameter, d_tag, d_group_members,
                       group_size, d_net_force,
                       langevin_args.d_gamma, langevin_args.n_types,
                       langevin_args.use_lambda, langevin_args.lambda,
                       langevin_args.timestep, langevin_args.seed, langevin_args.T,
                       langevin_args.noiseless_t, deltaT, D,
                       langevin_args.tally, langevin_args.d_partial_sum_bdenergy);

    // run the summation kernel on a single block
    if (langevin_args.tally)
        {
        dim3 grid1(1, 1, 1);
        dim3 threads1(256, 1, 1);
        // bugfix: the reduction kernel indexes bdtally_sdata by threadIdx.x with
        // threads1.x (256) threads, so the dynamic shared memory must hold
        // threads1.x Scalars; sizing it by langevin_args.block_size (as before)
        // under-allocated whenever block_size < 256
        hipLaunchKernelGGL((gpu_bdtally_reduce_partial_sum_kernel), dim3(grid1), dim3(threads1),
                           threads1.x * sizeof(Scalar), 0,
                           &langevin_args.d_sum_bdenergy[0],
                           langevin_args.d_partial_sum_bdenergy,
                           langevin_args.num_blocks);
        }

    // propagate launch-configuration errors to the caller
    return hipGetLastError();
    }
| 29c5507fdd97b0483994e07346e3089a0f45ef26.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "TwoStepLangevinGPU.cuh"
#include "hoomd/extern/saruprngCUDA.h"
#include <assert.h>
/*! \file TwoStepLangevinGPU.cu
\brief Defines GPU kernel code for Langevin integration on the GPU. Used by TwoStepLangevinGPU.
*/
//! Shared memory array for gpu_langevin_step_two_kernel()
extern __shared__ Scalar s_gammas[];
//! Shared memory array for gpu_langevin_angular_step_two_kernel()
extern __shared__ Scalar s_gammas_r[];
//! Shared memory used in reducing sums for bd energy tally
extern __shared__ Scalar bdtally_sdata[];
//! Takes the second half-step forward in the Langevin integration on a group of particles with
/*! \param d_pos array of particle positions and types
\param d_vel array of particle positions and masses
\param d_accel array of particle accelerations
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma List of per-type gammas
\param n_types Number of particle types in the simulation
\param use_lambda If true, gamma = lambda * diameter
\param lambda Scale factor to convert diameter to lambda (when use_lambda is true)
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
\param tally Boolean indicating whether energy tally is performed or not
\param d_partial_sum_bdenergy Placeholder for the partial sum
This kernel is implemented in a very similar manner to gpu_nve_step_two_kernel(), see it for design details.
This kernel will tally the energy transfer from the bd thermal reservoir and the particle system
Random number generation is done per thread with Saru's 3-seed constructor. The seeds are, the time step,
the particle tag, and the user-defined seed.
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma
*/
extern "C" __global__
void gpu_langevin_step_two_kernel(const Scalar4 *d_pos,
Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar *d_diameter,
const unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
Scalar *d_gamma,
unsigned int n_types,
bool use_lambda,
Scalar lambda,
unsigned int timestep,
unsigned int seed,
Scalar T,
bool noiseless_t,
Scalar deltaT,
unsigned int D,
bool tally,
Scalar *d_partial_sum_bdenergy)
{
if (!use_lambda)
{
// read in the gammas (1 dimensional array)
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x];
}
__syncthreads();
}
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
// energy transferred to this thread's particle this step; stays 0 for threads
// past group_size so the tally reduction below remains correct
Scalar bd_energy_transfer = 0;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// ******** first, calculate the additional BD force
// read the current particle velocity (MEM TRANSFER: 16 bytes)
Scalar4 vel = d_vel[idx];
// read in the tag of our particle.
// (MEM TRANSFER: 4 bytes)
unsigned int ptag = d_tag[idx];
// calculate the magnitude of the random force
Scalar gamma;
if (use_lambda)
{
// read in the tag of our particle.
// (MEM TRANSFER: 4 bytes)
gamma = lambda*d_diameter[idx];
}
else
{
// read in the type of our particle. A texture read of only the fourth part of the position Scalar4
// (where type is stored) is used.
unsigned int typ = __scalar_as_int(d_pos[idx].w);
gamma = s_gammas[typ];
}
// noise amplitude: the [-1,1] uniform draws below have variance 1/3, so
// sqrt(6*gamma*T/deltaT) reproduces the Langevin variance 2*gamma*T/deltaT
Scalar coeff = sqrtf(Scalar(6.0) * gamma * T / deltaT);
Scalar3 bd_force = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
if (noiseless_t)
coeff = Scalar(0.0);
//Initialize the Random Number Generator and generate the 3 random numbers
SaruGPU s(ptag, timestep + seed); // 2 dimensional seeding
Scalar randomx=s.s<Scalar>(-1.0, 1.0);
Scalar randomy=s.s<Scalar>(-1.0, 1.0);
Scalar randomz=s.s<Scalar>(-1.0, 1.0);
bd_force.x = randomx*coeff - gamma*vel.x;
bd_force.y = randomy*coeff - gamma*vel.y;
// in 2D simulations the z component of drag and noise is left at zero
if (D > 2)
bd_force.z = randomz*coeff - gamma*vel.z;
// read in the net force and calculate the acceleration MEM TRANSFER: 16 bytes
Scalar4 net_force = d_net_force[idx];
Scalar3 accel = make_scalar3(net_force.x,net_force.y,net_force.z);
// MEM TRANSFER: 4 bytes FLOPS: 3
Scalar mass = vel.w;
Scalar minv = Scalar(1.0) / mass;
accel.x = (accel.x + bd_force.x) * minv;
accel.y = (accel.y + bd_force.y) * minv;
accel.z = (accel.z + bd_force.z) * minv;
// v(t+deltaT) = v(t+deltaT/2) + 1/2 * a(t+deltaT)*deltaT
// update the velocity (FLOPS: 6)
vel.x += (Scalar(1.0)/Scalar(2.0)) * accel.x * deltaT;
vel.y += (Scalar(1.0)/Scalar(2.0)) * accel.y * deltaT;
vel.z += (Scalar(1.0)/Scalar(2.0)) * accel.z * deltaT;
// tally the energy transfer from the bd thermal reservor to the particles (FLOPS: 6)
bd_energy_transfer = bd_force.x *vel.x + bd_force.y * vel.y + bd_force.z * vel.z;
// write out data (MEM TRANSFER: 32 bytes)
d_vel[idx] = vel;
// since we calculate the acceleration, we need to write it for the next step
d_accel[idx] = accel;
}
if (tally)
{
// all threads (including those with group_idx >= group_size) reach the barriers
// below, which is required for __syncthreads() correctness.
// NOTE(review): the tree reduction assumes blockDim.x is a power of two --
// confirm at the launch site.
// don't ovewrite values in the s_gammas array with bd_energy transfer
__syncthreads();
bdtally_sdata[threadIdx.x] = bd_energy_transfer;
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
bdtally_sdata[threadIdx.x] += bdtally_sdata[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
// write out our partial sum
if (threadIdx.x == 0)
{
d_partial_sum_bdenergy[blockIdx.x] = bdtally_sdata[0];
}
}
}
//! Kernel function for reducing a partial sum to a full sum (one value)
/*! \param d_sum Placeholder for the sum (written by thread 0 only)
    \param d_partial_sum Array containing the partial sums (num_blocks entries)
    \param num_blocks Number of partial sums to reduce

    Intended to be launched as a single thread block with blockDim.x * sizeof(Scalar)
    bytes of dynamic shared memory backing bdtally_sdata.
    NOTE(review): the tree reduction below assumes blockDim.x is a power of two --
    confirm at the launch site.
*/
extern "C" __global__
void gpu_bdtally_reduce_partial_sum_kernel(Scalar *d_sum,
Scalar* d_partial_sum,
unsigned int num_blocks)
{
Scalar sum = Scalar(0.0);
// sum up the values in the partial sum via a sliding window
for (int start = 0; start < num_blocks; start += blockDim.x)
{
// make sure the previous window's reduction (and the read of bdtally_sdata[0]
// below) has finished before overwriting the shared array
__syncthreads();
if (start + threadIdx.x < num_blocks)
bdtally_sdata[threadIdx.x] = d_partial_sum[start + threadIdx.x];
else
bdtally_sdata[threadIdx.x] = Scalar(0.0);
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
bdtally_sdata[threadIdx.x] += bdtally_sdata[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
// every thread accumulates this window's reduced value (only thread 0's copy is used)
sum += bdtally_sdata[0];
}
if (threadIdx.x == 0)
*d_sum = sum;
}
//! NO_SQUISH angular part of the second half step
/*!
\param d_pos array of particle positions (4th dimension is particle type)
\param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param d_gamma_r List of per-type gamma_rs (rotational drag coeff.)
\param d_tag array of particle tags
\param group_size Number of members in the group
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param d_noiseless_r If set true, there will be no rotational noise (random torque)
\param deltaT integration time step size
\param D dimensionality of the system
*/
__global__ void gpu_langevin_angular_step_two_kernel(
const Scalar4 *d_pos,
Scalar4 *d_orientation,
Scalar4 *d_angmom,
const Scalar3 *d_inertia,
Scalar4 *d_net_torque,
const unsigned int *d_group_members,
const Scalar *d_gamma_r,
const unsigned int *d_tag,
unsigned int n_types,
unsigned int group_size,
unsigned int timestep,
unsigned int seed,
Scalar T,
bool noiseless_r,
Scalar deltaT,
unsigned int D,
Scalar scale
)
{
// One thread per group member. Dynamic shared memory backs s_gammas_r; the host
// driver sizes it as max(n_types, block_size) Scalars.
// read in the gamma_r, stored in s_gammas_r[0: n_type] (Pythonic convention)
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas_r[cur_offset + threadIdx.x] = d_gamma_r[cur_offset + threadIdx.x];
}
__syncthreads();
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
unsigned int ptag = d_tag[idx];
// torque update with rotational drag and noise
unsigned int type_r = __scalar_as_int(d_pos[idx].w);
Scalar gamma_r = s_gammas_r[type_r];
if (gamma_r > 0)
{
quat<Scalar> q(d_orientation[idx]);
quat<Scalar> p(d_angmom[idx]);
vec3<Scalar> t(d_net_torque[idx]);
vec3<Scalar> I(d_inertia[idx]);
// s = (1/2) conj(q) * p, the body-frame angular momentum in the
// conjugate-quaternion formalism
vec3<Scalar> s;
s = (Scalar(1./2.) * conj(q) * p).v;
// first calculate in the body frame random and damping torque imposed by the dynamics
vec3<Scalar> bf_torque;
// original Gaussian random torque
// for future reference: if gamma_r is different for xyz, then we need to generate 3 sigma_r
Scalar sigma_r = fast::sqrt(Scalar(2.0)*gamma_r*T/deltaT);
if (noiseless_r) sigma_r = Scalar(0.0);
SaruGPU saru(ptag, timestep + seed); // 2 dimensional seeding
Scalar rand_x = gaussian_rng(saru, sigma_r);
Scalar rand_y = gaussian_rng(saru, sigma_r);
Scalar rand_z = gaussian_rng(saru, sigma_r);
// check for zero moment of inertia
bool x_zero, y_zero, z_zero;
x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON));
// NOTE: when a component of I is ~0 the division below yields inf/nan, but that
// component is zeroed immediately afterwards, so the value is never used
bf_torque.x = rand_x - gamma_r * (s.x / I.x);
bf_torque.y = rand_y - gamma_r * (s.y / I.y);
bf_torque.z = rand_z - gamma_r * (s.z / I.z);
// ignore torque component along an axis for which the moment of inertia zero
if (x_zero) bf_torque.x = 0;
if (y_zero) bf_torque.y = 0;
if (z_zero) bf_torque.z = 0;
// change to lab frame and update the net torque
bf_torque = rotate(q, bf_torque);
d_net_torque[idx].x += bf_torque.x;
d_net_torque[idx].y += bf_torque.y;
d_net_torque[idx].z += bf_torque.z;
// with the wishful mind that compiler may use conditional move to avoid branching
// (in 2D only rotation about z survives, so the in-plane torque components are zeroed)
if (D < 3) d_net_torque[idx].x = 0;
if (D < 3) d_net_torque[idx].y = 0;
}
//////////////////////////////
// read the particle's orientation, conjugate quaternion, moment of inertia and net torque
quat<Scalar> q(d_orientation[idx]);
quat<Scalar> p(d_angmom[idx]);
vec3<Scalar> t(d_net_torque[idx]);
vec3<Scalar> I(d_inertia[idx]);
// rotate torque into principal frame
t = rotate(conj(q),t);
// check for zero moment of inertia
bool x_zero, y_zero, z_zero;
x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON));
// ignore torque component along an axis for which the moment of inertia zero
if (x_zero) t.x = Scalar(0.0);
if (y_zero) t.y = Scalar(0.0);
if (z_zero) t.z = Scalar(0.0);
// rescale the angular momentum (thermostat rescale factor supplied by the caller)
p = p*scale;
// advance p(t)->p(t+deltaT/2), q(t)->q(t+deltaT)
p += deltaT*q*t;
d_angmom[idx] = quat_to_scalar4(p);
}
}
/*! \param d_pos array of particle positions (4th dimension is particle type)
\param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param d_gamma_r List of per-type gamma_rs (rotational drag coeff.)
\param d_tag array of particle tags
\param group_size Number of members in the group
\param langevin_args Collected arguments for gpu_langevin_step_two_kernel() and gpu_langevin_angular_step_two()
\param deltaT timestep
\param D dimensionality of the system
This is just a driver for gpu_langevin_angular_step_two_kernel(), see it for details.
*/
cudaError_t gpu_langevin_angular_step_two(const Scalar4 *d_pos,
                                          Scalar4 *d_orientation,
                                          Scalar4 *d_angmom,
                                          const Scalar3 *d_inertia,
                                          Scalar4 *d_net_torque,
                                          const unsigned int *d_group_members,
                                          const Scalar *d_gamma_r,
                                          const unsigned int *d_tag,
                                          unsigned int group_size,
                                          const langevin_step_two_args& langevin_args,
                                          Scalar deltaT,
                                          unsigned int D,
                                          Scalar scale)
    {
    // nothing to integrate for an empty group; a zero-sized grid launch is an error
    if (group_size == 0)
        return cudaSuccess;

    // setup the grid to run the kernel: ceiling division avoids launching a
    // superfluous trailing block when group_size is an exact multiple of block_size
    const unsigned int block_size = 256;
    dim3 grid((group_size + block_size - 1) / block_size, 1, 1);
    dim3 threads(block_size, 1, 1);

    // dynamic shared memory holds either the per-type gamma_r table or one Scalar
    // per thread (the extern shared arrays are reused across the kernels in this
    // file), so allocate the larger of the two
    size_t shared_bytes = max((unsigned int)(sizeof(Scalar)*langevin_args.n_types),
                              (unsigned int)(langevin_args.block_size*sizeof(Scalar)));

    // run the kernel
    gpu_langevin_angular_step_two_kernel<<<grid, threads, shared_bytes>>>(
        d_pos, d_orientation, d_angmom, d_inertia, d_net_torque,
        d_group_members, d_gamma_r, d_tag,
        langevin_args.n_types, group_size,
        langevin_args.timestep, langevin_args.seed, langevin_args.T,
        langevin_args.noiseless_r, deltaT, D, scale);

    // surface launch-configuration errors instead of unconditionally reporting success
    return cudaGetLastError();
    }
/*! \param d_pos array of particle positions and types
\param d_vel array of particle positions and masses
\param d_accel array of particle accelerations
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param langevin_args Collected arguments for gpu_langevin_step_two_kernel() and gpu_langevin_angular_step_two()
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
This is just a driver for gpu_langevin_step_two_kernel(), see it for details.
*/
cudaError_t gpu_langevin_step_two(const Scalar4 *d_pos,
                                  Scalar4 *d_vel,
                                  Scalar3 *d_accel,
                                  const Scalar *d_diameter,
                                  const unsigned int *d_tag,
                                  unsigned int *d_group_members,
                                  unsigned int group_size,
                                  Scalar4 *d_net_force,
                                  const langevin_step_two_args& langevin_args,
                                  Scalar deltaT,
                                  unsigned int D)
    {
    // setup the grid to run the kernel
    dim3 grid(langevin_args.num_blocks, 1, 1);
    dim3 threads(langevin_args.block_size, 1, 1);

    // the kernel caches the per-type gamma table and later reuses the same dynamic
    // shared memory for the per-thread energy tally, so size it for the larger use
    size_t shared_bytes = max((unsigned int)(sizeof(Scalar)*langevin_args.n_types),
                              (unsigned int)(langevin_args.block_size*sizeof(Scalar)));

    // run the kernel
    gpu_langevin_step_two_kernel<<<grid, threads, shared_bytes>>>(
        d_pos, d_vel, d_accel, d_diameter, d_tag, d_group_members,
        group_size, d_net_force,
        langevin_args.d_gamma, langevin_args.n_types,
        langevin_args.use_lambda, langevin_args.lambda,
        langevin_args.timestep, langevin_args.seed, langevin_args.T,
        langevin_args.noiseless_t, deltaT, D,
        langevin_args.tally, langevin_args.d_partial_sum_bdenergy);

    // run the summation kernel on a single block
    if (langevin_args.tally)
        {
        dim3 grid1(1, 1, 1);
        dim3 threads1(256, 1, 1);
        // bugfix: the reduction kernel indexes bdtally_sdata by threadIdx.x with
        // threads1.x (256) threads, so the dynamic shared memory must hold
        // threads1.x Scalars; sizing it by langevin_args.block_size (as before)
        // under-allocated whenever block_size < 256
        gpu_bdtally_reduce_partial_sum_kernel<<<grid1, threads1, threads1.x * sizeof(Scalar)>>>(
            &langevin_args.d_sum_bdenergy[0],
            langevin_args.d_partial_sum_bdenergy,
            langevin_args.num_blocks);
        }

    // propagate launch-configuration errors to the caller
    return cudaGetLastError();
    }
|
b7f2a241d3a912977f3961ea209657ac2ee19e60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "ball_query_gpu.h"
#include "cuda_utils.h"
__global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample,
    const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) {
    // For each query center in new_xyz (B, M, 3), collect up to nsample indices of
    // points in xyz (B, N, 3) within `radius`, written to idx (B, M, nsample).
    // Unfilled slots are pre-filled with the index of the first hit.
    int batch = blockIdx.y;
    int query = blockIdx.x * blockDim.x + threadIdx.x;
    if (batch >= b || query >= m) return;

    const float *center = new_xyz + (batch * m + query) * 3;
    const float *points = xyz + batch * n * 3;
    int *out = idx + (batch * m + query) * nsample;

    const float r2 = radius * radius;
    const float cx = center[0];
    const float cy = center[1];
    const float cz = center[2];

    int found = 0;
    for (int k = 0; k < n && found < nsample; ++k) {
        const float dx = points[k * 3 + 0] - cx;
        const float dy = points[k * 3 + 1] - cy;
        const float dz = points[k * 3 + 2] - cz;
        if (dx * dx + dy * dy + dz * dz < r2) {
            if (found == 0) {
                // first hit: seed every output slot so unfilled entries are valid
                for (int l = 0; l < nsample; ++l) {
                    out[l] = k;
                }
            }
            out[found] = k;
            ++found;
        }
    }
}
// Host-side launcher for ball_query_kernel_fast: one thread per query point
// (grid.x) and one grid row per batch (grid.y). On launch error the process
// aborts via exit(-1) rather than returning a status to the caller.
void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, \
const float *new_xyz, const float *xyz, int *idx, hipStream_t stream) {
// new_xyz: (B, M, 3)
// xyz: (B, N, 3)
// output:
// idx: (B, M, nsample)
hipError_t err;
dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( ball_query_kernel_fast), dim3(blocks), dim3(threads), 0, stream, b, n, m, radius, nsample, new_xyz, xyz, idx);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
} | b7f2a241d3a912977f3961ea209657ac2ee19e60.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "ball_query_gpu.h"
#include "cuda_utils.h"
__global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample,
const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) {
// new_xyz: (B, M, 3)
// xyz: (B, N, 3)
// output:
// idx: (B, M, nsample)
int bs_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || pt_idx >= m) return;
new_xyz += bs_idx * m * 3 + pt_idx * 3;
xyz += bs_idx * n * 3;
idx += bs_idx * m * nsample + pt_idx * nsample;
float radius2 = radius * radius;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int cnt = 0;
for (int k = 0; k < n; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
if (d2 < radius2){
if (cnt == 0){
for (int l = 0; l < nsample; ++l) {
idx[l] = k;
}
}
idx[cnt] = k;
++cnt;
if (cnt >= nsample) break;
}
}
}
void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample,
                                     const float *new_xyz, const float *xyz, int *idx, cudaStream_t stream) {
    // Host-side launcher: one thread per query point (grid.x), one grid row per
    // batch (grid.y). new_xyz: (B, M, 3), xyz: (B, N, 3), output idx: (B, M, nsample).
    dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    ball_query_kernel_fast<<<blocks, threads, 0, stream>>>(b, n, m, radius, nsample, new_xyz, xyz, idx);
    // cudaDeviceSynchronize(); // for using printf in kernel function

    // abort the process on a launch-configuration error
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(status));
        exit(-1);
    }
}
25b85ac3457470207ce130628788a02a16f3dd8d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* *
* * See COPYRIGHT for license information
* */
#include "nvshmem.h"
#include "nvshmemx.h"
#include "gpu_coll.h"
/* This is a utility function for the recursive exchange algorithm.
* It calculates the PEs to which data is sent or received from during the different steps
* of the algorithm. The recursive exchange algorithm is divided into 3 steps.
* In step 1, when the number of PEs are not a power of k, some PEs send their data
* to other PEs that participate in the power-of-k recursive exchange algorithm in Step 2.
* Step 2 has log_k (PE_size) phases. In each phase k PEs exchange data with each other.
* In Step 3, PEs from step 2 send the final reduced data to PEs that did not participate in Step 2.
*/
__host__ void nvshmemi_recexchalgo_get_neighbors(int my_pe, int num_pes) {
    int i, j, k;
    int p_of_k = 1, log_p_of_k = 0, rem, T, newpe;
    int step1_sendto, step1_nrecvs, step2_nphases;

    INFO(NVSHMEM_COLL, "step 1 nbr calculation started, num_pes = %d", num_pes);
    k = gpu_coll_env_params_var.reduce_recexch_kval;
    if (num_pes < k) /* If size of the active set is less than k, reduce the value of k */
        k = (num_pes > 2) ? num_pes : 2;

    /* Calculate p_of_k, p_of_k is the largest power of k that is less than num_pes */
    while (p_of_k <= num_pes) {
        p_of_k *= k;
        log_p_of_k++;
    }
    p_of_k /= k;
    log_p_of_k--;
    step2_nphases = log_p_of_k;

    /* host-side scratch buffers; contents are copied to device symbols below and
     * the buffers are freed before returning (they previously leaked) */
    int *step1_recvfrom = (int *)malloc(sizeof(int) * (k - 1));
    assert(step1_recvfrom != NULL);
    int **step2_nbrs = (int **)malloc(sizeof(int *) * step2_nphases);
    assert(step2_nbrs != NULL);
    for (int i = 0; i < step2_nphases; i++) {
        step2_nbrs[i] = (int *)malloc(sizeof(int) * (k - 1));
        assert(step2_nbrs[i] != NULL);
    }

    rem = num_pes - p_of_k;
    /* rem is the number of PEs that do not participate in Step 2
     * We need to identify these non-participating PEs. This is done in the following way.
     * The first T PEs are divided into sets of k consecutive PEs each.
     * In each of these sets, the first k-1 PEs are the non-participating
     * PEs while the last PE is the participating PE.
     * The non-participating PEs send their data to the participating PE
     * in their corresponding set.
     */
    T = (rem * k) / (k - 1);
    INFO(NVSHMEM_COLL, "step 1 nbr calculation started. T is %d", T);
    step1_nrecvs = 0;
    step1_sendto = -1;

    /* Step 1 */
    if (my_pe < T) {
        if (my_pe % k != (k - 1)) { /* I am a non-participating PE */
            step1_sendto = my_pe + (k - 1 - my_pe % k); /* participating PE to send the data to */
            /* if the corresponding participating PE is not in T,
             * then send to the Tth PE to preserve non-commutativity */
            if (step1_sendto > T - 1) step1_sendto = T;
            newpe = -1; /* tag this PE as non-participating */
        } else { /* participating PE */
            for (i = 0; i < k - 1; i++) {
                step1_recvfrom[i] = my_pe - i - 1;
            }
            step1_nrecvs = k - 1;
            newpe = my_pe / k; /* this is the new PE amongst the set of participating PEs */
        }
    } else { /* PE >= T */
        newpe = my_pe - rem;
        if (my_pe == T && (T - 1) % k != k - 1 && T >= 1) {
            int nsenders = (T - 1) % k + 1; /* number of PEs sending their data to me in Step 1 */
            for (j = nsenders - 1; j >= 0; j--) {
                step1_recvfrom[nsenders - 1 - j] = T - nsenders + j;
            }
            step1_nrecvs = nsenders;
        }
    }
    INFO(NVSHMEM_COLL, "step 1 nbr computation completed");

    /* Step 2 */
    if (step1_sendto == -1) { /* calculate step2_nbrs only for participating PEs */
        int *digit = (int *)malloc(sizeof(int) * step2_nphases);
        assert(digit != NULL);
        int temppe = newpe;
        int mask = 0x1;
        int phase = 0, cbit, cnt, nbr, power;

        /* calculate the digits in base k representation of newpe */
        for (i = 0; i < log_p_of_k; i++) digit[i] = 0;
        int remainder, i_digit = 0;
        while (temppe != 0) {
            remainder = temppe % k;
            temppe = temppe / k;
            digit[i_digit] = remainder;
            i_digit++;
        }

        while (mask < p_of_k) {
            cbit = digit[phase]; /* phase_th digit changes in this phase, obtain its original value */
            cnt = 0;
            for (i = 0; i < k; i++) {   /* there are k-1 neighbors */
                if (i != cbit) {        /* do not generate yourself as your neighbor */
                    digit[phase] = i;   /* this gets us the base k representation of the neighbor */
                    /* calculate the base 10 value of the neighbor PE */
                    nbr = 0;
                    power = 1;
                    for (j = 0; j < log_p_of_k; j++) {
                        nbr += digit[j] * power;
                        power *= k;
                    }
                    /* calculate its real PE and store it */
                    step2_nbrs[phase][cnt] =
                        (nbr < rem / (k - 1)) ? (nbr * k) + (k - 1) : nbr + rem;
                    cnt++;
                }
            }
            INFO(NVSHMEM_COLL, "step 2, phase %d nbr calculation completed", phase);
            digit[phase] = cbit; /* reset the digit to original value */
            phase++;
            mask *= k;
        }
        free(digit); /* bugfix: this scratch array previously leaked */
    }

    // Copy the data to device memory
    hipMemcpyToSymbol(reduce_recexch_step1_sendto_d, &step1_sendto, sizeof(int));

    void *dev_ptr;
    hipMemcpyFromSymbol(&dev_ptr, reduce_recexch_step1_recvfrom_d, sizeof(int *));
    cuMemcpyHtoD((hipDeviceptr_t)dev_ptr, step1_recvfrom, sizeof(int) * step1_nrecvs);
    hipMemcpyToSymbol(reduce_recexch_step1_nrecvs_d, &step1_nrecvs, sizeof(int));

    hipMemcpyFromSymbol(&dev_ptr, reduce_recexch_step2_nbrs_d, sizeof(int **));
    void *dev_ptr_2;
    for (int i = 0; i < step2_nphases; i++) {
        cuMemcpyDtoH(&dev_ptr_2, (hipDeviceptr_t)((int **)dev_ptr + i), sizeof(int *));
        hipDeviceSynchronize();
        cuMemcpyHtoD((hipDeviceptr_t)dev_ptr_2, step2_nbrs[i], sizeof(int) * (k - 1));
        hipDeviceSynchronize();
    }
    hipMemcpyToSymbol(reduce_recexch_step2_nphases_d, &step2_nphases, sizeof(int));

    /* bugfix: release host scratch (previously leaked on every call) */
    free(step1_recvfrom);
    for (i = 0; i < step2_nphases; i++) free(step2_nbrs[i]);
    free(step2_nbrs);
}
| 25b85ac3457470207ce130628788a02a16f3dd8d.cu | /*
* * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* *
* * See COPYRIGHT for license information
* */
#include "nvshmem.h"
#include "nvshmemx.h"
#include "gpu_coll.h"
/* This is a utility function for the recursive exchange algorithm.
* It calculates the PEs to which data is sent or received from during the different steps
* of the algorithm. The recursive exchange algorithm is divided into 3 steps.
* In step 1, when the number of PEs are not a power of k, some PEs send their data
* to other PEs that participate in the power-of-k recursive exchange algorithm in Step 2.
* Step 2 has log_k (PE_size) phases. In each phase k PEs exchange data with each other.
* In Step 3, PEs from step 2 send the final reduced data to PEs that did not participate in Step 2.
*/
__host__ void nvshmemi_recexchalgo_get_neighbors(int my_pe, int num_pes) {
    int i, j, k;
    int p_of_k = 1, log_p_of_k = 0, rem, T, newpe;
    int step1_sendto, step1_nrecvs, step2_nphases;
    INFO(NVSHMEM_COLL, "step 1 nbr calculation started, num_pes = %d", num_pes);
    k = gpu_coll_env_params_var.reduce_recexch_kval;
    if (num_pes < k) /* If size of the active set is less than k, reduce the value of k */
        k = (num_pes > 2) ? num_pes : 2;
    /* Calculate p_of_k, p_of_k is the largest power of k that is less than num_pes */
    while (p_of_k <= num_pes) {
        p_of_k *= k;
        log_p_of_k++;
    }
    p_of_k /= k;
    log_p_of_k--;
    step2_nphases = log_p_of_k;
    /* Host-side scratch buffers. Their contents are copied into the device
     * symbols at the end of this function, after which the host copies are
     * released (they previously leaked on every call). */
    int *step1_recvfrom = (int *)malloc(sizeof(int) * (k - 1));
    int **step2_nbrs = (int **)malloc(sizeof(int *) * step2_nphases);
    for (int i = 0; i < step2_nphases; i++) {
        step2_nbrs[i] = (int *)malloc(sizeof(int) * (k - 1));
    }
    rem = num_pes - p_of_k;
    /* rem is the number of PEs that do not particpate in Step 2
     * We need to identify these non-participating PEs. This is done in the following way.
     * The first T PEs are divided into sets of k consecutive PEs each.
     * In each of these sets, the first k-1 PEs are the non-participating
     * PEs while the last PE is the participating PE.
     * The non-participating PEs send their data to the participating PE
     * in their corresponding set.
     */
    T = (rem * k) / (k - 1);
    INFO(NVSHMEM_COLL, "step 1 nbr calculation started. T is %d", T);
    step1_nrecvs = 0;
    step1_sendto = -1;
    /* Step 1 */
    if (my_pe < T) {
        if (my_pe % k != (k - 1)) { /* I am a non-participating PE */
            step1_sendto = my_pe + (k - 1 - my_pe % k); /* partipating PE to send the data to */
            /* if the corresponding participating PE is not in T,
             * then send to the Tth PE to preserve non-commutativity */
            if (step1_sendto > T - 1) step1_sendto = T;
            newpe = -1; /* tag this PE as non-participating */
        } else { /* participating PE */
            for (i = 0; i < k - 1; i++) {
                step1_recvfrom[i] = my_pe - i - 1;
            }
            step1_nrecvs = k - 1;
            newpe = my_pe / k; /* this is the new PE amongst the set of participating PEs */
        }
    } else { /* PE >= T */
        newpe = my_pe - rem;
        if (my_pe == T && (T - 1) % k != k - 1 && T >= 1) {
            int nsenders = (T - 1) % k + 1; /* number of PEs sending their data to me in Step 1 */
            for (j = nsenders - 1; j >= 0; j--) {
                step1_recvfrom[nsenders - 1 - j] = T - nsenders + j;
            }
            step1_nrecvs = nsenders;
        }
    }
    INFO(NVSHMEM_COLL, "step 1 nbr computation completed");
    /* Step 2 */
    if (step1_sendto == -1) { /* calulate step2_nbrs only for participating PEs */
        int *digit = (int *)malloc(sizeof(int) * step2_nphases);
        assert(digit != NULL);
        int temppe = newpe;
        int mask = 0x1;
        int phase = 0, cbit, cnt, nbr, power;
        /* calculate the digits in base k representation of newpe */
        for (i = 0; i < log_p_of_k; i++) digit[i] = 0;
        int remainder, i_digit = 0;
        while (temppe != 0) {
            remainder = temppe % k;
            temppe = temppe / k;
            digit[i_digit] = remainder;
            i_digit++;
        }
        while (mask < p_of_k) {
            cbit =
                digit[phase]; /* phase_th digit changes in this phase, obtain its original value */
            cnt = 0;
            for (i = 0; i < k; i++) {  /* there are k-1 neighbors */
                if (i != cbit) {       /* do not generate yourself as your nieighbor */
                    digit[phase] = i;  /* this gets us the base k representation of the neighbor */
                    /* calculate the base 10 value of the neighbor PE */
                    nbr = 0;
                    power = 1;
                    for (j = 0; j < log_p_of_k; j++) {
                        nbr += digit[j] * power;
                        power *= k;
                    }
                    /* calculate its real PE and store it */
                    step2_nbrs[phase][cnt] =
                        (nbr < rem / (k - 1)) ? (nbr * k) + (k - 1) : nbr + rem;
                    cnt++;
                }
            }
            INFO(NVSHMEM_COLL, "step 2, phase %d nbr calculation completed", phase);
            digit[phase] = cbit; /* reset the digit to original value */
            phase++;
            mask *= k;
        }
        free(digit); /* fix: was leaked on every call */
    }
    // Copy the data to device memory
    cudaMemcpyToSymbol(reduce_recexch_step1_sendto_d, &step1_sendto, sizeof(int));
    void *dev_ptr;
    cudaMemcpyFromSymbol(&dev_ptr, reduce_recexch_step1_recvfrom_d, sizeof(int *));
    cuMemcpyHtoD((CUdeviceptr)dev_ptr, step1_recvfrom, sizeof(int) * step1_nrecvs);
    cudaMemcpyToSymbol(reduce_recexch_step1_nrecvs_d, &step1_nrecvs, sizeof(int));
    cudaMemcpyFromSymbol(&dev_ptr, reduce_recexch_step2_nbrs_d, sizeof(int **));
    void *dev_ptr_2;
    for (int i = 0; i < step2_nphases; i++) {
        cuMemcpyDtoH(&dev_ptr_2, (CUdeviceptr)((int **)dev_ptr + i), sizeof(int *));
        cudaDeviceSynchronize();
        cuMemcpyHtoD((CUdeviceptr)dev_ptr_2, step2_nbrs[i], sizeof(int) * (k - 1));
        cudaDeviceSynchronize();
    }
    cudaMemcpyToSymbol(reduce_recexch_step2_nphases_d, &step2_nphases, sizeof(int));
    /* Device now owns copies of all neighbor tables; release host scratch
     * (fix: these buffers were previously leaked on every call). */
    for (int i = 0; i < step2_nphases; i++) {
        free(step2_nbrs[i]);
    }
    free(step2_nbrs);
    free(step1_recvfrom);
}
|
e66013e6c7312eabe49f6bf8b05102edda228c71.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#define TILE_WIDTH 16
void matmulCPU(float *a, float *b, float *r, int n);
__global__ void matmulGPU(float *a, float *b, float *r, int n);
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width);
int main(int argc, char* argv[]){
if (argc < 2) {
puts("Usage: matmul [N]");
return 0;
}
int N = atoi(argv[1]);
printf("N: %d\n", N);
//Total size
size_t sz = sizeof(float) * N * N;
//Struct for time measure
struct timeval start, end, timer;
//Memory allocation for cpu(host)
float *h_a = (float*)malloc(sz);
float *h_b = (float*)malloc(sz);
float *h_r = (float*)malloc(sz);
srand(time(NULL));
for(int i=0; i<N*N; i++) {
h_a[i] = (float)(rand()%100);
h_b[i] = (float)(rand()%100);
h_r[i] = 0;
}
//Memory alocation for gpu(device)
float *d_a, *d_b, *d_r;
hipMalloc((void **) &d_a, sz);
hipMalloc((void **) &d_b, sz);
hipMalloc((void **) &d_r, sz);
float *h_result_global = (float*)malloc(sz);
gettimeofday(&start, NULL);
matmulCPU(h_a, h_b, h_r, N);
gettimeofday(&end, NULL);
timersub(&end, &start, &timer);
printf("CPU elapsed time: %lf\n", (timer.tv_usec / 1000.0 + timer.tv_sec * 1000.0));
int threads_width = 16;
int grid_width = N % threads_width ? N / threads_width + 1 : N / threads_width;
dim3 dim_threads(threads_width, threads_width);
dim3 dim_grid(grid_width, grid_width);
gettimeofday(&start, NULL);
hipMemcpy(d_a, h_a, sz, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, sz, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dim_grid), dim3(dim_threads), 0, 0, d_a, d_b, d_r, N);
hipDeviceSynchronize();
hipMemcpy(h_result_global, d_r, sz, hipMemcpyDeviceToHost);
gettimeofday(&end, NULL);
timersub(&end, &start, &timer);
printf("GPU elapsed time: %lf\n", (timer.tv_usec / 1000.0 + timer.tv_sec * 1000.0));
for (int i = 0; i<N*N; i++){
if(h_r[i] != h_result_global[i]){
printf("Failed at %d, h_result_global, %f, %f\n", i, h_r[i], h_result_global[i]);
break;
}
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_r);
free(h_result_global);
free(h_a);
free(h_b);
free(h_r);
return 0;
}
/* Reference single-threaded matrix multiply: r = a * b for square,
 * row-major n x n matrices. Used by main() to validate the GPU result.
 * Note: i iterates over columns, j over rows (r[j*n+i] is row j, col i). */
void matmulCPU(float *a, float *b, float *r, int n){
    int i=0,j=0,x=0;
    for(i=0;i<n;i++){
        for(j=0;j<n;j++){
            float sum = 0.0f;
            // Dot product of row j of a with column i of b.
            for(x=0;x<n;x++){
                sum+=a[j*n + x] * b[x * n + i];
            }
            r[j*n + i] = sum;
        }
    }
}
/* Naive GPU matrix multiply: one thread computes one element of r = a * b
 * for square, row-major n x n matrices. Launch with a 2D grid/block that
 * covers at least n x n threads; out-of-range threads exit early (safe here
 * because the kernel uses no __syncthreads()). */
__global__ void matmulGPU(float *a, float *b, float *r, int n){
    int x = blockIdx.x * blockDim.x + threadIdx.x;  // column index
    int y = blockIdx.y * blockDim.y + threadIdx.y;  // row index
    if(x>=n || y>=n)
        return;
    float sum = 0;
    // Dot product of row y of a with column x of b.
    for(int i=0;i<n;i++)
        sum+=(a[y*n +i] * b[i*n+x]);
    r[y*n+x] = sum;
}
/* Tiled matrix multiply: Pd = Md * Nd for square, row-major Width x Width
 * matrices. Launch with blockDim = (TILE_WIDTH, TILE_WIDTH) and a grid of
 * ceil(Width/TILE_WIDTH)^2 blocks.
 *
 * Fixes over the previous version:
 *  - Pvalue is now initialized for every thread; it was only zeroed when
 *    tx == 0 || ty == 0, so most threads accumulated onto garbage.
 *  - Out-of-range threads no longer `return` before __syncthreads(), which
 *    was undefined behavior (divergent barrier) whenever Width was not a
 *    multiple of TILE_WIDTH; they now load zero padding and only skip the
 *    final store.
 *  - The last tile is no longer skipped when Width is an exact multiple of
 *    TILE_WIDTH (the old code used Width % TILE_WIDTH == 0 as its trip
 *    count for the final tile). Zero padding makes a full-width inner loop
 *    correct for every tile.
 */
__global__ void MatrixMulKernel(float*Md, float* Nd, float* Pd, int Width){
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int Row = blockIdx.y * TILE_WIDTH + ty;
    int Col = blockIdx.x * TILE_WIDTH + tx;
    float Pvalue = 0.0f;
    int num_tile = (Width + TILE_WIDTH - 1) / TILE_WIDTH;  // ceil division
    for(int m=0;m<num_tile;++m){
        int mCol = m * TILE_WIDTH + tx;  /* column read from Md */
        int mRow = m * TILE_WIDTH + ty;  /* row read from Nd */
        /* Zero-pad loads that fall outside the matrices so every thread can
         * participate in both barriers below. */
        Mds[ty][tx] = (Row < Width && mCol < Width) ? Md[Row * Width + mCol] : 0.0f;
        Nds[ty][tx] = (mRow < Width && Col < Width) ? Nd[mRow * Width + Col] : 0.0f;
        __syncthreads();
        for(int k=0; k<TILE_WIDTH; ++k){
            Pvalue += Mds[ty][k] * Nds[k][tx];
        }
        __syncthreads();  // protect shared tiles before the next iteration overwrites them
    }
    if(Row < Width && Col < Width){
        Pd[Row*Width+Col] = Pvalue;
    }
}
| e66013e6c7312eabe49f6bf8b05102edda228c71.cu |
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#define TILE_WIDTH 16
void matmulCPU(float *a, float *b, float *r, int n);
__global__ void matmulGPU(float *a, float *b, float *r, int n);
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width);
/* Driver: fills two random N x N float matrices, multiplies them on the CPU
 * (matmulCPU) and on the GPU (MatrixMulKernel), times both, and compares the
 * results element-by-element.
 * NOTE(review): no CUDA API call or kernel launch is error-checked here, and
 * the comparison uses exact float equality — acceptable for small integer
 * inputs (sums stay exactly representable) but fragile for large N; confirm. */
int main(int argc, char* argv[]){
    if (argc < 2) {
        puts("Usage: matmul [N]");
        return 0;
    }
    int N = atoi(argv[1]);
    printf("N: %d\n", N);
    //Total size
    size_t sz = sizeof(float) * N * N;
    //Struct for time measure
    struct timeval start, end, timer;
    //Memory allocation for cpu(host)
    float *h_a = (float*)malloc(sz);
    float *h_b = (float*)malloc(sz);
    float *h_r = (float*)malloc(sz);
    srand(time(NULL));
    // Random integer-valued inputs in [0, 99]; h_r holds the CPU reference.
    for(int i=0; i<N*N; i++) {
        h_a[i] = (float)(rand()%100);
        h_b[i] = (float)(rand()%100);
        h_r[i] = 0;
    }
    //Memory alocation for gpu(device)
    float *d_a, *d_b, *d_r;
    cudaMalloc((void **) &d_a, sz);
    cudaMalloc((void **) &d_b, sz);
    cudaMalloc((void **) &d_r, sz);
    float *h_result_global = (float*)malloc(sz);
    // Time the CPU reference implementation.
    gettimeofday(&start, NULL);
    matmulCPU(h_a, h_b, h_r, N);
    gettimeofday(&end, NULL);
    timersub(&end, &start, &timer);
    printf("CPU elapsed time: %lf\n", (timer.tv_usec / 1000.0 + timer.tv_sec * 1000.0));
    // 16x16 thread blocks; round the grid up so N x N is fully covered.
    int threads_width = 16;
    int grid_width = N % threads_width ? N / threads_width + 1 : N / threads_width;
    dim3 dim_threads(threads_width, threads_width);
    dim3 dim_grid(grid_width, grid_width);
    // GPU timing includes the host<->device transfers.
    gettimeofday(&start, NULL);
    cudaMemcpy(d_a, h_a, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sz, cudaMemcpyHostToDevice);
    MatrixMulKernel<<<dim_grid, dim_threads>>>(d_a, d_b, d_r, N);
    cudaDeviceSynchronize();
    cudaMemcpy(h_result_global, d_r, sz, cudaMemcpyDeviceToHost);
    gettimeofday(&end, NULL);
    timersub(&end, &start, &timer);
    printf("GPU elapsed time: %lf\n", (timer.tv_usec / 1000.0 + timer.tv_sec * 1000.0));
    // Exact-equality comparison against the CPU reference; stop at first mismatch.
    for (int i = 0; i<N*N; i++){
        if(h_r[i] != h_result_global[i]){
            printf("Failed at %d, h_result_global, %f, %f\n", i, h_r[i], h_result_global[i]);
            break;
        }
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_r);
    free(h_result_global);
    free(h_a);
    free(h_b);
    free(h_r);
    return 0;
}
/* Reference single-threaded matrix multiply: r = a * b for square,
 * row-major n x n matrices. Used by main() to validate the GPU result. */
void matmulCPU(float *a, float *b, float *r, int n){
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            /* Dot product of row `row` of a with column `col` of b,
             * accumulated in ascending k order (matches the original). */
            float acc = 0.0f;
            for (int k = 0; k < n; k++) {
                acc += a[row * n + k] * b[k * n + col];
            }
            r[row * n + col] = acc;
        }
    }
}
/* Naive GPU matrix multiply: one thread per output element of r = a * b
 * for square, row-major n x n matrices. Out-of-range threads exit early,
 * which is safe because this kernel has no barriers. */
__global__ void matmulGPU(float *a, float *b, float *r, int n){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= n || row >= n)
        return;
    float acc = 0;
    /* Dot product of row `row` of a with column `col` of b. */
    for (int k = 0; k < n; k++)
        acc += (a[row * n + k] * b[k * n + col]);
    r[row * n + col] = acc;
}
/* Tiled matrix multiply: Pd = Md * Nd for square, row-major Width x Width
 * matrices. Launch with blockDim = (TILE_WIDTH, TILE_WIDTH) and a grid of
 * ceil(Width/TILE_WIDTH)^2 blocks.
 *
 * Fixes over the previous version:
 *  - Pvalue is now initialized for every thread; it was only zeroed when
 *    tx == 0 || ty == 0, so most threads accumulated onto garbage.
 *  - Out-of-range threads no longer `return` before __syncthreads(), which
 *    was undefined behavior (divergent barrier) whenever Width was not a
 *    multiple of TILE_WIDTH; they now load zero padding and only skip the
 *    final store.
 *  - The last tile is no longer skipped when Width is an exact multiple of
 *    TILE_WIDTH (the old code used Width % TILE_WIDTH == 0 as its trip
 *    count for the final tile). Zero padding makes a full-width inner loop
 *    correct for every tile.
 */
__global__ void MatrixMulKernel(float*Md, float* Nd, float* Pd, int Width){
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int Row = blockIdx.y * TILE_WIDTH + ty;
    int Col = blockIdx.x * TILE_WIDTH + tx;
    float Pvalue = 0.0f;
    int num_tile = (Width + TILE_WIDTH - 1) / TILE_WIDTH;  // ceil division
    for(int m=0;m<num_tile;++m){
        int mCol = m * TILE_WIDTH + tx;  /* column read from Md */
        int mRow = m * TILE_WIDTH + ty;  /* row read from Nd */
        /* Zero-pad loads that fall outside the matrices so every thread can
         * participate in both barriers below. */
        Mds[ty][tx] = (Row < Width && mCol < Width) ? Md[Row * Width + mCol] : 0.0f;
        Nds[ty][tx] = (mRow < Width && Col < Width) ? Nd[mRow * Width + Col] : 0.0f;
        __syncthreads();
        for(int k=0; k<TILE_WIDTH; ++k){
            Pvalue += Mds[ty][k] * Nds[k][tx];
        }
        __syncthreads();  // protect shared tiles before the next iteration overwrites them
    }
    if(Row < Width && Col < Width){
        Pd[Row*Width+Col] = Pvalue;
    }
}
|
58f8cffda2e177595f8929694aadc731c4ca3698.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sha3_gpu.h"
#include "helper_cuda.h"
#include "common.h"
#include <cstdlib>
#include <cassert>
#include <limits>
namespace
{
constexpr size_t npos = std::numeric_limits<size_t>::max();
// Array of indicies and rotation values for P and Pi phases.
__constant__ uint8_t g_ppi_aux[25][2];
// Array of indices for ksi phase.
__constant__ uint8_t g_ksi_aux[25][2];
__constant__ uint64_t g_iota_aux[24];
bool inited = false;
/* Uploads the Keccak permutation tables (rho/pi indices, chi indices, iota
 * round constants) into __constant__ device memory. Must run once before any
 * kernel that calls processState().
 * Fix: the file-scope `inited` guard flag was never set, so every
 * SHA3_gpu / SHA3_gpu_batch constructor re-uploaded the tables; it is now
 * set on completion. */
void init_constants()
{
    const std::array<std::pair<uint8_t, uint8_t>, 25> h_ppi_aux = {
        {{0, 0}, {6, 44}, {12, 43}, {18, 21}, {24, 14}, {3, 28}, {9, 20}, {10, 3}, {16, 45},
         {22, 61}, {1, 1}, {7, 6}, {13, 25}, {19, 8}, {20, 18}, {4, 27}, {5, 36}, {11, 10},
         {17, 15}, {23, 56}, {2, 62}, {8, 55}, {14, 39}, {15, 41}, {21, 2}}};
    checkCudaErrors(hipMemcpyToSymbol(g_ppi_aux, h_ppi_aux.data(), h_ppi_aux.size() * sizeof(uint8_t) * 2));
    const std::array<std::pair<uint8_t, uint8_t>, 25> h_ksi_aux = {
        {{1, 2}, {2, 3}, {3, 4}, {4, 0}, {0, 1}, {6, 7}, {7, 8}, {8, 9}, {9, 5},
         {5, 6}, {11, 12}, {12, 13}, {13, 14}, {14, 10}, {10, 11}, {16, 17}, {17, 18}, {18, 19},
         {19, 15}, {15, 16}, {21, 22}, {22, 23}, {23, 24}, {24, 20}, {20, 21}}};
    checkCudaErrors(hipMemcpyToSymbol(g_ksi_aux, h_ksi_aux.data(), h_ksi_aux.size() * sizeof(uint8_t) * 2));
    const std::array<uint64_t, 24> h_iota_aux = {
        0x0000000000000001L, 0x0000000000008082L, 0x800000000000808aL, 0x8000000080008000L, 0x000000000000808bL,
        0x0000000080000001L, 0x8000000080008081L, 0x8000000000008009L, 0x000000000000008aL, 0x0000000000000088L,
        0x0000000080008009L, 0x000000008000000aL, 0x000000008000808bL, 0x800000000000008bL, 0x8000000000008089L,
        0x8000000000008003L, 0x8000000000008002L, 0x8000000000000080L, 0x000000000000800aL, 0x800000008000000aL,
        0x8000000080008081L, 0x8000000000008080L, 0x0000000080000001L, 0x8000000080008008L};
    checkCudaErrors(hipMemcpyToSymbol(g_iota_aux, h_iota_aux.data(), h_iota_aux.size() * sizeof(uint64_t)));
    inited = true;  // mark tables as uploaded so later constructors skip this
}
__device__ uint64_t rotate(uint64_t val, unsigned n) { return val << n | val >> (64 - n); }
__device__ void processState(uint64_t *A)
{
const size_t t = threadIdx.x;
const size_t s = threadIdx.x % 5;
__shared__ uint64_t C[25];
assert(t < 25);
#pragma unroll
for (int round_idx = 0; round_idx < 24; ++round_idx)
{
// Thetta phase.
C[t] = A[s] ^ A[s + 5] ^ A[s + 10] ^ A[s + 15] ^ A[s + 20];
A[t] ^= C[s + 5 - 1] ^ rotate(C[s + 1], 1);
// P and Pi combined phases.
C[t] = rotate(A[g_ppi_aux[t][0]], g_ppi_aux[t][1]);
// Ksi phase.
A[t] = C[t] ^ (~C[g_ksi_aux[t][0]] & C[g_ksi_aux[t][1]]);
// Iota phase.
A[t] ^= t == 0 ? g_iota_aux[round_idx] : 0;
}
}
__global__ void processBlockDevice(const uint64_t *data, size_t singleBufSize, const uint64_t *end,
uint64_t *A_original)
{
const size_t t = threadIdx.x;
__shared__ uint64_t A[25];
if (t < 25)
{
A[t] = A_original[t];
for (; data != end; data += singleBufSize)
{
if (t < singleBufSize)
{
// Apply data to inner state. Nvidia keeps all data in little-endian.
A[t] ^= data[t];
}
processState(A);
}
A_original[t] = A[t];
}
}
void addPadding(uint8_t *d_begin, uint8_t *d_end)
{
const int maxBuf = 144;
assert(d_end > d_begin);
size_t size = d_end - d_begin;
assert(size <= maxBuf);
uint8_t buf[maxBuf] = {};
if (size == 1)
{
buf[0] = 0x86;
}
else
{
buf[0] = 0x06;
buf[size - 1] = 0x80;
}
checkCudaErrors(hipMemcpy(d_begin, buf, size, hipMemcpyHostToDevice));
}
} // namespace
SHA3_gpu::~SHA3_gpu()
{
checkCudaErrors(hipFree(m_d_blockBuffers));
checkCudaErrors(hipFree(m_d_A));
}
SHA3_gpu::SHA3_gpu(size_t size)
: m_digestSize(size / 8)
{
assert(m_digestSize * 8 == size);
if (!inited)
{
init_constants();
}
checkCudaErrors(hipMalloc(&m_d_A, 25 * 8));
m_singleBufSz = 200 - 2 * m_digestSize;
checkCudaErrors(hipMalloc(&m_d_blockBuffers, m_singleBufSz * m_nBuffers));
checkCudaErrors(hipMemset(m_d_blockBuffers, 0, m_singleBufSz * m_nBuffers));
init();
}
void SHA3_gpu::init()
{
checkCudaErrors(hipMemset(m_d_A, 0, 25 * sizeof(uint64_t)));
m_bufferOffset = 0;
m_finished = false;
}
void SHA3_gpu::add(const uint8_t *data, size_t sz)
{
assert(!m_finished && "Init should be called");
size_t blockSz = m_nBuffers * m_singleBufSz;
while (sz != 0)
{
if (sz < blockSz - m_bufferOffset)
{
checkCudaErrors(hipMemcpy(m_d_blockBuffers + m_bufferOffset, data, sz, hipMemcpyHostToDevice));
m_bufferOffset += sz;
return;
}
size_t dataSize = blockSz - m_bufferOffset;
checkCudaErrors(hipMemcpy(m_d_blockBuffers + m_bufferOffset, data, dataSize, hipMemcpyHostToDevice));
processBlock(blockSz);
m_bufferOffset = 0;
sz -= dataSize;
data += dataSize;
}
}
std::vector<uint8_t> SHA3_gpu::digest()
{
if (!m_finished)
{
size_t size = (m_bufferOffset / m_singleBufSz + 1) * m_singleBufSz;
addPadding(m_d_blockBuffers + m_bufferOffset, m_d_blockBuffers + size);
processBlock(size);
m_finished = true;
}
std::vector<uint8_t> result(m_digestSize);
checkCudaErrors(hipMemcpy(result.data(), m_d_A, m_digestSize, hipMemcpyDeviceToHost));
return result;
}
void SHA3_gpu::processBlock(size_t bufSize)
{
assert(bufSize % m_singleBufSz == 0);
auto ptr64 = reinterpret_cast<const uint64_t *>(m_d_blockBuffers);
assert(m_singleBufSz % 8 == 0);
hipLaunchKernelGGL(( processBlockDevice), dim3(1), dim3(32), 0, 0, ptr64, m_singleBufSz / 8, ptr64 + bufSize / 8, m_d_A);
}
//
// SHA3_gpu_batch
//
struct SHA3_gpu_batch::State
{
uint64_t *d_A;
uint8_t *d_blockBuffer;
size_t bufferSize = 0; // Buffer's payload size.
};
namespace
{
__global__ void processBatchBlockDevice(SHA3_gpu_batch::State *data, size_t blockSize)
{
int t = threadIdx.x;
int b = blockIdx.x;
__shared__ uint64_t A[25];
if (t < 25)
{
size_t bufSize = data[b].bufferSize / 8;
A[t] = data[b].d_A[t];
const uint64_t *buf = reinterpret_cast<const uint64_t *>(data[b].d_blockBuffer);
for (; bufSize != 0; bufSize -= blockSize, buf += blockSize)
{
if (t < blockSize)
{
A[t] ^= buf[t];
}
processState(A);
}
data[b].d_A[t] = A[t];
}
}
} // namespace
SHA3_gpu_batch::SHA3_gpu_batch(size_t block)
: m_digestSize(block / 8)
, m_singleBlockSize(200 - 2 * m_digestSize)
{
assert(m_digestSize * 8 == block);
if (!inited)
{
init_constants();
}
int device;
checkCudaErrors(hipGetDevice(&device));
hipDeviceProp_t props;
checkCudaErrors(hipGetDeviceProperties(&props, device));
int cores = props.major == 9999 && props.minor == 9999 ? 1 : _ConvertSMVer2Cores(props.major, props.minor);
cores *= props.multiProcessorCount;
m_nBlocks = cores / props.warpSize;
m_states = std::make_unique<State[]>(m_nBlocks);
checkCudaErrors(hipMalloc(&m_d_states, m_nBlocks * sizeof(State)));
size_t aSize = 25 * sizeof(uint64_t);
size_t available;
checkCudaErrors(hipMemGetInfo(&available, nullptr));
size_t nSingleBuffers = (available - aSize * m_nBlocks) / m_nBlocks / m_singleBlockSize;
if (nSingleBuffers == 0)
{
throw std::logic_error("Not enough memory on gpu device. Please, select another one");
}
// std::min takes reference and m_maxBuffers is not placed.
// Create new value, that equals to m_maxBuffers.
nSingleBuffers = ::min(nSingleBuffers, size_t(m_maxBuffers));
m_bufferSize = nSingleBuffers * m_singleBlockSize;
for (size_t i = 0; i < m_nBlocks; ++i)
{
checkCudaErrors(hipMalloc(&m_states[i].d_A, aSize));
checkCudaErrors(hipMalloc(&m_states[i].d_blockBuffer, m_bufferSize));
assert(m_states[i].d_blockBuffer != nullptr);
}
}
SHA3_gpu_batch::~SHA3_gpu_batch()
{
for (size_t i = 0; i < m_nBlocks; ++i)
{
checkCudaErrors(hipFree(m_states[i].d_A));
checkCudaErrors(hipFree(m_states[i].d_blockBuffer));
}
checkCudaErrors(hipFree(m_d_states));
}
std::vector<SHA3_gpu_batch::Digest>
SHA3_gpu_batch::calculate(const std::vector<std::pair<const uint8_t *, size_t>> &datas)
{
struct LocalState
{
size_t idx = npos; // index of processed element
size_t globalOffset = 0;
};
std::vector<SHA3_gpu_batch::Digest> result = prepareResult(datas.size());
size_t loopSize = std::min<size_t>(m_nBlocks, datas.size());
std::vector<LocalState> localState(loopSize);
size_t next = 0;
size_t finished = 0;
while (finished < datas.size())
{
for (size_t i = 0; i < loopSize; ++i)
{
// Task distributing.
if (localState[i].idx == npos)
{
if (next >= datas.size())
{
// Nothing to give.
continue;
}
localState[i].idx = next++;
localState[i].globalOffset = 0;
checkCudaErrors(hipMemset(m_states[i].d_A, 0, 25 * 8));
}
// Fill buffers.
auto &from = datas[localState[i].idx];
size_t nCopy = ::min(from.second - localState[i].globalOffset, m_bufferSize);
checkCudaErrors(hipMemcpy(m_states[i].d_blockBuffer, from.first + localState[i].globalOffset, nCopy,
hipMemcpyHostToDevice));
if (nCopy != m_bufferSize)
{
// We didn't fill the whole buffer => it's data end. We need to add padding to the last block.
size_t newOffset = (1 + nCopy / m_singleBlockSize) * m_singleBlockSize;
assert(newOffset <= m_bufferSize);
addPadding(m_states[i].d_blockBuffer + nCopy, m_states[i].d_blockBuffer + newOffset);
nCopy = newOffset;
}
m_states[i].bufferSize = nCopy;
localState[i].globalOffset += nCopy;
}
launchKernel();
for (size_t i = 0; i < loopSize; ++i)
{
if (localState[i].idx != npos && localState[i].globalOffset > datas[localState[i].idx].second)
{
// Collect results
checkCudaErrors(
hipMemcpy(result[localState[i].idx].data(), m_states[i].d_A, m_digestSize, hipMemcpyDeviceToHost));
localState[i].idx = npos;
// Mark state as empty for cases when there is no work to do.
// This 0 is required for gpu not to perform inappropriate work.
m_states[i].bufferSize = 0;
++finished;
}
}
}
return result;
}
std::vector<SHA3_gpu_batch::Digest> SHA3_gpu_batch::prepareResult(size_t size)
{
std::vector<Digest> result;
result.resize(size);
for (size_t i = 0; i < size; ++i)
{
result[i].resize(m_digestSize);
}
return result;
}
void SHA3_gpu_batch::launchKernel()
{
if (!isLittleEndian())
{
for (size_t i = 0; i < m_nBlocks; ++i)
{
m_states[i].bufferSize = toLittleEndian(m_states[i].bufferSize);
}
}
checkCudaErrors(hipMemcpy(m_d_states, m_states.get(), m_nBlocks * sizeof(State), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( processBatchBlockDevice), dim3(m_nBlocks), dim3(32), 0, 0, m_d_states, m_singleBlockSize / 8);
#ifndef NDEBUG
checkCudaErrors(hipDeviceSynchronize());
#endif
}
| 58f8cffda2e177595f8929694aadc731c4ca3698.cu | #include "sha3_gpu.h"
#include "helper_cuda.h"
#include "common.h"
#include <cstdlib>
#include <cassert>
#include <limits>
namespace
{
constexpr size_t npos = std::numeric_limits<size_t>::max();
// Array of indicies and rotation values for P and Pi phases.
__constant__ uint8_t g_ppi_aux[25][2];
// Array of indices for ksi phase.
__constant__ uint8_t g_ksi_aux[25][2];
__constant__ uint64_t g_iota_aux[24];
bool inited = false;
/* Uploads the Keccak permutation tables (rho/pi indices, chi indices, iota
 * round constants) into __constant__ device memory. Must run once before any
 * kernel that calls processState().
 * Fix: the file-scope `inited` guard flag was never set, so every
 * SHA3_gpu / SHA3_gpu_batch constructor re-uploaded the tables; it is now
 * set on completion. */
void init_constants()
{
    const std::array<std::pair<uint8_t, uint8_t>, 25> h_ppi_aux = {
        {{0, 0}, {6, 44}, {12, 43}, {18, 21}, {24, 14}, {3, 28}, {9, 20}, {10, 3}, {16, 45},
         {22, 61}, {1, 1}, {7, 6}, {13, 25}, {19, 8}, {20, 18}, {4, 27}, {5, 36}, {11, 10},
         {17, 15}, {23, 56}, {2, 62}, {8, 55}, {14, 39}, {15, 41}, {21, 2}}};
    checkCudaErrors(cudaMemcpyToSymbol(g_ppi_aux, h_ppi_aux.data(), h_ppi_aux.size() * sizeof(uint8_t) * 2));
    const std::array<std::pair<uint8_t, uint8_t>, 25> h_ksi_aux = {
        {{1, 2}, {2, 3}, {3, 4}, {4, 0}, {0, 1}, {6, 7}, {7, 8}, {8, 9}, {9, 5},
         {5, 6}, {11, 12}, {12, 13}, {13, 14}, {14, 10}, {10, 11}, {16, 17}, {17, 18}, {18, 19},
         {19, 15}, {15, 16}, {21, 22}, {22, 23}, {23, 24}, {24, 20}, {20, 21}}};
    checkCudaErrors(cudaMemcpyToSymbol(g_ksi_aux, h_ksi_aux.data(), h_ksi_aux.size() * sizeof(uint8_t) * 2));
    const std::array<uint64_t, 24> h_iota_aux = {
        0x0000000000000001L, 0x0000000000008082L, 0x800000000000808aL, 0x8000000080008000L, 0x000000000000808bL,
        0x0000000080000001L, 0x8000000080008081L, 0x8000000000008009L, 0x000000000000008aL, 0x0000000000000088L,
        0x0000000080008009L, 0x000000008000000aL, 0x000000008000808bL, 0x800000000000008bL, 0x8000000000008089L,
        0x8000000000008003L, 0x8000000000008002L, 0x8000000000000080L, 0x000000000000800aL, 0x800000008000000aL,
        0x8000000080008081L, 0x8000000000008080L, 0x0000000080000001L, 0x8000000080008008L};
    checkCudaErrors(cudaMemcpyToSymbol(g_iota_aux, h_iota_aux.data(), h_iota_aux.size() * sizeof(uint64_t)));
    inited = true;  // mark tables as uploaded so later constructors skip this
}
__device__ uint64_t rotate(uint64_t val, unsigned n) { return val << n | val >> (64 - n); }
/* Applies the 24 Keccak-f[1600] rounds to the 5x5x64-bit state A, one lane
 * per thread. Callers launch 32 threads and gate with `if (t < 25)`, so the
 * whole state lives within one warp.
 * NOTE(review): threads write C[t] and immediately read other lanes' C
 * entries with no __syncwarp() between the write and the read — this relies
 * on implicit warp-synchronous execution, which is not guaranteed under
 * Volta+ independent thread scheduling; confirm or add __syncwarp(). */
__device__ void processState(uint64_t *A)
{
    const size_t t = threadIdx.x;
    const size_t s = threadIdx.x % 5;   // column index of this lane
    __shared__ uint64_t C[25];          // scratch; column parities are replicated across rows
    assert(t < 25);
#pragma unroll
    for (int round_idx = 0; round_idx < 24; ++round_idx)
    {
        // Theta phase: C[t] depends only on s, so rows hold identical copies,
        // which lets C[s + 4] stand in for C[(s + 4) % 5] below.
        C[t] = A[s] ^ A[s + 5] ^ A[s + 10] ^ A[s + 15] ^ A[s + 20];
        A[t] ^= C[s + 5 - 1] ^ rotate(C[s + 1], 1);
        // P and Pi combined phases.
        C[t] = rotate(A[g_ppi_aux[t][0]], g_ppi_aux[t][1]);
        // Ksi phase.
        A[t] = C[t] ^ (~C[g_ksi_aux[t][0]] & C[g_ksi_aux[t][1]]);
        // Iota phase: only lane 0 mixes in the round constant.
        A[t] ^= t == 0 ? g_iota_aux[round_idx] : 0;
    }
}
__global__ void processBlockDevice(const uint64_t *data, size_t singleBufSize, const uint64_t *end,
uint64_t *A_original)
{
const size_t t = threadIdx.x;
__shared__ uint64_t A[25];
if (t < 25)
{
A[t] = A_original[t];
for (; data != end; data += singleBufSize)
{
if (t < singleBufSize)
{
// Apply data to inner state. Nvidia keeps all data in little-endian.
A[t] ^= data[t];
}
processState(A);
}
A_original[t] = A[t];
}
}
/* Writes padding into the device buffer span [d_begin, d_end).
 * The byte values 0x06 / 0x80 (or 0x86 when the span is a single byte)
 * match SHA-3 style pad10*1 with a domain-suffix first byte — presumably
 * FIPS 202 SHA3 padding given the surrounding code; the bytes between the
 * first and last are zeroed via the value-initialized staging buffer. */
void addPadding(uint8_t *d_begin, uint8_t *d_end)
{
    const int maxBuf = 144;  // largest rate used (200 - 2*28 for SHA3-224)
    assert(d_end > d_begin);
    size_t size = d_end - d_begin;
    assert(size <= maxBuf);
    uint8_t buf[maxBuf] = {};  // zero-initialized staging area on the host
    if (size == 1)
    {
        buf[0] = 0x86;  // first and last padding byte coincide
    }
    else
    {
        buf[0] = 0x06;
        buf[size - 1] = 0x80;
    }
    checkCudaErrors(cudaMemcpy(d_begin, buf, size, cudaMemcpyHostToDevice));
}
} // namespace
SHA3_gpu::~SHA3_gpu()
{
checkCudaErrors(cudaFree(m_d_blockBuffers));
checkCudaErrors(cudaFree(m_d_A));
}
SHA3_gpu::SHA3_gpu(size_t size)
: m_digestSize(size / 8)
{
assert(m_digestSize * 8 == size);
if (!inited)
{
init_constants();
}
checkCudaErrors(cudaMalloc(&m_d_A, 25 * 8));
m_singleBufSz = 200 - 2 * m_digestSize;
checkCudaErrors(cudaMalloc(&m_d_blockBuffers, m_singleBufSz * m_nBuffers));
checkCudaErrors(cudaMemset(m_d_blockBuffers, 0, m_singleBufSz * m_nBuffers));
init();
}
void SHA3_gpu::init()
{
checkCudaErrors(cudaMemset(m_d_A, 0, 25 * sizeof(uint64_t)));
m_bufferOffset = 0;
m_finished = false;
}
/* Streams `sz` bytes of input into the hash. Data is staged into the device
 * block buffer; whenever the buffer fills completely (m_nBuffers rate-sized
 * blocks), the accumulated blocks are absorbed by processBlock() and the
 * buffer restarts. A partial tail stays in the buffer until the next add()
 * or digest() call. */
void SHA3_gpu::add(const uint8_t *data, size_t sz)
{
    assert(!m_finished && "Init should be called");
    size_t blockSz = m_nBuffers * m_singleBufSz;  // capacity of the staging buffer
    while (sz != 0)
    {
        // Strictly-less-than: an exact fill falls through and is processed now.
        if (sz < blockSz - m_bufferOffset)
        {
            checkCudaErrors(cudaMemcpy(m_d_blockBuffers + m_bufferOffset, data, sz, cudaMemcpyHostToDevice));
            m_bufferOffset += sz;
            return;
        }
        // Top up the buffer to capacity, absorb it, then continue with the rest.
        size_t dataSize = blockSz - m_bufferOffset;
        checkCudaErrors(cudaMemcpy(m_d_blockBuffers + m_bufferOffset, data, dataSize, cudaMemcpyHostToDevice));
        processBlock(blockSz);
        m_bufferOffset = 0;
        sz -= dataSize;
        data += dataSize;
    }
}
std::vector<uint8_t> SHA3_gpu::digest()
{
if (!m_finished)
{
size_t size = (m_bufferOffset / m_singleBufSz + 1) * m_singleBufSz;
addPadding(m_d_blockBuffers + m_bufferOffset, m_d_blockBuffers + size);
processBlock(size);
m_finished = true;
}
std::vector<uint8_t> result(m_digestSize);
checkCudaErrors(cudaMemcpy(result.data(), m_d_A, m_digestSize, cudaMemcpyDeviceToHost));
return result;
}
void SHA3_gpu::processBlock(size_t bufSize)
{
assert(bufSize % m_singleBufSz == 0);
auto ptr64 = reinterpret_cast<const uint64_t *>(m_d_blockBuffers);
assert(m_singleBufSz % 8 == 0);
processBlockDevice<<<1, 32>>>(ptr64, m_singleBufSz / 8, ptr64 + bufSize / 8, m_d_A);
}
//
// SHA3_gpu_batch
//
struct SHA3_gpu_batch::State
{
uint64_t *d_A;
uint8_t *d_blockBuffer;
size_t bufferSize = 0; // Buffer's payload size.
};
namespace
{
__global__ void processBatchBlockDevice(SHA3_gpu_batch::State *data, size_t blockSize)
{
int t = threadIdx.x;
int b = blockIdx.x;
__shared__ uint64_t A[25];
if (t < 25)
{
size_t bufSize = data[b].bufferSize / 8;
A[t] = data[b].d_A[t];
const uint64_t *buf = reinterpret_cast<const uint64_t *>(data[b].d_blockBuffer);
for (; bufSize != 0; bufSize -= blockSize, buf += blockSize)
{
if (t < blockSize)
{
A[t] ^= buf[t];
}
processState(A);
}
data[b].d_A[t] = A[t];
}
}
} // namespace
SHA3_gpu_batch::SHA3_gpu_batch(size_t block)
: m_digestSize(block / 8)
, m_singleBlockSize(200 - 2 * m_digestSize)
{
assert(m_digestSize * 8 == block);
if (!inited)
{
init_constants();
}
int device;
checkCudaErrors(cudaGetDevice(&device));
cudaDeviceProp props;
checkCudaErrors(cudaGetDeviceProperties(&props, device));
int cores = props.major == 9999 && props.minor == 9999 ? 1 : _ConvertSMVer2Cores(props.major, props.minor);
cores *= props.multiProcessorCount;
m_nBlocks = cores / props.warpSize;
m_states = std::make_unique<State[]>(m_nBlocks);
checkCudaErrors(cudaMalloc(&m_d_states, m_nBlocks * sizeof(State)));
size_t aSize = 25 * sizeof(uint64_t);
size_t available;
checkCudaErrors(cudaMemGetInfo(&available, nullptr));
size_t nSingleBuffers = (available - aSize * m_nBlocks) / m_nBlocks / m_singleBlockSize;
if (nSingleBuffers == 0)
{
throw std::logic_error("Not enough memory on gpu device. Please, select another one");
}
// std::min takes reference and m_maxBuffers is not placed.
// Create new value, that equals to m_maxBuffers.
nSingleBuffers = std::min(nSingleBuffers, size_t(m_maxBuffers));
m_bufferSize = nSingleBuffers * m_singleBlockSize;
for (size_t i = 0; i < m_nBlocks; ++i)
{
checkCudaErrors(cudaMalloc(&m_states[i].d_A, aSize));
checkCudaErrors(cudaMalloc(&m_states[i].d_blockBuffer, m_bufferSize));
assert(m_states[i].d_blockBuffer != nullptr);
}
}
SHA3_gpu_batch::~SHA3_gpu_batch()
{
for (size_t i = 0; i < m_nBlocks; ++i)
{
checkCudaErrors(cudaFree(m_states[i].d_A));
checkCudaErrors(cudaFree(m_states[i].d_blockBuffer));
}
checkCudaErrors(cudaFree(m_d_states));
}
std::vector<SHA3_gpu_batch::Digest>
SHA3_gpu_batch::calculate(const std::vector<std::pair<const uint8_t *, size_t>> &datas)
{
struct LocalState
{
size_t idx = npos; // index of processed element
size_t globalOffset = 0;
};
std::vector<SHA3_gpu_batch::Digest> result = prepareResult(datas.size());
size_t loopSize = std::min<size_t>(m_nBlocks, datas.size());
std::vector<LocalState> localState(loopSize);
size_t next = 0;
size_t finished = 0;
while (finished < datas.size())
{
for (size_t i = 0; i < loopSize; ++i)
{
// Task distributing.
if (localState[i].idx == npos)
{
if (next >= datas.size())
{
// Nothing to give.
continue;
}
localState[i].idx = next++;
localState[i].globalOffset = 0;
checkCudaErrors(cudaMemset(m_states[i].d_A, 0, 25 * 8));
}
// Fill buffers.
auto &from = datas[localState[i].idx];
size_t nCopy = std::min(from.second - localState[i].globalOffset, m_bufferSize);
checkCudaErrors(cudaMemcpy(m_states[i].d_blockBuffer, from.first + localState[i].globalOffset, nCopy,
cudaMemcpyHostToDevice));
if (nCopy != m_bufferSize)
{
// We didn't fill the whole buffer => it's data end. We need to add padding to the last block.
size_t newOffset = (1 + nCopy / m_singleBlockSize) * m_singleBlockSize;
assert(newOffset <= m_bufferSize);
addPadding(m_states[i].d_blockBuffer + nCopy, m_states[i].d_blockBuffer + newOffset);
nCopy = newOffset;
}
m_states[i].bufferSize = nCopy;
localState[i].globalOffset += nCopy;
}
launchKernel();
for (size_t i = 0; i < loopSize; ++i)
{
if (localState[i].idx != npos && localState[i].globalOffset > datas[localState[i].idx].second)
{
// Collect results
checkCudaErrors(
cudaMemcpy(result[localState[i].idx].data(), m_states[i].d_A, m_digestSize, cudaMemcpyDeviceToHost));
localState[i].idx = npos;
// Mark state as empty for cases when there is no work to do.
// This 0 is required for gpu not to perform inappropriate work.
m_states[i].bufferSize = 0;
++finished;
}
}
}
return result;
}
std::vector<SHA3_gpu_batch::Digest> SHA3_gpu_batch::prepareResult(size_t size)
{
std::vector<Digest> result;
result.resize(size);
for (size_t i = 0; i < size; ++i)
{
result[i].resize(m_digestSize);
}
return result;
}
// Upload the per-stream states to the device and run one batch step of
// block processing: one CUDA block (32 threads) per state.
void SHA3_gpu_batch::launchKernel()
{
// The device reads bufferSize as little-endian; convert on big-endian
// hosts. NOTE(review): this mutates m_states in place and is not undone —
// presumably bufferSize is rewritten by the caller before the next launch;
// confirm.
if (!isLittleEndian())
{
for (size_t i = 0; i < m_nBlocks; ++i)
{
m_states[i].bufferSize = toLittleEndian(m_states[i].bufferSize);
}
}
checkCudaErrors(cudaMemcpy(m_d_states, m_states.get(), m_nBlocks * sizeof(State), cudaMemcpyHostToDevice));
// m_singleBlockSize is a byte count; /8 presumably converts it to 64-bit
// words for the kernel — confirm against processBatchBlockDevice.
processBatchBlockDevice<<<m_nBlocks, 32>>>(m_d_states, m_singleBlockSize / 8);
#ifndef NDEBUG
// Debug builds synchronize so asynchronous kernel faults surface here.
checkCudaErrors(cudaDeviceSynchronize());
#endif
}
|
942ea93ef6d4a856b1fd53c1f385bcd34807fb9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuckoo/cuckoo.hpp"
#include "cuckoo/array.hpp"
#include "cuckoo/pcg_random.hpp"
// Add in the kernels.
#include "cuckoo_gpu_impl.cu"
// Zero the host copies of the cuckoo table and stash, then push both to
// the GPU. NOTE(review): N, stash_size and num_hash_functions are ignored
// here — capacities come from the compile-time constants
// cuckoo::CUCKOO_SIZE / cuckoo::STASH_SIZE; confirm that is intended.
Cuckoo<cuckoo::CudaBackend>::Cuckoo(const std::size_t N, const std::size_t stash_size, const std::size_t num_hash_functions) {
for (cuckoo::Entry i = 0; i < cuckoo::CUCKOO_SIZE; ++i) {
this->cuckoo.get_cpu()[i] = 0;
}
for (cuckoo::Entry i = 0; i < cuckoo::STASH_SIZE; ++i) {
this->stash.get_cpu()[i] = 0;
}
this->cuckoo.to_gpu();
this->stash.to_gpu();
}
// Nothing to release explicitly — device buffers are presumably freed by
// the member arrays' own destructors; confirm against GPUArray.
Cuckoo<cuckoo::CudaBackend>::~Cuckoo() {
}
// Insert N key/value pairs into the device-side cuckoo table.
// results[i] is used as an in/out status slot: preset to 1 (failure),
// presumably cleared by gpu_set on successful insertion — confirm.
// Returns the number of keys that failed to insert.
int Cuckoo<cuckoo::CudaBackend>::set(const std::size_t& N, int* keys, int* values, int* results) {
GPUArray<int> g_keys(keys, N);
GPUArray<int> g_values(values, N);
GPUArray<int> g_results(results, N);
// std::size_t matches N; the original `int` counter mixed signedness and
// would overflow for N > INT_MAX (the later loops already use std::size_t).
for (std::size_t i = 0; i < N; ++i) {
g_results.get_cpu()[i] = 1;
}
hipLaunchKernelGGL(( gpu_set), dim3(this->grid_size(N)), dim3(this->block_size()), 0, 0, g_keys.to_gpu(), g_values.to_gpu(), g_results.to_gpu(), N, this->cuckoo.to_gpu(), this->stash.to_gpu());
cudaCheckError()
int count_failed = 0;
for (std::size_t i = 0; i < N; ++i) {
const int value = g_results.get_gpu()[i];
if ((bool) value) {
std::cout << "Failed to insert item: " << i << " " << value << std::endl;
count_failed++;
}
}
// Debug dump of the full device-side table contents.
const auto from_gpu = this->cuckoo.get_gpu();
for (std::size_t i = 0; i < cuckoo::CUCKOO_SIZE; ++i) {
std::cout << from_gpu[i] << " ";
}
return count_failed;
}
// Look up N keys in the device-side cuckoo table, writing the looked-up
// values into results[]. results slots are preset to 1 before the kernel
// runs (presumably a "not found" sentinel — confirm against gpu_get).
void Cuckoo<cuckoo::CudaBackend>::get(const std::size_t& N, int* keys, int* results) {
GPUArray<int> g_keys(keys, N);
GPUArray<int> g_results(results, N);
// std::size_t matches N; the original `int` counter mixed signedness and
// would overflow for N > INT_MAX (the copy-back loop already uses std::size_t).
for (std::size_t i = 0; i < N; ++i) {
g_results.get_cpu()[i] = 1;
}
hipLaunchKernelGGL(( gpu_get), dim3(this->grid_size(N)), dim3(this->block_size()), 0, 0, g_keys.to_gpu(), g_results.to_gpu(), N, this->cuckoo.to_gpu(), this->stash.to_gpu());
cudaCheckError()
// Copy device results back into the caller's buffer.
const auto from_gpu = g_results.get_gpu();
for (std::size_t i = 0; i < N; ++i) {
results[i] = from_gpu[i];
}
}
| 942ea93ef6d4a856b1fd53c1f385bcd34807fb9f.cu | #include "cuckoo/cuckoo.hpp"
#include "cuckoo/array.hpp"
#include "cuckoo/pcg_random.hpp"
// Add in the kernels.
#include "cuckoo_gpu_impl.cu"
// Zero the host copies of the cuckoo table and stash, then push both to
// the GPU. NOTE(review): N, stash_size and num_hash_functions are ignored
// here — capacities come from the compile-time constants
// cuckoo::CUCKOO_SIZE / cuckoo::STASH_SIZE; confirm that is intended.
Cuckoo<cuckoo::CudaBackend>::Cuckoo(const std::size_t N, const std::size_t stash_size, const std::size_t num_hash_functions) {
for (cuckoo::Entry i = 0; i < cuckoo::CUCKOO_SIZE; ++i) {
this->cuckoo.get_cpu()[i] = 0;
}
for (cuckoo::Entry i = 0; i < cuckoo::STASH_SIZE; ++i) {
this->stash.get_cpu()[i] = 0;
}
this->cuckoo.to_gpu();
this->stash.to_gpu();
}
// Nothing to release explicitly — device buffers are presumably freed by
// the member arrays' own destructors; confirm against GPUArray.
Cuckoo<cuckoo::CudaBackend>::~Cuckoo() {
}
// Insert N key/value pairs into the device-side cuckoo table.
// results[i] is used as an in/out status slot: preset to 1 (failure),
// presumably cleared by gpu_set on successful insertion — confirm.
// Returns the number of keys that failed to insert.
int Cuckoo<cuckoo::CudaBackend>::set(const std::size_t& N, int* keys, int* values, int* results) {
GPUArray<int> g_keys(keys, N);
GPUArray<int> g_values(values, N);
GPUArray<int> g_results(results, N);
// std::size_t matches N; the original `int` counter mixed signedness and
// would overflow for N > INT_MAX (the later loops already use std::size_t).
for (std::size_t i = 0; i < N; ++i) {
g_results.get_cpu()[i] = 1;
}
gpu_set<<<this->grid_size(N), this->block_size()>>>(g_keys.to_gpu(), g_values.to_gpu(), g_results.to_gpu(), N, this->cuckoo.to_gpu(), this->stash.to_gpu());
cudaCheckError()
int count_failed = 0;
for (std::size_t i = 0; i < N; ++i) {
const int value = g_results.get_gpu()[i];
if ((bool) value) {
std::cout << "Failed to insert item: " << i << " " << value << std::endl;
count_failed++;
}
}
// Debug dump of the full device-side table contents.
const auto from_gpu = this->cuckoo.get_gpu();
for (std::size_t i = 0; i < cuckoo::CUCKOO_SIZE; ++i) {
std::cout << from_gpu[i] << " ";
}
return count_failed;
}
// Look up N keys in the device-side cuckoo table, writing the looked-up
// values into results[]. results slots are preset to 1 before the kernel
// runs (presumably a "not found" sentinel — confirm against gpu_get).
void Cuckoo<cuckoo::CudaBackend>::get(const std::size_t& N, int* keys, int* results) {
GPUArray<int> g_keys(keys, N);
GPUArray<int> g_results(results, N);
// std::size_t matches N; the original `int` counter mixed signedness and
// would overflow for N > INT_MAX (the copy-back loop already uses std::size_t).
for (std::size_t i = 0; i < N; ++i) {
g_results.get_cpu()[i] = 1;
}
gpu_get<<<this->grid_size(N), this->block_size()>>>(g_keys.to_gpu(), g_results.to_gpu(), N, this->cuckoo.to_gpu(), this->stash.to_gpu());
cudaCheckError()
// Copy device results back into the caller's buffer.
const auto from_gpu = g_results.get_gpu();
for (std::size_t i = 0; i < N; ++i) {
results[i] = from_gpu[i];
}
}
|
f023a2b340f8d179ff71aeb2209a8cac0114e47d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
// Forward cyclic slicing: each input element (x0, y0) is fanned out to four
// output maps — the identity copy plus three index-remapped variants that
// look like 90/180/270-degree rotations of the plane (NOTE(review):
// rotation semantics inferred from the index algebra; confirm against the
// Lua-side module). Output maps are laid out `inputSize` elements apart.
__global__ void cyclic_slice(const int nthreads, const float *idata, float *odata,
const int width, const int height, const int inputSize)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
// Coordinates of this element within its (height x width) map.
int x0 = index % width;
int y0 = (index / width) % height;
// (x, y) -> (y, width-1-x), row length becomes `height`.
int x2 = y0;
int y2 = width - x0 -1;
int width2 = height;
// (x, y) -> (width-1-x, height-1-y), row length unchanged.
int x3 = width - x0 -1;
int y3 = height - y0 -1;
int width3 = width;
// (x, y) -> (height-1-y, x), row length becomes `height`.
int x4 = height - y0 -1;
int y4 = x0;
int width4 = height;
// Start of this element's map (works because the remapped maps have
// the same element count, width*height).
int offset = (index/width/height)*width*height;
int ind_out1 = index;
int ind_out2 = offset+y2*width2 + x2 + inputSize;
int ind_out3 = offset+y3*width3 + x3 + inputSize*2;
int ind_out4 = offset+y4*width4 + x4 + inputSize*3;
//tile[threadIdx.x] = idata[index];
//__syncthreads();
// Read once, scatter to the four destinations.
float tmp = idata[index];
odata[ind_out1] = tmp;
odata[ind_out2] = tmp;
odata[ind_out3] = tmp;
odata[ind_out4] = tmp;
}
}
// Backward pass of cyclic slicing: each input-gradient element gathers and
// sums the four output-gradient elements that the forward pass scattered it
// to (same index remapping as cyclic_slice above).
__global__ void cyclic_slice_gradinput(const int nthreads, float *gradInputData,
const float *gradOutputData, const int width, const int height,
const int inputSize)
{
//__shared__ float tile[CUDA_NUM_THREADS+32];
CUDA_KERNEL_LOOP(index, nthreads)
{
// Coordinates of this element within its (height x width) map.
int x0 = index % width;
int y0 = (index / width) % height;
// Identity mapping. NOTE(review): width1 is set to `height` here while
// the forward kernel uses the raw linear index for map 1 — this only
// agrees when width == height (square maps); confirm.
int x1 = x0;
int y1 = y0;
int width1 = height;
// Same three remappings as the forward kernel.
int x2 = y0;
int y2 = width - x0 -1;
int width2 = height;
int x3 = width - x0 -1;
int y3 = height - y0 -1;
int width3 = width;
int x4 = height - y0 -1;
int y4 = x0;
int width4 = height;
int offset = (index/width/height)*width*height;
int ind_out1 = offset+y1*width1 + x1;
int ind_out2 = offset+y2*width2 + x2 + inputSize;
int ind_out3 = offset+y3*width3 + x3 + inputSize*2;
int ind_out4 = offset+y4*width4 + x4 + inputSize*3;
// Sum the four upstream gradients for this input element.
float tmp = gradOutputData[ind_out1];
tmp += gradOutputData[ind_out2];
tmp += gradOutputData[ind_out3];
tmp += gradOutputData[ind_out4];
gradInputData[index] = tmp;
}
}
// Lua binding: CyclicSlice forward. Reads the input tensor (arg 2) and the
// module's pre-allocated `output` field, zeroes the output, and launches
// cyclic_slice over all input elements.
// NOTE(review): `output` is zeroed but never resized here — presumably the
// Lua side resizes it to 4x the input beforehand; confirm.
static int cunn_CyclicSlice_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
float *output_data;
float *input_data;
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
int width, height;
int nDim = input->nDimension;
height = input->size[nDim-2];
width = input->size[nDim-1];
THCudaTensor_zero(state, output);
int count = THCudaTensor_nElement(state, input);
input = THCudaTensor_newContiguous(state, input);
output_data = THCudaTensor_data(state, output);
input_data = THCudaTensor_data(state, input);
hipLaunchKernelGGL(( cyclic_slice), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0,
count, input_data, output_data, width, height, count);
THCudaTensor_free(state, input);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
// Fixed copy-pasted message: it previously blamed
// "FracMaxPoolingForward.updateOutput" for errors raised here.
printf("error in CyclicSlice.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
// Lua binding: CyclicSlice backward. Resizes gradInput to match the input
// (arg 2), then launches cyclic_slice_gradinput to gather the four upstream
// gradient maps from gradOutput (arg 3) into gradInput.
static int cunn_CyclicSlice_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
luaL_argcheck(L, gradOutput->nDimension == 3 || gradOutput->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
float *gradInput_data;
float *gradOutput_data;
int width, height;
int nDim = input->nDimension;
height = input->size[nDim-2];
width = input->size[nDim-1];
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
// One thread per *input* element; each gathers 4 gradOutput values.
int count = THCudaTensor_nElement(state, input);
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
gradOutput_data = THCudaTensor_data(state, gradOutput);
gradInput_data = THCudaTensor_data(state, gradInput);
hipLaunchKernelGGL(( cyclic_slice_gradinput), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0,
count, gradInput_data, gradOutput_data, width, height, count);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in CyclicSlice.updateGradInput: %s\n", hipGetErrorString(err));
THError("aborting");
}
// clean
THCudaTensor_free(state, gradOutput);
return 1;
}
// Lua method table mapping nn method names to the C implementations above;
// terminated by the {NULL, NULL} sentinel required by luaL_Reg.
static const struct luaL_Reg cunn_CyclicSlice__ [] = {
{"CyclicSlice_updateOutput", cunn_CyclicSlice_updateOutput},
{"CyclicSlice_updateGradInput", cunn_CyclicSlice_updateGradInput},
{NULL, NULL}
};
// Register the CyclicSlice methods under the "nn" name on the
// torch.CudaTensor metatable, then pop the metatable off the Lua stack.
static void cunnx_CyclicSlice_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_CyclicSlice__, "nn");
lua_pop(L,1);
}
| f023a2b340f8d179ff71aeb2209a8cac0114e47d.cu | #include "utils.h"
// Forward cyclic slicing: each input element (x0, y0) is fanned out to four
// output maps — the identity copy plus three index-remapped variants that
// look like 90/180/270-degree rotations of the plane (NOTE(review):
// rotation semantics inferred from the index algebra; confirm against the
// Lua-side module). Output maps are laid out `inputSize` elements apart.
__global__ void cyclic_slice(const int nthreads, const float *idata, float *odata,
const int width, const int height, const int inputSize)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
// Coordinates of this element within its (height x width) map.
int x0 = index % width;
int y0 = (index / width) % height;
// (x, y) -> (y, width-1-x), row length becomes `height`.
int x2 = y0;
int y2 = width - x0 -1;
int width2 = height;
// (x, y) -> (width-1-x, height-1-y), row length unchanged.
int x3 = width - x0 -1;
int y3 = height - y0 -1;
int width3 = width;
// (x, y) -> (height-1-y, x), row length becomes `height`.
int x4 = height - y0 -1;
int y4 = x0;
int width4 = height;
// Start of this element's map (works because the remapped maps have
// the same element count, width*height).
int offset = (index/width/height)*width*height;
int ind_out1 = index;
int ind_out2 = offset+y2*width2 + x2 + inputSize;
int ind_out3 = offset+y3*width3 + x3 + inputSize*2;
int ind_out4 = offset+y4*width4 + x4 + inputSize*3;
//tile[threadIdx.x] = idata[index];
//__syncthreads();
// Read once, scatter to the four destinations.
float tmp = idata[index];
odata[ind_out1] = tmp;
odata[ind_out2] = tmp;
odata[ind_out3] = tmp;
odata[ind_out4] = tmp;
}
}
// Backward pass of cyclic slicing: each input-gradient element gathers and
// sums the four output-gradient elements that the forward pass scattered it
// to (same index remapping as cyclic_slice above).
__global__ void cyclic_slice_gradinput(const int nthreads, float *gradInputData,
const float *gradOutputData, const int width, const int height,
const int inputSize)
{
//__shared__ float tile[CUDA_NUM_THREADS+32];
CUDA_KERNEL_LOOP(index, nthreads)
{
// Coordinates of this element within its (height x width) map.
int x0 = index % width;
int y0 = (index / width) % height;
// Identity mapping. NOTE(review): width1 is set to `height` here while
// the forward kernel uses the raw linear index for map 1 — this only
// agrees when width == height (square maps); confirm.
int x1 = x0;
int y1 = y0;
int width1 = height;
// Same three remappings as the forward kernel.
int x2 = y0;
int y2 = width - x0 -1;
int width2 = height;
int x3 = width - x0 -1;
int y3 = height - y0 -1;
int width3 = width;
int x4 = height - y0 -1;
int y4 = x0;
int width4 = height;
int offset = (index/width/height)*width*height;
int ind_out1 = offset+y1*width1 + x1;
int ind_out2 = offset+y2*width2 + x2 + inputSize;
int ind_out3 = offset+y3*width3 + x3 + inputSize*2;
int ind_out4 = offset+y4*width4 + x4 + inputSize*3;
// Sum the four upstream gradients for this input element.
float tmp = gradOutputData[ind_out1];
tmp += gradOutputData[ind_out2];
tmp += gradOutputData[ind_out3];
tmp += gradOutputData[ind_out4];
gradInputData[index] = tmp;
}
}
// Lua binding: CyclicSlice forward. Reads the input tensor (arg 2) and the
// module's pre-allocated `output` field, zeroes the output, and launches
// cyclic_slice over all input elements.
// NOTE(review): `output` is zeroed but never resized here — presumably the
// Lua side resizes it to 4x the input beforehand; confirm.
static int cunn_CyclicSlice_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
float *output_data;
float *input_data;
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
int width, height;
int nDim = input->nDimension;
height = input->size[nDim-2];
width = input->size[nDim-1];
THCudaTensor_zero(state, output);
int count = THCudaTensor_nElement(state, input);
input = THCudaTensor_newContiguous(state, input);
output_data = THCudaTensor_data(state, output);
input_data = THCudaTensor_data(state, input);
cyclic_slice<<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>
(count, input_data, output_data, width, height, count);
THCudaTensor_free(state, input);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
// Fixed copy-pasted message: it previously blamed
// "FracMaxPoolingForward.updateOutput" for errors raised here.
printf("error in CyclicSlice.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
// Lua binding: CyclicSlice backward. Resizes gradInput to match the input
// (arg 2), then launches cyclic_slice_gradinput to gather the four upstream
// gradient maps from gradOutput (arg 3) into gradInput.
static int cunn_CyclicSlice_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
luaL_argcheck(L, gradOutput->nDimension == 3 || gradOutput->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
float *gradInput_data;
float *gradOutput_data;
int width, height;
int nDim = input->nDimension;
height = input->size[nDim-2];
width = input->size[nDim-1];
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
// One thread per *input* element; each gathers 4 gradOutput values.
int count = THCudaTensor_nElement(state, input);
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
gradOutput_data = THCudaTensor_data(state, gradOutput);
gradInput_data = THCudaTensor_data(state, gradInput);
cyclic_slice_gradinput<<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>
(count, gradInput_data, gradOutput_data, width, height, count);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in CyclicSlice.updateGradInput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
// clean
THCudaTensor_free(state, gradOutput);
return 1;
}
// Lua method table mapping nn method names to the C implementations above;
// terminated by the {NULL, NULL} sentinel required by luaL_Reg.
static const struct luaL_Reg cunn_CyclicSlice__ [] = {
{"CyclicSlice_updateOutput", cunn_CyclicSlice_updateOutput},
{"CyclicSlice_updateGradInput", cunn_CyclicSlice_updateGradInput},
{NULL, NULL}
};
// Register the CyclicSlice methods under the "nn" name on the
// torch.CudaTensor metatable, then pop the metatable off the Lua stack.
static void cunnx_CyclicSlice_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_CyclicSlice__, "nn");
lua_pop(L,1);
}
|
d72e39c739ccd9299977539bc663a38eb470305f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorTransformations.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <c10/macros/Macros.h>
#include <cstddef>
#include <vector>
namespace at {
namespace native {
constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
// Fast-path flip for a contiguous tensor collapsed to two effective dims:
// mirrors either the outermost (flip_dim == 0) or the innermost dimension.
// Grid-stride loop over all N elements; strides[0] is the size of one
// outer slice after collapseDims.
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim: mirror the outer index, keep the inner offset.
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim: keep the outer index, mirror the inner offset.
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
// General flip: one thread per element. Decomposes the linear output index
// into per-dimension coordinates via the contiguous strides, mirrors every
// coordinate listed in flip_dims, and gathers from the (possibly
// non-contiguous) input through its real strides.
template <typename scalar_t>
__global__
void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size,
int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
// cur_indices becomes the coordinate along dim i; rem carries the rest.
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
// Accumulate the source offset using the input's real strides.
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
// Flip a tensor along the dimensions in `dims`, returning a new contiguous
// tensor. Dispatches to the collapsed fast-path kernel when only one dim is
// flipped and it is the first or last dim of a contiguous tensor; otherwise
// uses the general per-element gather kernel.
Tensor flip_cuda(const Tensor& self, IntArrayRef dims) {
auto in_tensor = self;
const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel();
flip_check_errors(total_dims, flip_dims_size, dims);
int64_t block_size = 512;
dim3 dim_block(block_size);
dim3 dim_grid((N + block_size - 1) / block_size);
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
auto flip_dims = dims.vec();
wrap_all_dims(flip_dims, total_dims);
// use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work
if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, in_tensor.scalar_type(), "flip_cuda", [&] {
auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor);
auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor);
int flip_dim = in_tensor_info.collapseDims(flip_dims[0]);
out_tensor_info.collapseDims(flip_dims[0]);
hipLaunchKernelGGL(( kernel_pointwise_flip_apply2<scalar_t, int64_t>)
, dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor_info, out_tensor_info, N, flip_dim, total_dims);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
// General path: ship dims/shape/strides metadata to the device as int64
// tensors (built on CPU via from_blob, copied with .cuda() at launch).
auto flip_dims_t = at::from_blob(
flip_dims.data(), {static_cast<int64_t>(flip_dims.size())}, at::device(kCPU).dtype(kLong));
auto shape = in_tensor.sizes().vec();
auto shape_t = at::from_blob(
shape.data(), {static_cast<int64_t>(shape.size())}, at::device(kCPU).dtype(kLong));
auto strides = in_tensor.strides().vec();
auto strides_t = at::from_blob(
strides.data(), {static_cast<int64_t>(strides.size())}, at::device(kCPU).dtype(kLong));
// stride_contiguous is the stride of non-contiguous tensor after calling contiguous(),
// it is used to compute indices for each element in non-contiguous tensor
Tensor stride_contiguous = at::zeros({total_dims}, kLong);
int64_t* stride_contiguous_d = stride_contiguous.data_ptr<int64_t>();
for (int64_t i = total_dims - 1; i >= 0; i--) {
if (i == total_dims - 1) {
stride_contiguous_d[i] = 1;
} else {
// max(.., 1) keeps strides sane for size-0 dimensions.
stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1];
}
}
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(at::ScalarType::Half, in_tensor.scalar_type(), "flip_cuda", [&] {
hipLaunchKernelGGL(( flip_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
flip_dims_t.cuda().data_ptr<int64_t>(),
flip_dims_size,
strides_t.cuda().data_ptr<int64_t>(),
stride_contiguous.cuda().data_ptr<int64_t>(),
shape_t.cuda().data_ptr<int64_t>(),
total_dims);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
// Roll a contiguous tensor along one dimension: each output element at
// position p along the roll dim reads from position (p + start) mod size,
// expressed branch-wise to avoid the modulo. One thread per element.
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__
void roll_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N,
int64_t roll_dim, int64_t start,
int64_t size, int64_t stride, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
// roll dim idx is the index of linear_index along the rolling dimension.
int64_t roll_dim_idx = linear_index % (stride * size) / stride;
// index into the source data to find appropriate value.
int64_t source_idx = 0;
if( roll_dim_idx >= (size - start) ) {
// Wrap around: source is `size - start` positions earlier.
source_idx = linear_index - ((size - start) * stride);
} else {
source_idx = linear_index + (start * stride);
}
out_tensor[linear_index] = in_tensor[source_idx];
}
// Roll a tensor by shifts[0] along dims[0]. Multi-dim rolls (or a roll
// with no explicit dim) are delegated to the generic roll_common helper;
// the single-dim case runs the dedicated kernel on a contiguous copy.
Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
if (dims.size() != 1 || shifts.size() != 1) {
return roll_common(self, shifts, dims);
}
auto in_tensor = self;
if(!self.is_contiguous()) {
in_tensor = self.contiguous();
}
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
const int64_t N = in_tensor.numel();
const int64_t dim = dims[0];
const int64_t size = in_tensor.size(dim);
// `start` is the source offset of output position 0 along `dim`.
int64_t start = (size - shifts[0]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
if( start < 0 ) start = start + size;
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
TORCH_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid");
auto total_dims = in_tensor.dim();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, in_tensor.scalar_type(), "roll_cuda", [&] {
hipLaunchKernelGGL(( roll_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
dim, start,
size,
in_tensor.stride(dim),
total_dims);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
}} // namespace at::native
| d72e39c739ccd9299977539bc663a38eb470305f.cu | #include <ATen/native/TensorTransformations.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <c10/macros/Macros.h>
#include <cstddef>
#include <vector>
namespace at {
namespace native {
constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
// Fast-path flip for a contiguous tensor collapsed to two effective dims:
// mirrors either the outermost (flip_dim == 0) or the innermost dimension.
// Grid-stride loop over all N elements; strides[0] is the size of one
// outer slice after collapseDims.
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim: mirror the outer index, keep the inner offset.
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim: keep the outer index, mirror the inner offset.
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
// General flip: one thread per element. Decomposes the linear output index
// into per-dimension coordinates via the contiguous strides, mirrors every
// coordinate listed in flip_dims, and gathers from the (possibly
// non-contiguous) input through its real strides.
template <typename scalar_t>
__global__
void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size,
int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
// cur_indices becomes the coordinate along dim i; rem carries the rest.
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
// Accumulate the source offset using the input's real strides.
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
// Flip a tensor along the dimensions in `dims`, returning a new contiguous
// tensor. Dispatches to the collapsed fast-path kernel when only one dim is
// flipped and it is the first or last dim of a contiguous tensor; otherwise
// uses the general per-element gather kernel.
Tensor flip_cuda(const Tensor& self, IntArrayRef dims) {
auto in_tensor = self;
const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel();
flip_check_errors(total_dims, flip_dims_size, dims);
int64_t block_size = 512;
dim3 dim_block(block_size);
dim3 dim_grid((N + block_size - 1) / block_size);
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
auto flip_dims = dims.vec();
wrap_all_dims(flip_dims, total_dims);
// use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work
if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, in_tensor.scalar_type(), "flip_cuda", [&] {
auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor);
auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor);
int flip_dim = in_tensor_info.collapseDims(flip_dims[0]);
out_tensor_info.collapseDims(flip_dims[0]);
kernel_pointwise_flip_apply2<scalar_t, int64_t>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor_info, out_tensor_info, N, flip_dim, total_dims);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
// General path: ship dims/shape/strides metadata to the device as int64
// tensors (built on CPU via from_blob, copied with .cuda() at launch).
auto flip_dims_t = at::from_blob(
flip_dims.data(), {static_cast<int64_t>(flip_dims.size())}, at::device(kCPU).dtype(kLong));
auto shape = in_tensor.sizes().vec();
auto shape_t = at::from_blob(
shape.data(), {static_cast<int64_t>(shape.size())}, at::device(kCPU).dtype(kLong));
auto strides = in_tensor.strides().vec();
auto strides_t = at::from_blob(
strides.data(), {static_cast<int64_t>(strides.size())}, at::device(kCPU).dtype(kLong));
// stride_contiguous is the stride of non-contiguous tensor after calling contiguous(),
// it is used to compute indices for each element in non-contiguous tensor
Tensor stride_contiguous = at::zeros({total_dims}, kLong);
int64_t* stride_contiguous_d = stride_contiguous.data_ptr<int64_t>();
for (int64_t i = total_dims - 1; i >= 0; i--) {
if (i == total_dims - 1) {
stride_contiguous_d[i] = 1;
} else {
// max(.., 1) keeps strides sane for size-0 dimensions.
stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1];
}
}
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(at::ScalarType::Half, in_tensor.scalar_type(), "flip_cuda", [&] {
flip_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
flip_dims_t.cuda().data_ptr<int64_t>(),
flip_dims_size,
strides_t.cuda().data_ptr<int64_t>(),
stride_contiguous.cuda().data_ptr<int64_t>(),
shape_t.cuda().data_ptr<int64_t>(),
total_dims);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
// Roll a contiguous tensor along one dimension: each output element at
// position p along the roll dim reads from position (p + start) mod size,
// expressed branch-wise to avoid the modulo. One thread per element.
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__
void roll_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N,
int64_t roll_dim, int64_t start,
int64_t size, int64_t stride, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
// roll dim idx is the index of linear_index along the rolling dimension.
int64_t roll_dim_idx = linear_index % (stride * size) / stride;
// index into the source data to find appropriate value.
int64_t source_idx = 0;
if( roll_dim_idx >= (size - start) ) {
// Wrap around: source is `size - start` positions earlier.
source_idx = linear_index - ((size - start) * stride);
} else {
source_idx = linear_index + (start * stride);
}
out_tensor[linear_index] = in_tensor[source_idx];
}
// Roll a tensor by shifts[0] along dims[0]. Multi-dim rolls (or a roll
// with no explicit dim) are delegated to the generic roll_common helper;
// the single-dim case runs the dedicated kernel on a contiguous copy.
Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
if (dims.size() != 1 || shifts.size() != 1) {
return roll_common(self, shifts, dims);
}
auto in_tensor = self;
if(!self.is_contiguous()) {
in_tensor = self.contiguous();
}
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
const int64_t N = in_tensor.numel();
const int64_t dim = dims[0];
const int64_t size = in_tensor.size(dim);
// `start` is the source offset of output position 0 along `dim`.
int64_t start = (size - shifts[0]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
if( start < 0 ) start = start + size;
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
TORCH_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid");
auto total_dims = in_tensor.dim();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, in_tensor.scalar_type(), "roll_cuda", [&] {
roll_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
dim, start,
size,
in_tensor.stride(dim),
total_dims);
TORCH_CUDA_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
}} // namespace at::native
|
b919d4a036e788762d321a0d3c165008bf206905.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "math.h"
// 16.16 fixed-point value. `i` is the raw 32-bit representation; on a
// little-endian host `hi` is the integer part and `lo` the fraction (the
// DDA code below rounds by adding 32767 and then reads `hi`). The struct
// layout is endian-specific, as the original comment notes.
typedef
union
{
int32_t i;
struct
{
int16_t lo; // endian-specific!
int16_t hi;
};
} fixed_point;
void checkCUDAError(const char* msg);
// One thread per pixel pair: decode the pair, compute four normalized
// features (dx, dy, mean y, fraction of black pixels along the line between
// the two points, traced with a 16.16 fixed-point DDA), then accumulate
// monomials of those features into the shared accumulator outmD.
__global__ void kernel(char *pairpixelsD, int nP, int width, unsigned char *pairsD,
float* outmD, int degree, const int xheight, int nPairs) {
// just use global memory for now
// get threadID:
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= nPairs) return;
// Each pair is 6 bytes: x0 (big-endian 16-bit), x1 (big-endian 16-bit),
// y0, y1.
unsigned char *pairPtr = &pairsD[6*idx];
int x0 = ( pairPtr[0] << 8 ) + pairPtr[1];
int x1 = ( pairPtr[2] << 8 ) + pairPtr[3];
int y0 = pairPtr[4];
int y1 = pairPtr[5];
// Features normalized by the font x-height (0.5f literal avoids the
// double-precision promotion the original 0.5 caused).
float vdx = (x1 - x0)/(float)xheight;
float vdy = (y1 - y0)/(float)xheight;
float vyy = 0.5f*(y0 + y1)/(float)xheight;
// Trace the line, counting total and black pixels.
int btotal = 0;
int bblack = 0;
fixed_point f;
if(abs(y1-y0) < abs(x1-x0)) {
// Mostly-horizontal: step in x, interpolate y in 16.16 fixed point.
int x;
int32_t m=((int32_t)(y1-y0)<<16)/(x1-x0);
f.i=y0<<16;
for (x=x0;x<=x1;x++,f.i+=m)
{
fixed_point g=f;
g.i+=32767; // round to nearest row
btotal++;
bblack += pairpixelsD[width * g.hi + x];
}
} else {
// Mostly-vertical: step in y, interpolate x.
int y;
int32_t m=((int32_t)(x1-x0)<<16)/(y1-y0);
f.i=x0<<16;
for (y=y0;y<=y1;y++,f.i+=m)
{
fixed_point g=f;
g.i+=32767;
btotal++;
bblack += pairpixelsD[width * y + g.hi];
}
}
float vbl = bblack/(float)btotal;
// Accumulate monomials. All threads update the same outmD slots, so the
// addition must be atomic; the original plain "+=" was a cross-thread
// data race and produced nondeterministic sums.
// NOTE(review): exponents run 0..degree (degree+1 values) but the index
// stride is `degree`, so distinct exponent tuples can collide; also the
// powf terms are summed rather than multiplied — both look suspicious,
// but confirm against the host-side consumer before changing the layout.
for(int evdx = 0; evdx <= degree; evdx++) {
for(int evdy = 0; evdy <= degree; evdy++) {
for(int evyy = 0; evyy <= degree; evyy++) {
for(int evbl = 0; evbl <= degree; evbl++) {
atomicAdd(&outmD[evbl + degree*evyy + (degree*degree)*evdy +
(degree*degree*degree)*evdx],
powf(vdx, evdx) + powf(vdy, evdy)
+ powf(vyy, evyy) + powf(vbl, evbl));
}
}
}
}
}
// Host driver: copy the pixel matrix (one byte per pixel) and the pair list
// (6 bytes per pair) to the device, launch `kernel` with one thread per
// pair, and copy the accumulated monomial sums back into outmH.
void kernel_wrapper(char* pairpixelsH, int width, int height, unsigned char*
pairsH, int nPairs, float* outmH, const int degree, const int nMonomials,
const int xheight) {
// Device accumulator for the monomial sums, zero-initialized.
// hipMemset takes an int byte value; the original passed the float
// literal 0.f, which only produced zeros via implicit conversion.
float *outmD;
hipMalloc((void**) &outmD, nMonomials*sizeof(float));
hipMemset(outmD, 0, nMonomials*sizeof(float));
checkCUDAError("Couldn't create output matrix");
// Pixel matrix, one byte per pixel, laid out row-major with `width` columns.
char *pairpixelsD;
hipMalloc((void**) &pairpixelsD, width*height*sizeof(char));
hipMemcpy(pairpixelsD, pairpixelsH, width*height*sizeof(char), hipMemcpyHostToDevice);
checkCUDAError("Couldn't copy pixel matrix");
// copy list of pixel pairs into CUDA memory
unsigned char* pairsD;
hipMalloc((void**) &pairsD, nPairs*6*sizeof(unsigned char));
checkCUDAError("Couldn't create pairs");
hipMemcpy(pairsD, pairsH, nPairs*6*sizeof(unsigned char), hipMemcpyHostToDevice);
checkCUDAError("Couldn't copy pairs");
// One thread per pair.
int nThreadsPerBlock = 512;
int nBlocks = ceil(nPairs/(float)nThreadsPerBlock);
printf("xheight: %d\n", xheight);
// call the kernel
hipLaunchKernelGGL(( kernel), dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0, pairpixelsD, width*height, width, pairsD, outmD, degree, xheight, nPairs);
hipDeviceSynchronize();
checkCUDAError("Kernel Failed!");
// copy over results
hipMemcpy(outmH, outmD, nMonomials*sizeof(float), hipMemcpyDeviceToHost);
checkCUDAError("Couldn't copy results");
// finally: free memory
hipFree(pairpixelsD);
hipFree(pairsD);
hipFree(outmD);
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
| b919d4a036e788762d321a0d3c165008bf206905.cu | #include <stdio.h>
#include <cuda.h>
#include "math.h"
typedef
union
{
int32_t i;
struct
{
int16_t lo; // endian-specific!
int16_t hi;
};
} fixed_point;
void checkCUDAError(const char* msg);
__global__ void kernel(char *pairpixelsD, int nP, int width, unsigned char *pairsD,
float* outmD, int degree, const int xheight, int nPairs) {
// just use global memory for now
// get threadID:
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= nPairs) return;
// first, get the first and second pixel from pairsD
unsigned char *pairPtr = &pairsD[6*idx];
int x0 = ( pairPtr[0] << 8 ) + pairPtr[1];
int x1 = ( pairPtr[2] << 8 ) + pairPtr[3];
int y0 = pairPtr[4];
int y1 = pairPtr[5];
// calculate the first three variables
float vdx = (x1 - x0)/(float)xheight;
float vdy = (y1 - y0)/(float)xheight;
float vyy = 0.5*(y0 + y1)/(float)xheight;
// now calculate amount of black
int btotal = 0;
int bblack = 0;
fixed_point f;
if(abs(y1-y0) < abs(x1-x0)) {
int x;
int32_t m=((int32_t)(y1-y0)<<16)/(x1-x0);
f.i=y0<<16;
for (x=x0;x<=x1;x++,f.i+=m)
{
fixed_point g=f;
g.i+=32767;
btotal++;
bblack += pairpixelsD[width * g.hi + x];
}
} else {
int y;
int32_t m=((int32_t)(x1-x0)<<16)/(y1-y0);
f.i=x0<<16;
for (y=y0;y<=y1;y++,f.i+=m)
{
fixed_point g=f;
g.i+=32767;
btotal++;
bblack += pairpixelsD[width * y + g.hi];
}
}
float vbl = bblack/(float)btotal;
// now calculate monomial results and store
// should maybe be done locally and then pushed to global memory
for(int evdx = 0; evdx <= degree; evdx++) {
for(int evdy = 0; evdy <= degree; evdy++) {
for(int evyy = 0; evyy <= degree; evyy++) {
for(int evbl = 0; evbl <= degree; evbl++) {
outmD[evbl + degree*evyy + (degree*degree)*evdy +
(degree*degree*degree)*evdx] += powf(vdx, evdx) + powf(vdy, evdy)
+ powf(vyy, evyy) + powf(vbl, evbl);
}
}
}
}
}
void kernel_wrapper(char* pairpixelsH, int width, int height, unsigned char*
pairsH, int nPairs, float* outmH, const int degree, const int nMonomials,
const int xheight) {
// create matrix with both letters in it
// one-dimensional, with one byte per pixel, going from bottom to top,
// left to right.
// also, create matrix for polynomial output on device
float *outmD;
cudaMalloc((void**) &outmD, nMonomials*sizeof(float));
cudaMemset(outmD, 0.f, nMonomials*sizeof(float));
// copy matrix into CUDA memory
char *pairpixelsD;
cudaMalloc((void**) &pairpixelsD, width*height*sizeof(char));
cudaMemcpy(pairpixelsD, pairpixelsH, width*height*sizeof(char), cudaMemcpyHostToDevice);
// copy list of pixel pairs into CUDA memory
unsigned char* pairsD;
cudaMalloc((void**) &pairsD, nPairs*6*sizeof(unsigned char));
checkCUDAError("Couldn't create pairs");
cudaMemcpy(pairsD, pairsH, nPairs*6*sizeof(unsigned char), cudaMemcpyHostToDevice);
checkCUDAError("Couldn't copy pairs");
// create tons of threads with ID composed of left and right
int nThreadsPerBlock = 512;
int nBlocks = ceil(nPairs/(float)nThreadsPerBlock);
// calculate number of shared memory bytes needed
printf("xheight: %d\n", xheight);
// call the kernel
kernel<<< nBlocks, nThreadsPerBlock >>>( pairpixelsD, width*height, width, pairsD, outmD, degree, xheight, nPairs);
cudaThreadSynchronize();
checkCUDAError("Kernel Failed!");
// copy over results
cudaMemcpy(outmH, outmD, nMonomials*sizeof(float), cudaMemcpyDeviceToHost);
// finally: free memory
cudaFree(pairpixelsD);
cudaFree(pairsD);
cudaFree(outmD);
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
|
1ee099cbb51f0ad8c43e6aaf5ac3fef18fd9a147.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This code was devloped by David Barrie Thomas at Imperial College
// http://www.doc.ic.ac.uk/~dt10/research/rngs-gpu-uniform.html
// shared memory allocation for RNG
extern __shared__ unsigned WarpStandard_shmem[];
// RNG
// Public constants
const unsigned WarpStandard_K=32;
const unsigned WarpStandard_REG_COUNT=3;
const unsigned WarpStandard_STATE_WORDS=32;
// Private constants
const char *WarpStandard_name="WarpRNG[CorrelatedU32Rng;k=32;g=16;rs=0;w=32;n=1024;hash=deac2e12ec6e615]";
const char *WarpStandard_post_processing="addtaps";
const unsigned WarpStandard_N=1024;
const unsigned WarpStandard_W=32;
const unsigned WarpStandard_G=16;
const unsigned WarpStandard_SR=0;
__device__ const unsigned WarpStandard_Q[2][32]={
{29,24,5,23,14,26,11,31,9,3,1,28,0,2,22,20,18,15,27,13,10,16,8,17,25,12,19,30,7,6,4,21},
{5,14,28,24,19,13,0,17,11,20,7,10,6,15,2,9,8,23,4,30,12,25,3,21,26,27,31,18,22,16,29,1}
};
const unsigned WarpStandard_Z0=2;
__device__ const unsigned WarpStandard_Z1[32]={
0,1,0,1,1,1,0,0,1,0,0,1,0,0,1,0,0,1,0,1,0,1,0,1,0,1,0,1,1,1,0,1};
const unsigned WarpStandard_SHMEM_WORDS=32;
const unsigned WarpStandard_GMEM_WORDS=0;
// Public functions
__device__ void WarpStandard_LoadState(const unsigned *seed, unsigned *regs)
{
unsigned offset=threadIdx.x % 32; unsigned base=threadIdx.x-offset;
// setup constants
regs[0]=WarpStandard_Z1[offset];
regs[1]=base + WarpStandard_Q[0][offset];
regs[2]=base + WarpStandard_Q[1][offset];
// Setup state
unsigned stateOff=blockDim.x * blockIdx.x * 1 + threadIdx.x * 1;
WarpStandard_shmem[threadIdx.x]=seed[stateOff];
}
__device__ void WarpStandard_SaveState(const unsigned *regs, unsigned *seed)
{
unsigned stateOff=blockDim.x * blockIdx.x * 1 + threadIdx.x * 1;
seed[stateOff] = WarpStandard_shmem[threadIdx.x];
}
__device__ unsigned WarpStandard_Generate(unsigned *regs)
{
#if __DEVICE_EMULATION__
__syncthreads();
#endif
unsigned t0=WarpStandard_shmem[regs[1]], t1=WarpStandard_shmem[regs[2]];
unsigned res=(t0<<WarpStandard_Z0) ^ (t1>>regs[0]);
#if __DEVICE_EMULATION__
__syncthreads();
#endif
WarpStandard_shmem[threadIdx.x]=res;
return t0+t1;
};
| 1ee099cbb51f0ad8c43e6aaf5ac3fef18fd9a147.cu | // This code was devloped by David Barrie Thomas at Imperial College
// http://www.doc.ic.ac.uk/~dt10/research/rngs-gpu-uniform.html
// shared memory allocation for RNG
extern __shared__ unsigned WarpStandard_shmem[];
// RNG
// Public constants
const unsigned WarpStandard_K=32;
const unsigned WarpStandard_REG_COUNT=3;
const unsigned WarpStandard_STATE_WORDS=32;
// Private constants
const char *WarpStandard_name="WarpRNG[CorrelatedU32Rng;k=32;g=16;rs=0;w=32;n=1024;hash=deac2e12ec6e615]";
const char *WarpStandard_post_processing="addtaps";
const unsigned WarpStandard_N=1024;
const unsigned WarpStandard_W=32;
const unsigned WarpStandard_G=16;
const unsigned WarpStandard_SR=0;
__device__ const unsigned WarpStandard_Q[2][32]={
{29,24,5,23,14,26,11,31,9,3,1,28,0,2,22,20,18,15,27,13,10,16,8,17,25,12,19,30,7,6,4,21},
{5,14,28,24,19,13,0,17,11,20,7,10,6,15,2,9,8,23,4,30,12,25,3,21,26,27,31,18,22,16,29,1}
};
const unsigned WarpStandard_Z0=2;
__device__ const unsigned WarpStandard_Z1[32]={
0,1,0,1,1,1,0,0,1,0,0,1,0,0,1,0,0,1,0,1,0,1,0,1,0,1,0,1,1,1,0,1};
const unsigned WarpStandard_SHMEM_WORDS=32;
const unsigned WarpStandard_GMEM_WORDS=0;
// Public functions
__device__ void WarpStandard_LoadState(const unsigned *seed, unsigned *regs)
{
unsigned offset=threadIdx.x % 32; unsigned base=threadIdx.x-offset;
// setup constants
regs[0]=WarpStandard_Z1[offset];
regs[1]=base + WarpStandard_Q[0][offset];
regs[2]=base + WarpStandard_Q[1][offset];
// Setup state
unsigned stateOff=blockDim.x * blockIdx.x * 1 + threadIdx.x * 1;
WarpStandard_shmem[threadIdx.x]=seed[stateOff];
}
__device__ void WarpStandard_SaveState(const unsigned *regs, unsigned *seed)
{
unsigned stateOff=blockDim.x * blockIdx.x * 1 + threadIdx.x * 1;
seed[stateOff] = WarpStandard_shmem[threadIdx.x];
}
__device__ unsigned WarpStandard_Generate(unsigned *regs)
{
#if __DEVICE_EMULATION__
__syncthreads();
#endif
unsigned t0=WarpStandard_shmem[regs[1]], t1=WarpStandard_shmem[regs[2]];
unsigned res=(t0<<WarpStandard_Z0) ^ (t1>>regs[0]);
#if __DEVICE_EMULATION__
__syncthreads();
#endif
WarpStandard_shmem[threadIdx.x]=res;
return t0+t1;
};
|
9e4132693d4e13d110b4071c93df6637bce0cb4b.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2017 Leonardo A. Bautista-Gomez
* All rights reserved
*
* FTI - A multi-level checkpointing library for C/C++/Fortran applications
*
* Revision 1.0 : Fault Tolerance Interface (FTI)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* @author Konstantinos Parasyris (konstantinos.parasyris@bsc.es)
* @file diff-checkpoint.c
* @date February, 2018
* @brief Routines to compute the MD5 checksum
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "md5Opt.h"
#include <pthread.h>
#include <fti.h>
#include <interface.h>
#define CPU 1
#define GPU 2
#define CFILE 3
int MD5GPU(FTIT_dataset *);
int MD5CPU(FTIT_dataset *);
int usesAsync = 0;
pthread_t thread;
pthread_mutex_t worker;
pthread_mutex_t application;
long totalWork= 0;
long worker_exit = 0;
int deviceId;
unsigned char* (*cpuHash)( const unsigned char *data, unsigned long nBytes, unsigned char *hash );
typedef struct threadWork{
FTIT_dataset *data;
FILE *f;
unsigned int type;
}tw;
tw work[FTI_BUFS];
#define CUDA_ERROR_CHECK(fun) \
do { \
hipError_t err = fun; \
char str[FTI_BUFS]; \
if (err != hipSuccess) \
{ \
int device; \
hipGetDevice(&device); \
sprintf(str, "Cuda error %d %s:: %s device(%d)", __LINE__, __func__, hipGetErrorString(err),device); \
FTI_Print(str, FTI_WARN); \
return FTI_NSCS; \
} \
} while(0)
#define GETDIV(a,b) ((a/b) + (((a % b) == 0 )? 0:1))
MD5_u32plus *Hin,*Hout;
MD5_u32plus *in,*out,*tmp;
char *tempGpuBuffer;
long tempBufferSize;
long md5ChunkSize;
hipStream_t Gstream;
#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
#define H(x, y, z) (((x) ^ (y)) ^ (z))
#define H2(x, y, z) ((x) ^ ((y) ^ (z)))
#define I(x, y, z) ((y) ^ ((x) | ~(z)))
#define STEP(f, a, b, c, d, x, t, s) \
(a) += f((b), (c), (d)) + (x) + (t); \
(a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
(a) += (b);
#define SET(n) \
(*(MD5_u32plus *)&ptr[(n) * 4])
#define GET(n) \
SET(n)
/*
* This processes one or more 64-byte data blocks, but does NOT update the bit
* counters. There are no alignment requirements.
*/
#define OUT(dst, src) \
(dst)[0] = (unsigned char)(src); \
(dst)[1] = (unsigned char)((src) >> 8); \
(dst)[2] = (unsigned char)((src) >> 16); \
(dst)[3] = (unsigned char)((src) >> 24);
/*-------------------------------------------------------------------------*/
/**
@brief This CUDA function computes the MD5 DCP chunks for the data, and stores each
checksum on the corresponding out index
@param out Array containing all the MD5 checksums
@param data pointing to the actual data.
@param size Total Size of the data
@param md5ChunkSize Total bytes used to comute a single checksum
@return void.
This function computes the MD5 checksums of the data stored in the data ptr
the checksums are stored in the out memory location
**/
/*-------------------------------------------------------------------------*/
__global__
void body(MD5_u32plus *out, const void *data, unsigned long size, long md5ChunkSize )
{
const unsigned char *ptr;
MD5_u32plus a, b, c, d;
MD5_u32plus saved_a, saved_b, saved_c, saved_d;
long tid = threadIdx.x + blockIdx.x *blockDim.x;
long index = tid * md5ChunkSize;
unsigned char block[64];
int allocate = 0;
if (index > size)
return;
// unsigned char *block=&allBlock[blockIdx.x][0];
ptr = (const unsigned char *)data;
ptr = &ptr[index];
long localSize = md5ChunkSize;
if ( index+ md5ChunkSize > size){
allocate=1;
localSize = size-index;
unsigned char *tmp = (unsigned char *) malloc (md5ChunkSize);
memset(tmp, 0, md5ChunkSize);
memcpy(tmp, ptr,localSize);
ptr = tmp;
localSize = md5ChunkSize;
}
a = 0x67452301;
b = 0xefcdab89;
c = 0x98badcfe;
d = 0x10325476;
MD5_u32plus hi = localSize >> 29;
MD5_u32plus lo = localSize & 0x1fffffff;
while (localSize >= 64 ){
saved_a = a;
saved_b = b;
saved_c = c;
saved_d = d;
/* Round 1 */
STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
/* Round 2 */
STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
/* Round 3 */
STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
STEP(H2, d, a, b, c, GET(8), 0x8771f681, 11)
STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
STEP(H2, b, c, d, a, GET(14), 0xfde5380c, 23)
STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
STEP(H2, d, a, b, c, GET(4), 0x4bdecfa9, 11)
STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
STEP(H2, b, c, d, a, GET(10), 0xbebfbc70, 23)
STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
STEP(H2, d, a, b, c, GET(0), 0xeaa127fa, 11)
STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
STEP(H2, b, c, d, a, GET(6), 0x04881d05, 23)
STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
STEP(H2, d, a, b, c, GET(12), 0xe6db99e5, 11)
STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
STEP(H2, b, c, d, a, GET(2), 0xc4ac5665, 23)
STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
a += saved_a;
b += saved_b;
c += saved_c;
d += saved_d;
ptr += 64;
localSize -=64;
}
long used;
for ( used = 0; used < localSize; used++){
block[used] = ptr[used];
}
block[used++]=0x80;
long available=64-used;
if ( available < 8 ){
ptr = block;
saved_a = a;
saved_b = b;
saved_c = c;
saved_d = d;
/* Round 1 */
STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
/* Round 2 */
STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
/* Round 3 */
STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
STEP(H2, d, a, b, c, GET(8), 0x8771f681, 11)
STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
STEP(H2, b, c, d, a, GET(14), 0xfde5380c, 23)
STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
STEP(H2, d, a, b, c, GET(4), 0x4bdecfa9, 11)
STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
STEP(H2, b, c, d, a, GET(10), 0xbebfbc70, 23)
STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
STEP(H2, d, a, b, c, GET(0), 0xeaa127fa, 11)
STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
STEP(H2, b, c, d, a, GET(6), 0x04881d05, 23)
STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
STEP(H2, d, a, b, c, GET(12), 0xe6db99e5, 11)
STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
STEP(H2, b, c, d, a, GET(2), 0xc4ac5665, 23)
STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
a += saved_a;
b += saved_b;
c += saved_c;
d += saved_d;
used = 0;
available = 64;
}
lo <<=3;
memset(&block[used], 0, available-8);
OUT( &block[56], lo);
OUT( &block[60], hi);
ptr = block;
saved_a = a;
saved_b = b;
saved_c = c;
saved_d = d;
/* Round 1 */
STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
/* Round 2 */
STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
/* Round 3 */
STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
STEP(H2, d, a, b, c, GET(8), 0x8771f681, 11)
STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
STEP(H2, b, c, d, a, GET(14), 0xfde5380c, 23)
STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
STEP(H2, d, a, b, c, GET(4), 0x4bdecfa9, 11)
STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
STEP(H2, b, c, d, a, GET(10), 0xbebfbc70, 23)
STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
STEP(H2, d, a, b, c, GET(0), 0xeaa127fa, 11)
STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
STEP(H2, b, c, d, a, GET(6), 0x04881d05, 23)
STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
STEP(H2, d, a, b, c, GET(12), 0xe6db99e5, 11)
STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
STEP(H2, b, c, d, a, GET(2), 0xc4ac5665, 23)
STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
a += saved_a;
b += saved_b;
c += saved_c;
d += saved_d;
out[tid*4] = a;
out[tid*4+1] = b;
out[tid*4+2] = c;
out[tid*4+3] = d;
if (allocate ){
free((void *)ptr);
}
return;
}
/*-------------------------------------------------------------------------*/
/**
@brief Syncrhonizes the CPU with the GPU stream that computes the checksims
Syncrhonizes the CPU with the GPU stream that computes the checksims
**/
/*-------------------------------------------------------------------------*/
int syncDevice(){
CUDA_ERROR_CHECK(hipStreamSynchronize(Gstream));
return 1;
}
/*-------------------------------------------------------------------------*/
/**
@brief This is the main loop of the parallel thread responsible of computing
the MD5 checksums
@return void.
This is the main loop of the parallel thread responsible of computing
**/
/*-------------------------------------------------------------------------*/
void *workerMain(void *){
hipSetDevice(deviceId);
long l;
int lock= 1;
while (1){
pthread_mutex_lock(&worker);
if (worker_exit ){
return NULL;
}
for ( l = 0; l < totalWork; l++){
if( work[l].type == CPU ){
MD5CPU(work[l].data);
}
else if ( work[l].type == GPU ){
MD5GPU(work[l].data);
}
else if ( work[l].type == CFILE ){
lock = 0;
char str[100];
double t0 = MPI_Wtime();
fsync(fileno(work[l].f));
fclose(work[l].f);
double t1 = MPI_Wtime();
sprintf(str,"In memory Ckpt Pushed in Stable Storage in : %.2f sec", t1-t0);
FTI_Print(str,FTI_INFO);
}
}
totalWork = 0;
syncDevice();
if ( lock ){
pthread_mutex_unlock(&application);
}
else {
lock = 1;
}
}
}
/*-------------------------------------------------------------------------*/
/**
@brief This function initializes the MD5 checksum functions for DCP
@param cSize Size of the chunk
@param tempSize Size of intermediate buffers (Not used in this file)
@param FTI_Conf Pointer to the configuration options
@return integer FTI_SCES if successfu.
This function initializes parameters for the computation of DCP MD5 checksums
and if requested spawns the worker thread.
**/
/*-------------------------------------------------------------------------*/
int FTI_initMD5(long cSize, long tempSize, FTIT_configuration *FTI_Conf){
if ( FTI_Conf->dcpInfoPosix.cachedCkpt)
usesAsync = 1;
else
usesAsync = 0;
//this will be use by the application to sync
cpuHash = FTI_Conf->dcpInfoPosix.hashFunc;
pthread_attr_t attr;
hipGetDevice(&deviceId);
if (usesAsync){
if (pthread_mutex_init(&application, NULL) != 0){
return 1;
}
pthread_mutex_lock(&application);
// This will be used by the worker to sync
if (pthread_mutex_init(&worker, NULL) != 0){
return 1;
}
pthread_mutex_lock(&worker);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
pthread_attr_init(&attr);
if(pthread_create(&thread, &attr, workerMain, NULL)) {
return 1;
}
}
tempBufferSize = tempSize;
md5ChunkSize = cSize;
size_t lvl1Chunks,free,total;
CUDA_ERROR_CHECK(hipMemGetInfo ( &free, &total));
lvl1Chunks = ((total)/cSize)*4; // 4 ints per md5
CUDA_ERROR_CHECK(hipHostMalloc((void **)&Hin, sizeof(MD5_u32plus*) * lvl1Chunks , hipHostMallocMapped));
CUDA_ERROR_CHECK(hipHostMalloc((void **)&Hout, sizeof(MD5_u32plus*) * lvl1Chunks , hipHostMallocMapped));
CUDA_ERROR_CHECK(hipHostGetDevicePointer((void **)&in, (void *) Hin, 0));
CUDA_ERROR_CHECK(hipHostGetDevicePointer((void **)&out, (void *) Hout, 0));
CUDA_ERROR_CHECK(hipMallocManaged(&tempGpuBuffer, sizeof(char)*(tempBufferSize)));
CUDA_ERROR_CHECK(hipStreamCreate(&Gstream));
return FTI_SCES;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function computes the checksums of an Protected Variable
@param data Variable We need to compute the checksums
@return integer FTI_SCES if successfu.
This function computes the checksums of a specific variable stored in the
GPU and is called in the async mode by the worker thread
**/
/*-------------------------------------------------------------------------*/
int MD5GPU(FTIT_dataset *data){
size_t size = data->size;
long numKernels= GETDIV(size,md5ChunkSize);
long numThreads = min(numKernels,1024L);
long numGroups = GETDIV(numKernels,numThreads);// + ((( numKernels % numThreads ) == 0 ) ? 0:1);
unsigned char *tmp = (unsigned char*) malloc (sizeof(char)*size);
hipLaunchKernelGGL(( body), dim3(numGroups),dim3(numThreads),0,Gstream, (MD5_u32plus *) data->dcpInfoPosix.currentHashArray, data->devicePtr, size, md5ChunkSize);
return FTI_SCES;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function computes the checksums of an Protected Variable
@param data Variable We need to compute the checksums
@return integer FTI_SCES if successfu.
This function computes the checksums of a specific variable stored in the
CPU and is called in the async mode by the worker thread
**/
/*-------------------------------------------------------------------------*/
int MD5CPU(FTIT_dataset *data){
unsigned long dataSize = data->size;
unsigned char block[md5ChunkSize];
size_t i;
unsigned char *ptr = (unsigned char *) data->ptr;
for ( i = 0 ; i < data->size; i+=md5ChunkSize){
unsigned int blockId = i/md5ChunkSize;
unsigned int hashIdx = blockId*16;
unsigned int chunkSize = ( (dataSize-i) < md5ChunkSize ) ? dataSize-i: md5ChunkSize;
if( chunkSize < md5ChunkSize ) {
memset( block, 0x0, md5ChunkSize );
memcpy( block, &ptr[i], chunkSize );
cpuHash( block, md5ChunkSize , &data->dcpInfoPosix.currentHashArray[hashIdx] );
} else {
cpuHash( &ptr[i], md5ChunkSize , &data->dcpInfoPosix.currentHashArray[hashIdx] );
}
}
return FTI_SCES;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function computes the checksums of a Protected Variable
@param data Variable We need to compute the checksums
@return integer FTI_SCES if successfu.
This function initializes either computes directly the checksums of the CPU
dataVar or assigns the work to a worker thread
/*-------------------------------------------------------------------------*/
int FTI_MD5CPU(FTIT_dataset *data){
if ( usesAsync ){
work[totalWork].data= data;
work[totalWork].type= CPU;
totalWork++;
}else{
MD5CPU(data);
}
return 1;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function computes the checksums of a Protected Variable
@param data Variable We need to compute the checksums
@return integer FTI_SCES if successfu.
This function initializes either computes directly the checksums of the GPU
dataVar or assigns the work to a worker thread
/*-------------------------------------------------------------------------*/
int FTI_MD5GPU(FTIT_dataset *data){
if ( usesAsync ){
work[totalWork].data= data;
work[totalWork].type= GPU;
totalWork++;
}
else{
MD5GPU(data);
syncDevice();
}
return 1;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function synchronizes the file writes with the stable storages
@param f pointer to the file to be synchronized
@return integer FTI_SCES if successfull.
The function instracts the worker thread to close the file and immediately returns
**/
/*-------------------------------------------------------------------------*/
int FTI_CLOSE_ASYNC(FILE *f){
if ( usesAsync ){
work[totalWork].f= f;
work[totalWork].type= CFILE;
totalWork++;
pthread_mutex_unlock(&worker);
}
return 1;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function waits until all MD5 checksums are computed
@return integer FTI_SCES if successfull.
The function waits until all MD5 Checksums are computed either by waiting the worker
thread or by immediately returning.
**/
/*-------------------------------------------------------------------------*/
int FTI_SyncMD5(){
if ( usesAsync ){
pthread_mutex_lock(&application);
}
return FTI_SCES;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function starts the MD5 checksums computation
@return integer FTI_SCES if successfull.
The function starts the MD5 Checksums computation
**/
/*-------------------------------------------------------------------------*/
int FTI_startMD5(){
if ( usesAsync ){
pthread_mutex_unlock(&worker);
}
return FTI_SCES;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function de-allocates or the MD5 related resources
@return integer FTI_SCES if successfull.
The function de-allocates or the MD5 related resources.
**/
/*-------------------------------------------------------------------------*/
int FTI_destroyMD5(){
if (usesAsync ){
worker_exit = 1;
pthread_mutex_unlock(&worker);
}
CUDA_ERROR_CHECK(hipHostFree((void *)Hin));
CUDA_ERROR_CHECK(hipHostFree((void *)Hout));
CUDA_ERROR_CHECK(hipFree(tempGpuBuffer));
return FTI_SCES;
}
| 9e4132693d4e13d110b4071c93df6637bce0cb4b.cu | /**
* Copyright (c) 2017 Leonardo A. Bautista-Gomez
* All rights reserved
*
* FTI - A multi-level checkpointing library for C/C++/Fortran applications
*
* Revision 1.0 : Fault Tolerance Interface (FTI)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* @author Konstantinos Parasyris (konstantinos.parasyris@bsc.es)
* @file diff-checkpoint.c
* @date February, 2018
* @brief Routines to compute the MD5 checksum
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
#include "md5Opt.h"
#include <pthread.h>
#include <fti.h>
#include <interface.h>
#define CPU 1
#define GPU 2
#define CFILE 3
int MD5GPU(FTIT_dataset *);
int MD5CPU(FTIT_dataset *);
int usesAsync = 0;
pthread_t thread;
pthread_mutex_t worker;
pthread_mutex_t application;
long totalWork= 0;
long worker_exit = 0;
int deviceId;
unsigned char* (*cpuHash)( const unsigned char *data, unsigned long nBytes, unsigned char *hash );
typedef struct threadWork{
FTIT_dataset *data;
FILE *f;
unsigned int type;
}tw;
tw work[FTI_BUFS];
#define CUDA_ERROR_CHECK(fun) \
do { \
cudaError_t err = fun; \
char str[FTI_BUFS]; \
if (err != cudaSuccess) \
{ \
int device; \
cudaGetDevice(&device); \
sprintf(str, "Cuda error %d %s:: %s device(%d)", __LINE__, __func__, cudaGetErrorString(err),device); \
FTI_Print(str, FTI_WARN); \
return FTI_NSCS; \
} \
} while(0)
#define GETDIV(a,b) ((a/b) + (((a % b) == 0 )? 0:1))
MD5_u32plus *Hin,*Hout;
MD5_u32plus *in,*out,*tmp;
char *tempGpuBuffer;
long tempBufferSize;
long md5ChunkSize;
cudaStream_t Gstream;
#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
#define H(x, y, z) (((x) ^ (y)) ^ (z))
#define H2(x, y, z) ((x) ^ ((y) ^ (z)))
#define I(x, y, z) ((y) ^ ((x) | ~(z)))
#define STEP(f, a, b, c, d, x, t, s) \
(a) += f((b), (c), (d)) + (x) + (t); \
(a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
(a) += (b);
#define SET(n) \
(*(MD5_u32plus *)&ptr[(n) * 4])
#define GET(n) \
SET(n)
/*
* This processes one or more 64-byte data blocks, but does NOT update the bit
* counters. There are no alignment requirements.
*/
#define OUT(dst, src) \
(dst)[0] = (unsigned char)(src); \
(dst)[1] = (unsigned char)((src) >> 8); \
(dst)[2] = (unsigned char)((src) >> 16); \
(dst)[3] = (unsigned char)((src) >> 24);
/*-------------------------------------------------------------------------*/
/**
@brief This CUDA function computes the MD5 DCP chunks for the data, and stores each
checksum on the corresponding out index
@param out Array containing all the MD5 checksums
@param data pointing to the actual data.
@param size Total Size of the data
@param md5ChunkSize Total bytes used to comute a single checksum
@return void.
This function computes the MD5 checksums of the data stored in the data ptr
the checksums are stored in the out memory location
**/
/*-------------------------------------------------------------------------*/
/**
  @brief Runs the 64 MD5 steps (4 rounds) over one 64-byte block pointed
         to by ptr, folding the result into the running state (a,b,c,d).
  The SET/GET macros read 32-bit little-endian words from ptr.
 **/
__device__ static void md5ProcessBlock(const unsigned char *ptr,
        MD5_u32plus &a, MD5_u32plus &b, MD5_u32plus &c, MD5_u32plus &d)
{
    MD5_u32plus saved_a = a;
    MD5_u32plus saved_b = b;
    MD5_u32plus saved_c = c;
    MD5_u32plus saved_d = d;
    /* Round 1 */
    STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
    STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
    STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
    STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
    STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
    STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
    STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
    STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
    STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
    STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
    STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
    STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
    STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
    STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
    STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
    STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
    /* Round 2 */
    STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
    STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
    STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
    STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
    STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
    STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
    STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
    STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
    STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
    STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
    STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
    STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
    STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
    STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
    STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
    STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
    /* Round 3 */
    STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
    STEP(H2, d, a, b, c, GET(8), 0x8771f681, 11)
    STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
    STEP(H2, b, c, d, a, GET(14), 0xfde5380c, 23)
    STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
    STEP(H2, d, a, b, c, GET(4), 0x4bdecfa9, 11)
    STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
    STEP(H2, b, c, d, a, GET(10), 0xbebfbc70, 23)
    STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
    STEP(H2, d, a, b, c, GET(0), 0xeaa127fa, 11)
    STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
    STEP(H2, b, c, d, a, GET(6), 0x04881d05, 23)
    STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
    STEP(H2, d, a, b, c, GET(12), 0xe6db99e5, 11)
    STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
    STEP(H2, b, c, d, a, GET(2), 0xc4ac5665, 23)
    /* Round 4 */
    STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
    STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
    STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
    STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
    STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
    STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
    STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
    STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
    STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
    STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
    STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
    STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
    STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
    STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
    STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
    STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
    a += saved_a;
    b += saved_b;
    c += saved_c;
    d += saved_d;
}
__global__
void body(MD5_u32plus *out, const void *data, unsigned long size, long md5ChunkSize )
{
    // One thread hashes one md5ChunkSize-byte chunk; the 4-word digest is
    // written to out[tid*4 .. tid*4+3].
    const unsigned char *ptr;
    unsigned char *heapBuf = NULL;  // device-malloc'd zero-padded copy, tail chunk only
    MD5_u32plus a, b, c, d;
    long tid = threadIdx.x + blockIdx.x *blockDim.x;
    long index = tid * md5ChunkSize;
    unsigned char block[64];
    // BUG FIX: the guard was `index > size`, so when size is an exact multiple
    // of md5ChunkSize the thread with index == size hashed a phantom all-zero
    // chunk and wrote one digest past the end of `out`.
    if (index >= size)
        return;
    ptr = (const unsigned char *)data;
    ptr = &ptr[index];
    long localSize = md5ChunkSize;
    if ( index + md5ChunkSize > size){
        // Tail chunk: copy into a zero-padded buffer so every chunk hashes
        // exactly md5ChunkSize bytes.
        localSize = size - index;
        heapBuf = (unsigned char *) malloc (md5ChunkSize);
        memset(heapBuf, 0, md5ChunkSize);
        memcpy(heapBuf, ptr, localSize);
        ptr = heapBuf;
        localSize = md5ChunkSize;
    }
    // Standard MD5 initialization vector.
    a = 0x67452301;
    b = 0xefcdab89;
    c = 0x98badcfe;
    d = 0x10325476;
    // Bit-length of the message, split in two 32-bit halves for the trailer.
    MD5_u32plus hi = localSize >> 29;
    MD5_u32plus lo = localSize & 0x1fffffff;
    // Consume all complete 64-byte blocks.
    while (localSize >= 64 ){
        md5ProcessBlock(ptr, a, b, c, d);
        ptr += 64;
        localSize -= 64;
    }
    // Final padding: copy the remainder, append the 0x80 terminator.
    long used;
    for ( used = 0; used < localSize; used++){
        block[used] = ptr[used];
    }
    block[used++] = 0x80;
    long available = 64 - used;
    if ( available < 8 ){
        // Not enough room for the 8-byte length trailer: pad this block with
        // zeros, process it, and use a fresh block for the trailer.
        // BUG FIX: the original processed block[] here without zeroing the
        // unused tail, hashing uninitialized stack bytes.
        memset(&block[used], 0, available);
        md5ProcessBlock(block, a, b, c, d);
        used = 0;
        available = 64;
    }
    lo <<= 3;  // byte count -> bit count
    memset(&block[used], 0, available - 8);
    OUT( &block[56], lo);
    OUT( &block[60], hi);
    md5ProcessBlock(block, a, b, c, d);
    out[tid*4] = a;
    out[tid*4+1] = b;
    out[tid*4+2] = c;
    out[tid*4+3] = d;
    // BUG FIX: the original called free((void *)ptr), but ptr points at the
    // local block[] by this point; free the device-heap buffer instead.
    if (heapBuf){
        free(heapBuf);
    }
    return;
}
/*-------------------------------------------------------------------------*/
/**
  @brief Synchronizes the CPU with the GPU stream that computes the checksums
  Synchronizes the CPU with the GPU stream that computes the checksums
**/
/*-------------------------------------------------------------------------*/
int syncDevice(){
    // Block the host until all work queued on the module's dedicated stream
    // (Gstream) has completed. On a CUDA error the CUDA_ERROR_CHECK macro
    // logs a warning and makes this function return FTI_NSCS instead of 1.
    CUDA_ERROR_CHECK(cudaStreamSynchronize(Gstream));
    return 1;
}
/*-------------------------------------------------------------------------*/
/**
@brief This is the main loop of the parallel thread responsible of computing
the MD5 checksums
@return void.
This is the main loop of the parallel thread responsible of computing
**/
/*-------------------------------------------------------------------------*/
void *workerMain(void *){
    // Bind the worker thread to the same CUDA device the application thread
    // recorded in FTI_initMD5 (device contexts are per-thread selectable).
    cudaSetDevice(deviceId);
    long l;
    // When `lock` is 1 the worker wakes the application after draining a
    // batch; a CFILE job clears it so that batch completes without a wake-up.
    int lock= 1;
    while (1){
        // Sleep until FTI_startMD5()/FTI_CLOSE_ASYNC() unlocks `worker`.
        pthread_mutex_lock(&worker);
        if (worker_exit ){
            // Shutdown requested by FTI_destroyMD5.
            return NULL;
        }
        // Drain the queue filled by FTI_MD5CPU / FTI_MD5GPU / FTI_CLOSE_ASYNC.
        for ( l = 0; l < totalWork; l++){
            if( work[l].type == CPU ){
                MD5CPU(work[l].data);
            }
            else if ( work[l].type == GPU ){
                MD5GPU(work[l].data);
            }
            else if ( work[l].type == CFILE ){
                lock = 0;
                char str[100];
                double t0 = MPI_Wtime();
                // Flush and close the checkpoint file on behalf of the
                // application thread, timing the push to stable storage.
                fsync(fileno(work[l].f));
                fclose(work[l].f);
                double t1 = MPI_Wtime();
                sprintf(str,"In memory Ckpt Pushed in Stable Storage in : %.2f sec", t1-t0);
                FTI_Print(str,FTI_INFO);
            }
        }
        totalWork = 0;
        // Wait for the GPU hashing kernels queued on Gstream to finish.
        syncDevice();
        if ( lock ){
            // Release the application thread blocked in FTI_SyncMD5().
            pthread_mutex_unlock(&application);
        }
        else {
            lock = 1;
        }
    }
}
/*-------------------------------------------------------------------------*/
/**
@brief This function initializes the MD5 checksum functions for DCP
@param cSize Size of the chunk
@param tempSize Size of intermediate buffers (Not used in this file)
@param FTI_Conf Pointer to the configuration options
@return integer FTI_SCES if successfu.
This function initializes parameters for the computation of DCP MD5 checksums
and if requested spawns the worker thread.
**/
/*-------------------------------------------------------------------------*/
int FTI_initMD5(long cSize, long tempSize, FTIT_configuration *FTI_Conf){
    // Async (worker-thread) mode is used only when cached checkpoints are on.
    if ( FTI_Conf->dcpInfoPosix.cachedCkpt)
        usesAsync = 1;
    else
        usesAsync = 0;
    //this will be use by the application to sync
    cpuHash = FTI_Conf->dcpInfoPosix.hashFunc;
    pthread_attr_t attr;
    cudaGetDevice(&deviceId);
    if (usesAsync){
        if (pthread_mutex_init(&application, NULL) != 0){
            return 1;
        }
        // Both mutexes start locked: the worker blocks on `worker` until work
        // is released, the application blocks on `application` in FTI_SyncMD5.
        pthread_mutex_lock(&application);
        // This will be used by the worker to sync
        if (pthread_mutex_init(&worker, NULL) != 0){
            return 1;
        }
        pthread_mutex_lock(&worker);
        // BUG FIX: pthread_attr_init() must run BEFORE
        // pthread_attr_setdetachstate(); the original order operated on an
        // uninitialized attribute object (undefined behavior) and then
        // clobbered the detach state with the init defaults.
        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        if(pthread_create(&thread, &attr, workerMain, NULL)) {
            return 1;
        }
        pthread_attr_destroy(&attr);
    }
    tempBufferSize = tempSize;
    md5ChunkSize = cSize;
    size_t lvl1Chunks,free,total;  // `free`/`total` shadow ::free within this scope
    CUDA_ERROR_CHECK(cudaMemGetInfo ( &free, &total));
    lvl1Chunks = ((total)/cSize)*4; // 4 ints per md5
    // NOTE(review): sizeof(MD5_u32plus*) is pointer size; sizeof(MD5_u32plus)
    // was probably intended. Kept as-is (it only over-allocates) — confirm.
    CUDA_ERROR_CHECK(cudaHostAlloc((void **)&Hin, sizeof(MD5_u32plus*) * lvl1Chunks , cudaHostAllocMapped));
    CUDA_ERROR_CHECK(cudaHostAlloc((void **)&Hout, sizeof(MD5_u32plus*) * lvl1Chunks , cudaHostAllocMapped));
    CUDA_ERROR_CHECK(cudaHostGetDevicePointer((void **)&in, (void *) Hin, 0));
    CUDA_ERROR_CHECK(cudaHostGetDevicePointer((void **)&out, (void *) Hout, 0));
    CUDA_ERROR_CHECK(cudaMallocManaged(&tempGpuBuffer, sizeof(char)*(tempBufferSize)));
    CUDA_ERROR_CHECK(cudaStreamCreate(&Gstream));
    return FTI_SCES;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function computes the checksums of an Protected Variable
@param data Variable We need to compute the checksums
@return integer FTI_SCES if successfu.
This function computes the checksums of a specific variable stored in the
GPU and is called in the async mode by the worker thread
**/
/*-------------------------------------------------------------------------*/
int MD5GPU(FTIT_dataset *data){
    // Launch the `body` kernel on Gstream: one thread per md5ChunkSize-byte
    // chunk of the variable's device buffer, digests written into the
    // dataset's currentHashArray. Asynchronous — caller must syncDevice().
    size_t size = data->size;
    long numKernels = GETDIV(size, md5ChunkSize);
    long numThreads = min(numKernels, 1024L);  // hardware cap on threads/block
    long numGroups = GETDIV(numKernels, numThreads);
    // BUG FIX: removed `unsigned char *tmp = malloc(sizeof(char)*size)` — it
    // shadowed the file-scope `tmp`, was never used, and was never freed,
    // leaking `size` bytes of host memory on every call.
    body<<<numGroups,numThreads,0,Gstream>>>((MD5_u32plus *) data->dcpInfoPosix.currentHashArray, data->devicePtr, size, md5ChunkSize);
    return FTI_SCES;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function computes the checksums of an Protected Variable
@param data Variable We need to compute the checksums
@return integer FTI_SCES if successfu.
This function computes the checksums of a specific variable stored in the
CPU and is called in the async mode by the worker thread
**/
/*-------------------------------------------------------------------------*/
int MD5CPU(FTIT_dataset *data){
    // Hash a host-resident variable in md5ChunkSize-byte chunks; each chunk's
    // 16-byte digest goes to currentHashArray[chunkId*16]. A short final
    // chunk is zero-padded to md5ChunkSize before hashing.
    unsigned long totalBytes = data->size;
    unsigned char *src = (unsigned char *) data->ptr;
    unsigned char padded[md5ChunkSize];
    size_t offset;
    for ( offset = 0; offset < totalBytes; offset += md5ChunkSize ){
        unsigned int chunkId = offset / md5ChunkSize;
        unsigned char *digest = &data->dcpInfoPosix.currentHashArray[chunkId * 16];
        unsigned long remaining = totalBytes - offset;
        if ( remaining < (unsigned long) md5ChunkSize ){
            // Tail chunk: zero-fill then copy so the hash covers a full chunk.
            memset( padded, 0x0, md5ChunkSize );
            memcpy( padded, &src[offset], remaining );
            cpuHash( padded, md5ChunkSize, digest );
        } else {
            cpuHash( &src[offset], md5ChunkSize, digest );
        }
    }
    return FTI_SCES;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function computes the checksums of a Protected Variable
@param data Variable We need to compute the checksums
@return integer FTI_SCES if successfu.
This function initializes either computes directly the checksums of the CPU
  dataVar or assigns the work to a worker thread
 **/
/*-------------------------------------------------------------------------*/
int FTI_MD5CPU(FTIT_dataset *data){
    // Synchronous mode: hash immediately on the calling thread.
    if ( usesAsync == 0 ){
        MD5CPU(data);
        return 1;
    }
    // Async mode: enqueue a CPU-hash job for the worker thread.
    tw *slot = &work[totalWork++];
    slot->data = data;
    slot->type = CPU;
    return 1;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function computes the checksums of a Protected Variable
@param data Variable We need to compute the checksums
@return integer FTI_SCES if successfu.
This function initializes either computes directly the checksums of the GPU
  dataVar or assigns the work to a worker thread
 **/
/*-------------------------------------------------------------------------*/
int FTI_MD5GPU(FTIT_dataset *data){
    // Synchronous mode: launch the hashing kernel and wait for it here.
    if ( usesAsync == 0 ){
        MD5GPU(data);
        syncDevice();
        return 1;
    }
    // Async mode: enqueue a GPU-hash job for the worker thread.
    tw *slot = &work[totalWork++];
    slot->data = data;
    slot->type = GPU;
    return 1;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function synchronizes the file writes with the stable storages
@param f pointer to the file to be synchronized
@return integer FTI_SCES if successfull.
  The function instructs the worker thread to close the file and immediately returns.
**/
/*-------------------------------------------------------------------------*/
int FTI_CLOSE_ASYNC(FILE *f){
    // In synchronous mode there is no worker to delegate to; nothing to do.
    if ( usesAsync == 0 ){
        return 1;
    }
    // Enqueue a file-close job and wake the worker immediately; this call
    // returns without waiting for the fsync/fclose to complete.
    tw *slot = &work[totalWork++];
    slot->f = f;
    slot->type = CFILE;
    pthread_mutex_unlock(&worker);
    return 1;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function waits until all MD5 checksums are computed
@return integer FTI_SCES if successfull.
The function waits until all MD5 Checksums are computed either by waiting the worker
thread or by immediately returning.
**/
/*-------------------------------------------------------------------------*/
int FTI_SyncMD5(){
    // Synchronous mode: all hashing already happened inline; return at once.
    if ( usesAsync == 0 ){
        return FTI_SCES;
    }
    // Async mode: block until the worker releases `application` after
    // draining its queue.
    pthread_mutex_lock(&application);
    return FTI_SCES;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function starts the MD5 checksums computation
@return integer FTI_SCES if successfull.
The function starts the MD5 Checksums computation
**/
/*-------------------------------------------------------------------------*/
int FTI_startMD5(){
    // In synchronous mode the work was already done when it was submitted.
    if ( usesAsync == 0 ){
        return FTI_SCES;
    }
    // Wake the worker thread so it starts draining the queued hash jobs.
    pthread_mutex_unlock(&worker);
    return FTI_SCES;
}
/*-------------------------------------------------------------------------*/
/**
@brief This function de-allocates or the MD5 related resources
@return integer FTI_SCES if successfull.
The function de-allocates or the MD5 related resources.
**/
/*-------------------------------------------------------------------------*/
int FTI_destroyMD5(){
    if (usesAsync ){
        // Signal the (detached) worker to exit the next time it wakes.
        // NOTE(review): the buffers below are freed without waiting for the
        // worker to actually terminate — confirm no job can still be running.
        worker_exit = 1;
        pthread_mutex_unlock(&worker);
    }
    // BUG FIX: Gstream (created in FTI_initMD5) was never destroyed, leaking
    // the stream across init/destroy cycles.
    CUDA_ERROR_CHECK(cudaStreamDestroy(Gstream));
    CUDA_ERROR_CHECK(cudaFreeHost((void *)Hin));
    CUDA_ERROR_CHECK(cudaFreeHost((void *)Hout));
    CUDA_ERROR_CHECK(cudaFree(tempGpuBuffer));
    return FTI_SCES;
}
|
b774d3d5c1cd5d5e0858b1d992d71a68b3bae196.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string>
#include <stdio.h>
#include <stdlib.h>
#include <dirent.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <hip/hip_runtime.h>
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#define THREAD_DIM 256
#define NUM_STREAMS 32
#define MAX_IMAGE_SIZE (1920 * 1080)
using namespace cv;
using namespace std;
typedef struct FakeMat_ {
unsigned char *Ptr;
int rows;
int cols;
} FakeMat;
// Converts an RGBA image to 8-bit greyscale, one thread per pixel.
// Expects a 2D launch covering at least cols x rows threads.
__global__ void rgbaToGreyscaleGPU(
    uchar4 *rgbaImage,
    unsigned char *greyImage,
    int rows,
    int cols
)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // BUG FIX: the guard was `x > cols || y > rows`, which let the threads
    // with x == cols or y == rows read and write one element out of bounds.
    if (x >= cols || y >= rows)
    {
        return;
    }
    uchar4 rgba = rgbaImage[y * cols + x];
    // Rec. 601 luma weights applied to the R (x), G (y) and B (z) channels.
    unsigned char greyValue = (0.299f * rgba.x) + (0.587f * rgba.y) + (0.114f * rgba.z);
    greyImage[y * cols + x] = greyValue;
}
// Applies a cross-shaped 3x3 "median" filter to a greyscale image, one
// thread per interior pixel; border pixels are left untouched.
__global__ void medianFilterGPU(
    unsigned char* greyImageData,
    unsigned char *filteredImage,
    int rows,
    int cols
)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int windowSize = 3;
    // Sampling mask: only the 5 positions marked 1 are read; positions
    // marked 0 keep their initial value of 0 in pixelValues.
    // NOTE(review): with four entries pinned at 0, element 4 of the sorted
    // 9-array is effectively the minimum of the five sampled pixels rather
    // than their median — confirm this darkening bias is intended.
    int filter[9] {
        0, 1, 0,
        1, 1, 1,
        0, 1, 0
    };
    unsigned char pixelValues[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
    // Skip threads outside the filterable interior (output buffer is
    // zero-filled by the caller, so skipped pixels stay 0).
    if (
        x > cols - windowSize + 1 ||
        y > rows - windowSize + 1 ||
        x < windowSize - 1 ||
        y < windowSize - 1
    )
    {
        return;
    }
    // Gather the masked 3x3 neighbourhood centred on (x, y).
    for (int hh = 0; hh < windowSize; hh++)
    {
        for (int ww = 0; ww < windowSize; ww++)
        {
            if (filter[hh * windowSize + ww] == 1)
            {
                int idx = (y + hh - 1) * cols + (x + ww - 1);
                pixelValues[hh * windowSize + ww] = greyImageData[idx];
            }
        }
    }
    // Selection-style sort of the 9 samples, then pick the middle element.
    for (int i = 0; i < (windowSize * windowSize); i++) {
        for (int j = i + 1; j < (windowSize * windowSize); j++) {
            if (pixelValues[i] > pixelValues[j]) {
                // BUG FIX: the swap temporary was a plain (possibly signed)
                // `char`; use unsigned char so values above 127 round-trip
                // without implementation-defined conversions.
                unsigned char tmp = pixelValues[i];
                pixelValues[i] = pixelValues[j];
                pixelValues[j] = tmp;
            }
        }
    }
    unsigned char filteredValue = pixelValues[(windowSize * windowSize) / 2];
    filteredImage[y * cols + x] = filteredValue;
}
// Prints the elapsed wall-clock time between two CLOCK_MONOTONIC_RAW
// timestamps, in milliseconds, tagged with the given task name.
inline void printTime(std::string task, struct timespec start, struct timespec end)
{
    // Nanosecond difference converted to whole milliseconds.
    uint64_t diff = (1000000000L * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec) / 1e6;
    // BUG FIX: %llu paired with a uint64_t is not portable (uint64_t may be
    // `unsigned long`); cast explicitly to match the format specifier.
    printf("[INFO] %s operation lasted %llu ms\n", task.c_str(), (unsigned long long)diff);
}
// Appends every entry name of directory `name` (including "." and "..")
// to *v. On failure to open the directory, *v is left unchanged.
void read_directory(std::string name, std::vector<std::string> *v)
{
    DIR* dirp = opendir(name.c_str());
    // BUG FIX: opendir returns NULL on failure (missing directory, bad
    // permissions); the original passed NULL straight to readdir and crashed.
    if (dirp == NULL) {
        return;
    }
    struct dirent * dp;
    while ((dp = readdir(dirp)) != NULL) {
        v->push_back(dp->d_name);
    }
    closedir(dirp);
}
// Loads `filename` as a BGR image, converts it to RGBA for the GPU kernels,
// and allocates a matching single-channel output image.
// Returns 0 on success, 1 if the file could not be read.
int readImage(string filename, Mat* inputImage, Mat* imageGrey)
{
    Mat loaded = imread(filename.c_str(), IMREAD_COLOR);
    if (loaded.empty())
    {
        cerr << "[ERROR] Couldn't open file: " << filename << endl;
        return 1;
    }
    Mat rgba;
    cvtColor(loaded, rgba, COLOR_BGR2RGBA);
    Mat grey;
    grey.create(loaded.rows, loaded.cols, CV_8UC1);
    *inputImage = rgba;
    *imageGrey = grey;
    return 0;
}
// Writes `outputImage` to "<dirname>/<prefix><filename>".
void writeImage(string dirname, string filename, string prefix, Mat outputImage)
{
    string outPath = dirname;
    outPath += "/";
    outPath += prefix;
    outPath += filename;
    cv::imwrite(outPath.c_str(), outputImage);
}
// Pipeline: read every image in argv[1], convert to greyscale and median-
// filter it on the GPU (round-robin over NUM_STREAMS streams), then write
// the results to "motified<inputDir>".
int main(int argc, char **argv)
{
    if (argc < 2)
    {
        cerr << "Usage: ./main inputDirectory" << endl;
        exit(1);
    }
    // Define Variables
    string inputDir = string(argv[1]);
    string outputDir = string("motified") + inputDir;
    vector<string> inputFilenames;
    read_directory(inputDir, &inputFilenames);
    vector<Mat> outputImages;
    vector<Mat> inputImages;
    hipStream_t streams[NUM_STREAMS];
    for (int i = 0; i < NUM_STREAMS; i++) { hipStreamCreate(&streams[i]); }
    struct timespec start, end;
    uchar4 *d_rgbaImage;
    unsigned char *d_greyImage;
    unsigned char *d_filteredImage;
    // One MAX_IMAGE_SIZE slot per stream so copies/kernels can overlap.
    hipMalloc(&d_rgbaImage, sizeof(uchar4) * MAX_IMAGE_SIZE * NUM_STREAMS);
    hipMalloc(&d_greyImage, sizeof(unsigned char) * MAX_IMAGE_SIZE * NUM_STREAMS);
    hipMalloc(&d_filteredImage, sizeof(unsigned char) * MAX_IMAGE_SIZE * NUM_STREAMS);
    // Read in images from the fs
    // NOTE(review): filenames are built as inputDir + entry with no '/'
    // separator — assumes inputDir ends with '/'; confirm with callers.
    for (int i = 0; i < inputFilenames.size(); i++)
    {
        Mat imageMat;
        Mat outputMat;
        string curImage = inputFilenames[i];
        string filename = inputDir + curImage;
        // Read in image
        int err = readImage(
            filename,
            &imageMat,
            &outputMat
        );
        if (err != 0) { continue; }
        inputImages.push_back(imageMat);
        outputImages.push_back(outputMat);
    }
    FakeMat *outputImagesArray;
    hipHostMalloc(&outputImagesArray, sizeof(FakeMat) * inputImages.size());
    // BUG FIX: pinned allocations are not zero-initialized; entries skipped
    // below (oversized images) were later read through garbage pointers.
    memset(outputImagesArray, 0, sizeof(FakeMat) * inputImages.size());
    clock_gettime(CLOCK_MONOTONIC_RAW, &start);
    for (int i = 0; i < inputImages.size(); i++)
    {
        const int curStream = i % NUM_STREAMS;
        Mat curImageMat = inputImages[i];
        int rows = curImageMat.rows;
        int cols = curImageMat.cols;
        int size = rows * cols;
        if (size >= MAX_IMAGE_SIZE) { continue; }
        // Clear this stream's slots so untouched border pixels read as 0.
        hipMemsetAsync(d_rgbaImage + MAX_IMAGE_SIZE * curStream, 0, sizeof(uchar4) * MAX_IMAGE_SIZE, streams[curStream]);
        hipMemsetAsync(d_greyImage + MAX_IMAGE_SIZE * curStream, 0, sizeof(unsigned char) * MAX_IMAGE_SIZE, streams[curStream]);
        hipMemsetAsync(d_filteredImage + MAX_IMAGE_SIZE * curStream, 0, sizeof(unsigned char) * MAX_IMAGE_SIZE, streams[curStream]);
        // BUG FIX: the original launched dim3(THREAD_DIM, THREAD_DIM) =
        // 256x256 = 65536 threads per block, far beyond the 1024-thread
        // hardware limit, so every kernel launch failed. Use 16x16 tiles.
        const int tile = 16;
        dim3 gridSize (ceil(cols / (float)tile), ceil(rows / (float)tile));
        dim3 blockSize (tile, tile);
        // Copy data to GPU
        uchar4 *curImagePtr = (uchar4 *)curImageMat.ptr<unsigned char>(0);
        if (curImagePtr == NULL) { continue; }
        hipMemcpyAsync(
            d_rgbaImage + MAX_IMAGE_SIZE * curStream,
            curImagePtr,
            sizeof(uchar4) * size,
            hipMemcpyHostToDevice,
            streams[curStream]
        );
        // Run kernel(s)
        hipLaunchKernelGGL(( rgbaToGreyscaleGPU), dim3(gridSize), dim3(blockSize), 0, streams[curStream] ,
            d_rgbaImage + (MAX_IMAGE_SIZE * curStream),
            d_greyImage + (MAX_IMAGE_SIZE * curStream),
            rows,
            cols
        );
        hipLaunchKernelGGL(( medianFilterGPU), dim3(gridSize), dim3(blockSize), 0, streams[curStream] ,
            d_greyImage + (MAX_IMAGE_SIZE * curStream),
            d_filteredImage + (MAX_IMAGE_SIZE * curStream),
            rows,
            cols
        );
        // Copy results to CPU
        unsigned char *outputImagePtr = outputImages[i].ptr<unsigned char>(0);
        hipMemcpyAsync(
            outputImagePtr,
            d_filteredImage + MAX_IMAGE_SIZE * curStream,
            sizeof(unsigned char) * size,
            hipMemcpyDeviceToHost,
            streams[curStream]
        );
        outputImagesArray[i] = (FakeMat){ outputImagePtr, rows, cols };
    }
    // BUG FIX: the async work must finish before the end timestamp is taken,
    // otherwise only the enqueue cost was measured (and the output buffers
    // were not guaranteed ready for the mkdir/write phase timing-wise).
    for (int i = 0; i < NUM_STREAMS; i++)
    {
        hipStreamSynchronize(streams[i]);
    }
    clock_gettime(CLOCK_MONOTONIC_RAW, &end);
    printTime("total", start, end);
    for (int i = 0; i < NUM_STREAMS; i++)
    {
        hipStreamDestroy(streams[i]);
    }
    // Create the output directory if it does not already exist.
    struct stat st = {0};
    if (stat(outputDir.c_str(), &st) == -1) {
        mkdir(outputDir.c_str(), 0700);
    }
    // Write modified images to the fs
    for (int i = 0; i < inputImages.size(); i++)
    {
        if (outputImagesArray[i].Ptr == NULL) { continue; }
        Mat outputImageMat = Mat(
            outputImagesArray[i].rows,
            outputImagesArray[i].cols,
            CV_8UC1,
            outputImagesArray[i].Ptr
        );
        // Write Image
        writeImage(outputDir, to_string(i) + string(".jpg"), "modified_", outputImageMat);
    }
    // BUG FIX: free what was actually allocated. The original passed
    // &outputImages (a stack std::vector) to hipHostFree and passed the
    // ADDRESSES of the device pointers to hipFree; d_filteredImage leaked.
    hipHostFree(outputImagesArray);
    hipFree(d_rgbaImage);
    hipFree(d_greyImage);
    hipFree(d_filteredImage);
    return 0;
}
| b774d3d5c1cd5d5e0858b1d992d71a68b3bae196.cu |
#include <iostream>
#include <string>
#include <stdio.h>
#include <stdlib.h>
#include <dirent.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <cuda_runtime.h>
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#define THREAD_DIM 256
#define NUM_STREAMS 32
#define MAX_IMAGE_SIZE (1920 * 1080)
using namespace cv;
using namespace std;
typedef struct FakeMat_ {
unsigned char *Ptr;
int rows;
int cols;
} FakeMat;
// Converts an RGBA image to 8-bit greyscale, one thread per pixel.
// Expects a 2D launch covering at least cols x rows threads.
__global__ void rgbaToGreyscaleGPU(
    uchar4 *rgbaImage,
    unsigned char *greyImage,
    int rows,
    int cols
)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // BUG FIX: the guard was `x > cols || y > rows`, which let the threads
    // with x == cols or y == rows read and write one element out of bounds.
    if (x >= cols || y >= rows)
    {
        return;
    }
    uchar4 rgba = rgbaImage[y * cols + x];
    // Rec. 601 luma weights applied to the R (x), G (y) and B (z) channels.
    unsigned char greyValue = (0.299f * rgba.x) + (0.587f * rgba.y) + (0.114f * rgba.z);
    greyImage[y * cols + x] = greyValue;
}
// Applies a cross-shaped 3x3 "median" filter to a greyscale image, one
// thread per interior pixel; border pixels are left untouched.
__global__ void medianFilterGPU(
    unsigned char* greyImageData,
    unsigned char *filteredImage,
    int rows,
    int cols
)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int windowSize = 3;
    // Sampling mask: only the 5 positions marked 1 are read; positions
    // marked 0 keep their initial value of 0 in pixelValues.
    // NOTE(review): with four entries pinned at 0, element 4 of the sorted
    // 9-array is effectively the minimum of the five sampled pixels rather
    // than their median — confirm this darkening bias is intended.
    int filter[9] {
        0, 1, 0,
        1, 1, 1,
        0, 1, 0
    };
    unsigned char pixelValues[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
    // Skip threads outside the filterable interior (output buffer is
    // zero-filled by the caller, so skipped pixels stay 0).
    if (
        x > cols - windowSize + 1 ||
        y > rows - windowSize + 1 ||
        x < windowSize - 1 ||
        y < windowSize - 1
    )
    {
        return;
    }
    // Gather the masked 3x3 neighbourhood centred on (x, y).
    for (int hh = 0; hh < windowSize; hh++)
    {
        for (int ww = 0; ww < windowSize; ww++)
        {
            if (filter[hh * windowSize + ww] == 1)
            {
                int idx = (y + hh - 1) * cols + (x + ww - 1);
                pixelValues[hh * windowSize + ww] = greyImageData[idx];
            }
        }
    }
    // Selection-style sort of the 9 samples, then pick the middle element.
    for (int i = 0; i < (windowSize * windowSize); i++) {
        for (int j = i + 1; j < (windowSize * windowSize); j++) {
            if (pixelValues[i] > pixelValues[j]) {
                // BUG FIX: the swap temporary was a plain (possibly signed)
                // `char`; use unsigned char so values above 127 round-trip
                // without implementation-defined conversions.
                unsigned char tmp = pixelValues[i];
                pixelValues[i] = pixelValues[j];
                pixelValues[j] = tmp;
            }
        }
    }
    unsigned char filteredValue = pixelValues[(windowSize * windowSize) / 2];
    filteredImage[y * cols + x] = filteredValue;
}
// Prints the elapsed wall-clock time between two CLOCK_MONOTONIC_RAW
// timestamps, in milliseconds, tagged with the given task name.
inline void printTime(std::string task, struct timespec start, struct timespec end)
{
    // Nanosecond difference converted to whole milliseconds.
    uint64_t diff = (1000000000L * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec) / 1e6;
    // BUG FIX: %llu paired with a uint64_t is not portable (uint64_t may be
    // `unsigned long`); cast explicitly to match the format specifier.
    printf("[INFO] %s operation lasted %llu ms\n", task.c_str(), (unsigned long long)diff);
}
// Appends every entry name of directory `name` (including "." and "..")
// to *v. On failure to open the directory, *v is left unchanged.
void read_directory(std::string name, std::vector<std::string> *v)
{
    DIR* dirp = opendir(name.c_str());
    // BUG FIX: opendir returns NULL on failure (missing directory, bad
    // permissions); the original passed NULL straight to readdir and crashed.
    if (dirp == NULL) {
        return;
    }
    struct dirent * dp;
    while ((dp = readdir(dirp)) != NULL) {
        v->push_back(dp->d_name);
    }
    closedir(dirp);
}
// Loads `filename` as a BGR image, converts it to RGBA for the GPU kernels,
// and allocates a matching single-channel output image.
// Returns 0 on success, 1 if the file could not be read.
int readImage(string filename, Mat* inputImage, Mat* imageGrey)
{
    Mat loaded = imread(filename.c_str(), IMREAD_COLOR);
    if (loaded.empty())
    {
        cerr << "[ERROR] Couldn't open file: " << filename << endl;
        return 1;
    }
    Mat rgba;
    cvtColor(loaded, rgba, COLOR_BGR2RGBA);
    Mat grey;
    grey.create(loaded.rows, loaded.cols, CV_8UC1);
    *inputImage = rgba;
    *imageGrey = grey;
    return 0;
}
// Write `outputImage` into `dirname`, naming the file `prefix` + `filename`.
void writeImage(string dirname, string filename, string prefix, Mat outputImage)
{
    string path = dirname;
    path += "/";
    path += prefix;
    path += filename;
    cv::imwrite(path.c_str(), outputImage);
}
// Batch GPU image pipeline:
//   1. read every image in argv[1],
//   2. per image: upload RGBA pixels, run greyscale + median-filter kernels
//      (work round-robined over NUM_STREAMS CUDA streams so copies and
//      kernels from different images can overlap),
//   3. copy the filtered pixels back and write them to "modified" + argv[1].
//
// NOTE(review): CUDA API return codes are not checked in this function; a
// failed cudaMalloc would only surface later as a kernel/copy fault.
int main(int argc, char **argv)
{
    if (argc < 2)
    {
        cerr << "Usage: ./main inputDirectory" << endl;
        exit(1);
    }
    string inputDir = string(argv[1]);
    string outputDir = string("modified") + inputDir;  // fixed typo: was "motified"
    vector<string> inputFilenames;
    read_directory(inputDir, &inputFilenames);
    vector<Mat> outputImages;
    vector<Mat> inputImages;
    cudaStream_t streams[NUM_STREAMS];
    for (int i = 0; i < NUM_STREAMS; i++) { cudaStreamCreate(&streams[i]); }
    struct timespec start, end;
    // Device scratch buffers: one MAX_IMAGE_SIZE slot per stream, indexed
    // below via `MAX_IMAGE_SIZE * curStream`.
    uchar4 *d_rgbaImage;
    unsigned char *d_greyImage;
    unsigned char *d_filteredImage;
    cudaMalloc(&d_rgbaImage, sizeof(uchar4) * MAX_IMAGE_SIZE * NUM_STREAMS);
    cudaMalloc(&d_greyImage, sizeof(unsigned char) * MAX_IMAGE_SIZE * NUM_STREAMS);
    cudaMalloc(&d_filteredImage, sizeof(unsigned char) * MAX_IMAGE_SIZE * NUM_STREAMS);
    // Read every image up front. Unreadable entries (including the "." and
    // ".." names returned by read_directory) are skipped via the err check.
    for (int i = 0; i < inputFilenames.size(); i++)
    {
        Mat imageMat;
        Mat outputMat;
        string curImage = inputFilenames[i];
        // Insert a separator so a directory argument without a trailing
        // slash also works (a doubled slash is harmless on POSIX).
        string filename = inputDir + string("/") + curImage;
        int err = readImage(
            filename,
            &imageMat,
            &outputMat
        );
        if (err != 0) { continue; }
        inputImages.push_back(imageMat);
        outputImages.push_back(outputMat);
    }
    FakeMat *outputImagesArray;
    cudaMallocHost(&outputImagesArray, sizeof(FakeMat) * inputImages.size());
    // cudaMallocHost does not zero its allocation. Clear the Ptr fields so
    // entries skipped below (oversized images, NULL pixel pointers) are seen
    // as Ptr == NULL by the write-out loop instead of reading uninitialized
    // memory (undefined behavior in the original code).
    for (int i = 0; i < inputImages.size(); i++)
    {
        outputImagesArray[i].Ptr = NULL;
    }
    clock_gettime(CLOCK_MONOTONIC_RAW, &start);
    for (int i = 0; i < inputImages.size(); i++)
    {
        const int curStream = i % NUM_STREAMS;
        Mat curImageMat = inputImages[i];
        int rows = curImageMat.rows;
        int cols = curImageMat.cols;
        int size = rows * cols;
        if (size >= MAX_IMAGE_SIZE) { continue; }
        // Clear this stream's slots so pixels the filter never writes (image
        // borders) come back as zero instead of stale data.
        cudaMemsetAsync(d_rgbaImage + MAX_IMAGE_SIZE * curStream, 0, sizeof(uchar4) * MAX_IMAGE_SIZE, streams[curStream]);
        cudaMemsetAsync(d_greyImage + MAX_IMAGE_SIZE * curStream, 0, sizeof(unsigned char) * MAX_IMAGE_SIZE, streams[curStream]);
        cudaMemsetAsync(d_filteredImage + MAX_IMAGE_SIZE * curStream, 0, sizeof(unsigned char) * MAX_IMAGE_SIZE, streams[curStream]);
        dim3 gridSize (ceil(cols / (float)THREAD_DIM), ceil(rows / (float)THREAD_DIM));
        dim3 blockSize (THREAD_DIM, THREAD_DIM);
        uchar4 *curImagePtr = (uchar4 *)curImageMat.ptr<unsigned char>(0);
        if (curImagePtr == NULL) { continue; }
        // NOTE(review): the Mat pixel buffers are pageable host memory, so
        // these "async" copies serialize; pinning would enable real overlap.
        cudaMemcpyAsync(
            d_rgbaImage + MAX_IMAGE_SIZE * curStream,
            curImagePtr,
            sizeof(uchar4) * size,
            cudaMemcpyHostToDevice,
            streams[curStream]
        );
        rgbaToGreyscaleGPU<<< gridSize, blockSize, 0, streams[curStream] >>>(
            d_rgbaImage + (MAX_IMAGE_SIZE * curStream),
            d_greyImage + (MAX_IMAGE_SIZE * curStream),
            rows,
            cols
        );
        medianFilterGPU<<< gridSize, blockSize, 0, streams[curStream] >>>(
            d_greyImage + (MAX_IMAGE_SIZE * curStream),
            d_filteredImage + (MAX_IMAGE_SIZE * curStream),
            rows,
            cols
        );
        // Copy the filtered result into the Mat buffer created by readImage.
        unsigned char *outputImagePtr = outputImages[i].ptr<unsigned char>(0);
        cudaMemcpyAsync(
            outputImagePtr,
            d_filteredImage + MAX_IMAGE_SIZE * curStream,
            sizeof(unsigned char) * size,
            cudaMemcpyDeviceToHost,
            streams[curStream]
        );
        outputImagesArray[i] = (FakeMat){ outputImagePtr, rows, cols };
    }
    clock_gettime(CLOCK_MONOTONIC_RAW, &end);
    // NOTE(review): this timestamp is taken before the streams are drained,
    // so it measures enqueue time, not GPU completion — confirm intent.
    printTime("total", start, end);
    // Drain all outstanding GPU work before the results are read below.
    for (int i = 0; i < NUM_STREAMS; i++)
    {
        cudaStreamSynchronize(streams[i]);
        cudaStreamDestroy(streams[i]);
    }
    // Create the output directory if it does not exist yet.
    struct stat st = {0};
    if (stat(outputDir.c_str(), &st) == -1) {
        mkdir(outputDir.c_str(), 0700);
    }
    // Write modified images to the fs; skipped images have Ptr == NULL.
    for (int i = 0; i < inputImages.size(); i++)
    {
        if (outputImagesArray[i].Ptr == NULL) { continue; }
        Mat outputImageMat = Mat(
            outputImagesArray[i].rows,
            outputImagesArray[i].cols,
            CV_8UC1,
            outputImagesArray[i].Ptr
        );
        writeImage(outputDir, to_string(i) + string(".jpg"), "modified_", outputImageMat);
    }
    // Free memory. The originals here were bugs: cudaFreeHost(&outputImages)
    // passed the address of a std::vector, cudaFree(&d_...) passed the
    // address of the pointer variable instead of the device pointer, and
    // d_filteredImage was never freed at all.
    cudaFreeHost(outputImagesArray);
    cudaFree(d_rgbaImage);
    cudaFree(d_greyImage);
    cudaFree(d_filteredImage);
    return 0;
}
|
ee123416c138f5c24ae2699e16b8c75303865651.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapeSpheropolyhedron.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeSpheropolyhedron
// Explicit instantiations of the HPMC GPU driver templates for the
// ShapeSpheropolyhedron shape: the kernels are compiled once in this
// translation unit so other translation units can link against them
// without seeing the template definitions.
template hipError_t gpu_hpmc_free_volume<ShapeSpheropolyhedron >(const hpmc_free_volume_args_t &args,
                                                  const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeSpheropolyhedron >(const hpmc_args_t& args,
                                                  const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_count_overlaps<ShapeSpheropolyhedron >(const hpmc_implicit_args_t& args,
                                                  const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeSpheropolyhedron >(const hpmc_implicit_args_t& args,
                                                  const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_insert_depletants_queue<ShapeSpheropolyhedron >(const hpmc_implicit_args_new_t& args,
                                                  const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject_new<ShapeSpheropolyhedron >(const hpmc_implicit_args_new_t& args,
                                                  const typename ShapeSpheropolyhedron ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| ee123416c138f5c24ae2699e16b8c75303865651.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapeSpheropolyhedron.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeSpheropolyhedron
// Explicit instantiations of the HPMC GPU driver templates for the
// ShapeSpheropolyhedron shape: the kernels are compiled once in this
// translation unit so other translation units can link against them
// without seeing the template definitions.
template cudaError_t gpu_hpmc_free_volume<ShapeSpheropolyhedron >(const hpmc_free_volume_args_t &args,
                                                  const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeSpheropolyhedron >(const hpmc_args_t& args,
                                                  const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapeSpheropolyhedron >(const hpmc_implicit_args_t& args,
                                                  const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeSpheropolyhedron >(const hpmc_implicit_args_t& args,
                                                  const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_insert_depletants_queue<ShapeSpheropolyhedron >(const hpmc_implicit_args_new_t& args,
                                                  const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapeSpheropolyhedron >(const hpmc_implicit_args_new_t& args,
                                                  const typename ShapeSpheropolyhedron ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
55ad3b1b9caaffdb0c6d35e7f90e1fe55952d7df.hip | // !!! This is a file automatically generated by hipify!!!
/** \file glvortens.cu
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
// BLOCK_X : in azimuth
//#define BLOCK_X DEF_BLOCK_X_GLVORTENS
#define BLOCK_X 32
// BLOCK_Y : in radius
#define BLOCK_Y 4
// Device copy of the host RadiiStuff table; CalcSoundSpeed_gpu refreshes
// the first 14*(NRAD+1) doubles on every call. Note: this kernel itself
// does not read CRadiiStuff.
__device__ double CRadiiStuff[32768];
// Per-cell adiabatic sound speed: cs = sqrt(gamma * (gamma - 1) * e / rho).
//
// One thread per (azimuth jg, radius ig) cell; `pitch` is the array row
// pitch in elements (doubles), not bytes — the caller passes
// CS->pitch/sizeof(double).
//
// NOTE(review): there is no bounds guard, yet the caller rounds the grid up
// to cover ns x nr — threads with ig >= nr index past the last row;
// presumably the allocations are padded enough to absorb this. Confirm
// before reusing this kernel elsewhere.
__global__ void kernel_adiabatic_soundspeed (double *energy,
                                             double *dens,
                                             double *cs,
                                             double adiabatic_index,
                                             int pitch) {
  int jg = threadIdx.x + blockIdx.x * blockDim.x;
  int ig = threadIdx.y + blockIdx.y * blockDim.y;
  // Flattened element index; __mul24 multiplies the low 24 bits of each
  // operand, so ig and pitch must each fit in 24 bits.
  int idg = __mul24(ig, pitch) + jg;
  cs[idg] = sqrt( adiabatic_index*(adiabatic_index-1.0)*energy[idg]/dens[idg]);
}
// Compute the adiabatic sound speed field on the GPU:
// CS = sqrt(gamma*(gamma-1)*Energy/Rho), element-wise over the polar grid.
// Does nothing unless the global Adiabatic flag is set; the host RadiiStuff
// table is re-uploaded into the CRadiiStuff device array on every call,
// even when Adiabatic is false.
extern "C"
void CalcSoundSpeed_gpu (PolarGrid *Rho, PolarGrid *Energy, PolarGrid *CS) {
  int nr = CS->Nrad;  // radial cells
  int ns = CS->Nsec;  // azimuthal cells
  //dim3 grid;
  dim3 block = dim3(BLOCK_X, BLOCK_Y);
  // Round the grid up so every (azimuth, radius) cell is covered. The kernel
  // has no bounds guard, so rows past nr-1 are written by the extra threads
  // — presumably absorbed by allocation padding; TODO confirm.
  dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
  checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(14*(NRAD+1))*sizeof(double), 0, hipMemcpyHostToDevice));
  if (Adiabatic) {
    // pitch is passed in elements (doubles), not bytes.
    hipLaunchKernelGGL(( kernel_adiabatic_soundspeed) , dim3(grid), dim3(block), 0, 0, Energy->gpu_field,
                                                   Rho->gpu_field,
                                                   CS->gpu_field,
                                                   ADIABATICINDEX,
                                                   CS->pitch/sizeof(double));
    hipDeviceSynchronize();
    getLastCudaError ("kernel_adiabatic_soundspeed failed");
    // azimuthal averaging
    //double csound[nr];
    //AzimuthalAverage (CS, csound);
    //for (int i = 0; i < nr; i++) {
    //  SOUNDSPEED[i] = csound[i]/(double)ns;
    //  CS2[i] = SOUNDSPEED[i]*SOUNDSPEED[i];
    //}
  }
}
| 55ad3b1b9caaffdb0c6d35e7f90e1fe55952d7df.cu | /** \file glvortens.cu
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <cuda.h>
// BLOCK_X : in azimuth
//#define BLOCK_X DEF_BLOCK_X_GLVORTENS
#define BLOCK_X 32
// BLOCK_Y : in radius
#define BLOCK_Y 4
// Device copy of the host RadiiStuff table; CalcSoundSpeed_gpu refreshes
// the first 14*(NRAD+1) doubles on every call. Note: this kernel itself
// does not read CRadiiStuff.
__device__ double CRadiiStuff[32768];
// Per-cell adiabatic sound speed: cs = sqrt(gamma * (gamma - 1) * e / rho).
//
// One thread per (azimuth jg, radius ig) cell; `pitch` is the array row
// pitch in elements (doubles), not bytes — the caller passes
// CS->pitch/sizeof(double).
//
// NOTE(review): there is no bounds guard, yet the caller rounds the grid up
// to cover ns x nr — threads with ig >= nr index past the last row;
// presumably the allocations are padded enough to absorb this. Confirm
// before reusing this kernel elsewhere.
__global__ void kernel_adiabatic_soundspeed (double *energy,
                                             double *dens,
                                             double *cs,
                                             double adiabatic_index,
                                             int pitch) {
  int jg = threadIdx.x + blockIdx.x * blockDim.x;
  int ig = threadIdx.y + blockIdx.y * blockDim.y;
  // Flattened element index; __mul24 multiplies the low 24 bits of each
  // operand, so ig and pitch must each fit in 24 bits.
  int idg = __mul24(ig, pitch) + jg;
  cs[idg] = sqrt( adiabatic_index*(adiabatic_index-1.0)*energy[idg]/dens[idg]);
}
// Compute the adiabatic sound speed field on the GPU:
// CS = sqrt(gamma*(gamma-1)*Energy/Rho), element-wise over the polar grid.
// Does nothing unless the global Adiabatic flag is set; the host RadiiStuff
// table is re-uploaded into the CRadiiStuff device array on every call,
// even when Adiabatic is false.
extern "C"
void CalcSoundSpeed_gpu (PolarGrid *Rho, PolarGrid *Energy, PolarGrid *CS) {
  int nr = CS->Nrad;  // radial cells
  int ns = CS->Nsec;  // azimuthal cells
  dim3 block = dim3(BLOCK_X, BLOCK_Y);
  // Round the grid up so every (azimuth, radius) cell is covered. The kernel
  // has no bounds guard, so rows past nr-1 are written by the extra threads
  // — presumably absorbed by allocation padding; TODO confirm.
  dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
  checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(14*(NRAD+1))*sizeof(double), 0, cudaMemcpyHostToDevice));
  if (Adiabatic) {
    // pitch is passed in elements (doubles), not bytes.
    kernel_adiabatic_soundspeed <<<grid, block>>> (Energy->gpu_field,
                                                   Rho->gpu_field,
                                                   CS->gpu_field,
                                                   ADIABATICINDEX,
                                                   CS->pitch/sizeof(double));
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent (and matches the hipified counterpart). The sync
    // is what makes the kernel-error check below meaningful.
    cudaDeviceSynchronize();
    getLastCudaError ("kernel_adiabatic_soundspeed failed");
    // azimuthal averaging
    //double csound[nr];
    //AzimuthalAverage (CS, csound);
    //for (int i = 0; i < nr; i++) {
    //  SOUNDSPEED[i] = csound[i]/(double)ns;
    //  CS2[i] = SOUNDSPEED[i]*SOUNDSPEED[i];
    //}
  }
}
|
b0a02e92c89e5c7710dd883b0323d7f145674c1f.hip | // !!! This is a file automatically generated by hipify!!!
// SPDX-FileCopyrightText: 2021 Benjamin Brock
//
// SPDX-License-Identifier: BSD-3-Clause
#define __thrust_compiler_fence() __sync_synchronize()
#include <cusp/io/matrix_market.h>
#include <cusp/csr_matrix.h>
#include <cusp/array2d.h>
#include <cusp/multiply.h>
#include <cusp/array2d.h>
#include <cusp/print.h>
#include <bcl/bcl.hpp>
#include <bcl/backends/experimental/nvshmem/backend.hpp>
#include <bcl/containers/experimental/cuda/CudaMatrix.hpp>
#include <bcl/containers/experimental/cuda/launch_kernel.cuh>
#include <thrust/sort.h>
#include <bcl/containers/experimental/cuda/CudaSPMatrix.hpp>
#include <bcl/containers/experimental/cuda/algorithms/algorithm.hpp>
#include <unordered_map>
#include <chrono>
#include <essl.h>
// Distributed SpMM benchmark over BCL with one-sided communication.
//
// Usage: <prog> <matrix-market-file> <num-vecs>
// Loads sparse A from the file, builds dense B (k x num_vecs, all ones) and
// C (m x num_vecs, zeros) across the process grid, runs the one-sided
// "bowns" SpMM C = A*B, reports per-phase timings, and (on rank 0) verifies
// the distributed result against a local single-GPU cuSPARSE SpMM.
//
// NOTE(review): argv[1] and argv[2] are dereferenced without an argc check
// — missing arguments crash instead of printing usage.
int main(int argc, char** argv) {
  // 16 is BCL::init's segment-size argument; units not visible from this
  // file — TODO confirm against BCL documentation (likely MB per rank).
  BCL::init(16);
  BCL::cuda::init();
  using T = float;
  using index_type = int;
  bool verify_result = true;
  std::string fname = std::string(argv[1]);
  // Number of vecs in SpMM (width of multi-vec, matrix)
  size_t num_vecs = std::atoi(argv[2]);
  auto matrix_shape = BCL::matrix_io::matrix_info(fname);
  size_t m = matrix_shape.shape[0];
  size_t k = matrix_shape.shape[1];
  size_t n = num_vecs;
  BCL::print("Choosing blocks...\n");
  // Pick block distributions for the three operands of (m x k)*(k x n).
  auto blocks = BCL::block_matmul(m, n, k);
  using allocator_type = BCL::cuda::bcl_allocator<T>;
  using indexing_type = BCL::cuda::RowMajorIndexing;
  BCL::print("Reading matrices...\n");
  BCL::cuda::SPMatrix<T, index_type> a(fname, std::move(blocks[0]));
  BCL::cuda::Matrix<T, indexing_type> b(k, n, std::move(blocks[1]));
  BCL::cuda::Matrix<T, indexing_type> c(m, n, std::move(blocks[2]));
  // B = all ones, C = zeros, then make sure every rank sees the fills.
  b = 1;
  c = 0;
  BCL::cuda::barrier();
  BCL::print("Info:\n");
  if (BCL::rank() == 0) {
    printf("A:\n");
    a.print_info();
    printf("B:\n");
    b.print_info();
    printf("C:\n");
    c.print_info();
  }
  // NOTE(review): `status` is never checked — a failed handle creation
  // would go unnoticed here.
  hipsparseStatus_t status = hipsparseCreate(&BCL::cuda::bcl_cusparse_handle_);
  // printf("A taking %lf GB, B %lf GB\n", 1.0e-9*a.my_mem(), 1.0e-9*b.my_mem());
  assert(a.grid_shape()[1] == b.grid_shape()[0]);
  // Time the distributed multiply, including the trailing barrier.
  auto begin = std::chrono::high_resolution_clock::now();
  BCL::cuda::gemm_bowns_onesided(a, b, c);
  BCL::cuda::barrier();
  auto end = std::chrono::high_resolution_clock::now();
  double duration = std::chrono::duration<double>(end - begin).count();
  // Per-phase statistics: max and min across ranks, then all-reduce the sums
  // so rank 0 can report the mean alongside the spread.
  double max_issue = BCL::allreduce(BCL::cuda::duration_issue, BCL::max<double>{});
  double max_sync = BCL::allreduce(BCL::cuda::duration_sync, BCL::max<double>{});
  double max_compute = BCL::allreduce(BCL::cuda::duration_compute, BCL::max<double>{});
  double max_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, BCL::max<double>{});
  double max_barrier = BCL::allreduce(BCL::cuda::duration_barrier, BCL::max<double>{});
  double min_issue = BCL::allreduce(BCL::cuda::duration_issue, BCL::min<double>{});
  double min_sync = BCL::allreduce(BCL::cuda::duration_sync, BCL::min<double>{});
  double min_compute = BCL::allreduce(BCL::cuda::duration_compute, BCL::min<double>{});
  double min_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, BCL::min<double>{});
  double min_barrier = BCL::allreduce(BCL::cuda::duration_barrier, BCL::min<double>{});
  BCL::cuda::duration_issue = BCL::allreduce(BCL::cuda::duration_issue, std::plus<double>{});
  BCL::cuda::duration_sync = BCL::allreduce(BCL::cuda::duration_sync, std::plus<double>{});
  BCL::cuda::duration_compute = BCL::allreduce(BCL::cuda::duration_compute, std::plus<double>{});
  BCL::cuda::duration_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, std::plus<double>{});
  BCL::cuda::duration_barrier = BCL::allreduce(BCL::cuda::duration_barrier, std::plus<double>{});
  BCL::print("SpMM took %lf s\n", duration);
  if (BCL::rank() == 0) {
    // Each line: mean across ranks, then (min -> max) spread.
    printf("duration_issue %lf (%lf -> %lf)\n",
           BCL::cuda::duration_issue / BCL::nprocs(),
           min_issue, max_issue);
    printf("duration_sync %lf (%lf -> %lf)\n",
           BCL::cuda::duration_sync / BCL::nprocs(),
           min_sync, max_sync);
    printf("duration_compute %lf (%lf -> %lf)\n",
           BCL::cuda::duration_compute / BCL::nprocs(),
           min_compute, max_compute);
    printf("duration_accumulate %lf (%lf -> %lf)\n",
           BCL::cuda::duration_accumulate / BCL::nprocs(),
           min_accumulate, max_accumulate);
    printf("duration_barrier %lf (%lf -> %lf)\n",
           BCL::cuda::duration_barrier / BCL::nprocs(),
           min_barrier, max_barrier);
  }
  BCL::barrier();
  fflush(stdout);
  BCL::barrier();
  if (BCL::rank() == 0 && verify_result) {
    // Single-GPU reference: load the whole matrix locally, run cuSPARSE
    // SpMM with the same all-ones B, and compare element-wise.
    fprintf(stderr, "Reading in matrix...\n");
    BCL::CSRMatrix<T, index_type> mat(fname);
    fprintf(stderr, "Copying to GPU...\n");
    auto local_a = BCL::cuda::to_gpu<T, index_type, allocator_type>(mat);
    fprintf(stderr, "Creating local b...\n");
    BCL::cuda::CudaMatrix<T, allocator_type, indexing_type> local_b({k, n});
    fprintf(stderr, "Creating local c...\n");
    BCL::cuda::CudaMatrix<T, allocator_type, indexing_type> local_c({m, n});
    fprintf(stderr, "Writing to matrices...\n");
    local_b = 1;
    local_c = 0;
    fprintf(stderr, "Doing local spmm...\n");
    BCL::cuda::spmm_cusparse(local_a, local_b, local_c);
    hipDeviceSynchronize();
    fprintf(stderr, "Getting C matrix...\n");
    auto distributed_c = c.get_matrix();
    std::vector<T> local_data(local_c.size());
    hipMemcpy(local_data.data(), local_c.data(), sizeof(T)*local_c.size(), hipMemcpyDeviceToHost);
    assert(distributed_c.size() == local_c.size());
    fprintf(stderr, "Checking accuracy...\n");
    T eps = 1.0e-5;  // absolute tolerance for the element-wise comparison
    size_t matching = 0;
    bool print = false;  // flip on for per-element diagnostic output
    for (size_t i = 0; i < c.shape()[0]; i++) {
      for (size_t j = 0; j < c.shape()[1]; j++) {
        size_t d_idx = i*c.shape()[1] + j;
        size_t l_idx = indexing_type().index(i, j, local_c.ld());
        if (std::abs(distributed_c[d_idx] - local_data[l_idx]) > eps) {
          // assert(false);
          if (print) {
            printf("O %2.2lf != %2.2lf ", distributed_c[d_idx], local_data[l_idx]);
          }
        } else {
          if (print) {
            printf("X %2.2lf == %2.2lf ", distributed_c[d_idx], local_data[l_idx]);
          }
          matching++;
        }
      }
      if (print) {
        printf("\n");
      }
    }
    /*
    for (size_t i = 0; i < distributed_c.size(); i++) {
      if (std::abs(distributed_c[i] - local_data[i]) > eps) {
        // fprintf(stderr, "[%lu] %f != %f\n", i, distributed_c[i], local_data[i]);
      } else {
        matching++;
      }
    }
    */
    printf("%lu / %lu (%lf%%) indices match.\n", matching, distributed_c.size(),
           100 * ((double) matching) / distributed_c.size());
    if (matching == distributed_c.size()) {
      printf("OK.\n");
    } else {
      printf("***FAILED!***\n");
    }
  }
  BCL::finalize();
  return 0;
}
| b0a02e92c89e5c7710dd883b0323d7f145674c1f.cu | // SPDX-FileCopyrightText: 2021 Benjamin Brock
//
// SPDX-License-Identifier: BSD-3-Clause
#define __thrust_compiler_fence() __sync_synchronize()
#include <cusp/io/matrix_market.h>
#include <cusp/csr_matrix.h>
#include <cusp/array2d.h>
#include <cusp/multiply.h>
#include <cusp/array2d.h>
#include <cusp/print.h>
#include <bcl/bcl.hpp>
#include <bcl/backends/experimental/nvshmem/backend.hpp>
#include <bcl/containers/experimental/cuda/CudaMatrix.hpp>
#include <bcl/containers/experimental/cuda/launch_kernel.cuh>
#include <thrust/sort.h>
#include <bcl/containers/experimental/cuda/CudaSPMatrix.hpp>
#include <bcl/containers/experimental/cuda/algorithms/algorithm.hpp>
#include <unordered_map>
#include <chrono>
#include <essl.h>
// Distributed SpMM benchmark over BCL with one-sided communication.
//
// Usage: <prog> <matrix-market-file> <num-vecs>
// Loads sparse A from the file, builds dense B (k x num_vecs, all ones) and
// C (m x num_vecs, zeros) across the process grid, runs the one-sided
// "bowns" SpMM C = A*B, reports per-phase timings, and (on rank 0) verifies
// the distributed result against a local single-GPU cuSPARSE SpMM.
//
// NOTE(review): argv[1] and argv[2] are dereferenced without an argc check
// — missing arguments crash instead of printing usage.
int main(int argc, char** argv) {
  // 16 is BCL::init's segment-size argument; units not visible from this
  // file — TODO confirm against BCL documentation (likely MB per rank).
  BCL::init(16);
  BCL::cuda::init();
  using T = float;
  using index_type = int;
  bool verify_result = true;
  std::string fname = std::string(argv[1]);
  // Number of vecs in SpMM (width of multi-vec, matrix)
  size_t num_vecs = std::atoi(argv[2]);
  auto matrix_shape = BCL::matrix_io::matrix_info(fname);
  size_t m = matrix_shape.shape[0];
  size_t k = matrix_shape.shape[1];
  size_t n = num_vecs;
  BCL::print("Choosing blocks...\n");
  // Pick block distributions for the three operands of (m x k)*(k x n).
  auto blocks = BCL::block_matmul(m, n, k);
  using allocator_type = BCL::cuda::bcl_allocator<T>;
  using indexing_type = BCL::cuda::RowMajorIndexing;
  BCL::print("Reading matrices...\n");
  BCL::cuda::SPMatrix<T, index_type> a(fname, std::move(blocks[0]));
  BCL::cuda::Matrix<T, indexing_type> b(k, n, std::move(blocks[1]));
  BCL::cuda::Matrix<T, indexing_type> c(m, n, std::move(blocks[2]));
  // B = all ones, C = zeros, then make sure every rank sees the fills.
  b = 1;
  c = 0;
  BCL::cuda::barrier();
  BCL::print("Info:\n");
  if (BCL::rank() == 0) {
    printf("A:\n");
    a.print_info();
    printf("B:\n");
    b.print_info();
    printf("C:\n");
    c.print_info();
  }
  // NOTE(review): `status` is never checked — a failed handle creation
  // would go unnoticed here.
  cusparseStatus_t status = cusparseCreate(&BCL::cuda::bcl_cusparse_handle_);
  // printf("A taking %lf GB, B %lf GB\n", 1.0e-9*a.my_mem(), 1.0e-9*b.my_mem());
  assert(a.grid_shape()[1] == b.grid_shape()[0]);
  // Time the distributed multiply, including the trailing barrier.
  auto begin = std::chrono::high_resolution_clock::now();
  BCL::cuda::gemm_bowns_onesided(a, b, c);
  BCL::cuda::barrier();
  auto end = std::chrono::high_resolution_clock::now();
  double duration = std::chrono::duration<double>(end - begin).count();
  // Per-phase statistics: max and min across ranks, then all-reduce the sums
  // so rank 0 can report the mean alongside the spread.
  double max_issue = BCL::allreduce(BCL::cuda::duration_issue, BCL::max<double>{});
  double max_sync = BCL::allreduce(BCL::cuda::duration_sync, BCL::max<double>{});
  double max_compute = BCL::allreduce(BCL::cuda::duration_compute, BCL::max<double>{});
  double max_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, BCL::max<double>{});
  double max_barrier = BCL::allreduce(BCL::cuda::duration_barrier, BCL::max<double>{});
  double min_issue = BCL::allreduce(BCL::cuda::duration_issue, BCL::min<double>{});
  double min_sync = BCL::allreduce(BCL::cuda::duration_sync, BCL::min<double>{});
  double min_compute = BCL::allreduce(BCL::cuda::duration_compute, BCL::min<double>{});
  double min_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, BCL::min<double>{});
  double min_barrier = BCL::allreduce(BCL::cuda::duration_barrier, BCL::min<double>{});
  BCL::cuda::duration_issue = BCL::allreduce(BCL::cuda::duration_issue, std::plus<double>{});
  BCL::cuda::duration_sync = BCL::allreduce(BCL::cuda::duration_sync, std::plus<double>{});
  BCL::cuda::duration_compute = BCL::allreduce(BCL::cuda::duration_compute, std::plus<double>{});
  BCL::cuda::duration_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, std::plus<double>{});
  BCL::cuda::duration_barrier = BCL::allreduce(BCL::cuda::duration_barrier, std::plus<double>{});
  BCL::print("SpMM took %lf s\n", duration);
  if (BCL::rank() == 0) {
    // Each line: mean across ranks, then (min -> max) spread.
    printf("duration_issue %lf (%lf -> %lf)\n",
           BCL::cuda::duration_issue / BCL::nprocs(),
           min_issue, max_issue);
    printf("duration_sync %lf (%lf -> %lf)\n",
           BCL::cuda::duration_sync / BCL::nprocs(),
           min_sync, max_sync);
    printf("duration_compute %lf (%lf -> %lf)\n",
           BCL::cuda::duration_compute / BCL::nprocs(),
           min_compute, max_compute);
    printf("duration_accumulate %lf (%lf -> %lf)\n",
           BCL::cuda::duration_accumulate / BCL::nprocs(),
           min_accumulate, max_accumulate);
    printf("duration_barrier %lf (%lf -> %lf)\n",
           BCL::cuda::duration_barrier / BCL::nprocs(),
           min_barrier, max_barrier);
  }
  BCL::barrier();
  fflush(stdout);
  BCL::barrier();
  if (BCL::rank() == 0 && verify_result) {
    // Single-GPU reference: load the whole matrix locally, run cuSPARSE
    // SpMM with the same all-ones B, and compare element-wise.
    fprintf(stderr, "Reading in matrix...\n");
    BCL::CSRMatrix<T, index_type> mat(fname);
    fprintf(stderr, "Copying to GPU...\n");
    auto local_a = BCL::cuda::to_gpu<T, index_type, allocator_type>(mat);
    fprintf(stderr, "Creating local b...\n");
    BCL::cuda::CudaMatrix<T, allocator_type, indexing_type> local_b({k, n});
    fprintf(stderr, "Creating local c...\n");
    BCL::cuda::CudaMatrix<T, allocator_type, indexing_type> local_c({m, n});
    fprintf(stderr, "Writing to matrices...\n");
    local_b = 1;
    local_c = 0;
    fprintf(stderr, "Doing local spmm...\n");
    BCL::cuda::spmm_cusparse(local_a, local_b, local_c);
    cudaDeviceSynchronize();
    fprintf(stderr, "Getting C matrix...\n");
    auto distributed_c = c.get_matrix();
    std::vector<T> local_data(local_c.size());
    cudaMemcpy(local_data.data(), local_c.data(), sizeof(T)*local_c.size(), cudaMemcpyDeviceToHost);
    assert(distributed_c.size() == local_c.size());
    fprintf(stderr, "Checking accuracy...\n");
    T eps = 1.0e-5;  // absolute tolerance for the element-wise comparison
    size_t matching = 0;
    bool print = false;  // flip on for per-element diagnostic output
    for (size_t i = 0; i < c.shape()[0]; i++) {
      for (size_t j = 0; j < c.shape()[1]; j++) {
        size_t d_idx = i*c.shape()[1] + j;
        size_t l_idx = indexing_type().index(i, j, local_c.ld());
        if (std::abs(distributed_c[d_idx] - local_data[l_idx]) > eps) {
          // assert(false);
          if (print) {
            printf("O %2.2lf != %2.2lf ", distributed_c[d_idx], local_data[l_idx]);
          }
        } else {
          if (print) {
            printf("X %2.2lf == %2.2lf ", distributed_c[d_idx], local_data[l_idx]);
          }
          matching++;
        }
      }
      if (print) {
        printf("\n");
      }
    }
    /*
    for (size_t i = 0; i < distributed_c.size(); i++) {
      if (std::abs(distributed_c[i] - local_data[i]) > eps) {
        // fprintf(stderr, "[%lu] %f != %f\n", i, distributed_c[i], local_data[i]);
      } else {
        matching++;
      }
    }
    */
    printf("%lu / %lu (%lf%%) indices match.\n", matching, distributed_c.size(),
           100 * ((double) matching) / distributed_c.size());
    if (matching == distributed_c.size()) {
      printf("OK.\n");
    } else {
      printf("***FAILED!***\n");
    }
  }
  BCL::finalize();
  return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.