hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
30e6dc983eb6596c8e4facd02668e5d46b58cce5.hip | // !!! This is a file automatically generated by hipify!!!
#include "layer.h"
#include <string.h>
// initialize: set all values to 0
void init_tensor(Tensor* const tensor)
{
memset(tensor, 0, sizeof(Tensor));
}
// allocate memory for tensor
// allocate GPU memory in GPU mode, or CPU memory in CPU mode
long int malloc_tensor_data(Tensor* const tensor)
{
const long int data_size = flatten_size(tensor);
#ifdef GPU
hipMalloc(&tensor->data, data_size * sizeof(real));
#else
tensor->data = (real*)malloc(data_size * sizeof(real));
#endif
return data_size * sizeof(real);
}
// deallocate memory & set all values to 0
void free_tensor_data(Tensor* const tensor)
{
#ifdef GPU
hipFree(tensor->data);
#else
free(tensor->data);
#endif
memset(tensor, 0, sizeof(Tensor));
}
// load binary data from file & store to CPU memory
// data: pointer to CPU memory for storing data
// if NULL, allocate new memory & load data & return pointer
real* load_data(const char* const filename,
int* const ndim,
int* const shape,
real* data)
{
FILE* fp = fopen(filename, "rb");
// load data shape
{
if ((int)fread(ndim, sizeof(int), 1, fp) < 1) {
printf("Error while reading ndim from %s\n", filename);
}
if ((int)fread(shape, sizeof(int), *ndim, fp) != *ndim) {
printf("Error while reading shape from %s\n", filename);
}
}
// compute total number of elements
{
const int ndim_ = *ndim;
int count = 1;
for (int i = 0; i < ndim_; ++i) {
count *= shape[i];
}
shape[ndim_] = count;
}
// memory allocation & load data
{
const int count = shape[*ndim];
if (data == NULL) {
data = (real*)malloc(count * sizeof(real));
}
if ((int)fread(data, sizeof(real), count, fp) != count) {
printf("Error while reading data from %s\n", filename);
}
// file close & return data
fclose(fp);
return data;
}
}
// load binary data from file & copy to memory where tensor occupies
// temp_data: pointer to CPU memory for loading data temporarily
// not used (i.e., can be NULL) if tensor occupies CPU memory
void load_tensor(const char* const filename,
Tensor* const tensor,
real* const temp_data)
{
int ndim;
int shape[g_max_ndim];
{
#ifdef GPU
long int data_size = 1;
load_data(filename, &ndim, shape, temp_data);
for (int i = 0; i < ndim; ++i) {
data_size *= shape[i];
}
if (data_size != flatten_size(tensor)) {
printf("[ERROR] Size mismatch: %s (%ld) != tensor (%ld)\n",
filename, data_size, flatten_size(tensor));
}
hipMemcpyAsync(tensor->data, temp_data, data_size * sizeof(real),
hipMemcpyHostToDevice);
#else
load_data(filename, &ndim, shape, tensor->data);
#endif
}
}
// save tensor data to binary file
// temp_data: pointer to CPU memory for storing data temporarily
// not used (i.e., can be NULL) if tensor occupies CPU memory
void save_tensor_data(const char* const filename,
const Tensor* const tensor,
real* const temp_data)
{
FILE* fp = fopen(filename, "wb");
real* p_temp_data;
{
#ifdef GPU
p_temp_data = temp_data;
hipMemcpyAsync(p_temp_data, tensor->data,
flatten_size(tensor) * sizeof(real),
hipMemcpyDeviceToHost);
#else
p_temp_data = tensor->data;
#endif
}
for (int n = 0; n < tensor->num_items; ++n)
{
int item_size = 1;
for (int i = 0; i < tensor->ndim; ++i) {
item_size *= tensor->shape[n][i];
}
fwrite(&tensor->ndim, sizeof(int), 1, fp);
fwrite(tensor->shape[n], sizeof(int), tensor->ndim, fp);
fwrite(p_temp_data, sizeof(real), item_size, fp);
p_temp_data += item_size;
}
fclose(fp);
}
// total number of elements in a tensor
long int flatten_size(const Tensor* const tensor)
{
long int total_size = 0;
for (int n = 0; n < tensor->num_items; ++n) {
long int size = 1;
for (int d = 0; d < tensor->ndim; ++d) {
size *= tensor->shape[n][d];
}
total_size += size;
}
return total_size;
}
// print shapes for all batch items in tensor
void print_tensor_info(const char* const name,
const Tensor* const tensor)
{
#ifndef DEBUG
{
printf("%s: ", name);
if (tensor->num_items > 1) {
printf("batch size = %d\n", tensor->num_items);
for (int n = 0; n < tensor->num_items; ++n) {
printf(" ");
for (int i = 0; i < tensor->ndim - 1; ++i) {
printf("%d x ", tensor->shape[n][i]);
}
printf("%d, ", tensor->shape[n][tensor->ndim - 1]);
printf("start = %d\n", tensor->start[n]);
}
}
else {
for (int i = 0; i < tensor->ndim - 1; ++i) {
printf("%d x ", tensor->shape[0][i]);
}
printf("%d\n", tensor->shape[0][tensor->ndim - 1]);
}
}
#endif
return;
}
| 30e6dc983eb6596c8e4facd02668e5d46b58cce5.cu | #include "layer.h"
#include <string.h>
// initialize: set all values to 0
void init_tensor(Tensor* const tensor)
{
memset(tensor, 0, sizeof(Tensor));
}
// allocate memory for tensor
// allocate GPU memory in GPU mode, or CPU memory in CPU mode
long int malloc_tensor_data(Tensor* const tensor)
{
const long int data_size = flatten_size(tensor);
#ifdef GPU
cudaMalloc(&tensor->data, data_size * sizeof(real));
#else
tensor->data = (real*)malloc(data_size * sizeof(real));
#endif
return data_size * sizeof(real);
}
// deallocate memory & set all values to 0
void free_tensor_data(Tensor* const tensor)
{
#ifdef GPU
cudaFree(tensor->data);
#else
free(tensor->data);
#endif
memset(tensor, 0, sizeof(Tensor));
}
// load binary data from file & store to CPU memory
// data: pointer to CPU memory for storing data
// if NULL, allocate new memory & load data & return pointer
real* load_data(const char* const filename,
int* const ndim,
int* const shape,
real* data)
{
FILE* fp = fopen(filename, "rb");
// load data shape
{
if ((int)fread(ndim, sizeof(int), 1, fp) < 1) {
printf("Error while reading ndim from %s\n", filename);
}
if ((int)fread(shape, sizeof(int), *ndim, fp) != *ndim) {
printf("Error while reading shape from %s\n", filename);
}
}
// compute total number of elements
{
const int ndim_ = *ndim;
int count = 1;
for (int i = 0; i < ndim_; ++i) {
count *= shape[i];
}
shape[ndim_] = count;
}
// memory allocation & load data
{
const int count = shape[*ndim];
if (data == NULL) {
data = (real*)malloc(count * sizeof(real));
}
if ((int)fread(data, sizeof(real), count, fp) != count) {
printf("Error while reading data from %s\n", filename);
}
// file close & return data
fclose(fp);
return data;
}
}
// load binary data from file & copy to memory where tensor occupies
// temp_data: pointer to CPU memory for loading data temporarily
// not used (i.e., can be NULL) if tensor occupies CPU memory
void load_tensor(const char* const filename,
Tensor* const tensor,
real* const temp_data)
{
int ndim;
int shape[g_max_ndim];
{
#ifdef GPU
long int data_size = 1;
load_data(filename, &ndim, shape, temp_data);
for (int i = 0; i < ndim; ++i) {
data_size *= shape[i];
}
if (data_size != flatten_size(tensor)) {
printf("[ERROR] Size mismatch: %s (%ld) != tensor (%ld)\n",
filename, data_size, flatten_size(tensor));
}
cudaMemcpyAsync(tensor->data, temp_data, data_size * sizeof(real),
cudaMemcpyHostToDevice);
#else
load_data(filename, &ndim, shape, tensor->data);
#endif
}
}
// save tensor data to binary file
// temp_data: pointer to CPU memory for storing data temporarily
// not used (i.e., can be NULL) if tensor occupies CPU memory
void save_tensor_data(const char* const filename,
const Tensor* const tensor,
real* const temp_data)
{
FILE* fp = fopen(filename, "wb");
real* p_temp_data;
{
#ifdef GPU
p_temp_data = temp_data;
cudaMemcpyAsync(p_temp_data, tensor->data,
flatten_size(tensor) * sizeof(real),
cudaMemcpyDeviceToHost);
#else
p_temp_data = tensor->data;
#endif
}
for (int n = 0; n < tensor->num_items; ++n)
{
int item_size = 1;
for (int i = 0; i < tensor->ndim; ++i) {
item_size *= tensor->shape[n][i];
}
fwrite(&tensor->ndim, sizeof(int), 1, fp);
fwrite(tensor->shape[n], sizeof(int), tensor->ndim, fp);
fwrite(p_temp_data, sizeof(real), item_size, fp);
p_temp_data += item_size;
}
fclose(fp);
}
// total number of elements in a tensor
long int flatten_size(const Tensor* const tensor)
{
long int total_size = 0;
for (int n = 0; n < tensor->num_items; ++n) {
long int size = 1;
for (int d = 0; d < tensor->ndim; ++d) {
size *= tensor->shape[n][d];
}
total_size += size;
}
return total_size;
}
// print shapes for all batch items in tensor
void print_tensor_info(const char* const name,
const Tensor* const tensor)
{
#ifndef DEBUG
{
printf("%s: ", name);
if (tensor->num_items > 1) {
printf("batch size = %d\n", tensor->num_items);
for (int n = 0; n < tensor->num_items; ++n) {
printf(" ");
for (int i = 0; i < tensor->ndim - 1; ++i) {
printf("%d x ", tensor->shape[n][i]);
}
printf("%d, ", tensor->shape[n][tensor->ndim - 1]);
printf("start = %d\n", tensor->start[n]);
}
}
else {
for (int i = 0; i < tensor->ndim - 1; ++i) {
printf("%d x ", tensor->shape[0][i]);
}
printf("%d\n", tensor->shape[0][tensor->ndim - 1]);
}
}
#endif
return;
}
|
ebf38dac03cc00f5b94080a597380f633424e9ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "Mandelbrot2D.cuh"
#include "hipComplex.cuh"
#include <GL/freeglut.h>
#include <GL/freeglut_ext.h>
#include <stdio.h>
#include <time.h>
__global__ void kernel(byte* buffer, const int side, const float sqrBailout, const float p, const int iters)
{
int offset = threadIdx.x + blockDim.x * blockIdx.x;
if (offset >= side * side)
return;
int x = offset % side;
int y = offset / side;
// Compute point at this position
int halfSide = side >> 1;
float jx = 2.0f * (float)(x - halfSide) / halfSide;
float jy = 2.0f * (float)(y - halfSide) / halfSide;
//jx -= 0.5f;
hipComplex c(jx, jy);
hipComplex z(jx, jy);
// Iterating
int i;
for (i = 0; i < iters; ++i)
{
z = (z ^ p) + c;
if (z.sqrMagnitude() > sqrBailout)
break;
}
float k = (float)i / iters;
// Setting point color
buffer[offset] = (byte)(k * 255);
}
bool Mandelbrot2D::compute(size_t width, size_t height, int iters, float setScalling)
{
sqrBailout = powf(4.0, 1.0 / (power - 1.0));
if (setScalling < 1.0)
setScalling = 1.0;
width *= setScalling;
height *= setScalling;
if (width > fMaxFractalSize)
width = fMaxFractalSize;
if (height > fMaxFractalSize)
height = fMaxFractalSize;
this->setScalling = setScalling;
if (points)
delete[] points;
this->width = width;
this->height = height;
int side = MAX(width, height);
const size_t sz = side * side;
points = new byte[sz];
byte* dev_buffer;
if (hipMalloc((void**)&dev_buffer, sz) != hipSuccess)
{
printf("Error on creating buffer of pixels in GPU\n");
return false;
}
printf("Rendering %d^2\n", side);
int threads = 1024;
int blocks = (sz + threads - 1) / threads;
clock_t tStart = clock();
kernel << <blocks, threads >> > (dev_buffer, side, sqrBailout, power, iters);
hipDeviceSynchronize();
clock_t tFinish = clock();
double tDelta = (double)(tFinish - tStart) / CLOCKS_PER_SEC;
printf("It tooks %.3f seconds\n", tDelta);
printf("Moving\n");
if (hipMemcpy((void*)points, dev_buffer, sz, hipMemcpyDeviceToHost) != hipSuccess)
{
printf("Error on getting buffer of pixels from GPU\n");
return false;
}
hipFree(dev_buffer);
return true;
}
void Mandelbrot2D::draw()
{
glBegin(GL_POINTS);
int side = MAX(width, height);
int shiftX = (width - side) / 2 - width / 2;
int shiftY = (height - side) / 2 - height / 2;
for (int y = 0; y < side; ++y)
{
for (int x = 0; x < side; ++x)
{
int i = side * y + x;
int k = points[i];
byte kRed = colorSpectrum[k][0];
byte kGreen = colorSpectrum[k][1];
byte kBlue = colorSpectrum[k][2];
glColor3ub(
kRed,
kGreen,
kBlue
);
glVertex2f(
(shiftX + x) / setScalling,
(shiftY + y) / setScalling
);
}
}
glEnd();
}
void Mandelbrot2D::initColorSpectrum(int index)
{
switch (index)
{
case 0:
initColorSpectrum0();
break;
case 1:
initColorSpectrum1();
break;
case 2:
initColorSpectrum2();
break;
}
}
void Mandelbrot2D::initColorSpectrum0()
{
for (int i = 0; i < 256; ++i)
{
float k = i / 255.0;
float b = sqrtf(k);
byte kRed = (byte)(k * 255);
byte kGreen = (byte)(k * k * 255);
byte kBlue = (byte)((1 - 4 * k * (1 - k)) * 255);
colorSpectrum[i][0] = kRed * b;
colorSpectrum[i][1] = kGreen * b;
colorSpectrum[i][2] = kBlue * b;
}
}
void Mandelbrot2D::initColorSpectrum1()
{
for (int i = 0; i < 256; ++i)
{
float k = i / 255.0;
float b = 4 * k * (1 - k);
byte kRed = (byte)((k < 0.5 ? 2 * k : (k < 0.75 ? 1.0 : 4 - 4 * k)) * 255);
byte kGreen = (byte)((k < 0.5 ? 2 * k : (k < 0.75 ? 1.5 - k : 3 - 3 * k)) * 255);
byte kBlue = (byte)((k < 0.5 ? 1 : 2 - 2 * k) * 255);
colorSpectrum[i][0] = kRed * b;
colorSpectrum[i][1] = kGreen * b;
colorSpectrum[i][2] = kBlue * b;
}
}
void Mandelbrot2D::initColorSpectrum2()
{
for (int i = 0; i < 256; ++i)
{
float k = i / 255.0;
float b = 4 * k * (1 - k);
byte kRed = (byte)((k < 0.5 ? 0 : (k < 0.75 ? 4 * k - 2 : 1.0)) * 255);
byte kGreen = (byte)((k < 0.5 ? 0 : (k < 0.75 ? 4 * k - 2 : 1.0)) * 255);
byte kBlue = (byte)((k < 0.5 ? 1 : 2 - 2 * k) * 255);
colorSpectrum[i][0] = kRed * b;
colorSpectrum[i][1] = kGreen * b;
colorSpectrum[i][2] = kBlue * b;
}
} | ebf38dac03cc00f5b94080a597380f633424e9ea.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "Mandelbrot2D.cuh"
#include "cuComplex.cuh"
#include <GL/freeglut.h>
#include <GL/freeglut_ext.h>
#include <stdio.h>
#include <time.h>
__global__ void kernel(byte* buffer, const int side, const float sqrBailout, const float p, const int iters)
{
int offset = threadIdx.x + blockDim.x * blockIdx.x;
if (offset >= side * side)
return;
int x = offset % side;
int y = offset / side;
// Compute point at this position
int halfSide = side >> 1;
float jx = 2.0f * (float)(x - halfSide) / halfSide;
float jy = 2.0f * (float)(y - halfSide) / halfSide;
//jx -= 0.5f;
cuComplex c(jx, jy);
cuComplex z(jx, jy);
// Iterating
int i;
for (i = 0; i < iters; ++i)
{
z = (z ^ p) + c;
if (z.sqrMagnitude() > sqrBailout)
break;
}
float k = (float)i / iters;
// Setting point color
buffer[offset] = (byte)(k * 255);
}
bool Mandelbrot2D::compute(size_t width, size_t height, int iters, float setScalling)
{
sqrBailout = powf(4.0, 1.0 / (power - 1.0));
if (setScalling < 1.0)
setScalling = 1.0;
width *= setScalling;
height *= setScalling;
if (width > fMaxFractalSize)
width = fMaxFractalSize;
if (height > fMaxFractalSize)
height = fMaxFractalSize;
this->setScalling = setScalling;
if (points)
delete[] points;
this->width = width;
this->height = height;
int side = MAX(width, height);
const size_t sz = side * side;
points = new byte[sz];
byte* dev_buffer;
if (cudaMalloc((void**)&dev_buffer, sz) != cudaSuccess)
{
printf("Error on creating buffer of pixels in GPU\n");
return false;
}
printf("Rendering %d^2\n", side);
int threads = 1024;
int blocks = (sz + threads - 1) / threads;
clock_t tStart = clock();
kernel << <blocks, threads >> > (dev_buffer, side, sqrBailout, power, iters);
cudaThreadSynchronize();
clock_t tFinish = clock();
double tDelta = (double)(tFinish - tStart) / CLOCKS_PER_SEC;
printf("It tooks %.3f seconds\n", tDelta);
printf("Moving\n");
if (cudaMemcpy((void*)points, dev_buffer, sz, cudaMemcpyDeviceToHost) != cudaSuccess)
{
printf("Error on getting buffer of pixels from GPU\n");
return false;
}
cudaFree(dev_buffer);
return true;
}
void Mandelbrot2D::draw()
{
glBegin(GL_POINTS);
int side = MAX(width, height);
int shiftX = (width - side) / 2 - width / 2;
int shiftY = (height - side) / 2 - height / 2;
for (int y = 0; y < side; ++y)
{
for (int x = 0; x < side; ++x)
{
int i = side * y + x;
int k = points[i];
byte kRed = colorSpectrum[k][0];
byte kGreen = colorSpectrum[k][1];
byte kBlue = colorSpectrum[k][2];
glColor3ub(
kRed,
kGreen,
kBlue
);
glVertex2f(
(shiftX + x) / setScalling,
(shiftY + y) / setScalling
);
}
}
glEnd();
}
void Mandelbrot2D::initColorSpectrum(int index)
{
switch (index)
{
case 0:
initColorSpectrum0();
break;
case 1:
initColorSpectrum1();
break;
case 2:
initColorSpectrum2();
break;
}
}
void Mandelbrot2D::initColorSpectrum0()
{
for (int i = 0; i < 256; ++i)
{
float k = i / 255.0;
float b = sqrtf(k);
byte kRed = (byte)(k * 255);
byte kGreen = (byte)(k * k * 255);
byte kBlue = (byte)((1 - 4 * k * (1 - k)) * 255);
colorSpectrum[i][0] = kRed * b;
colorSpectrum[i][1] = kGreen * b;
colorSpectrum[i][2] = kBlue * b;
}
}
void Mandelbrot2D::initColorSpectrum1()
{
for (int i = 0; i < 256; ++i)
{
float k = i / 255.0;
float b = 4 * k * (1 - k);
byte kRed = (byte)((k < 0.5 ? 2 * k : (k < 0.75 ? 1.0 : 4 - 4 * k)) * 255);
byte kGreen = (byte)((k < 0.5 ? 2 * k : (k < 0.75 ? 1.5 - k : 3 - 3 * k)) * 255);
byte kBlue = (byte)((k < 0.5 ? 1 : 2 - 2 * k) * 255);
colorSpectrum[i][0] = kRed * b;
colorSpectrum[i][1] = kGreen * b;
colorSpectrum[i][2] = kBlue * b;
}
}
void Mandelbrot2D::initColorSpectrum2()
{
for (int i = 0; i < 256; ++i)
{
float k = i / 255.0;
float b = 4 * k * (1 - k);
byte kRed = (byte)((k < 0.5 ? 0 : (k < 0.75 ? 4 * k - 2 : 1.0)) * 255);
byte kGreen = (byte)((k < 0.5 ? 0 : (k < 0.75 ? 4 * k - 2 : 1.0)) * 255);
byte kBlue = (byte)((k < 0.5 ? 1 : 2 - 2 * k) * 255);
colorSpectrum[i][0] = kRed * b;
colorSpectrum[i][1] = kGreen * b;
colorSpectrum[i][2] = kBlue * b;
}
} |
821b5d5cc54408ec1b731fc49bae79164cbd401c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <iostream>
#include "caffe/common.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/util/deformable_im2col.hpp"
namespace caffe {
template <typename Dtype>
__device__ Dtype deformable_im2col_bilinear(const Dtype* bottom_data, const int data_width,
const int height, const int width, Dtype h, Dtype w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = (Dtype)h_low;
}
else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = (Dtype)w_low;
}
else {
w_high = w_low + 1;
}
Dtype lh = h - h_low;
Dtype lw = w - w_low;
Dtype hh = 1 - lh, hw = 1 - lw;
Dtype v1 = bottom_data[h_low * data_width + w_low];
Dtype v2 = bottom_data[h_low * data_width + w_high];
Dtype v3 = bottom_data[h_high * data_width + w_low];
Dtype v4 = bottom_data[h_high * data_width + w_high];
Dtype w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
Dtype val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename Dtype>
__device__ Dtype get_gradient_weight(Dtype argmax_h, Dtype argmax_w,
const int h, const int w, const int height, const int width) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) {
//empty
return 0;
}
argmax_h = max(argmax_h, (Dtype)0.0f);
argmax_w = max(argmax_w, (Dtype)0.0f);
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (Dtype)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1)
{
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (Dtype)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
Dtype weight = 0;
if (h == argmax_h_low) {
if (w == argmax_w_low) {
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
}
} else if (h == argmax_h_high) {
if (w == argmax_w_low) {
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
}
}
return weight;
}
template <typename Dtype>
__device__ Dtype get_coordinate_weight(Dtype argmax_h, Dtype argmax_w,
const int height, const int width, const Dtype* im_data,
const int data_width, const int bp_dir) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width)
{
//empty
return 0;
}
if (argmax_h < 0) argmax_h = 0;
if (argmax_w < 0) argmax_w = 0;
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (Dtype)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (Dtype)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
Dtype weight = 0;
if (bp_dir == 0) {
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
/*!
* \brief deformable_im2col gpu kernel.
* DO NOT call this directly. Use wrapper function im2col() instead;
*/
template <typename Dtype>
__global__ void deformable_im2col_gpu_kernel(const int n, const Dtype* data_im, const Dtype* data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int c_im = (index / width_col) / height_col;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col + (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im + (c_im * height + h_in) * width + w_in;
const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const Dtype offset_h = data_offset_ptr[data_offset_h_ptr];
const Dtype offset_w = data_offset_ptr[data_offset_w_ptr];
Dtype val = static_cast<Dtype>(0);
const Dtype h_im = h_in + i * dilation_h + offset_h;
const Dtype w_im = w_in + j * dilation_w + offset_w;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
const Dtype map_h = i * dilation_h + offset_h;
const Dtype map_w = j * dilation_w + offset_w;
const int cur_height = height - h_in;
const int cur_width = width - w_in;
val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
}
*data_col_ptr = val;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void deformable_im2col_gpu(const Dtype* data_im, const Dtype* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const uint32_t deformable_group,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
int channel_per_deformable_group = height / deformable_group;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, data_offset, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
template void deformable_im2col_gpu<float>(const float* data_im, const float* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const uint32_t deformable_group,
float* data_col);
template void deformable_im2col_gpu<double>(const double* data_im, const double* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const uint32_t deformable_group,
double* data_col);
template <typename Dtype>
__global__ void deformable_col2im_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
int channel_per_deformable_group,
int height_col, int width_col,
Dtype* grad_im) {
CUDA_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col) % kernel_w;
const int i = (index / width_col / height_col / kernel_w) % kernel_h;
const int c = index / width_col / height_col / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const Dtype offset_h = data_offset_ptr[data_offset_h_ptr];
const Dtype offset_w = data_offset_ptr[data_offset_w_ptr];
const Dtype cur_inv_h_data = h_in + i * dilation_h + offset_h;
const Dtype cur_inv_w_data = w_in + j * dilation_w + offset_w;
const Dtype cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1
) {
int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx;
Dtype weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
caffe_gpu_atomic_add(weight * cur_top_grad, grad_im + cur_bottom_grad_pos);
}
}
}
}
}
template <typename Dtype>
void deformable_col2im_gpu(const Dtype* data_col, const Dtype* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const uint32_t deformable_group, Dtype* grad_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
int channel_per_deformable_group = height / deformable_group;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( deformable_col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, data_offset, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, height_col, width_col, grad_im);
CUDA_POST_KERNEL_CHECK;
}
template void deformable_col2im_gpu<float>(const float* data_col, const float* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const uint32_t deformable_group, float* grad_im);
template void deformable_col2im_gpu<double>(const double* data_col, const double* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const uint32_t deformable_group, double* grad_im);
template <typename Dtype>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const Dtype* data_col,
const Dtype* data_im, const Dtype* data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int height_col, const int width_col,
Dtype* grad_offset) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = index / width_col / height_col;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const Dtype* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * width_col * height_col;
const Dtype* data_im_ptr = data_im + deformable_group_index * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
const int col_pos = ((col_c * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col) % kernel_w;
int i = (col_pos / width_col / height_col / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const Dtype offset_h = data_offset_ptr[data_offset_h_ptr];
const Dtype offset_w = data_offset_ptr[data_offset_w_ptr];
Dtype inv_h = h_in + i * dilation_h + offset_h;
Dtype inv_w = w_in + j * dilation_w + offset_w;
if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -1;
}
const Dtype weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
template <typename Dtype>
void deformable_col2im_coord_gpu(const Dtype* data_col, const Dtype* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const uint32_t deformable_group, Dtype* grad_offset) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = height_col * width_col * 2 * kernel_h * kernel_h * deformable_group;
int channel_per_deformable_group = height / deformable_group;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( deformable_col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, data_offset, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, height_col, width_col, grad_offset);
CUDA_POST_KERNEL_CHECK;
}
template void deformable_col2im_coord_gpu<float>(const float* data_col, const float* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const uint32_t deformable_group, float* grad_offset);
template void deformable_col2im_coord_gpu<double>(const double* data_col, const double* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const uint32_t deformable_group, double* grad_offset);
}
| 821b5d5cc54408ec1b731fc49bae79164cbd401c.cu | #include <algorithm>
#include <iostream>
#include "caffe/common.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/util/deformable_im2col.hpp"
namespace caffe {
template <typename Dtype>
__device__ Dtype deformable_im2col_bilinear(const Dtype* bottom_data, const int data_width,
const int height, const int width, Dtype h, Dtype w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = (Dtype)h_low;
}
else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = (Dtype)w_low;
}
else {
w_high = w_low + 1;
}
Dtype lh = h - h_low;
Dtype lw = w - w_low;
Dtype hh = 1 - lh, hw = 1 - lw;
Dtype v1 = bottom_data[h_low * data_width + w_low];
Dtype v2 = bottom_data[h_low * data_width + w_high];
Dtype v3 = bottom_data[h_high * data_width + w_low];
Dtype v4 = bottom_data[h_high * data_width + w_high];
Dtype w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
Dtype val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename Dtype>
__device__ Dtype get_gradient_weight(Dtype argmax_h, Dtype argmax_w,
const int h, const int w, const int height, const int width) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) {
//empty
return 0;
}
argmax_h = max(argmax_h, (Dtype)0.0f);
argmax_w = max(argmax_w, (Dtype)0.0f);
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (Dtype)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1)
{
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (Dtype)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
Dtype weight = 0;
if (h == argmax_h_low) {
if (w == argmax_w_low) {
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
}
} else if (h == argmax_h_high) {
if (w == argmax_w_low) {
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
}
}
return weight;
}
template <typename Dtype>
__device__ Dtype get_coordinate_weight(Dtype argmax_h, Dtype argmax_w,
const int height, const int width, const Dtype* im_data,
const int data_width, const int bp_dir) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width)
{
//empty
return 0;
}
if (argmax_h < 0) argmax_h = 0;
if (argmax_w < 0) argmax_w = 0;
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (Dtype)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (Dtype)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
Dtype weight = 0;
if (bp_dir == 0) {
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
/*!
* \brief deformable_im2col gpu kernel.
* DO NOT call this directly. Use wrapper function im2col() instead;
*/
template <typename Dtype>
__global__ void deformable_im2col_gpu_kernel(const int n, const Dtype* data_im, const Dtype* data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int c_im = (index / width_col) / height_col;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col + (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im + (c_im * height + h_in) * width + w_in;
const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const Dtype offset_h = data_offset_ptr[data_offset_h_ptr];
const Dtype offset_w = data_offset_ptr[data_offset_w_ptr];
Dtype val = static_cast<Dtype>(0);
const Dtype h_im = h_in + i * dilation_h + offset_h;
const Dtype w_im = w_in + j * dilation_w + offset_w;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
const Dtype map_h = i * dilation_h + offset_h;
const Dtype map_w = j * dilation_w + offset_w;
const int cur_height = height - h_in;
const int cur_width = width - w_in;
val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
}
*data_col_ptr = val;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void deformable_im2col_gpu(const Dtype* data_im, const Dtype* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const uint32_t deformable_group,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
int channel_per_deformable_group = height / deformable_group;
// NOLINT_NEXT_LINE(whitespace/operators)
deformable_im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, data_offset, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
template void deformable_im2col_gpu<float>(const float* data_im, const float* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const uint32_t deformable_group,
float* data_col);
template void deformable_im2col_gpu<double>(const double* data_im, const double* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const uint32_t deformable_group,
double* data_col);
template <typename Dtype>
__global__ void deformable_col2im_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
int channel_per_deformable_group,
int height_col, int width_col,
Dtype* grad_im) {
CUDA_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col) % kernel_w;
const int i = (index / width_col / height_col / kernel_w) % kernel_h;
const int c = index / width_col / height_col / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const Dtype offset_h = data_offset_ptr[data_offset_h_ptr];
const Dtype offset_w = data_offset_ptr[data_offset_w_ptr];
const Dtype cur_inv_h_data = h_in + i * dilation_h + offset_h;
const Dtype cur_inv_w_data = w_in + j * dilation_w + offset_w;
const Dtype cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1
) {
int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx;
Dtype weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
caffe_gpu_atomic_add(weight * cur_top_grad, grad_im + cur_bottom_grad_pos);
}
}
}
}
}
template <typename Dtype>
void deformable_col2im_gpu(const Dtype* data_col, const Dtype* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const uint32_t deformable_group, Dtype* grad_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
int channel_per_deformable_group = height / deformable_group;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
deformable_col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, data_offset, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, height_col, width_col, grad_im);
CUDA_POST_KERNEL_CHECK;
}
template void deformable_col2im_gpu<float>(const float* data_col, const float* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const uint32_t deformable_group, float* grad_im);
template void deformable_col2im_gpu<double>(const double* data_col, const double* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const uint32_t deformable_group, double* grad_im);
template <typename Dtype>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const Dtype* data_col,
const Dtype* data_im, const Dtype* data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int height_col, const int width_col,
Dtype* grad_offset) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = index / width_col / height_col;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const Dtype* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * width_col * height_col;
const Dtype* data_im_ptr = data_im + deformable_group_index * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
const int col_pos = ((col_c * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col) % kernel_w;
int i = (col_pos / width_col / height_col / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const Dtype offset_h = data_offset_ptr[data_offset_h_ptr];
const Dtype offset_w = data_offset_ptr[data_offset_w_ptr];
Dtype inv_h = h_in + i * dilation_h + offset_h;
Dtype inv_w = w_in + j * dilation_w + offset_w;
if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -1;
}
const Dtype weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
template <typename Dtype>
void deformable_col2im_coord_gpu(const Dtype* data_col, const Dtype* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const uint32_t deformable_group, Dtype* grad_offset) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = height_col * width_col * 2 * kernel_h * kernel_h * deformable_group;
int channel_per_deformable_group = height / deformable_group;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
deformable_col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, data_offset, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, height_col, width_col, grad_offset);
CUDA_POST_KERNEL_CHECK;
}
template void deformable_col2im_coord_gpu<float>(const float* data_col, const float* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const uint32_t deformable_group, float* grad_offset);
template void deformable_col2im_coord_gpu<double>(const double* data_col, const double* data_offset, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const uint32_t deformable_group, double* grad_offset);
}
|
2ae68dd7a3020e50f0eda8baefd7fa1e4deb5afa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <random>
#include <sys/time.h>
#define SEED 123
#define MARGIN 1e-6
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
float Uniform(){
std::default_random_engine generator;
std::uniform_real_distribution<float> uniform(-10,10);
return uniform(generator);
}
__host__ __device__ float gen_random(int id, int iter, int NUM_PARTICLES) {
return (SEED*id+iter) % NUM_PARTICLES;
}
class Particle {
public:
float3 position,velocity;
Particle() {
position.x=Uniform();
position.y=Uniform();
position.z=Uniform();
velocity.x=Uniform()/4;
velocity.y=Uniform()/4;
velocity.z=Uniform()/4;
}
__device__ __host__ void position_update() {
position.x+=velocity.x;
position.y+=velocity.y;
position.z+=velocity.z;
}
};
__global__ void one_step (Particle* particles,int iter,int NUM_PARTICLES){
int id =blockIdx.x*blockDim.x+threadIdx.x;
if(id<NUM_PARTICLES){
particles[id].position_update();
particles[id].velocity.x+=gen_random(id, iter, NUM_PARTICLES)/5;
particles[id].velocity.y+=gen_random(id, iter, NUM_PARTICLES)/4;
particles[id].velocity.z+=gen_random(id, iter, NUM_PARTICLES)/3;
}
}
int main(int argc, char* argv[]) {
double start,gpu_time=0;
int NUM_PARTICLES = 10000000;
int NUM_ITERATIONS = 100;
int BLOCK_SIZE = 256;
// printf("NUM_PARTICLES:%d\nNUM_ITERATIONS:%d\nBLOCK_SIZE:%d\n",NUM_PARTICLES,NUM_ITERATIONS,BLOCK_SIZE);
int nBytes=sizeof(Particle)*NUM_PARTICLES;
int grid_size =(NUM_PARTICLES+BLOCK_SIZE-1)/BLOCK_SIZE;
Particle* particles=new Particle[NUM_PARTICLES];
start=cpuSecond();
Particle* d_particles;
hipMalloc(&d_particles, nBytes);
for(int i=0;i<NUM_ITERATIONS;i++){
hipMemcpy(d_particles, particles, nBytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( one_step), dim3(grid_size),dim3(BLOCK_SIZE), 0, 0, d_particles,i,NUM_ITERATIONS);
hipDeviceSynchronize();
hipMemcpy(particles, d_particles, nBytes, hipMemcpyDeviceToHost);
}
gpu_time+=cpuSecond()-start;
printf("GPU costs:%lfs\n",gpu_time);
hipFree(d_particles);
} | 2ae68dd7a3020e50f0eda8baefd7fa1e4deb5afa.cu | #include <stdio.h>
#include <random>
#include <sys/time.h>
#define SEED 123
#define MARGIN 1e-6
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
float Uniform(){
std::default_random_engine generator;
std::uniform_real_distribution<float> uniform(-10,10);
return uniform(generator);
}
__host__ __device__ float gen_random(int id, int iter, int NUM_PARTICLES) {
return (SEED*id+iter) % NUM_PARTICLES;
}
class Particle {
public:
float3 position,velocity;
Particle() {
position.x=Uniform();
position.y=Uniform();
position.z=Uniform();
velocity.x=Uniform()/4;
velocity.y=Uniform()/4;
velocity.z=Uniform()/4;
}
__device__ __host__ void position_update() {
position.x+=velocity.x;
position.y+=velocity.y;
position.z+=velocity.z;
}
};
__global__ void one_step (Particle* particles,int iter,int NUM_PARTICLES){
int id =blockIdx.x*blockDim.x+threadIdx.x;
if(id<NUM_PARTICLES){
particles[id].position_update();
particles[id].velocity.x+=gen_random(id, iter, NUM_PARTICLES)/5;
particles[id].velocity.y+=gen_random(id, iter, NUM_PARTICLES)/4;
particles[id].velocity.z+=gen_random(id, iter, NUM_PARTICLES)/3;
}
}
int main(int argc, char* argv[]) {
double start,gpu_time=0;
int NUM_PARTICLES = 10000000;
int NUM_ITERATIONS = 100;
int BLOCK_SIZE = 256;
// printf("NUM_PARTICLES:%d\nNUM_ITERATIONS:%d\nBLOCK_SIZE:%d\n",NUM_PARTICLES,NUM_ITERATIONS,BLOCK_SIZE);
int nBytes=sizeof(Particle)*NUM_PARTICLES;
int grid_size =(NUM_PARTICLES+BLOCK_SIZE-1)/BLOCK_SIZE;
Particle* particles=new Particle[NUM_PARTICLES];
start=cpuSecond();
Particle* d_particles;
cudaMalloc(&d_particles, nBytes);
for(int i=0;i<NUM_ITERATIONS;i++){
cudaMemcpy(d_particles, particles, nBytes, cudaMemcpyHostToDevice);
one_step<<<grid_size,BLOCK_SIZE>>>(d_particles,i,NUM_ITERATIONS);
cudaDeviceSynchronize();
cudaMemcpy(particles, d_particles, nBytes, cudaMemcpyDeviceToHost);
}
gpu_time+=cpuSecond()-start;
printf("GPU costs:%lfs\n",gpu_time);
cudaFree(d_particles);
} |
5e04664b421e090add58d8ff5b665f59b89097a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "SceECM.h"
#include "SceCells.h" // Because of forward declaration
//# define debugModeECM
// bending stiffness is given inside the code. It should be given as in input from a txt file.
//isInitPhase bool variable is not active anymore.
//Right now it is assumed that ECM stiffness is the same everywhere.
__constant__ double sceInterCell_ECM[5];
//__constant__ double wLCPara_ECM[4];
__constant__ double restLenECMAdhSpringGPU ;
__constant__ double maxLenECMAdhSpringGPU ;
__constant__ double kAdhECMGPU ;
__constant__ double stiffnessECMBasalGPU ;
__constant__ double stiffnessECMBCGPU ;
__constant__ double stiffnessECMPeripGPU ;
__constant__ double lknotECMBasalGPU ;
__constant__ double lknotECMBCGPU ;
__constant__ double lknotECMPeripGPU ;
const double smallNumber=.000001 ;
namespace patch{
template <typename T> std::string to_string (const T& n)
{
std:: ostringstream stm ;
stm << n ;
return stm.str() ;
}
}
__device__
void DefineECMStiffnessAndLknot ( EType nodeType, double & stiffness, double & sponLen) {
if (nodeType==excm) {
stiffness=stiffnessECMBasalGPU ;
sponLen=lknotECMBasalGPU ;
}
if (nodeType==perip) {
stiffness=stiffnessECMPeripGPU ;
sponLen=lknotECMPeripGPU ;
}
if (nodeType==bc2) {
stiffness=stiffnessECMBCGPU;
sponLen=lknotECMBCGPU ;
}
}
__device__
double calMorse_ECM(const double& linkLength ) {
double forceValue=0.0 ;
if (linkLength > sceInterCell_ECM[4]) {
forceValue = 0;
} else {
forceValue = -sceInterCell_ECM[0] / sceInterCell_ECM[2]
* exp(-linkLength / sceInterCell_ECM[2])
+ sceInterCell_ECM[1] / sceInterCell_ECM[3]
* exp(-linkLength / sceInterCell_ECM[3]);
// if (forceValue > 0) {
// forceValue = 0;
// }
}
return (forceValue) ;
}
__device__
double calMorseEnergy_ECM(const double& linkLength ) {
double energyValue=0.0 ;
if (linkLength > sceInterCell_ECM[4]) {
energyValue = 0;
} else {
energyValue = sceInterCell_ECM[0]* exp(-linkLength / sceInterCell_ECM[2])
- sceInterCell_ECM[1]* exp(-linkLength / sceInterCell_ECM[3]);
}
return (energyValue) ;
}
/*
__device__
double calWLC_ECM(const double& linkLength ) {
double x=linkLength/wLCPara_ECM[0] ;
return (wLCPara_ECM[1]*( 6*x+ ( x*x*(3.0-2*x))/( (1-x)*(1-x) ) )
-wLCPara_ECM[2]/pow(linkLength,wLCPara_ECM[3]) ) ;
}
*/
__device__
bool IsValidAdhPair(const double& dist ) {
if (dist > restLenECMAdhSpringGPU && dist < maxLenECMAdhSpringGPU){
return true ;
}
else {
return false ;
}
}
__device__
bool IsValidAdhPairForNotInitPhase(const double& dist ) {
if (dist > restLenECMAdhSpringGPU){
return true ;
}
else {
return false ;
}
}
__device__
double CalAdhECM(const double& dist ) {
return (kAdhECMGPU*(dist-restLenECMAdhSpringGPU));
// in the function IsValid pair, distance already checked to be greater than neutral length
}
__device__
double CalAdhEnergy(const double& dist ) {
return (0.5*kAdhECMGPU*(dist-restLenECMAdhSpringGPU)*(dist-restLenECMAdhSpringGPU));
// in the function IsValid pair, distance already checked to be greater than neutral length
}
EType SceECM:: ConvertStringToEType(string eNodeRead) {
if (eNodeRead=="perip") {
return perip ;
}
else if (eNodeRead=="bc2") {
return bc2 ;
}
else if (eNodeRead=="excm") {
return excm ;
}
else {
cout << "Error in defining type of external nodes" << endl ;
return excm ;// To just return something to avoid compiler complain
}
}
SceECM::SceECM() {
isECMNeighborSet=false ;
eCMRemoved=false ;
}
void SceECM::Initialize(uint maxAllNodePerCellECM, uint maxMembrNodePerCellECM, uint maxTotalNodesECM, int freqPlotData, string uniqueSymbol) {
maxAllNodePerCell=maxAllNodePerCellECM ;
maxMembrNodePerCell= maxMembrNodePerCellECM ;
maxTotalNodes=maxTotalNodesECM ; //Ali
this->freqPlotData=freqPlotData ;
this->uniqueSymbol=uniqueSymbol ;
std::fstream readCoord_ECM ;
std::fstream readInput_ECM ;
int numberNodes_ECM ;
double tmpPosX_ECM,tmpPosY_ECM ;
vector<double> posXIni_ECM,posYIni_ECM ;
vector <EType> eNodeVec ;
int resumeSimulation = globalConfigVars.getConfigValue(
"ResumeSimulation").toInt();
if (resumeSimulation==0) {
cout << " In the ECM module, I am in start mode" << endl ;
readCoord_ECM.open("./resources/coordinate_ECM21.txt") ;
}
else if(resumeSimulation==1) {
cout << " In the ECM module, I am in resume mode" << endl ;
std::string secondInputFileName = "./resources/DataFileECM_" + uniqueSymbol + "Resume.cfg";
readCoord_ECM.open(secondInputFileName.c_str()) ;
}
else{
throw std::invalid_argument(" ResumeSimulation parameter in the input file must be either 1 or 0. Error from ECM module");
}
if (readCoord_ECM.is_open()) {
cout << "ECM coordinates file opened successfully" <<endl ;
}
else {
cout << "ECM coordinates file is not opened successfully" << endl ;
}
string inputInfoText ;
string eNodeRead ;
readCoord_ECM>>numberNodes_ECM ;
for (int i=0 ; i<numberNodes_ECM ; i++){
readCoord_ECM>>tmpPosX_ECM>>tmpPosY_ECM>>eNodeRead ;
posXIni_ECM.push_back(tmpPosX_ECM) ;
posYIni_ECM.push_back(tmpPosY_ECM) ;
EType eNode=ConvertStringToEType(eNodeRead) ;
eNodeVec.push_back(eNode) ;
}
readInput_ECM.open("./resources/ECM_input.txt") ;
if (readInput_ECM.is_open()) {
cout << "ECM Mech input opened successfully" <<endl ;
}
else {
cout << "ECM Mech input is not opened successfully" << endl ;
}
readInput_ECM>> inputInfoText ;
for (int i=0 ; i<5; i++) {
readInput_ECM>> mechPara_ECM.sceInterCellCPU_ECM[i] ; //=39.0 ;
}
// readInput_ECM>>restLenECMSpring ;
// readInput_ECM>>eCMLinSpringStiff ;
readInput_ECM>>restLenECMAdhSpring ;
readInput_ECM>>maxLenECMAdhSpring ;
readInput_ECM>>kAdhECM ;
//for ( int i=0 ; i<4 ; i++) {
// readInput_ECM>>mechPara_ECM.wLCParaCPU_ECM[i] ;
// }
std::fstream secondInput_ECM ;
std:: string secondInputInfo ; //dummy
std::string secondInputFileName = "./resources/ECM_" + uniqueSymbol + "input.cfg";
secondInput_ECM.open(secondInputFileName.c_str()) ;
//secondInput_ECM.open("./resources/ECM_N01G00_input.cfg" ) ;
if (secondInput_ECM.is_open()) {
cout << "Second ECM Mech input opened successfully" <<endl ;
}
else {
cout << "Second ECM Mech input is not opened successfully" << endl ;
}
secondInput_ECM>>secondInputInfo ; // just for information no use in the code
secondInput_ECM>>stiffnessECMBasal ;
secondInput_ECM>>stiffnessECMBC ;
secondInput_ECM>>stiffnessECMPerip ;
secondInput_ECM>>lknotECMBasal ;
secondInput_ECM>>lknotECMBC ;
secondInput_ECM>>lknotECMPerip ;
secondInput_ECM>>dampBasal ;
secondInput_ECM>>dampBC ;
secondInput_ECM>>dampApical ;
cout <<" stiffness of ECM at the basal side is="<<stiffnessECMBasal <<endl ;
cout <<" stiffness of ECM at boundary is="<<stiffnessECMBC<<endl ;
cout <<" stiffness of ECM peripodial side is="<<stiffnessECMPerip<<endl ;
cout <<" rest len basal ECM is="<<lknotECMBasal<<endl ;
cout <<" rest len boundary ECM is= "<<lknotECMBC<<endl ;
cout << "rest len peripodial ECM is=" <<lknotECMPerip <<endl ;
cout << "Damping for basal ECM is="<<dampBasal<<endl ;
cout << "Damping for boundary ECM is= "<<dampBC<<endl ;
cout << "Damping for peripodial ECM is=" <<dampApical <<endl ;
cout << "number of ECM nodes is"<< numberNodes_ECM <<endl ;
for (int i=0 ; i<5; i++) {
cout <<"Morse parameter number"<<i<<" is " <<mechPara_ECM.sceInterCellCPU_ECM[i]<<endl ;
}
//cout <<"rest length of ECM spring is "<<restLenECMSpring<<endl ;
// cout <<"ECM spring stiffness is "<<eCMLinSpringStiff<<endl ;
cout <<"ECM Membrane neutral adhesion length is "<<restLenECMAdhSpring<<endl ;
cout <<"ECM Membrane max adhesion length is "<<maxLenECMAdhSpring<<endl ;
cout <<"ECM Membrane adhesion stiffness is "<<kAdhECM<<endl ;
cout << "ECM only applies adhesvie force" << endl ;
//for ( int i=0 ; i<4 ; i++) {
// cout<<"wLC parameter "<< i << " is "<<mechPara_ECM.wLCParaCPU_ECM[i]<<endl ; ;
//}
hipMemcpyToSymbol(sceInterCell_ECM,mechPara_ECM.sceInterCellCPU_ECM
,5*sizeof(double));
//hipMemcpyToSymbol(wLCPara_ECM,mechPara_ECM.wLCParaCPU_ECM
// ,4*sizeof(double));
hipMemcpyToSymbol(restLenECMAdhSpringGPU, &restLenECMAdhSpring,sizeof(double));
hipMemcpyToSymbol(maxLenECMAdhSpringGPU, &maxLenECMAdhSpring,sizeof(double));
hipMemcpyToSymbol(kAdhECMGPU, &kAdhECM,sizeof(double));
hipMemcpyToSymbol(stiffnessECMPeripGPU, &stiffnessECMPerip,sizeof(double));
hipMemcpyToSymbol(stiffnessECMBCGPU, &stiffnessECMBC,sizeof(double));
hipMemcpyToSymbol(stiffnessECMBasalGPU, &stiffnessECMBasal,sizeof(double));
hipMemcpyToSymbol(lknotECMPeripGPU, & lknotECMPerip,sizeof(double));
hipMemcpyToSymbol(lknotECMBCGPU, & lknotECMBC,sizeof(double));
hipMemcpyToSymbol(lknotECMBasalGPU, & lknotECMBasal,sizeof(double));
counter=100000 ; //large number
lastPrintECM=1000000 ; // large number
outputFrameECM=0 ;
numNodesECM= numberNodes_ECM ; //(eCMMaxX-eCMMinX)/eCMMinDist ;
indexECM.resize(numNodesECM,0) ;
peripORexcm.resize(numNodesECM,perip) ;
dampCoef.resize(numNodesECM) ;
nodeECMLocX.resize(numNodesECM,0.0) ;
nodeECMLocY.resize(numNodesECM,0.0) ;
cellNeighborId.resize(numNodesECM,-1) ;
stiffLevel.resize(numNodesECM) ;
sponLen.resize(numNodesECM) ;
linSpringForceECMX.resize(numNodesECM,0.0);
linSpringForceECMY.resize(numNodesECM,0.0);
linSpringAvgTension.resize(numNodesECM,0.0);
linSpringEnergy.resize(numNodesECM,0.0);
morseEnergy.resize(numNodesECM,0.0);
adhEnergy.resize(numNodesECM,0.0);
bendSpringForceECMX.resize(numNodesECM,0.0);
bendSpringForceECMY.resize(numNodesECM,0.0);
memMorseForceECMX.resize(numNodesECM,0.0);
memMorseForceECMY.resize(numNodesECM,0.0);
fBendCenterX.resize(numNodesECM,0.0);
fBendCenterY.resize(numNodesECM,0.0);
fBendLeftX.resize(numNodesECM,0.0);
fBendLeftY.resize(numNodesECM,0.0);
fBendRightX.resize(numNodesECM,0.0);
fBendRightY.resize(numNodesECM,0.0);
totalForceECMX.resize(numNodesECM,0.0);
totalForceECMY.resize(numNodesECM,0.0);
totalExplicitForceECMX.resize(numNodesECM,0.0);
totalExplicitForceECMY.resize(numNodesECM,0.0);
rHSX.resize(numNodesECM,0.0);
rHSY.resize(numNodesECM,0.0);
//memNodeType.resize(maxTotalNodes,notAssigned1) ;
nodeIsActive.resize(numNodesECM,true) ;
thrust::sequence (indexECM.begin(),indexECM.begin()+numNodesECM);
thrust::copy(posXIni_ECM.begin(),posXIni_ECM.end(),nodeECMLocX.begin()) ;
thrust::copy(posYIni_ECM.begin(),posYIni_ECM.end(),nodeECMLocY.begin()) ;
thrust::copy(eNodeVec.begin(),eNodeVec.end(),peripORexcm.begin()) ;
AssignDampCoef() ;
cout << "GPU level initial coordinates and type of external nodes are: " << endl ;
for (int i=0; i<nodeECMLocX.size() ; i++) {
cout<< nodeECMLocX[i]<<", "<<nodeECMLocY[i]<<", "<<peripORexcm[i] << endl;
}
PrintECM(0.0) ;
std::string cSVFileName = "./ECMFolder/EnergyExport_" + uniqueSymbol + ".CSV";
ofstream EnergyExport ;
EnergyExport.open(cSVFileName.c_str());
EnergyExport <<"Time,"<<"TotalMorseEnergyECM," << "TotalAdhEnergyECM,"<<"TotalLinSpringEnergy,"<<"TotalEnergy, " <<"TotalEnergyDerivative"<< std::endl;
} //initilaization function finished
// Advance the coupled cell/ECM system by one time step of size dt.
// Pipeline: (1) early-out with placeholder output if the ECM was removed;
// (2) snapshot cell-node positions; (3) refresh per-node mechanical
// properties from node types; (4) rebuild cell<->ECM neighbor candidates when
// needed; (5) push cell membrane nodes by ECM forces; (6) assemble the
// explicit forces on ECM nodes and the right-hand side; (7) solve the
// periodic tridiagonal system (SOR) for the new ECM node positions.
// The cellPolar/subCellPolar/isInitPhase flags are not used in this body.
// In debugModeECM builds, hipEvent timers report per-stage timings.
void SceECM:: ApplyECMConstrain(int currentActiveCellCount, int totalNodeCountForActiveCellsECM, double curTime, double dt, double Damp_CoefCell, bool cellPolar, bool subCellPolar, bool isInitPhase){
// When the ECM has been removed, only emit the placeholder frame so movie
// output keeps a consistent frame count.
if (eCMRemoved) {
PrintECMRemoved(curTime);
cout << "ECM is removed" << endl ;
return ;
}
#ifdef debugModeECM
hipEvent_t start1, start2, start3, start4, start5, start6, start7, start8, stop;
float elapsedTime1, elapsedTime2, elapsedTime3, elapsedTime4, elapsedTime5, elapsedTime6, elapsedTime7 , elapsedTime8 ;
hipEventCreate(&start1);
hipEventCreate(&start2);
hipEventCreate(&start3);
hipEventCreate(&start4);
hipEventCreate(&start5);
hipEventCreate(&start6);
hipEventCreate(&start7);
hipEventCreate(&start8);
hipEventCreate(&stop);
hipEventRecord(start1, 0);
#endif
// Keep a copy of the cell-node positions before they are moved; these "old"
// positions are what CalCellForcesOnECM uses later so that action/reaction
// forces between cells and ECM balance exactly (see comment there).
nodeCellLocXOld.resize(totalNodeCountForActiveCellsECM) ;
nodeCellLocYOld.resize(totalNodeCountForActiveCellsECM) ;
adhPairECM_Cell.resize(totalNodeCountForActiveCellsECM,-1) ;
morseEnergyCell.resize(totalNodeCountForActiveCellsECM,0.0);
adhEnergyCell.resize(totalNodeCountForActiveCellsECM,0.0);
thrust::copy(nodesPointerECM->getInfoVecs().nodeLocX.begin(),nodesPointerECM->getInfoVecs().nodeLocX.begin()+totalNodeCountForActiveCellsECM,nodeCellLocXOld.begin()) ;
thrust::copy(nodesPointerECM->getInfoVecs().nodeLocY.begin(),nodesPointerECM->getInfoVecs().nodeLocY.begin()+totalNodeCountForActiveCellsECM,nodeCellLocYOld.begin()) ;
#ifdef debugModeECM
hipEventRecord(start2, 0);
hipEventSynchronize(start2);
hipEventElapsedTime(&elapsedTime1, start1, start2);
#endif
// MechProp maps each node type (peripORexcm) to its (stiffness, spontaneous
// length) pair, refreshing stiffLevel and sponLen every step.
thrust:: transform (peripORexcm.begin(), peripORexcm.begin()+numNodesECM,
thrust::make_zip_iterator (thrust::make_tuple (stiffLevel.begin(),sponLen.begin())),MechProp());
cout << " Mechanical properties after assignment is " << stiffLevel[0] << endl ;
counter ++ ;
// Neighbor candidates are rebuilt during the first 100 steps or until the
// first successful build; the counter-based periodic refresh is disabled.
//if (counter>=100 || curTime<(100*dt) || isECMNeighborSet==false) {
if (curTime<(100*dt) || isECMNeighborSet==false) {
isECMNeighborSet=true ;
counter=0 ;
FindNeighborCandidateForCellsAndECMNodes();
}
#ifdef debugModeECM
hipEventRecord(start3, 0);
hipEventSynchronize(start3);
hipEventElapsedTime(&elapsedTime2, start2, start3);
#endif
// Push the cell membrane nodes using ECM-derived Morse/adhesion forces.
MoveCellNodesByECMForces(totalNodeCountForActiveCellsECM,currentActiveCellCount,dt, Damp_CoefCell) ;
/* To reduce computational cost
energyECM.totalMorseEnergyCellECM = thrust::reduce( morseEnergyCell.begin(),morseEnergyCell.begin()+totalNodeCountForActiveCellsECM,(double) 0.0, thrust::plus<double>() );
energyECM.totalAdhEnergyCellECM = thrust::reduce( adhEnergyCell.begin() ,adhEnergyCell.begin() +totalNodeCountForActiveCellsECM,(double) 0.0, thrust::plus<double>() );
*/
// Internal ECM forces. Note: linear-spring forces are handled implicitly by
// the solver below; only bending and cell-interaction forces enter the
// explicit sum (see CalSumOnlyExplicitForcesOnECM).
CalLinSpringForce();
CalBendSpringForce();
#ifdef debugModeECM
hipEventRecord(start4, 0);
hipEventSynchronize(start4);
hipEventElapsedTime(&elapsedTime3, start3, start4);
#endif
CalCellForcesOnECM() ;
//energyECM.totalLinSpringEnergyECM = 0.5 * ( thrust::reduce( linSpringEnergy.begin(),linSpringEnergy.begin()+numNodesECM,(double) 0.0, thrust::plus<double>() ));
//to make sure it is based on the distance used for action force calculation.
/* To reduce computational cost
energyECM.totalMorseEnergyECMCell = thrust::reduce( morseEnergy.begin(),morseEnergy.begin()+numNodesECM,(double) 0.0, thrust::plus<double>() );
energyECM.totalAdhEnergyECMCell = thrust::reduce( adhEnergy.begin() ,adhEnergy.begin() +numNodesECM,(double) 0.0, thrust::plus<double>() );
*/
//CalSumForcesOnECM() ;
//MoveNodesBySumAllForces(dt) ;
CalSumOnlyExplicitForcesOnECM() ;
CalRHS(dt) ;
#ifdef debugModeECM
hipEventRecord(start5, 0);
hipEventSynchronize(start5);
hipEventElapsedTime(&elapsedTime4, start4, start5);
#endif
// Stage the RHS and current positions on the host for the (host-side) solver.
vector <double> tmpRHSX(numNodesECM);
vector <double> tmpRHSY(numNodesECM);
tmpHostNodeECMLocX.resize(numNodesECM);
tmpHostNodeECMLocY.resize(numNodesECM);
thrust::copy (rHSX.begin(), rHSX.begin()+numNodesECM, tmpRHSX.begin());
thrust::copy (rHSY.begin(), rHSY.begin()+numNodesECM, tmpRHSY.begin());
thrust::copy (nodeECMLocX.begin(), nodeECMLocX.begin()+numNodesECM, tmpHostNodeECMLocX.begin());
thrust::copy (nodeECMLocY.begin(), nodeECMLocY.begin()+numNodesECM, tmpHostNodeECMLocY.begin());
#ifdef debugModeECM
cout << "max RHSX is " << *max_element(tmpRHSX.begin(), tmpRHSX.begin()+numNodesECM) << endl ;
cout << "min RHSX is " << *min_element(tmpRHSX.begin(), tmpRHSX.begin()+numNodesECM) << endl ;
cout << "max RHSY is " << *max_element(tmpRHSY.begin(), tmpRHSY.begin()+numNodesECM) << endl ;
cout << "min RHSY is " << *min_element(tmpRHSY.begin(), tmpRHSY.begin()+numNodesECM) << endl ;
hipEventRecord(start6, 0);
hipEventSynchronize(start6);
hipEventElapsedTime(&elapsedTime5, start5, start6);
#endif
// Build the tridiagonal (periodic) coefficient arrays for the implicit step.
EquMotionCoef (dt);
#ifdef debugModeECM
hipEventRecord(start7, 0);
hipEventSynchronize(start7);
hipEventElapsedTime(&elapsedTime6, start6, start7);
#endif
// Solve the X and Y systems independently with SOR, then copy back to device.
tmpHostNodeECMLocX =solverPointer->SOR3DiagPeriodic(nodeIsActive,hCoefLd, hCoefD, hCoefUd,tmpRHSX,indexPrev, indexNext, tmpHostNodeECMLocX);
tmpHostNodeECMLocY =solverPointer->SOR3DiagPeriodic(nodeIsActive,hCoefLd, hCoefD, hCoefUd,tmpRHSY,indexPrev,indexNext, tmpHostNodeECMLocY);
thrust::copy (tmpHostNodeECMLocX.begin(), tmpHostNodeECMLocX.begin()+numNodesECM, nodeECMLocX.begin());
thrust::copy (tmpHostNodeECMLocY.begin(), tmpHostNodeECMLocY.begin()+numNodesECM, nodeECMLocY.begin());
#ifdef debugModeECM
hipEventRecord(start8, 0);
hipEventSynchronize(start8);
hipEventElapsedTime(&elapsedTime7, start7, start8);
#endif
/* To reduce computational cost
cout << "total Morse energy for cell-ECM is= "<< energyECM.totalMorseEnergyCellECM << endl ;
cout << "total Morse energy for ECM-cell is= "<< energyECM.totalMorseEnergyECMCell << endl ;
cout << "total adhesion energy for cell-ECM is= "<< energyECM.totalAdhEnergyCellECM << endl ;
cout << "total adhesion energy for ECM-cell is= "<< energyECM.totalAdhEnergyECMCell << endl ;
//assert (abs (energyECM.totalMorseEnergyCellECM-energyECM.totalMorseEnergyECMCell)<1.0) ;
//assert (abs (energyECM.totalAdhEnergyCellECM- energyECM.totalAdhEnergyECMCell) <1.0) ;
if ( (abs (energyECM.totalMorseEnergyCellECM-energyECM.totalMorseEnergyECMCell)>1.0) ||
(abs (energyECM.totalAdhEnergyCellECM- energyECM.totalAdhEnergyECMCell) >1.0)
) {
cout << "Warning: Action and reaction forces in the ECM do not match each other" << endl ;
}
*/
# ifdef debugModeECM
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime8, start8, stop);
std::cout << "time 1 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime1 << endl ;
std::cout << "time 2 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime2 << endl ;
std::cout << "time 3 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime3 << endl ;
std::cout << "time 4 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime4 << endl ;
std::cout << "time 5 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime5 << endl ;
std::cout << "time 6 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime6 << endl ;
std::cout << "time 7 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime7 << endl ;
std::cout << "time 8 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime8 << endl ;
#endif
//throw std::invalid_argument(" Solver called properly and I want to stop the code");
PrintECM(curTime);
}
// Write the periodic ECM output, once every freqPlotData calls:
//  1) a VTK unstructured-grid file (points, line cells closing the loop,
//     average tension and node type as point scalars) for ParaView;
//  2) a plain-text coordinate dump for curvature estimation;
//  3) a plain-text tension dump;
//  4) an appended row in the energy CSV (opened fresh by Initialize).
// NOTE(review): totalEnergyPrimeECM is written but never assigned in this
// chunk — confirm it is computed elsewhere.
void SceECM:: PrintECM(double curTime) {
lastPrintECM=lastPrintECM+1 ;
if (lastPrintECM>=freqPlotData) {
outputFrameECM++ ;
lastPrintECM=0 ;
cout << " I am in regular print function" << endl ;
// First ECM output file for paraview //
std::string vtkFileName = "./ECMFolder/ECM_" + uniqueSymbol +patch::to_string(outputFrameECM-1) + ".vtk";
ofstream ECMOut;
ECMOut.open(vtkFileName.c_str());
ECMOut<< "# vtk DataFile Version 3.0" << endl;
ECMOut<< "Result for paraview 2d code" << endl;
ECMOut << "ASCII" << endl;
ECMOut << "DATASET UNSTRUCTURED_GRID" << std::endl;
ECMOut << "POINTS " << nodeECMLocX.size() << " float" << std::endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut << nodeECMLocX[i] << " " << nodeECMLocY[i] << " "
<< 0.0 << std::endl;
}
ECMOut<< std::endl;
// Each VTK cell is a 2-node line segment; the ECM chain is closed into a loop.
ECMOut<< "CELLS " << nodeECMLocX.size()<< " " << 3 *nodeECMLocX.size()<< std::endl;
for (uint i = 0; i < (nodeECMLocX.size()-1); i++) {
ECMOut << 2 << " " << indexECM[i] << " "
<< indexECM[i+1] << std::endl;
}
ECMOut << 2 << " " << indexECM[nodeECMLocX.size()-1] << " "<< indexECM[0] << std::endl; //last point to the first point
// VTK cell type 3 = VTK_LINE.
ECMOut << "CELL_TYPES " << nodeECMLocX.size()<< endl;
for (uint i = 0; i < nodeECMLocX.size() ; i++) {
ECMOut << "3" << endl;
}
ECMOut << "POINT_DATA "<<nodeECMLocX.size() <<endl ;
ECMOut << "SCALARS Avg_Tension " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<linSpringAvgTension[i] <<endl ;
}
ECMOut << "SCALARS Node_Type " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<peripORexcm[i] <<endl ;
}
ECMOut.close();
// second output file for curvature estimation //
std::string txtFileName = "./ECMFolder/ECMLocationExport_" + uniqueSymbol+ patch::to_string(outputFrameECM-1) + ".txt";
ofstream ECMLocationExport ;
ECMLocationExport.open(txtFileName.c_str());
//ECMExport << "ECM pouch coordinates" << std::endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
// if (peripORexcm[i]==excm) {
ECMLocationExport<< nodeECMLocX[i] << " " << nodeECMLocY[i] << " " << 0.0 << " "<< peripORexcm[i]<<std::endl;
// }
}
//ECMExport << "ECM lumen side coordinates" << std::endl;
// for (uint i = 0; i < nodeECMLocX.size(); i++) {
// if (peripORexcm[i]==perip) {
// ECMLocationExport << nodeECMLocX[i] << " " << nodeECMLocY[i] << " "
// << 0.0 << std::endl;
// }
// }
ECMLocationExport.close();
//Third write file for ECM
txtFileName = "./ECMFolder/ECMTensionExport_" + uniqueSymbol+ patch::to_string(outputFrameECM-1) + ".txt";
ofstream ECMTensionExport ;
ECMTensionExport.open(txtFileName.c_str());
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMTensionExport<< linSpringAvgTension[i]<< " " << peripORexcm[i]<< std::endl;
}
ECMTensionExport.close();
///
//Fourth write file for ECM
// Total ECM energy = Morse + adhesion + linear-spring contributions; the
// previous total is kept in totalEnergyECMOld.
energyECM.totalEnergyECMOld=energyECM.totalEnergyECM ;
energyECM.totalEnergyECM= energyECM.totalMorseEnergyECMCell
+ energyECM.totalAdhEnergyECMCell
+ energyECM.totalLinSpringEnergyECM ;
std::string cSVFileName = "./ECMFolder/EnergyExport_" + uniqueSymbol+ ".CSV";
ofstream EnergyExport ;
EnergyExport.open(cSVFileName.c_str(),ofstream::app);
//EnergyExport <<"totalMorseEnergyCell " << "totalAdhEnergyCell "<< "totalMorseEnergy "<<"totalAdhEnergy "<< "totalLinSpringEnergy " << std::endl;
EnergyExport <<curTime<<","<<energyECM.totalMorseEnergyECMCell << "," << energyECM.totalAdhEnergyECMCell<< "," << energyECM.totalLinSpringEnergyECM <<"," << energyECM.totalEnergyECM <<","<<energyECM.totalEnergyPrimeECM <<std::endl;
}
}
// This just creates a placeholder output file so the movie can be generated with consistent frames.
// Placeholder version of PrintECM used after the ECM has been removed:
// writes the same VTK structure on the same schedule, but with every point
// placed at (-500,-500,0), i.e. outside the simulation domain, so the movie
// keeps a consistent frame sequence.
void SceECM:: PrintECMRemoved(double curTime) {
lastPrintECM=lastPrintECM+1 ;
if (lastPrintECM>=freqPlotData) {
outputFrameECM++ ;
lastPrintECM=0 ;
cout << " I am in ECM removed print function" << endl ;
// First ECM output file for paraview //
std::string vtkFileName = "./ECMFolder/ECM_" + uniqueSymbol +patch::to_string(outputFrameECM-1) + ".vtk";
ofstream ECMOut;
ECMOut.open(vtkFileName.c_str());
ECMOut<< "# vtk DataFile Version 3.0" << endl;
ECMOut<< "Result for paraview 2d code" << endl;
ECMOut << "ASCII" << endl;
ECMOut << "DATASET UNSTRUCTURED_GRID" << std::endl;
ECMOut << "POINTS " << nodeECMLocX.size() << " float" << std::endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut << -500.0 << " " << -500.0 << " "
<< 0.0 << std::endl; // Just out of domain
}
ECMOut<< std::endl;
ECMOut<< "CELLS " << nodeECMLocX.size()<< " " << 3 *nodeECMLocX.size()<< std::endl;
for (uint i = 0; i < (nodeECMLocX.size()-1); i++) {
ECMOut << 2 << " " << indexECM[i] << " "
<< indexECM[i+1] << std::endl;
}
ECMOut << 2 << " " << indexECM[nodeECMLocX.size()-1] << " "<< indexECM[0] << std::endl; //last point to the first point
// VTK cell type 3 = VTK_LINE.
ECMOut << "CELL_TYPES " << nodeECMLocX.size()<< endl;
for (uint i = 0; i < nodeECMLocX.size() ; i++) {
ECMOut << "3" << endl;
}
ECMOut << "POINT_DATA "<<nodeECMLocX.size() <<endl ;
ECMOut << "SCALARS Avg_Tension " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<linSpringAvgTension[i] <<endl ;
}
ECMOut << "SCALARS Node_Type " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<peripORexcm[i] <<endl ;
}
ECMOut.close();
}
}
// Collect the current ECM state (node coordinates and node types) from the
// device into host vectors and repackage it as AniResumeData so a later run
// can resume from this configuration.
AniResumeData SceECM:: obtainResumeData() {
AniResumeData aniResumeData ;
thrust:: host_vector<double> hostTmpLocX(numNodesECM);
thrust:: host_vector<double> hostTmpLocY(numNodesECM);
thrust:: host_vector<EType> hostTmpType(numNodesECM);
cout << " I am in obtainResumeData function" << endl ;
// Device -> host transfers, one vector at a time.
thrust::copy (nodeECMLocX.begin(), nodeECMLocX.begin()+numNodesECM, hostTmpLocX.begin());
thrust::copy (nodeECMLocY.begin(), nodeECMLocY.begin()+numNodesECM, hostTmpLocY.begin());
thrust::copy (peripORexcm.begin(), peripORexcm.begin()+numNodesECM, hostTmpType.begin());
cout << " I start passing to regular vector variables" << endl ;
// Repackage per node: position as a CVector (z = 0 in this 2D model) plus type.
for( int i=0 ; i<numNodesECM ; i++) {
aniResumeData.nodePosArr.push_back(CVector (hostTmpLocX[i], hostTmpLocY[i], 0.0)) ;
aniResumeData.nodeECMType.push_back(hostTmpType[i]) ;
}
return aniResumeData ;
}
// Assemble, on the host, the coefficients of the periodic tridiagonal system
// solved by SOR3DiagPeriodic: hCoefLd / hCoefD / hCoefUd are the lower,
// main and upper diagonals, and indexPrev / indexNext hold the wrap-around
// neighbor indices that close the ECM chain into a loop. Spontaneous lengths
// between neighbors are averaged pairwise; the +0.0001 terms guard against
// division by a (near-)zero inter-node distance. dt is the time-step used in
// the implicit discretization.
void SceECM::EquMotionCoef (double dt) {
vector <double> sponLenHost(numNodesECM) ;
vector <double> sponLenWithNext ;
vector <double> sponLenWithPrev ;
vector <double> distWithNext ;
vector <double> distWithPrev ;
vector <double> dampCoefHost ;
// Reset all per-step scratch and output arrays before refilling them.
sponLenWithNext.clear();
sponLenWithPrev.clear();
distWithNext.clear() ;
distWithPrev.clear() ;
hCoefLd.clear() ;
hCoefUd.clear() ;
hCoefD.clear() ;
indexNext.clear() ;
indexPrev.clear() ;
dampCoefHost.clear() ;
indexNext.resize(numNodesECM) ;
indexPrev.resize(numNodesECM) ;
dampCoefHost.resize(numNodesECM) ;
thrust::copy(sponLen.begin(),sponLen.begin()+numNodesECM, sponLenHost.begin()) ;
thrust::copy(dampCoef.begin(),dampCoef.begin()+numNodesECM, dampCoefHost.begin()) ;
double k=stiffLevel[0] ; //Assumming ECM is homogenous in mechanical properties
for ( int i=0 ; i< numNodesECM ; i++) {
// Periodic neighbor indices: node 0 wraps to the last node and vice versa.
indexNext.at(i)=i+1 ;
indexPrev.at(i)=i-1 ;
if (i==numNodesECM-1){
indexNext.at(i)=0 ;
}
if (i==0){
indexPrev.at(i)=numNodesECM-1 ;
}
sponLenWithNext.push_back( 0.5*(sponLenHost[indexNext.at(i)]+sponLenHost[i]) );
sponLenWithPrev.push_back( 0.5*(sponLenHost[indexPrev.at(i)]+sponLenHost[i]) );
// Current Euclidean distances to both chain neighbors (host copy of positions).
distWithNext.push_back(sqrt( pow(tmpHostNodeECMLocX[indexNext.at(i)]-tmpHostNodeECMLocX[i],2) +
pow(tmpHostNodeECMLocY[indexNext.at(i)]-tmpHostNodeECMLocY[i],2))) ;
distWithPrev.push_back(sqrt( pow(tmpHostNodeECMLocX[indexPrev.at(i)]-tmpHostNodeECMLocX[i],2) +
pow(tmpHostNodeECMLocY[indexPrev.at(i)]-tmpHostNodeECMLocY[i],2)));
}
// Diagonals of (I + dt/damp * K) with K the linearized spring operator.
for ( int i=0 ; i< numNodesECM ; i++) {
hCoefD.push_back (1 + k*dt/dampCoefHost.at(i)*( 2 - sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 )
- sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 ))) ;
hCoefLd.push_back( k*dt/dampCoefHost.at(i)*(-1 + sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 ))) ;
hCoefUd.push_back( k*dt/dampCoefHost.at(i)*(-1 + sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 ))) ;
}
#ifdef debugModeECM
cout <<"max distance with next node is" <<*max_element ( distWithNext.begin(), distWithNext.begin()+numNodesECM) <<endl ;
cout <<"min distance with next node is" << *min_element ( distWithNext.begin(), distWithNext.begin()+numNodesECM) <<endl ;
cout <<"max distance with previous node is" <<*max_element ( distWithPrev.begin(), distWithPrev.begin()+numNodesECM) <<endl ;
cout <<"min distance with previous node is" << *min_element ( distWithPrev.begin(), distWithPrev.begin()+numNodesECM) <<endl ;
vector < double> hCoefDAbs;
hCoefDAbs.clear() ;
for ( int i=0 ; i< numNodesECM ; i++) {
hCoefDAbs.push_back (abs(1 + k*dt/dampCoefHost.at(i)*( 2 - sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 )
- sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 )))) ;
}
cout <<"max main diag. elment is " << *max_element ( hCoefD.begin(), hCoefD.begin() +numNodesECM) <<endl ;
cout <<"min main diag. element is " << *min_element ( hCoefD.begin(), hCoefD.begin() +numNodesECM) <<endl ;
cout <<"min main Abs(diag.) element is " << *min_element ( hCoefDAbs.begin(), hCoefDAbs.begin() +numNodesECM) <<endl ;
cout <<"max upper diag. element is " << *max_element ( hCoefUd.begin(), hCoefUd.begin()+numNodesECM) <<endl ;
cout <<"min upper diag. element is " << *min_element ( hCoefUd.begin(), hCoefUd.begin()+numNodesECM) <<endl ;
cout <<"max lower diag. element is " << *max_element ( hCoefLd.begin(), hCoefLd.begin()+numNodesECM) <<endl ;
cout <<"min lower diag. element is " << *min_element ( hCoefLd.begin(), hCoefLd.begin()+numNodesECM) <<endl ;
cout <<"stiffness, time step and first element of damping vector is " << endl ;
cout << k <<","<< dt<<"," << dampCoefHost.at(0) << endl ;
cout << "constants for stiffness matrix calculated " << endl ;
cout << "last diagonal element is " << hCoefD.at(numNodesECM-1) << endl ;
cout << " number of ECM nodes is "<< numNodesECM << endl ;
# endif
}
// Apply ECM-derived forces to the membrane nodes of the epithelial cells and
// advance those nodes by one step of size dt with damping Damp_CoefCell.
// For each cell node the MoveNodes2_Cell functor receives: the id of the
// ECM-neighbor candidate of its cell, its cell rank and rank within the cell,
// its position, active flag and membrane-node type; it writes back the updated
// position, the adhering ECM node id (adhPairECM_Cell) and the per-node Morse
// and adhesion energy contributions.
// Fix vs. previous revision: removed an unused local (`numCells`) that
// fetched the active cell count but was never read — the cell count used by
// the functor is the currentActiveCellCount argument.
void SceECM::MoveCellNodesByECMForces(int totalNodeCountForActiveCellsECM,int currentActiveCellCount, double dt, double Damp_CoefCell)
{
// Raw device pointers so the functor can index ECM node data directly.
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
EType* peripORexcmAddr= thrust::raw_pointer_cast (
&peripORexcm[0]) ;
// move the nodes of epithelial cells
//// find the closest ECM node to each each cell //
thrust::counting_iterator<int> iBegin(0) ;
thrust::counting_iterator<int> iBegin2(0) ;
//////////////////////////////////////////
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
make_permutation_iterator(
cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(),
make_transform_iterator(iBegin2,
DivideFunctor2(
maxAllNodePerCell))),
make_transform_iterator (iBegin,
DivideFunctor2(maxAllNodePerCell)),
make_transform_iterator (iBegin,
ModuloFunctor2(maxAllNodePerCell)),
nodesPointerECM->getInfoVecs().nodeLocX.begin(),
nodesPointerECM->getInfoVecs().nodeLocY.begin(),
nodesPointerECM->getInfoVecs().nodeIsActive.begin(),
nodesPointerECM->getInfoVecs().memNodeType1.begin()
)),
thrust::make_zip_iterator (
thrust:: make_tuple (
make_permutation_iterator(
cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(),
make_transform_iterator(iBegin2,
DivideFunctor2(
maxAllNodePerCell))),
make_transform_iterator (iBegin,
DivideFunctor2(maxAllNodePerCell)),
make_transform_iterator (iBegin,
ModuloFunctor2(maxAllNodePerCell)),
nodesPointerECM->getInfoVecs().nodeLocX.begin(),
nodesPointerECM->getInfoVecs().nodeLocY.begin(),
nodesPointerECM->getInfoVecs().nodeIsActive.begin(),
nodesPointerECM->getInfoVecs().memNodeType1.begin()
))+totalNodeCountForActiveCellsECM,
thrust::make_zip_iterator (
thrust::make_tuple (
nodesPointerECM->getInfoVecs().nodeLocX.begin(),
nodesPointerECM->getInfoVecs().nodeLocY.begin(),
adhPairECM_Cell.begin(),
morseEnergyCell.begin(),
adhEnergyCell.begin())),
MoveNodes2_Cell(nodeECMLocXAddr,nodeECMLocYAddr,maxMembrNodePerCell,numNodesECM,dt,Damp_CoefCell,peripORexcmAddr,currentActiveCellCount));
}
// Compute the linear-spring contribution for every ECM node via the
// LinSpringForceECM functor (defined elsewhere in this file). Per node it
// writes: the spring force components (linSpringForceECMX/Y), the average
// tension (linSpringAvgTension) and the spring energy (linSpringEnergy).
// Raw device pointers let each thread read other nodes' positions,
// stiffnesses and spontaneous lengths.
void SceECM::CalLinSpringForce()
{
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
double* stiffLevelAddr=thrust::raw_pointer_cast (
&stiffLevel[0]) ;
double* sponLenAddr =thrust::raw_pointer_cast (
&sponLen[0]) ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
linSpringForceECMX.begin(),
linSpringForceECMY.begin(),
linSpringAvgTension.begin(),
linSpringEnergy.begin())),
LinSpringForceECM(numNodesECM,nodeECMLocXAddr,nodeECMLocYAddr,stiffLevelAddr,sponLenAddr));
//////////////////////////////////// find the closest Cell to each ECM node ///////////
///////////////////////////////////
//cout << " I am after FindCellNeighbor functor" << endl ;
}
// Compute bending-spring forces on the ECM chain in two passes:
//  pass 1 (CalBendECM): for each node, the bending force it exerts on itself
//  (fBendCenter) and on its left/right neighbors (fBendLeft/fBendRight);
//  pass 2 (SumBendForce): gather each node's own center force plus the
//  left/right contributions from its neighbors into bendSpringForceECMX/Y.
void SceECM::CalBendSpringForce()
{
// Bending stiffness is currently hard-coded here.
const double eCMBendStiff=6.0 ; // need to be an input
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
fBendCenterX.begin(),
fBendCenterY.begin(),
fBendLeftX.begin(),
fBendLeftY.begin(),
fBendRightX.begin(),
fBendRightY.begin())),
CalBendECM(nodeECMLocXAddr,nodeECMLocYAddr,numNodesECM,eCMBendStiff));
// Raw pointers to the per-triple forces for the gather pass.
double* fBendLeftXAddr= thrust::raw_pointer_cast (
&fBendLeftX[0]) ;
double* fBendLeftYAddr= thrust::raw_pointer_cast (
&fBendLeftY[0]) ;
double* fBendRightXAddr= thrust::raw_pointer_cast (
&fBendRightX[0]) ;
double* fBendRightYAddr= thrust::raw_pointer_cast (
&fBendRightY[0]) ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
fBendCenterX.begin(),
fBendCenterY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
fBendCenterX.begin(),
fBendCenterY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin())),
SumBendForce(fBendLeftXAddr,fBendLeftYAddr,fBendRightXAddr,fBendRightYAddr,numNodesECM));
}
// Compute the forces cells exert on ECM nodes (Morse interaction + adhesion,
// via the MorseAndAdhForceECM functor). Per ECM node it writes the force
// components (memMorseForceECMX/Y) and the Morse/adhesion energies. Cell
// positions are read from the pre-step snapshots (nodeCellLocXOld/YOld) so
// that this reaction force matches the action force already applied to the
// cells from the same configuration.
void SceECM::CalCellForcesOnECM()
{
bool* nodeIsActiveAddr= thrust::raw_pointer_cast (
& (nodesPointerECM->getInfoVecs().nodeIsActive[0])) ;
int * adhPairECM_CellAddr= thrust::raw_pointer_cast (
&adhPairECM_Cell[0]) ;
//Old locations are chosen to make sure action-reaction balance of forces between ECM and cell nodes are fully satisfied.
double* nodeCellLocXAddr= thrust::raw_pointer_cast (
&nodeCellLocXOld[0]) ;
double* nodeCellLocYAddr= thrust::raw_pointer_cast (
&nodeCellLocYOld[0]) ;
int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
cellNeighborId.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
cellNeighborId.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
memMorseForceECMX.begin(),
memMorseForceECMY.begin(),
morseEnergy.begin(),
adhEnergy.begin())),
MorseAndAdhForceECM(numCells,maxAllNodePerCell,maxMembrNodePerCell,nodeCellLocXAddr,nodeCellLocYAddr,nodeIsActiveAddr,adhPairECM_CellAddr));
}
// Sum all per-node force contributions (linear spring + bending + cell
// interaction) into totalForceECMX/Y. Used by the fully explicit update path
// (MoveNodesBySumAllForces); that path is currently commented out in
// ApplyECMConstrain in favor of the semi-implicit one.
void SceECM::CalSumForcesOnECM()
{
// Placeholder value required by the TotalECMForceCompute constructor.
double dummy=0.0 ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
linSpringForceECMX.begin(),
linSpringForceECMY.begin(),
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin(),
memMorseForceECMX.begin(),
memMorseForceECMY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
linSpringForceECMX.begin(),
linSpringForceECMY.begin(),
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin(),
memMorseForceECMX.begin(),
memMorseForceECMY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
totalForceECMX.begin(),
totalForceECMY.begin())),
TotalECMForceCompute(dummy));
}
// Sum only the explicitly treated force contributions — bending springs and
// cell-interaction (Morse/adhesion) forces — into totalExplicitForceECMX/Y.
// Linear-spring forces are deliberately excluded: they are handled implicitly
// through the tridiagonal system built in EquMotionCoef.
void SceECM::CalSumOnlyExplicitForcesOnECM() {
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin(),
memMorseForceECMX.begin(),
memMorseForceECMY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin(),
memMorseForceECMX.begin(),
memMorseForceECMY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
totalExplicitForceECMX.begin(),
totalExplicitForceECMY.begin())),
TotalExplicitECMForceCompute());
}
// Build the right-hand side (rHSX, rHSY) of the semi-implicit update from
// each node's explicit force, current position and damping coefficient, via
// the RHSCompute functor with time step dt. The resulting vectors feed the
// periodic tridiagonal solve in ApplyECMConstrain.
void SceECM::CalRHS(double dt)
{
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
totalExplicitForceECMX.begin(),
totalExplicitForceECMY.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
dampCoef.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
totalExplicitForceECMX.begin(),
totalExplicitForceECMY.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
dampCoef.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
rHSX.begin(),
rHSY.begin())),
RHSCompute(dt));
}
// Fully explicit ECM node update: advance each node position by one step of
// size dt using the summed total force and per-node damping (MoveNodesECM
// functor). Currently not called from ApplyECMConstrain, which uses the
// semi-implicit path (CalSumOnlyExplicitForcesOnECM + CalRHS + SOR) instead.
void SceECM::MoveNodesBySumAllForces(double dt)
{
// move the nodes of ECM
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin(),
totalForceECMX.begin(),
totalForceECMY.begin(),
dampCoef.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin(),
totalForceECMX.begin(),
totalForceECMY.begin(),
dampCoef.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin())),
MoveNodesECM(dt));
}
// Build the two neighbor-candidate maps used by the force kernels:
//  - for every cell (its basal location), the id of the closest ECM node,
//    written into the cell's eCMNeighborId;
//  - for every ECM node, the id of the closest cell, written into
//    cellNeighborId.
// Both searches are brute-force over the opposite set via the
// FindECMNeighborPerCell / FindCellNeighborPerECMNode functors.
void SceECM::FindNeighborCandidateForCellsAndECMNodes()
{
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
double * basalCellLocXAddr= thrust::raw_pointer_cast (
& ( cellsPointerECM->getCellInfoVecs().basalLocX[0]) ) ;
double * basalCellLocYAddr= thrust::raw_pointer_cast (
& ( cellsPointerECM->getCellInfoVecs().basalLocY[0]) ) ;
EType* peripORexcmAddr= thrust::raw_pointer_cast (
&peripORexcm[0]) ;
int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ;
//// find the closest ECM node to each each cell //
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
cellsPointerECM->getCellInfoVecs().basalLocX.begin(),
cellsPointerECM->getCellInfoVecs().basalLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
cellsPointerECM->getCellInfoVecs().basalLocX.begin(),
cellsPointerECM->getCellInfoVecs().basalLocY.begin()))+numCells,
cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(),
FindECMNeighborPerCell(nodeECMLocXAddr,nodeECMLocYAddr,numNodesECM ));
// ...and the closest cell to each ECM node.
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin()))+numNodesECM,
cellNeighborId.begin(),
FindCellNeighborPerECMNode(basalCellLocXAddr,basalCellLocYAddr, numCells));
}
// Assign a damping coefficient to every ECM node from its type via the
// AssignDamping functor (constructed from dampBasal, dampBC, dampApical).
// In debugModeECM builds, verify that every coefficient is at least
// smallNumber and abort with an exception otherwise.
void SceECM::AssignDampCoef() {
thrust::transform ( peripORexcm.begin() ,peripORexcm.begin() +numNodesECM, dampCoef.begin(), AssignDamping(dampBasal,dampBC,dampApical) );
#ifdef debugModeECM
for (int i=0 ; i<numNodesECM ; i++) {
if (dampCoef[i] < smallNumber) {
cout << "damping of element " << i << " is " << dampCoef[i] << " which is wrong" <<endl ;
// Fully qualify the exception type: the previous `throw::invalid_argument`
// parsed as a global-scope lookup and only compiled because a
// using-directive happened to make std::invalid_argument visible there.
throw std::invalid_argument ( "damping coefficients in ECM is not set correctly") ;
}
}
#endif
}
// ==== begin file 5e04664b421e090add58d8ff5b665f59b89097a3.cu ====
#include "SceECM.h"
#include "SceCells.h" // Because of forward declaration
//# define debugModeECM
// Bending stiffness is currently hard-coded in CalBendSpringForce; it should be given as an input from a txt file.
//isInitPhase bool variable is not active anymore.
//Right now it is assumed that ECM stiffness is the same everywhere.
__constant__ double sceInterCell_ECM[5];
//__constant__ double wLCPara_ECM[4];
__constant__ double restLenECMAdhSpringGPU ;
__constant__ double maxLenECMAdhSpringGPU ;
__constant__ double kAdhECMGPU ;
__constant__ double stiffnessECMBasalGPU ;
__constant__ double stiffnessECMBCGPU ;
__constant__ double stiffnessECMPeripGPU ;
__constant__ double lknotECMBasalGPU ;
__constant__ double lknotECMBCGPU ;
__constant__ double lknotECMPeripGPU ;
const double smallNumber=.000001 ;
namespace patch{
	// Minimal stand-in for std::to_string (not available on some older
	// toolchains): streams the value into an ostringstream and returns the
	// resulting text. Works for any type with an operator<< overload.
	template <typename T> std::string to_string (const T& n)
	{
		std:: ostringstream converter ;
		converter << n ;
		return converter.str() ;
	}
}
__device__
void DefineECMStiffnessAndLknot ( EType nodeType, double & stiffness, double & sponLen) {
	// Look up the spring stiffness and spontaneous ("knot") length for the
	// given ECM node type; both come from device constant memory. Unknown
	// types leave the outputs untouched, exactly as the original if-chain did.
	if (nodeType==excm) {
		stiffness=stiffnessECMBasalGPU ;
		sponLen=lknotECMBasalGPU ;
	} else if (nodeType==perip) {
		stiffness=stiffnessECMPeripGPU ;
		sponLen=lknotECMPeripGPU ;
	} else if (nodeType==bc2) {
		stiffness=stiffnessECMBCGPU;
		sponLen=lknotECMBCGPU ;
	}
}
__device__
double calMorse_ECM(const double& linkLength ) {
	// Morse-type interaction force as a function of node-node distance.
	// sceInterCell_ECM[4] is the cutoff: beyond it the force is zero.
	// Inside the cutoff the force is a sum of two exponential terms whose
	// amplitudes (indices 0,1) and decay lengths (indices 2,3) live in
	// constant memory.
	if (linkLength > sceInterCell_ECM[4]) {
		return 0.0 ;
	}
	double termOne = -sceInterCell_ECM[0] / sceInterCell_ECM[2]
			* exp(-linkLength / sceInterCell_ECM[2]) ;
	double termTwo =  sceInterCell_ECM[1] / sceInterCell_ECM[3]
			* exp(-linkLength / sceInterCell_ECM[3]) ;
	return termOne + termTwo ;
}
__device__
double calMorseEnergy_ECM(const double& linkLength ) {
	// Morse-type interaction energy matching calMorse_ECM: zero beyond the
	// cutoff sceInterCell_ECM[4], otherwise the difference of the two
	// exponential terms parameterized from constant memory.
	if (linkLength > sceInterCell_ECM[4]) {
		return 0.0 ;
	}
	return sceInterCell_ECM[0]* exp(-linkLength / sceInterCell_ECM[2])
	     - sceInterCell_ECM[1]* exp(-linkLength / sceInterCell_ECM[3]) ;
}
/*
__device__
double calWLC_ECM(const double& linkLength ) {
double x=linkLength/wLCPara_ECM[0] ;
return (wLCPara_ECM[1]*( 6*x+ ( x*x*(3.0-2*x))/( (1-x)*(1-x) ) )
-wLCPara_ECM[2]/pow(linkLength,wLCPara_ECM[3]) ) ;
}
*/
__device__
bool IsValidAdhPair(const double& dist ) {
	// An adhesion pair is valid only while its length lies strictly between
	// the spring rest length and the maximum (breaking) length.
	return (dist > restLenECMAdhSpringGPU) && (dist < maxLenECMAdhSpringGPU) ;
}
__device__
bool IsValidAdhPairForNotInitPhase(const double& dist ) {
	// Variant without the upper bound: any stretch beyond the rest length
	// counts as a valid adhesion pair.
	return dist > restLenECMAdhSpringGPU ;
}
__device__
double CalAdhECM(const double& dist ) {
	// Linear spring force of the adhesion link. The IsValidAdhPair checks
	// guarantee dist exceeds the rest length, so the stretch is non-negative.
	double stretch = dist - restLenECMAdhSpringGPU ;
	return kAdhECMGPU * stretch ;
}
__device__
double CalAdhEnergy(const double& dist ) {
	// Elastic energy 0.5*k*stretch^2 of the adhesion spring; dist was already
	// validated to exceed the rest length by the IsValidAdhPair checks.
	double stretch = dist - restLenECMAdhSpringGPU ;
	return 0.5 * kAdhECMGPU * stretch * stretch ;
}
// Translate the textual node-type tag read from the input files into the
// EType enum. Unrecognized tags print an error and fall back to excm so the
// caller always receives a value.
EType SceECM:: ConvertStringToEType(string eNodeRead) {
	if (eNodeRead=="perip") {
		return perip ;
	}
	if (eNodeRead=="bc2") {
		return bc2 ;
	}
	if (eNodeRead=="excm") {
		return excm ;
	}
	cout << "Error in defining type of external nodes" << endl ;
	return excm ;// To just return something to avoid compiler complain
}
SceECM::SceECM() {
	// Fresh module: the ECM is present and no cell/ECM neighbor lists have
	// been built yet, which forces a neighbor search on the first
	// ApplyECMConstrain call.
	eCMRemoved=false ;
	isECMNeighborSet=false ;
}
// One-time setup of the ECM module:
//  1. reads ECM node coordinates/types either from the fresh-start file or a
//     resume file (selected by the "ResumeSimulation" config value),
//  2. reads mechanical parameters from two text/cfg input files,
//  3. pushes the parameters into GPU __constant__ symbols,
//  4. sizes and initializes all per-node device vectors,
//  5. writes the initial frame and opens/initializes the energy CSV.
// maxAllNodePerCellECM/maxMembrNodePerCellECM/maxTotalNodesECM describe the
// cell-node layout used elsewhere for indexing; freqPlotData is the output
// period (in calls to PrintECM); uniqueSymbol tags all output file names.
// NOTE(review): failure to open an input file is only reported, not fatal —
// subsequent reads would then yield unusable values; confirm intended.
void SceECM::Initialize(uint maxAllNodePerCellECM, uint maxMembrNodePerCellECM, uint maxTotalNodesECM, int freqPlotData, string uniqueSymbol) {
maxAllNodePerCell=maxAllNodePerCellECM ;
maxMembrNodePerCell= maxMembrNodePerCellECM ;
maxTotalNodes=maxTotalNodesECM ; //Ali
this->freqPlotData=freqPlotData ;
this->uniqueSymbol=uniqueSymbol ;
std::fstream readCoord_ECM ;
std::fstream readInput_ECM ;
int numberNodes_ECM ;
double tmpPosX_ECM,tmpPosY_ECM ;
vector<double> posXIni_ECM,posYIni_ECM ;
vector <EType> eNodeVec ;
// Select the coordinate source: fresh start (0) or resume file (1).
int resumeSimulation = globalConfigVars.getConfigValue(
"ResumeSimulation").toInt();
if (resumeSimulation==0) {
cout << " In the ECM module, I am in start mode" << endl ;
readCoord_ECM.open("./resources/coordinate_ECM21.txt") ;
}
else if(resumeSimulation==1) {
cout << " In the ECM module, I am in resume mode" << endl ;
std::string secondInputFileName = "./resources/DataFileECM_" + uniqueSymbol + "Resume.cfg";
readCoord_ECM.open(secondInputFileName.c_str()) ;
}
else{
throw std::invalid_argument(" ResumeSimulation parameter in the input file must be either 1 or 0. Error from ECM module");
}
if (readCoord_ECM.is_open()) {
cout << "ECM coordinates file opened successfully" <<endl ;
}
else {
cout << "ECM coordinates file is not opened successfully" << endl ;
}
string inputInfoText ;
string eNodeRead ;
// File layout: node count, then one "x y type" line per node.
readCoord_ECM>>numberNodes_ECM ;
for (int i=0 ; i<numberNodes_ECM ; i++){
readCoord_ECM>>tmpPosX_ECM>>tmpPosY_ECM>>eNodeRead ;
posXIni_ECM.push_back(tmpPosX_ECM) ;
posYIni_ECM.push_back(tmpPosY_ECM) ;
EType eNode=ConvertStringToEType(eNodeRead) ;
eNodeVec.push_back(eNode) ;
}
// First parameter file: Morse parameters and adhesion-spring constants.
readInput_ECM.open("./resources/ECM_input.txt") ;
if (readInput_ECM.is_open()) {
cout << "ECM Mech input opened successfully" <<endl ;
}
else {
cout << "ECM Mech input is not opened successfully" << endl ;
}
readInput_ECM>> inputInfoText ;
for (int i=0 ; i<5; i++) {
readInput_ECM>> mechPara_ECM.sceInterCellCPU_ECM[i] ; //=39.0 ;
}
// readInput_ECM>>restLenECMSpring ;
// readInput_ECM>>eCMLinSpringStiff ;
readInput_ECM>>restLenECMAdhSpring ;
readInput_ECM>>maxLenECMAdhSpring ;
readInput_ECM>>kAdhECM ;
//for ( int i=0 ; i<4 ; i++) {
// readInput_ECM>>mechPara_ECM.wLCParaCPU_ECM[i] ;
// }
// Second parameter file: region-specific stiffness, rest lengths, damping.
std::fstream secondInput_ECM ;
std:: string secondInputInfo ; //dummy
std::string secondInputFileName = "./resources/ECM_" + uniqueSymbol + "input.cfg";
secondInput_ECM.open(secondInputFileName.c_str()) ;
//secondInput_ECM.open("./resources/ECM_N01G00_input.cfg" ) ;
if (secondInput_ECM.is_open()) {
cout << "Second ECM Mech input opened successfully" <<endl ;
}
else {
cout << "Second ECM Mech input is not opened successfully" << endl ;
}
secondInput_ECM>>secondInputInfo ; // just for information no use in the code
secondInput_ECM>>stiffnessECMBasal ;
secondInput_ECM>>stiffnessECMBC ;
secondInput_ECM>>stiffnessECMPerip ;
secondInput_ECM>>lknotECMBasal ;
secondInput_ECM>>lknotECMBC ;
secondInput_ECM>>lknotECMPerip ;
secondInput_ECM>>dampBasal ;
secondInput_ECM>>dampBC ;
secondInput_ECM>>dampApical ;
// Echo everything that was read, for the simulation log.
cout <<" stiffness of ECM at the basal side is="<<stiffnessECMBasal <<endl ;
cout <<" stiffness of ECM at boundary is="<<stiffnessECMBC<<endl ;
cout <<" stiffness of ECM peripodial side is="<<stiffnessECMPerip<<endl ;
cout <<" rest len basal ECM is="<<lknotECMBasal<<endl ;
cout <<" rest len boundary ECM is= "<<lknotECMBC<<endl ;
cout << "rest len peripodial ECM is=" <<lknotECMPerip <<endl ;
cout << "Damping for basal ECM is="<<dampBasal<<endl ;
cout << "Damping for boundary ECM is= "<<dampBC<<endl ;
cout << "Damping for peripodial ECM is=" <<dampApical <<endl ;
cout << "number of ECM nodes is"<< numberNodes_ECM <<endl ;
for (int i=0 ; i<5; i++) {
cout <<"Morse parameter number"<<i<<" is " <<mechPara_ECM.sceInterCellCPU_ECM[i]<<endl ;
}
//cout <<"rest length of ECM spring is "<<restLenECMSpring<<endl ;
// cout <<"ECM spring stiffness is "<<eCMLinSpringStiff<<endl ;
cout <<"ECM Membrane neutral adhesion length is "<<restLenECMAdhSpring<<endl ;
cout <<"ECM Membrane max adhesion length is "<<maxLenECMAdhSpring<<endl ;
cout <<"ECM Membrane adhesion stiffness is "<<kAdhECM<<endl ;
cout << "ECM only applies adhesvie force" << endl ;
//for ( int i=0 ; i<4 ; i++) {
// cout<<"wLC parameter "<< i << " is "<<mechPara_ECM.wLCParaCPU_ECM[i]<<endl ; ;
//}
// Publish the host-side parameters to GPU __constant__ symbols so the
// __device__ helpers (Morse/adhesion functions) can read them.
cudaMemcpyToSymbol(sceInterCell_ECM,mechPara_ECM.sceInterCellCPU_ECM
,5*sizeof(double));
//cudaMemcpyToSymbol(wLCPara_ECM,mechPara_ECM.wLCParaCPU_ECM
// ,4*sizeof(double));
cudaMemcpyToSymbol(restLenECMAdhSpringGPU, &restLenECMAdhSpring,sizeof(double));
cudaMemcpyToSymbol(maxLenECMAdhSpringGPU, &maxLenECMAdhSpring,sizeof(double));
cudaMemcpyToSymbol(kAdhECMGPU, &kAdhECM,sizeof(double));
cudaMemcpyToSymbol(stiffnessECMPeripGPU, &stiffnessECMPerip,sizeof(double));
cudaMemcpyToSymbol(stiffnessECMBCGPU, &stiffnessECMBC,sizeof(double));
cudaMemcpyToSymbol(stiffnessECMBasalGPU, &stiffnessECMBasal,sizeof(double));
cudaMemcpyToSymbol(lknotECMPeripGPU, & lknotECMPerip,sizeof(double));
cudaMemcpyToSymbol(lknotECMBCGPU, & lknotECMBC,sizeof(double));
cudaMemcpyToSymbol(lknotECMBasalGPU, & lknotECMBasal,sizeof(double));
// Large sentinels force a neighbor rebuild and an output write on first use.
counter=100000 ; //large number
lastPrintECM=1000000 ; // large number
outputFrameECM=0 ;
numNodesECM= numberNodes_ECM ; //(eCMMaxX-eCMMinX)/eCMMinDist ;
// Allocate and zero all per-node device vectors.
indexECM.resize(numNodesECM,0) ;
peripORexcm.resize(numNodesECM,perip) ;
dampCoef.resize(numNodesECM) ;
nodeECMLocX.resize(numNodesECM,0.0) ;
nodeECMLocY.resize(numNodesECM,0.0) ;
cellNeighborId.resize(numNodesECM,-1) ;
stiffLevel.resize(numNodesECM) ;
sponLen.resize(numNodesECM) ;
linSpringForceECMX.resize(numNodesECM,0.0);
linSpringForceECMY.resize(numNodesECM,0.0);
linSpringAvgTension.resize(numNodesECM,0.0);
linSpringEnergy.resize(numNodesECM,0.0);
morseEnergy.resize(numNodesECM,0.0);
adhEnergy.resize(numNodesECM,0.0);
bendSpringForceECMX.resize(numNodesECM,0.0);
bendSpringForceECMY.resize(numNodesECM,0.0);
memMorseForceECMX.resize(numNodesECM,0.0);
memMorseForceECMY.resize(numNodesECM,0.0);
fBendCenterX.resize(numNodesECM,0.0);
fBendCenterY.resize(numNodesECM,0.0);
fBendLeftX.resize(numNodesECM,0.0);
fBendLeftY.resize(numNodesECM,0.0);
fBendRightX.resize(numNodesECM,0.0);
fBendRightY.resize(numNodesECM,0.0);
totalForceECMX.resize(numNodesECM,0.0);
totalForceECMY.resize(numNodesECM,0.0);
totalExplicitForceECMX.resize(numNodesECM,0.0);
totalExplicitForceECMY.resize(numNodesECM,0.0);
rHSX.resize(numNodesECM,0.0);
rHSY.resize(numNodesECM,0.0);
//memNodeType.resize(maxTotalNodes,notAssigned1) ;
nodeIsActive.resize(numNodesECM,true) ;
// Upload initial coordinates/types read from file to the device vectors.
thrust::sequence (indexECM.begin(),indexECM.begin()+numNodesECM);
thrust::copy(posXIni_ECM.begin(),posXIni_ECM.end(),nodeECMLocX.begin()) ;
thrust::copy(posYIni_ECM.begin(),posYIni_ECM.end(),nodeECMLocY.begin()) ;
thrust::copy(eNodeVec.begin(),eNodeVec.end(),peripORexcm.begin()) ;
AssignDampCoef() ;
cout << "GPU level initial coordinates and type of external nodes are: " << endl ;
for (int i=0; i<nodeECMLocX.size() ; i++) {
cout<< nodeECMLocX[i]<<", "<<nodeECMLocY[i]<<", "<<peripORexcm[i] << endl;
}
// Write frame 0 and start the energy CSV with its header row.
PrintECM(0.0) ;
std::string cSVFileName = "./ECMFolder/EnergyExport_" + uniqueSymbol + ".CSV";
ofstream EnergyExport ;
EnergyExport.open(cSVFileName.c_str());
EnergyExport <<"Time,"<<"TotalMorseEnergyECM," << "TotalAdhEnergyECM,"<<"TotalLinSpringEnergy,"<<"TotalEnergy, " <<"TotalEnergyDerivative"<< std::endl;
} //initilaization function finished
// Advances the ECM one step of size dt and applies ECM forces to the cell
// membrane nodes. Pipeline: snapshot cell-node positions; (re)build the
// cell/ECM neighbor candidates when needed; move cell nodes by ECM forces;
// compute linear-spring, bending and cell-on-ECM forces; assemble the RHS and
// solve a periodic tridiagonal system (SOR) for the new ECM node positions.
// Early-outs (with a placeholder output frame) when the ECM was removed.
// cellPolar/subCellPolar/isInitPhase are currently unused in this body.
void SceECM:: ApplyECMConstrain(int currentActiveCellCount, int totalNodeCountForActiveCellsECM, double curTime, double dt, double Damp_CoefCell, bool cellPolar, bool subCellPolar, bool isInitPhase){
if (eCMRemoved) {
PrintECMRemoved(curTime);
cout << "ECM is removed" << endl ;
return ;
}
// CUDA event timers; compiled in only for debug builds.
#ifdef debugModeECM
cudaEvent_t start1, start2, start3, start4, start5, start6, start7, start8, stop;
float elapsedTime1, elapsedTime2, elapsedTime3, elapsedTime4, elapsedTime5, elapsedTime6, elapsedTime7 , elapsedTime8 ;
cudaEventCreate(&start1);
cudaEventCreate(&start2);
cudaEventCreate(&start3);
cudaEventCreate(&start4);
cudaEventCreate(&start5);
cudaEventCreate(&start6);
cudaEventCreate(&start7);
cudaEventCreate(&start8);
cudaEventCreate(&stop);
cudaEventRecord(start1, 0);
#endif
// Snapshot cell-node positions; the old positions are used later so
// action/reaction forces between cells and ECM stay balanced.
nodeCellLocXOld.resize(totalNodeCountForActiveCellsECM) ;
nodeCellLocYOld.resize(totalNodeCountForActiveCellsECM) ;
adhPairECM_Cell.resize(totalNodeCountForActiveCellsECM,-1) ;
morseEnergyCell.resize(totalNodeCountForActiveCellsECM,0.0);
adhEnergyCell.resize(totalNodeCountForActiveCellsECM,0.0);
thrust::copy(nodesPointerECM->getInfoVecs().nodeLocX.begin(),nodesPointerECM->getInfoVecs().nodeLocX.begin()+totalNodeCountForActiveCellsECM,nodeCellLocXOld.begin()) ;
thrust::copy(nodesPointerECM->getInfoVecs().nodeLocY.begin(),nodesPointerECM->getInfoVecs().nodeLocY.begin()+totalNodeCountForActiveCellsECM,nodeCellLocYOld.begin()) ;
#ifdef debugModeECM
cudaEventRecord(start2, 0);
cudaEventSynchronize(start2);
cudaEventElapsedTime(&elapsedTime1, start1, start2);
#endif
// Map each node's region type to its stiffness and spontaneous length.
thrust:: transform (peripORexcm.begin(), peripORexcm.begin()+numNodesECM,
thrust::make_zip_iterator (thrust::make_tuple (stiffLevel.begin(),sponLen.begin())),MechProp());
cout << " Mechanical properties after assignment is " << stiffLevel[0] << endl ;
counter ++ ;
//if (counter>=100 || curTime<(100*dt) || isECMNeighborSet==false) {
// Rebuild neighbor candidates during the first 100 steps or on first call.
if (curTime<(100*dt) || isECMNeighborSet==false) {
isECMNeighborSet=true ;
counter=0 ;
FindNeighborCandidateForCellsAndECMNodes();
}
#ifdef debugModeECM
cudaEventRecord(start3, 0);
cudaEventSynchronize(start3);
cudaEventElapsedTime(&elapsedTime2, start2, start3);
#endif
MoveCellNodesByECMForces(totalNodeCountForActiveCellsECM,currentActiveCellCount,dt, Damp_CoefCell) ;
/* To reduce computational cost
energyECM.totalMorseEnergyCellECM = thrust::reduce( morseEnergyCell.begin(),morseEnergyCell.begin()+totalNodeCountForActiveCellsECM,(double) 0.0, thrust::plus<double>() );
energyECM.totalAdhEnergyCellECM = thrust::reduce( adhEnergyCell.begin() ,adhEnergyCell.begin() +totalNodeCountForActiveCellsECM,(double) 0.0, thrust::plus<double>() );
*/
CalLinSpringForce();
CalBendSpringForce();
#ifdef debugModeECM
cudaEventRecord(start4, 0);
cudaEventSynchronize(start4);
cudaEventElapsedTime(&elapsedTime3, start3, start4);
#endif
CalCellForcesOnECM() ;
//energyECM.totalLinSpringEnergyECM = 0.5 * ( thrust::reduce( linSpringEnergy.begin(),linSpringEnergy.begin()+numNodesECM,(double) 0.0, thrust::plus<double>() ));
//to make sure it is based on the distance used for action force calculation.
/* To reduce computational cost
energyECM.totalMorseEnergyECMCell = thrust::reduce( morseEnergy.begin(),morseEnergy.begin()+numNodesECM,(double) 0.0, thrust::plus<double>() );
energyECM.totalAdhEnergyECMCell = thrust::reduce( adhEnergy.begin() ,adhEnergy.begin() +numNodesECM,(double) 0.0, thrust::plus<double>() );
*/
// Semi-implicit path: only explicit (bending + cell) forces go into the RHS;
// the linear springs are handled implicitly by the tridiagonal solver.
//CalSumForcesOnECM() ;
//MoveNodesBySumAllForces(dt) ;
CalSumOnlyExplicitForcesOnECM() ;
CalRHS(dt) ;
#ifdef debugModeECM
cudaEventRecord(start5, 0);
cudaEventSynchronize(start5);
cudaEventElapsedTime(&elapsedTime4, start4, start5);
#endif
// Stage device data on the host for the (host-side) SOR solver.
vector <double> tmpRHSX(numNodesECM);
vector <double> tmpRHSY(numNodesECM);
tmpHostNodeECMLocX.resize(numNodesECM);
tmpHostNodeECMLocY.resize(numNodesECM);
thrust::copy (rHSX.begin(), rHSX.begin()+numNodesECM, tmpRHSX.begin());
thrust::copy (rHSY.begin(), rHSY.begin()+numNodesECM, tmpRHSY.begin());
thrust::copy (nodeECMLocX.begin(), nodeECMLocX.begin()+numNodesECM, tmpHostNodeECMLocX.begin());
thrust::copy (nodeECMLocY.begin(), nodeECMLocY.begin()+numNodesECM, tmpHostNodeECMLocY.begin());
#ifdef debugModeECM
cout << "max RHSX is " << *max_element(tmpRHSX.begin(), tmpRHSX.begin()+numNodesECM) << endl ;
cout << "min RHSX is " << *min_element(tmpRHSX.begin(), tmpRHSX.begin()+numNodesECM) << endl ;
cout << "max RHSY is " << *max_element(tmpRHSY.begin(), tmpRHSY.begin()+numNodesECM) << endl ;
cout << "min RHSY is " << *min_element(tmpRHSY.begin(), tmpRHSY.begin()+numNodesECM) << endl ;
cudaEventRecord(start6, 0);
cudaEventSynchronize(start6);
cudaEventElapsedTime(&elapsedTime5, start5, start6);
#endif
// Build the periodic tridiagonal coefficients, then solve X and Y systems.
EquMotionCoef (dt);
#ifdef debugModeECM
cudaEventRecord(start7, 0);
cudaEventSynchronize(start7);
cudaEventElapsedTime(&elapsedTime6, start6, start7);
#endif
tmpHostNodeECMLocX =solverPointer->SOR3DiagPeriodic(nodeIsActive,hCoefLd, hCoefD, hCoefUd,tmpRHSX,indexPrev, indexNext, tmpHostNodeECMLocX);
tmpHostNodeECMLocY =solverPointer->SOR3DiagPeriodic(nodeIsActive,hCoefLd, hCoefD, hCoefUd,tmpRHSY,indexPrev,indexNext, tmpHostNodeECMLocY);
// Copy the solved positions back to the device vectors.
thrust::copy (tmpHostNodeECMLocX.begin(), tmpHostNodeECMLocX.begin()+numNodesECM, nodeECMLocX.begin());
thrust::copy (tmpHostNodeECMLocY.begin(), tmpHostNodeECMLocY.begin()+numNodesECM, nodeECMLocY.begin());
#ifdef debugModeECM
cudaEventRecord(start8, 0);
cudaEventSynchronize(start8);
cudaEventElapsedTime(&elapsedTime7, start7, start8);
#endif
/* To reduce computational cost
cout << "total Morse energy for cell-ECM is= "<< energyECM.totalMorseEnergyCellECM << endl ;
cout << "total Morse energy for ECM-cell is= "<< energyECM.totalMorseEnergyECMCell << endl ;
cout << "total adhesion energy for cell-ECM is= "<< energyECM.totalAdhEnergyCellECM << endl ;
cout << "total adhesion energy for ECM-cell is= "<< energyECM.totalAdhEnergyECMCell << endl ;
//assert (abs (energyECM.totalMorseEnergyCellECM-energyECM.totalMorseEnergyECMCell)<1.0) ;
//assert (abs (energyECM.totalAdhEnergyCellECM- energyECM.totalAdhEnergyECMCell) <1.0) ;
if ( (abs (energyECM.totalMorseEnergyCellECM-energyECM.totalMorseEnergyECMCell)>1.0) ||
(abs (energyECM.totalAdhEnergyCellECM- energyECM.totalAdhEnergyECMCell) >1.0)
) {
cout << "Warning: Action and reaction forces in the ECM do not match each other" << endl ;
}
*/
# ifdef debugModeECM
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime8, start8, stop);
std::cout << "time 1 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime1 << endl ;
std::cout << "time 2 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime2 << endl ;
std::cout << "time 3 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime3 << endl ;
std::cout << "time 4 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime4 << endl ;
std::cout << "time 5 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime5 << endl ;
std::cout << "time 6 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime6 << endl ;
std::cout << "time 7 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime7 << endl ;
std::cout << "time 8 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime8 << endl ;
#endif
//throw std::invalid_argument(" Solver called properly and I want to stop the code");
PrintECM(curTime);
}
// Writes periodic output, throttled to every freqPlotData calls:
//  1. a VTK polyline file of the ECM ring for ParaView (positions, average
//     tension and node type as point data),
//  2. a node-location text export,
//  3. a tension/type text export,
//  4. an appended row in the per-simulation energy CSV.
void SceECM:: PrintECM(double curTime) {
lastPrintECM=lastPrintECM+1 ;
if (lastPrintECM>=freqPlotData) {
outputFrameECM++ ;
lastPrintECM=0 ;
cout << " I am in regular print function" << endl ;
// First ECM output file for paraview //
std::string vtkFileName = "./ECMFolder/ECM_" + uniqueSymbol +patch::to_string(outputFrameECM-1) + ".vtk";
ofstream ECMOut;
ECMOut.open(vtkFileName.c_str());
ECMOut<< "# vtk DataFile Version 3.0" << endl;
ECMOut<< "Result for paraview 2d code" << endl;
ECMOut << "ASCII" << endl;
ECMOut << "DATASET UNSTRUCTURED_GRID" << std::endl;
ECMOut << "POINTS " << nodeECMLocX.size() << " float" << std::endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut << nodeECMLocX[i] << " " << nodeECMLocY[i] << " "
<< 0.0 << std::endl;
}
ECMOut<< std::endl;
// Line cells connecting consecutive nodes; the ring is closed below.
ECMOut<< "CELLS " << nodeECMLocX.size()<< " " << 3 *nodeECMLocX.size()<< std::endl;
for (uint i = 0; i < (nodeECMLocX.size()-1); i++) {
ECMOut << 2 << " " << indexECM[i] << " "
<< indexECM[i+1] << std::endl;
}
ECMOut << 2 << " " << indexECM[nodeECMLocX.size()-1] << " "<< indexECM[0] << std::endl; //last point to the first point
// VTK cell type 3 = VTK_LINE.
ECMOut << "CELL_TYPES " << nodeECMLocX.size()<< endl;
for (uint i = 0; i < nodeECMLocX.size() ; i++) {
ECMOut << "3" << endl;
}
ECMOut << "POINT_DATA "<<nodeECMLocX.size() <<endl ;
ECMOut << "SCALARS Avg_Tension " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<linSpringAvgTension[i] <<endl ;
}
ECMOut << "SCALARS Node_Type " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<peripORexcm[i] <<endl ;
}
ECMOut.close();
// second output file for curvature estimation //
std::string txtFileName = "./ECMFolder/ECMLocationExport_" + uniqueSymbol+ patch::to_string(outputFrameECM-1) + ".txt";
ofstream ECMLocationExport ;
ECMLocationExport.open(txtFileName.c_str());
//ECMExport << "ECM pouch coordinates" << std::endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
// if (peripORexcm[i]==excm) {
ECMLocationExport<< nodeECMLocX[i] << " " << nodeECMLocY[i] << " " << 0.0 << " "<< peripORexcm[i]<<std::endl;
// }
}
//ECMExport << "ECM lumen side coordinates" << std::endl;
// for (uint i = 0; i < nodeECMLocX.size(); i++) {
// if (peripORexcm[i]==perip) {
// ECMLocationExport << nodeECMLocX[i] << " " << nodeECMLocY[i] << " "
// << 0.0 << std::endl;
// }
// }
ECMLocationExport.close();
//Third write file for ECM
txtFileName = "./ECMFolder/ECMTensionExport_" + uniqueSymbol+ patch::to_string(outputFrameECM-1) + ".txt";
ofstream ECMTensionExport ;
ECMTensionExport.open(txtFileName.c_str());
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMTensionExport<< linSpringAvgTension[i]<< " " << peripORexcm[i]<< std::endl;
}
ECMTensionExport.close();
///
//Fourth write file for ECM
// Total ECM energy = Morse + adhesion + linear-spring contributions; the
// previous total is kept in totalEnergyECMOld before overwriting.
energyECM.totalEnergyECMOld=energyECM.totalEnergyECM ;
energyECM.totalEnergyECM= energyECM.totalMorseEnergyECMCell
+ energyECM.totalAdhEnergyECMCell
+ energyECM.totalLinSpringEnergyECM ;
std::string cSVFileName = "./ECMFolder/EnergyExport_" + uniqueSymbol+ ".CSV";
ofstream EnergyExport ;
EnergyExport.open(cSVFileName.c_str(),ofstream::app);
//EnergyExport <<"totalMorseEnergyCell " << "totalAdhEnergyCell "<< "totalMorseEnergy "<<"totalAdhEnergy "<< "totalLinSpringEnergy " << std::endl;
EnergyExport <<curTime<<","<<energyECM.totalMorseEnergyECMCell << "," << energyECM.totalAdhEnergyECMCell<< "," << energyECM.totalLinSpringEnergyECM <<"," << energyECM.totalEnergyECM <<","<<energyECM.totalEnergyPrimeECM <<std::endl;
}
}
// This is just to create a file to be able to generate the movie with consistent frames
// Placeholder output used after the ECM has been removed: writes a VTK file
// with the same structure and throttling as PrintECM, but with every node
// placed at (-500,-500), i.e. outside the domain, so movie frame numbering
// stays consistent.
void SceECM:: PrintECMRemoved(double curTime) {
lastPrintECM=lastPrintECM+1 ;
if (lastPrintECM>=freqPlotData) {
outputFrameECM++ ;
lastPrintECM=0 ;
cout << " I am in ECM removed print function" << endl ;
// First ECM output file for paraview //
std::string vtkFileName = "./ECMFolder/ECM_" + uniqueSymbol +patch::to_string(outputFrameECM-1) + ".vtk";
ofstream ECMOut;
ECMOut.open(vtkFileName.c_str());
ECMOut<< "# vtk DataFile Version 3.0" << endl;
ECMOut<< "Result for paraview 2d code" << endl;
ECMOut << "ASCII" << endl;
ECMOut << "DATASET UNSTRUCTURED_GRID" << std::endl;
ECMOut << "POINTS " << nodeECMLocX.size() << " float" << std::endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut << -500.0 << " " << -500.0 << " "
<< 0.0 << std::endl; // Just out of domain
}
ECMOut<< std::endl;
ECMOut<< "CELLS " << nodeECMLocX.size()<< " " << 3 *nodeECMLocX.size()<< std::endl;
for (uint i = 0; i < (nodeECMLocX.size()-1); i++) {
ECMOut << 2 << " " << indexECM[i] << " "
<< indexECM[i+1] << std::endl;
}
ECMOut << 2 << " " << indexECM[nodeECMLocX.size()-1] << " "<< indexECM[0] << std::endl; //last point to the first point
// VTK cell type 3 = VTK_LINE.
ECMOut << "CELL_TYPES " << nodeECMLocX.size()<< endl;
for (uint i = 0; i < nodeECMLocX.size() ; i++) {
ECMOut << "3" << endl;
}
ECMOut << "POINT_DATA "<<nodeECMLocX.size() <<endl ;
ECMOut << "SCALARS Avg_Tension " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<linSpringAvgTension[i] <<endl ;
}
ECMOut << "SCALARS Node_Type " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<peripORexcm[i] <<endl ;
}
ECMOut.close();
}
}
// Snapshot the current ECM state (node coordinates and types) into an
// AniResumeData object, used to write the resume file.
AniResumeData SceECM:: obtainResumeData() {
    AniResumeData aniResumeData ;
    // Host-side staging buffers, sized to the active node count.
    thrust::host_vector<double> hostTmpLocX(numNodesECM);
    thrust::host_vector<double> hostTmpLocY(numNodesECM);
    thrust::host_vector<EType> hostTmpType(numNodesECM);
    cout << " I am in obtainResumeData function" << endl ;
    // Pull each device vector to the host.
    thrust::copy(nodeECMLocX.begin(), nodeECMLocX.begin() + numNodesECM,
                 hostTmpLocX.begin());
    thrust::copy(nodeECMLocY.begin(), nodeECMLocY.begin() + numNodesECM,
                 hostTmpLocY.begin());
    thrust::copy(peripORexcm.begin(), peripORexcm.begin() + numNodesECM,
                 hostTmpType.begin());
    cout << " I start passing to regular vector variables" << endl ;
    // Repack into the plain std::vector containers of AniResumeData.
    for (int i = 0; i < numNodesECM; i++) {
        CVector pos(hostTmpLocX[i], hostTmpLocY[i], 0.0);
        aniResumeData.nodePosArr.push_back(pos);
        aniResumeData.nodeECMType.push_back(hostTmpType[i]);
    }
    return aniResumeData ;
}
// Builds, on the host, the coefficients of the periodic tridiagonal system
// solved for the new ECM node positions: lower/main/upper diagonals
// (hCoefLd/hCoefD/hCoefUd) and the circular neighbor index maps
// (indexPrev/indexNext). Uses stiffLevel[0] for all nodes (homogeneous ECM
// assumption) and the node positions staged in tmpHostNodeECMLocX/Y by the
// caller (ApplyECMConstrain).
void SceECM::EquMotionCoef (double dt) {
vector <double> sponLenHost(numNodesECM) ;
vector <double> sponLenWithNext ;
vector <double> sponLenWithPrev ;
vector <double> distWithNext ;
vector <double> distWithPrev ;
vector <double> dampCoefHost ;
sponLenWithNext.clear();
sponLenWithPrev.clear();
distWithNext.clear() ;
distWithPrev.clear() ;
hCoefLd.clear() ;
hCoefUd.clear() ;
hCoefD.clear() ;
indexNext.clear() ;
indexPrev.clear() ;
dampCoefHost.clear() ;
indexNext.resize(numNodesECM) ;
indexPrev.resize(numNodesECM) ;
dampCoefHost.resize(numNodesECM) ;
// Copy per-node spontaneous lengths and damping coefficients to the host.
thrust::copy(sponLen.begin(),sponLen.begin()+numNodesECM, sponLenHost.begin()) ;
thrust::copy(dampCoef.begin(),dampCoef.begin()+numNodesECM, dampCoefHost.begin()) ;
double k=stiffLevel[0] ; //Assumming ECM is homogenous in mechanical properties
// First pass: periodic neighbor indices, averaged spontaneous lengths and
// current distances to the next/previous node.
for ( int i=0 ; i< numNodesECM ; i++) {
indexNext.at(i)=i+1 ;
indexPrev.at(i)=i-1 ;
if (i==numNodesECM-1){
indexNext.at(i)=0 ;
}
if (i==0){
indexPrev.at(i)=numNodesECM-1 ;
}
sponLenWithNext.push_back( 0.5*(sponLenHost[indexNext.at(i)]+sponLenHost[i]) );
sponLenWithPrev.push_back( 0.5*(sponLenHost[indexPrev.at(i)]+sponLenHost[i]) );
distWithNext.push_back(sqrt( pow(tmpHostNodeECMLocX[indexNext.at(i)]-tmpHostNodeECMLocX[i],2) +
pow(tmpHostNodeECMLocY[indexNext.at(i)]-tmpHostNodeECMLocY[i],2))) ;
distWithPrev.push_back(sqrt( pow(tmpHostNodeECMLocX[indexPrev.at(i)]-tmpHostNodeECMLocX[i],2) +
pow(tmpHostNodeECMLocY[indexPrev.at(i)]-tmpHostNodeECMLocY[i],2)));
}
// Second pass: fill the three diagonals; the 0.0001 terms guard against
// division by zero when neighboring nodes coincide.
for ( int i=0 ; i< numNodesECM ; i++) {
hCoefD.push_back (1 + k*dt/dampCoefHost.at(i)*( 2 - sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 )
- sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 ))) ;
hCoefLd.push_back( k*dt/dampCoefHost.at(i)*(-1 + sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 ))) ;
hCoefUd.push_back( k*dt/dampCoefHost.at(i)*(-1 + sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 ))) ;
}
// Diagnostics on the assembled system; debug builds only.
#ifdef debugModeECM
cout <<"max distance with next node is" <<*max_element ( distWithNext.begin(), distWithNext.begin()+numNodesECM) <<endl ;
cout <<"min distance with next node is" << *min_element ( distWithNext.begin(), distWithNext.begin()+numNodesECM) <<endl ;
cout <<"max distance with previous node is" <<*max_element ( distWithPrev.begin(), distWithPrev.begin()+numNodesECM) <<endl ;
cout <<"min distance with previous node is" << *min_element ( distWithPrev.begin(), distWithPrev.begin()+numNodesECM) <<endl ;
vector < double> hCoefDAbs;
hCoefDAbs.clear() ;
for ( int i=0 ; i< numNodesECM ; i++) {
hCoefDAbs.push_back (abs(1 + k*dt/dampCoefHost.at(i)*( 2 - sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 )
- sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 )))) ;
}
cout <<"max main diag. elment is " << *max_element ( hCoefD.begin(), hCoefD.begin() +numNodesECM) <<endl ;
cout <<"min main diag. element is " << *min_element ( hCoefD.begin(), hCoefD.begin() +numNodesECM) <<endl ;
cout <<"min main Abs(diag.) element is " << *min_element ( hCoefDAbs.begin(), hCoefDAbs.begin() +numNodesECM) <<endl ;
cout <<"max upper diag. element is " << *max_element ( hCoefUd.begin(), hCoefUd.begin()+numNodesECM) <<endl ;
cout <<"min upper diag. element is " << *min_element ( hCoefUd.begin(), hCoefUd.begin()+numNodesECM) <<endl ;
cout <<"max lower diag. element is " << *max_element ( hCoefLd.begin(), hCoefLd.begin()+numNodesECM) <<endl ;
cout <<"min lower diag. element is " << *min_element ( hCoefLd.begin(), hCoefLd.begin()+numNodesECM) <<endl ;
cout <<"stiffness, time step and first element of damping vector is " << endl ;
cout << k <<","<< dt<<"," << dampCoefHost.at(0) << endl ;
cout << "constants for stiffness matrix calculated " << endl ;
cout << "last diagonal element is " << hCoefD.at(numNodesECM-1) << endl ;
cout << " number of ECM nodes is "<< numNodesECM << endl ;
# endif
}
// Moves cell membrane nodes under the forces exerted by the ECM. Iterates
// over all nodes of all active cells; each input tuple carries the owning
// cell's candidate ECM-neighbor id, the cell rank and node rank (derived from
// the flat node index via Divide/ModuloFunctor2), the node position/activity
// and its membrane type. The MoveNodes2_Cell functor writes updated node
// positions plus the matched adhesion pair id and the per-node Morse and
// adhesion energies.
void SceECM::MoveCellNodesByECMForces(int totalNodeCountForActiveCellsECM,int currentActiveCellCount, double dt, double Damp_CoefCell)
{
// Raw device pointers to ECM state, read inside the functor.
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
EType* peripORexcmAddr= thrust::raw_pointer_cast (
&peripORexcm[0]) ;
// move the nodes of epithelial cells
//// find the closest ECM node to each each cell //
int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ;
thrust::counting_iterator<int> iBegin(0) ;
thrust::counting_iterator<int> iBegin2(0) ;
//////////////////////////////////////////
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
make_permutation_iterator(
cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(),
make_transform_iterator(iBegin2,
DivideFunctor2(
maxAllNodePerCell))),
make_transform_iterator (iBegin,
DivideFunctor2(maxAllNodePerCell)),
make_transform_iterator (iBegin,
ModuloFunctor2(maxAllNodePerCell)),
nodesPointerECM->getInfoVecs().nodeLocX.begin(),
nodesPointerECM->getInfoVecs().nodeLocY.begin(),
nodesPointerECM->getInfoVecs().nodeIsActive.begin(),
nodesPointerECM->getInfoVecs().memNodeType1.begin()
)),
thrust::make_zip_iterator (
thrust:: make_tuple (
make_permutation_iterator(
cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(),
make_transform_iterator(iBegin2,
DivideFunctor2(
maxAllNodePerCell))),
make_transform_iterator (iBegin,
DivideFunctor2(maxAllNodePerCell)),
make_transform_iterator (iBegin,
ModuloFunctor2(maxAllNodePerCell)),
nodesPointerECM->getInfoVecs().nodeLocX.begin(),
nodesPointerECM->getInfoVecs().nodeLocY.begin(),
nodesPointerECM->getInfoVecs().nodeIsActive.begin(),
nodesPointerECM->getInfoVecs().memNodeType1.begin()
))+totalNodeCountForActiveCellsECM,
thrust::make_zip_iterator (
thrust::make_tuple (
nodesPointerECM->getInfoVecs().nodeLocX.begin(),
nodesPointerECM->getInfoVecs().nodeLocY.begin(),
adhPairECM_Cell.begin(),
morseEnergyCell.begin(),
adhEnergyCell.begin())),
MoveNodes2_Cell(nodeECMLocXAddr,nodeECMLocYAddr,maxMembrNodePerCell,numNodesECM,dt,Damp_CoefCell,peripORexcmAddr,currentActiveCellCount));
}
// Computes the linear-spring (stretching) force on every ECM node from its
// contour neighbors. For each node the LinSpringForceECM functor also records
// the average tension and the spring energy into linSpringAvgTension and
// linSpringEnergy.
void SceECM::CalLinSpringForce()
{
    // Raw device pointers handed to the functor for neighbor lookups.
    double* locXRaw = thrust::raw_pointer_cast(&nodeECMLocX[0]);
    double* locYRaw = thrust::raw_pointer_cast(&nodeECMLocY[0]);
    double* stiffRaw = thrust::raw_pointer_cast(&stiffLevel[0]);
    double* sponLenRaw = thrust::raw_pointer_cast(&sponLen[0]);

    thrust::transform(
        thrust::make_zip_iterator(thrust::make_tuple(
            indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(
            indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin()))
            + numNodesECM,
        thrust::make_zip_iterator(thrust::make_tuple(
            linSpringForceECMX.begin(), linSpringForceECMY.begin(),
            linSpringAvgTension.begin(), linSpringEnergy.begin())),
        LinSpringForceECM(numNodesECM, locXRaw, locYRaw, stiffRaw, sponLenRaw));
}
// Computes bending forces along the ECM contour in two passes:
// (1) per node, CalBendECM evaluates the bending contributions at the center
//     node and at its left/right neighbors; (2) SumBendForce gathers the
//     neighbor contributions back so each node receives its total.
void SceECM::CalBendSpringForce()
{
    const double eCMBendStiff = 6.0; // need to be an input

    double* locXRaw = thrust::raw_pointer_cast(&nodeECMLocX[0]);
    double* locYRaw = thrust::raw_pointer_cast(&nodeECMLocY[0]);

    // Pass 1: force pieces for each node triple.
    thrust::transform(
        thrust::make_zip_iterator(thrust::make_tuple(
            indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(
            indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin()))
            + numNodesECM,
        thrust::make_zip_iterator(thrust::make_tuple(
            fBendCenterX.begin(), fBendCenterY.begin(),
            fBendLeftX.begin(), fBendLeftY.begin(),
            fBendRightX.begin(), fBendRightY.begin())),
        CalBendECM(locXRaw, locYRaw, numNodesECM, eCMBendStiff));

    // Pass 2: sum center + neighbor contributions into the per-node totals.
    double* fLeftXRaw = thrust::raw_pointer_cast(&fBendLeftX[0]);
    double* fLeftYRaw = thrust::raw_pointer_cast(&fBendLeftY[0]);
    double* fRightXRaw = thrust::raw_pointer_cast(&fBendRightX[0]);
    double* fRightYRaw = thrust::raw_pointer_cast(&fBendRightY[0]);

    thrust::transform(
        thrust::make_zip_iterator(thrust::make_tuple(
            indexECM.begin(), fBendCenterX.begin(), fBendCenterY.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(
            indexECM.begin(), fBendCenterX.begin(), fBendCenterY.begin()))
            + numNodesECM,
        thrust::make_zip_iterator(thrust::make_tuple(
            bendSpringForceECMX.begin(), bendSpringForceECMY.begin())),
        SumBendForce(fLeftXRaw, fLeftYRaw, fRightXRaw, fRightYRaw, numNodesECM));
}
// Computes the Morse and adhesion forces exerted by cell membrane nodes on
// each ECM node, plus the per-node interaction energies.
void SceECM::CalCellForcesOnECM()
{
    bool* cellNodeActiveRaw = thrust::raw_pointer_cast(
        &(nodesPointerECM->getInfoVecs().nodeIsActive[0]));
    int* adhPairRaw = thrust::raw_pointer_cast(&adhPairECM_Cell[0]);
    // Old locations are chosen to make sure action-reaction balance of forces
    // between ECM and cell nodes are fully satisfied.
    double* cellLocXOldRaw = thrust::raw_pointer_cast(&nodeCellLocXOld[0]);
    double* cellLocYOldRaw = thrust::raw_pointer_cast(&nodeCellLocYOld[0]);

    int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size();

    thrust::transform(
        thrust::make_zip_iterator(thrust::make_tuple(
            indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin(),
            cellNeighborId.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(
            indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin(),
            cellNeighborId.begin()))
            + numNodesECM,
        thrust::make_zip_iterator(thrust::make_tuple(
            memMorseForceECMX.begin(), memMorseForceECMY.begin(),
            morseEnergy.begin(), adhEnergy.begin())),
        MorseAndAdhForceECM(numCells, maxAllNodePerCell, maxMembrNodePerCell,
            cellLocXOldRaw, cellLocYOldRaw, cellNodeActiveRaw, adhPairRaw));
}
// Sums linear-spring, bending and membrane (Morse/adhesion) forces into the
// per-node totals; used by the fully explicit update path.
void SceECM::CalSumForcesOnECM()
{
    double dummy = 0.0; // placeholder argument required by the functor
    thrust::transform(
        thrust::make_zip_iterator(thrust::make_tuple(
            linSpringForceECMX.begin(), linSpringForceECMY.begin(),
            bendSpringForceECMX.begin(), bendSpringForceECMY.begin(),
            memMorseForceECMX.begin(), memMorseForceECMY.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(
            linSpringForceECMX.begin(), linSpringForceECMY.begin(),
            bendSpringForceECMX.begin(), bendSpringForceECMY.begin(),
            memMorseForceECMX.begin(), memMorseForceECMY.begin()))
            + numNodesECM,
        thrust::make_zip_iterator(thrust::make_tuple(
            totalForceECMX.begin(), totalForceECMY.begin())),
        TotalECMForceCompute(dummy));
}
// Sums only the explicitly treated forces (bending + membrane) into
// totalExplicitForceECMX/Y; the linear springs are handled implicitly by the
// tridiagonal solver, so they are deliberately excluded here.
void SceECM::CalSumOnlyExplicitForcesOnECM() {
    thrust::transform(
        thrust::make_zip_iterator(thrust::make_tuple(
            bendSpringForceECMX.begin(), bendSpringForceECMY.begin(),
            memMorseForceECMX.begin(), memMorseForceECMY.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(
            bendSpringForceECMX.begin(), bendSpringForceECMY.begin(),
            memMorseForceECMX.begin(), memMorseForceECMY.begin()))
            + numNodesECM,
        thrust::make_zip_iterator(thrust::make_tuple(
            totalExplicitForceECMX.begin(), totalExplicitForceECMY.begin())),
        TotalExplicitECMForceCompute());
}
// Build the right-hand-side vectors rHSX/rHSY for the node update from the
// explicit forces, current node positions, and per-node damping coefficients.
// dt is forwarded to the RHSCompute functor.
// NOTE(review): presumably this is the RHS of a (semi-)implicit time step
// solved elsewhere -- confirm against the caller.
void SceECM::CalRHS(double dt)
{
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
totalExplicitForceECMX.begin(),
totalExplicitForceECMY.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
dampCoef.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
totalExplicitForceECMX.begin(),
totalExplicitForceECMY.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
dampCoef.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
rHSX.begin(),
rHSY.begin())),
RHSCompute(dt));
}
// Advance ECM node positions one step of size dt using the previously summed
// total forces and per-node damping (MoveNodesECM functor).
// Note: nodeECMLocX/Y appear as both input and output of the same transform;
// this is safe because thrust::transform is elementwise -- each output element
// overwrites only its own corresponding input element.
void SceECM::MoveNodesBySumAllForces(double dt)
{
// move the nodes of ECM
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin(),
totalForceECMX.begin(),
totalForceECMY.begin(),
dampCoef.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin(),
totalForceECMX.begin(),
totalForceECMY.begin(),
dampCoef.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin())),
MoveNodesECM(dt));
}
// For each cell, record the index of the closest ECM node (eCMNeighborId),
// and for each ECM node, record the index of the closest cell (cellNeighborId).
// These candidate pairs are consumed by the membrane-ECM interaction terms.
void SceECM::FindNeighborCandidateForCellsAndECMNodes()
{
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
double * basalCellLocXAddr= thrust::raw_pointer_cast (
& ( cellsPointerECM->getCellInfoVecs().basalLocX[0]) ) ;
double * basalCellLocYAddr= thrust::raw_pointer_cast (
& ( cellsPointerECM->getCellInfoVecs().basalLocY[0]) ) ;
// NOTE: a raw pointer into peripORexcm used to be taken here but was never
// used by this function; the dead local has been removed.
int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ;
// find the closest ECM node to each cell
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
cellsPointerECM->getCellInfoVecs().basalLocX.begin(),
cellsPointerECM->getCellInfoVecs().basalLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
cellsPointerECM->getCellInfoVecs().basalLocX.begin(),
cellsPointerECM->getCellInfoVecs().basalLocY.begin()))+numCells,
cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(),
FindECMNeighborPerCell(nodeECMLocXAddr,nodeECMLocYAddr,numNodesECM ));
// find the closest cell to each ECM node
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin()))+numNodesECM,
cellNeighborId.begin(),
FindCellNeighborPerECMNode(basalCellLocXAddr,basalCellLocYAddr, numCells));
}
// Map each ECM node's type (peripORexcm) to its damping coefficient via the
// AssignDamping functor (constructed from dampBasal, dampBC, dampApical).
// In debug builds, validate that every coefficient is at least smallNumber.
void SceECM::AssignDampCoef() {
thrust::transform ( peripORexcm.begin() ,peripORexcm.begin() +numNodesECM, dampCoef.begin(), AssignDamping(dampBasal,dampBC,dampApical) );
#ifdef debugModeECM
// Each dampCoef[i] read below is a device-to-host transfer -- debug-only cost.
for (int i=0 ; i<numNodesECM ; i++) {
if (dampCoef[i] < smallNumber) {
cout << "damping of element " << i << " is " << dampCoef[i] << " which is wrong" <<endl ;
// NOTE(review): 'throw::invalid_argument' relies on std::invalid_argument
// being visible via a using-directive elsewhere in this file -- confirm.
throw::invalid_argument ( "damping coefficients in ECM is not set correctly") ;
}
}
#endif
}
|
77890a5bb4e65ab120f7dc0a651f0401ea7384be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2017 Darius Rckert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include <thrust/device_vector.h>
#include "saiga/cuda/CudaInfo.h"
#include "saiga/cuda/cudaHelper.h"
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/event.h"
#include "saiga/cuda/pinned_vector.h"
#include "saiga/cuda/stream.h"
#include "saiga/core/math/math.h"
#include "saiga/core/util/assert.h"
#include <iostream>
#include <vector>
using namespace Saiga;
using Saiga::ArrayView;
using Saiga::CUDA::ThreadInfo;
//#define LECTURE
// Compute-heavy dummy payload for the copy/compute-overlap benchmark:
// operator() performs K*512 dependent multiply-add updates on a vec4,
// giving the process kernel an arithmetic load tunable via K.
template <int K>
class Element
{
public:
vec4 data;
HD inline void operator()()
{
for (int k = 0; k < K * 512; ++k)
{
data = data * 3.1f + data;
}
}
};
// One thread per element: load, run the element's operator(), store back.
// Guarded against the grid tail, so any grid >= data.size() threads is valid.
template <typename T>
__global__ static void process(ArrayView<T> data)
{
ThreadInfo<> ti;
if (ti.thread_id >= data.size()) return;
T e = data[ti.thread_id];
e();
data[ti.thread_id] = e;
}
#ifdef LECTURE
// Baseline (lecture) version: one synchronous H2D copy, one kernel launch,
// one synchronous D2H copy on the default stream -- no copy/compute overlap.
// Timed as a whole by CudaScopedTimerPrint.
template <int K>
static void uploadProcessDownloadAsync(int N)
{
using T = Element<K>;
thrust::host_vector<T> h_data(N);
thrust::device_vector<T> d_data(N);
{
Saiga::CUDA::CudaScopedTimerPrint timer("process");
// Compute launch arguments
const unsigned int BLOCK_SIZE = 128;
const unsigned int BLOCKS = Saiga::CUDA::getBlockCount(N, BLOCK_SIZE);
hipMemcpy(d_data.data().get(), h_data.data(), N * sizeof(T), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( process<T>), dim3(BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_data);
hipMemcpy(h_data.data(), d_data.data().get(), N * sizeof(T), hipMemcpyDeviceToHost);
}
}
// Entry point for the LECTURE build: run the synchronous baseline once.
int main(int argc, char* argv[])
{
uploadProcessDownloadAsync<8>(1024 * 1024);
// FIX: 'cout'/'endl' were unqualified, but only 'using namespace Saiga' is in
// scope in this file, so this (normally #ifdef'd-out) branch did not compile.
// The #else branch already uses the std:: qualification.
std::cout << "Done." << std::endl;
}
#else
// Sliced/overlapped version: the N elements are split into 'slices' chunks,
// each chunk doing async H2D copy -> kernel -> async D2H copy on one of
// 'streamCount' round-robin streams, so copies and compute can overlap.
// Pinned host memory is required for the async copies to actually be async.
template <int K>
static void uploadProcessDownloadAsync(int N, int slices, int streamCount)
{
using T = Element<K>;
Saiga::pinned_vector<T> h_data(N);
// thrust::host_vector<T> h_data(N);
thrust::device_vector<T> d_data(N);
// size_t size = N * sizeof(T);
SAIGA_ASSERT(N % slices == 0);
int sliceN = N / slices;
size_t slizeSize = sliceN * sizeof(T);
// Create a separate stream for each slice for maximum parallelism
std::vector<Saiga::CUDA::CudaStream> streams(streamCount);
{
// ArrayViews simplify slice creation
ArrayView<T> vd(d_data);
ArrayView<T> vh(h_data);
Saiga::CUDA::ScopedTimerPrint tim("uploadProcessDownloadAsync " + std::to_string(slices));
for (int i = 0; i < slices; ++i)
{
// Pick current stream and slice
auto& stream = streams[i % streamCount];
auto d_slice = vd.slice_n(i * sliceN, sliceN);
auto h_slice = vh.slice_n(i * sliceN, sliceN);
// Compute launch arguments
const unsigned int BLOCK_SIZE = 128;
const unsigned int BLOCKS = Saiga::CUDA::getBlockCount(sliceN, BLOCK_SIZE);
hipMemcpyAsync(d_slice.data(), h_slice.data(), slizeSize, hipMemcpyHostToDevice, stream);
hipLaunchKernelGGL(( process<T>), dim3(BLOCKS), dim3(BLOCK_SIZE), 0, stream, d_slice);
hipMemcpyAsync(h_slice.data(), d_slice.data(), slizeSize, hipMemcpyDeviceToHost, stream);
}
}
}
// Benchmark driver: same total work each run, varying slice/stream counts to
// show how much overlap improves the end-to-end time.
int main(int argc, char* argv[])
{
Saiga::CUDA::initCUDA();
uploadProcessDownloadAsync<8>(1024 * 1024, 1, 1);
uploadProcessDownloadAsync<8>(1024 * 1024, 2, 2);
uploadProcessDownloadAsync<8>(1024 * 1024, 4, 4);
uploadProcessDownloadAsync<8>(1024 * 1024, 8, 8);
uploadProcessDownloadAsync<8>(1024 * 1024, 16, 16);
uploadProcessDownloadAsync<8>(1024 * 1024, 64, 8);
uploadProcessDownloadAsync<8>(1024 * 1024, 64, 64);
std::cout << "Done." << std::endl;
}
#endif
| 77890a5bb4e65ab120f7dc0a651f0401ea7384be.cu | /**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include <thrust/device_vector.h>
#include "saiga/cuda/CudaInfo.h"
#include "saiga/cuda/cudaHelper.h"
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/event.h"
#include "saiga/cuda/pinned_vector.h"
#include "saiga/cuda/stream.h"
#include "saiga/core/math/math.h"
#include "saiga/core/util/assert.h"
#include <iostream>
#include <vector>
using namespace Saiga;
using Saiga::ArrayView;
using Saiga::CUDA::ThreadInfo;
//#define LECTURE
// Compute-heavy dummy payload for the copy/compute-overlap benchmark:
// operator() performs K*512 dependent multiply-add updates on a vec4,
// giving the process kernel an arithmetic load tunable via K.
template <int K>
class Element
{
public:
vec4 data;
HD inline void operator()()
{
for (int k = 0; k < K * 512; ++k)
{
data = data * 3.1f + data;
}
}
};
// One thread per element: load, run the element's operator(), store back.
// Guarded against the grid tail, so any grid >= data.size() threads is valid.
template <typename T>
__global__ static void process(ArrayView<T> data)
{
ThreadInfo<> ti;
if (ti.thread_id >= data.size()) return;
T e = data[ti.thread_id];
e();
data[ti.thread_id] = e;
}
#ifdef LECTURE
// Baseline (lecture) version: one synchronous H2D copy, one kernel launch,
// one synchronous D2H copy on the default stream -- no copy/compute overlap.
// Timed as a whole by CudaScopedTimerPrint.
template <int K>
static void uploadProcessDownloadAsync(int N)
{
using T = Element<K>;
thrust::host_vector<T> h_data(N);
thrust::device_vector<T> d_data(N);
{
Saiga::CUDA::CudaScopedTimerPrint timer("process");
// Compute launch arguments
const unsigned int BLOCK_SIZE = 128;
const unsigned int BLOCKS = Saiga::CUDA::getBlockCount(N, BLOCK_SIZE);
cudaMemcpy(d_data.data().get(), h_data.data(), N * sizeof(T), cudaMemcpyHostToDevice);
process<T><<<BLOCKS, BLOCK_SIZE, 0>>>(d_data);
cudaMemcpy(h_data.data(), d_data.data().get(), N * sizeof(T), cudaMemcpyDeviceToHost);
}
}
// Entry point for the LECTURE build: run the synchronous baseline once.
int main(int argc, char* argv[])
{
uploadProcessDownloadAsync<8>(1024 * 1024);
// FIX: 'cout'/'endl' were unqualified, but only 'using namespace Saiga' is in
// scope in this file, so this (normally #ifdef'd-out) branch did not compile.
// The #else branch already uses the std:: qualification.
std::cout << "Done." << std::endl;
}
#else
// Sliced/overlapped version: the N elements are split into 'slices' chunks,
// each chunk doing async H2D copy -> kernel -> async D2H copy on one of
// 'streamCount' round-robin streams, so copies and compute can overlap.
// Pinned host memory is required for the async copies to actually be async.
template <int K>
static void uploadProcessDownloadAsync(int N, int slices, int streamCount)
{
using T = Element<K>;
Saiga::pinned_vector<T> h_data(N);
// thrust::host_vector<T> h_data(N);
thrust::device_vector<T> d_data(N);
// size_t size = N * sizeof(T);
SAIGA_ASSERT(N % slices == 0);
int sliceN = N / slices;
size_t slizeSize = sliceN * sizeof(T);
// Create a separate stream for each slice for maximum parallelism
std::vector<Saiga::CUDA::CudaStream> streams(streamCount);
{
// ArrayViews simplify slice creation
ArrayView<T> vd(d_data);
ArrayView<T> vh(h_data);
Saiga::CUDA::ScopedTimerPrint tim("uploadProcessDownloadAsync " + std::to_string(slices));
for (int i = 0; i < slices; ++i)
{
// Pick current stream and slice
auto& stream = streams[i % streamCount];
auto d_slice = vd.slice_n(i * sliceN, sliceN);
auto h_slice = vh.slice_n(i * sliceN, sliceN);
// Compute launch arguments
const unsigned int BLOCK_SIZE = 128;
const unsigned int BLOCKS = Saiga::CUDA::getBlockCount(sliceN, BLOCK_SIZE);
cudaMemcpyAsync(d_slice.data(), h_slice.data(), slizeSize, cudaMemcpyHostToDevice, stream);
process<T><<<BLOCKS, BLOCK_SIZE, 0, stream>>>(d_slice);
cudaMemcpyAsync(h_slice.data(), d_slice.data(), slizeSize, cudaMemcpyDeviceToHost, stream);
}
}
}
// Benchmark driver: same total work each run, varying slice/stream counts to
// show how much overlap improves the end-to-end time.
int main(int argc, char* argv[])
{
Saiga::CUDA::initCUDA();
uploadProcessDownloadAsync<8>(1024 * 1024, 1, 1);
uploadProcessDownloadAsync<8>(1024 * 1024, 2, 2);
uploadProcessDownloadAsync<8>(1024 * 1024, 4, 4);
uploadProcessDownloadAsync<8>(1024 * 1024, 8, 8);
uploadProcessDownloadAsync<8>(1024 * 1024, 16, 16);
uploadProcessDownloadAsync<8>(1024 * 1024, 64, 8);
uploadProcessDownloadAsync<8>(1024 * 1024, 64, 64);
std::cout << "Done." << std::endl;
}
#endif
|
afc505bf55e1ef2eb397402f89bc61db189bc4c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017, The OctNet authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "octnet/gpu/conv.h"
#include "octnet/gpu/gpu.h"
#include "octnet/gpu/oc2col.h"
#include "octnet/gpu/col2oc.h"
#include "octnet/gpu/buffer.h"
#include <thrust/fill.h>
#include <thrust/execution_policy.h>
// Debug helper: copy a rows x cols device matrix to the host and print it in
// row-major bracketed form.
// NOTE(review): the buffer is released with delete[] -- confirm
// device_to_host_malloc allocates with new[], not malloc().
void print_matrix_gpu(const ot_data_t* data_d, int rows, int cols) {
ot_data_t* data_h = device_to_host_malloc(data_d, rows*cols);
int idx = 0;
printf("[");
for(int row = 0; row < rows; ++row) {
if(row > 0) printf(" ");
printf("[ ");
for(int col = 0; col < cols; ++col) {
printf("%f", data_h[idx]);
idx++;
if(col < cols-1) {
printf(", ");
}
}
if(row < rows - 1) {
printf(" ], \n");
}
else {
printf(" ]] \n");
}
}
delete[] data_h;
}
// Add bias[f] to every output channel f of every leaf. out is laid out as
// n_leafs rows of channels_out contiguous values; one thread per leaf
// (grid-stride via CUDA_KERNEL_LOOP).
__global__ void kernel_conv_mm_add_bias(ot_data_t* out, int n_leafs, int channels_out, const ot_data_t* bias) {
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
for(int f = 0; f < channels_out; ++f) {
out[leaf_idx * channels_out + f] += bias[f];
}
}
}
// Forward 3x3x3 octree convolution expressed as a GEMM:
//   1. resize 'out' to channels_out features over the same tree structure,
//   2. unroll the input into a column buffer (oc2col_gpu),
//   3. out = weights^T * col_buffer via hipblasSgemm (BLAS is column-major),
//   4. add the bias with a small kernel.
void octree_conv_mm_gpu(hipblasHandle_t cublas_handle, const octree* in, const ot_data_t* weights, const ot_data_t* bias, int channels_out, octree* out) {
if(DEBUG) { printf("[DEBUG] octree_conv_mm_gpu\n"); }
octree_resize_gpu(in->n, in->grid_depth, in->grid_height, in->grid_width, channels_out, in->n_leafs, out);
octree_cpy_scalars(in, out);
out->feature_size = channels_out;
octree_cpy_trees_cpu_gpu(in, out);
octree_cpy_prefix_leafs_gpu_gpu(in, out);
// Shared singleton scratch buffer; resized to n_leafs x (27 * channels_in).
ot_data_t_buffer_gpu& col_buffer = ot_data_t_buffer_gpu::i();
col_buffer.resize(in->n_leafs * K333 * in->feature_size);
oc2col_gpu(in, col_buffer.data(), col_buffer.capacity());
float alpha = 1;
float beta = 0;
// GEMM dims: (channels_out x k) ^T-applied weights times (k x n_leafs) columns.
int m = out->feature_size;
int n = in->n_leafs;
int k = in->feature_size * K333;
CUBLAS_CHECK(
hipblasSgemm(
cublas_handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
m, n, k,
&alpha,
weights, k,
col_buffer.data(), k,
&beta,
out->data, m
)
);
// add bias
hipLaunchKernelGGL(( kernel_conv_mm_add_bias), dim3(GET_BLOCKS(out->n_leafs)), dim3(CUDA_NUM_THREADS), 0, 0,
out->data, out->n_leafs, out->feature_size, bias
);
CUDA_POST_KERNEL_CHECK;
// printf("--------- col_buffer ----------\n");
// print_matrix_gpu(col_buffer->data, in->n_leafs, K333*in->feature_size);
// printf("--------- weights ----------\n");
// print_matrix_gpu(weights, out->feature_size, K333*in->feature_size);
// printf("--------- out ----------\n");
// print_matrix_gpu(out->data, in->n_leafs, out->feature_size);
}
// Backward pass w.r.t. the input: grad_in (channels_in features) is obtained
// by multiplying the (untransposed) weights with grad_out and folding the
// resulting column buffer back onto the octree (col2oc_gpu).
void octree_conv_mm_bwd_gpu(hipblasHandle_t cublas_handle, const octree* grad_out, const ot_data_t* weights, int channels_in, octree* grad_in) {
if(DEBUG) { printf("[DEBUG] octree_conv_mm_bwd_gpu\n"); }
octree_resize_gpu(grad_out->n, grad_out->grid_depth, grad_out->grid_height, grad_out->grid_width, channels_in, grad_out->n_leafs, grad_in);
octree_cpy_scalars(grad_out, grad_in);
grad_in->feature_size = channels_in;
octree_cpy_trees_cpu_gpu(grad_out, grad_in);
octree_cpy_prefix_leafs_gpu_gpu(grad_out, grad_in);
ot_data_t_buffer_gpu& col_buffer = ot_data_t_buffer_gpu::i();
col_buffer.resize(grad_in->n_leafs * K333 * channels_in);
float alpha = 1;
float beta = 0;
// col_buffer = weights * grad_out (column-major GEMM).
int m = grad_in->feature_size * K333;
int n = grad_in->n_leafs;
int k = grad_out->feature_size;
CUBLAS_CHECK(
hipblasSgemm(
cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
m, n, k,
&alpha,
weights, m,
grad_out->data, k,
&beta,
col_buffer.data(), m
)
);
col2oc_gpu(col_buffer.data(), grad_in);
// printf("--------- weights ----------\n");
// print_matrix_gpu(weights, grad_out->feature_size, K333*grad_in->feature_size);
// printf("--------- grad_out ----------\n");
// print_matrix_gpu(grad_out->data, grad_in->n_leafs, grad_out->feature_size);
// printf("--------- col_buffer ----------\n");
// print_matrix_gpu(col_buffer->data, grad_in->n_leafs, K333*grad_in->feature_size);
}
// __global__ void kernel_conv_mm_set(ot_data_t* out, int N, const ot_data_t val) {
// CUDA_KERNEL_LOOP(idx, N) {
// // if(idx >= N) printf("[ERROR] idx >= N in kernel_conv_mm_set\n");
// out[idx] = val;
// }
// }
// Backward pass w.r.t. weights and bias. With beta = 1 both GEMM/GEMV calls
// ACCUMULATE (scaled by 'scale') into grad_weights / grad_bias, so callers
// must zero them beforehand if accumulation is not wanted.
//   grad_weights += scale * col(in) * grad_out^T
//   grad_bias    += scale * grad_out * ones(n_leafs)
void octree_conv_mm_wbwd_gpu(hipblasHandle_t cublas_handle, const octree* in, const octree* grad_out, const float scale, ot_data_t* grad_weights, ot_data_t* grad_bias) {
if(DEBUG) { printf("[DEBUG] octree_conv_mm_wbwd_gpu\n"); }
ot_data_t_buffer_gpu& col_buffer = ot_data_t_buffer_gpu::i();
col_buffer.resize(in->n_leafs * K333 * in->feature_size);
oc2col_gpu(in, col_buffer.data(), col_buffer.capacity());
float alpha = scale;
float beta = 1;
int m = in->feature_size * K333;
int n = grad_out->feature_size;
int k = in->n_leafs;
CUBLAS_CHECK(
hipblasSgemm(
cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_T,
m, n, k,
&alpha,
col_buffer.data(), m,
grad_out->data, n,
&beta,
grad_weights, m
)
);
if(DEBUG > 1) { printf("[DEBUG] octree_conv_mm_wbwd_gpu kernel_conv_mm_set call col_buffer.data=%p, col_buffer.capacity=%d\n", col_buffer.data(), col_buffer.capacity()); }
// kernel_conv_mm_set<<<GET_BLOCKS(in->n_leafs), CUDA_NUM_THREADS>>>(col_buffer->data, in->n_leafs, 1.f);
// kernel_conv_mm_set<<<GET_BLOCKS_T(col_buffer->capacity, 512), 512>>>(col_buffer->data, col_buffer->capacity, 1.f);
// CUDA_POST_KERNEL_CHECK;
// thrust::fill(thrust::device, col_buffer->data, col_buffer->data + col_buffer->capacity, 1.f);
// Reuse the first n_leafs entries of the scratch buffer as a ones-vector so
// the bias gradient reduces grad_out over leafs with a single GEMV.
thrust::fill(thrust::device, col_buffer.data(), col_buffer.data() + in->n_leafs, 1.f);
alpha = scale;
beta = 1;
m = grad_out->feature_size;
n = in->n_leafs;
CUBLAS_CHECK(
hipblasSgemv(
cublas_handle,
HIPBLAS_OP_N,
m, n,
&alpha,
grad_out->data, m,
col_buffer.data(), 1,
&beta,
grad_bias, 1
)
);
// printf("--------- grad_out ----------\n");
// print_matrix_gpu(grad_out->data, in->n_leafs, grad_out->feature_size);
// printf("--------- col_buffer ----------\n");
// print_matrix_gpu(col_buffer->data, in->n_leafs, 1);
// printf("--------- grad_bias ----------\n");
// print_matrix_gpu(grad_bias, grad_out->feature_size, 1);
}
| afc505bf55e1ef2eb397402f89bc61db189bc4c7.cu | // Copyright (c) 2017, The OctNet authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "octnet/gpu/conv.h"
#include "octnet/gpu/gpu.h"
#include "octnet/gpu/oc2col.h"
#include "octnet/gpu/col2oc.h"
#include "octnet/gpu/buffer.h"
#include <thrust/fill.h>
#include <thrust/execution_policy.h>
// Debug helper: copy a rows x cols device matrix to the host and print it in
// row-major bracketed form.
// NOTE(review): the buffer is released with delete[] -- confirm
// device_to_host_malloc allocates with new[], not malloc().
void print_matrix_gpu(const ot_data_t* data_d, int rows, int cols) {
ot_data_t* data_h = device_to_host_malloc(data_d, rows*cols);
int idx = 0;
printf("[");
for(int row = 0; row < rows; ++row) {
if(row > 0) printf(" ");
printf("[ ");
for(int col = 0; col < cols; ++col) {
printf("%f", data_h[idx]);
idx++;
if(col < cols-1) {
printf(", ");
}
}
if(row < rows - 1) {
printf(" ], \n");
}
else {
printf(" ]] \n");
}
}
delete[] data_h;
}
// Add bias[f] to every output channel f of every leaf. out is laid out as
// n_leafs rows of channels_out contiguous values; one thread per leaf
// (grid-stride via CUDA_KERNEL_LOOP).
__global__ void kernel_conv_mm_add_bias(ot_data_t* out, int n_leafs, int channels_out, const ot_data_t* bias) {
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
for(int f = 0; f < channels_out; ++f) {
out[leaf_idx * channels_out + f] += bias[f];
}
}
}
// Forward 3x3x3 octree convolution expressed as a GEMM:
//   1. resize 'out' to channels_out features over the same tree structure,
//   2. unroll the input into a column buffer (oc2col_gpu),
//   3. out = weights^T * col_buffer via cublasSgemm (cuBLAS is column-major),
//   4. add the bias with a small kernel.
void octree_conv_mm_gpu(cublasHandle_t cublas_handle, const octree* in, const ot_data_t* weights, const ot_data_t* bias, int channels_out, octree* out) {
if(DEBUG) { printf("[DEBUG] octree_conv_mm_gpu\n"); }
octree_resize_gpu(in->n, in->grid_depth, in->grid_height, in->grid_width, channels_out, in->n_leafs, out);
octree_cpy_scalars(in, out);
out->feature_size = channels_out;
octree_cpy_trees_cpu_gpu(in, out);
octree_cpy_prefix_leafs_gpu_gpu(in, out);
// Shared singleton scratch buffer; resized to n_leafs x (27 * channels_in).
ot_data_t_buffer_gpu& col_buffer = ot_data_t_buffer_gpu::i();
col_buffer.resize(in->n_leafs * K333 * in->feature_size);
oc2col_gpu(in, col_buffer.data(), col_buffer.capacity());
float alpha = 1;
float beta = 0;
// GEMM dims: (channels_out x k) ^T-applied weights times (k x n_leafs) columns.
int m = out->feature_size;
int n = in->n_leafs;
int k = in->feature_size * K333;
CUBLAS_CHECK(
cublasSgemm(
cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
m, n, k,
&alpha,
weights, k,
col_buffer.data(), k,
&beta,
out->data, m
)
);
// add bias
kernel_conv_mm_add_bias<<<GET_BLOCKS(out->n_leafs), CUDA_NUM_THREADS>>>(
out->data, out->n_leafs, out->feature_size, bias
);
CUDA_POST_KERNEL_CHECK;
// printf("--------- col_buffer ----------\n");
// print_matrix_gpu(col_buffer->data, in->n_leafs, K333*in->feature_size);
// printf("--------- weights ----------\n");
// print_matrix_gpu(weights, out->feature_size, K333*in->feature_size);
// printf("--------- out ----------\n");
// print_matrix_gpu(out->data, in->n_leafs, out->feature_size);
}
// Backward pass w.r.t. the input: grad_in (channels_in features) is obtained
// by multiplying the (untransposed) weights with grad_out and folding the
// resulting column buffer back onto the octree (col2oc_gpu).
void octree_conv_mm_bwd_gpu(cublasHandle_t cublas_handle, const octree* grad_out, const ot_data_t* weights, int channels_in, octree* grad_in) {
if(DEBUG) { printf("[DEBUG] octree_conv_mm_bwd_gpu\n"); }
octree_resize_gpu(grad_out->n, grad_out->grid_depth, grad_out->grid_height, grad_out->grid_width, channels_in, grad_out->n_leafs, grad_in);
octree_cpy_scalars(grad_out, grad_in);
grad_in->feature_size = channels_in;
octree_cpy_trees_cpu_gpu(grad_out, grad_in);
octree_cpy_prefix_leafs_gpu_gpu(grad_out, grad_in);
ot_data_t_buffer_gpu& col_buffer = ot_data_t_buffer_gpu::i();
col_buffer.resize(grad_in->n_leafs * K333 * channels_in);
float alpha = 1;
float beta = 0;
// col_buffer = weights * grad_out (column-major GEMM).
int m = grad_in->feature_size * K333;
int n = grad_in->n_leafs;
int k = grad_out->feature_size;
CUBLAS_CHECK(
cublasSgemm(
cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
m, n, k,
&alpha,
weights, m,
grad_out->data, k,
&beta,
col_buffer.data(), m
)
);
col2oc_gpu(col_buffer.data(), grad_in);
// printf("--------- weights ----------\n");
// print_matrix_gpu(weights, grad_out->feature_size, K333*grad_in->feature_size);
// printf("--------- grad_out ----------\n");
// print_matrix_gpu(grad_out->data, grad_in->n_leafs, grad_out->feature_size);
// printf("--------- col_buffer ----------\n");
// print_matrix_gpu(col_buffer->data, grad_in->n_leafs, K333*grad_in->feature_size);
}
// __global__ void kernel_conv_mm_set(ot_data_t* out, int N, const ot_data_t val) {
// CUDA_KERNEL_LOOP(idx, N) {
// // if(idx >= N) printf("[ERROR] idx >= N in kernel_conv_mm_set\n");
// out[idx] = val;
// }
// }
// Backward pass w.r.t. weights and bias. With beta = 1 both GEMM/GEMV calls
// ACCUMULATE (scaled by 'scale') into grad_weights / grad_bias, so callers
// must zero them beforehand if accumulation is not wanted.
//   grad_weights += scale * col(in) * grad_out^T
//   grad_bias    += scale * grad_out * ones(n_leafs)
void octree_conv_mm_wbwd_gpu(cublasHandle_t cublas_handle, const octree* in, const octree* grad_out, const float scale, ot_data_t* grad_weights, ot_data_t* grad_bias) {
if(DEBUG) { printf("[DEBUG] octree_conv_mm_wbwd_gpu\n"); }
ot_data_t_buffer_gpu& col_buffer = ot_data_t_buffer_gpu::i();
col_buffer.resize(in->n_leafs * K333 * in->feature_size);
oc2col_gpu(in, col_buffer.data(), col_buffer.capacity());
float alpha = scale;
float beta = 1;
int m = in->feature_size * K333;
int n = grad_out->feature_size;
int k = in->n_leafs;
CUBLAS_CHECK(
cublasSgemm(
cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_T,
m, n, k,
&alpha,
col_buffer.data(), m,
grad_out->data, n,
&beta,
grad_weights, m
)
);
if(DEBUG > 1) { printf("[DEBUG] octree_conv_mm_wbwd_gpu kernel_conv_mm_set call col_buffer.data=%p, col_buffer.capacity=%d\n", col_buffer.data(), col_buffer.capacity()); }
// kernel_conv_mm_set<<<GET_BLOCKS(in->n_leafs), CUDA_NUM_THREADS>>>(col_buffer->data, in->n_leafs, 1.f);
// kernel_conv_mm_set<<<GET_BLOCKS_T(col_buffer->capacity, 512), 512>>>(col_buffer->data, col_buffer->capacity, 1.f);
// CUDA_POST_KERNEL_CHECK;
// thrust::fill(thrust::device, col_buffer->data, col_buffer->data + col_buffer->capacity, 1.f);
// Reuse the first n_leafs entries of the scratch buffer as a ones-vector so
// the bias gradient reduces grad_out over leafs with a single GEMV.
thrust::fill(thrust::device, col_buffer.data(), col_buffer.data() + in->n_leafs, 1.f);
alpha = scale;
beta = 1;
m = grad_out->feature_size;
n = in->n_leafs;
CUBLAS_CHECK(
cublasSgemv(
cublas_handle,
CUBLAS_OP_N,
m, n,
&alpha,
grad_out->data, m,
col_buffer.data(), 1,
&beta,
grad_bias, 1
)
);
// printf("--------- grad_out ----------\n");
// print_matrix_gpu(grad_out->data, in->n_leafs, grad_out->feature_size);
// printf("--------- col_buffer ----------\n");
// print_matrix_gpu(col_buffer->data, in->n_leafs, 1);
// printf("--------- grad_bias ----------\n");
// print_matrix_gpu(grad_bias, grad_out->feature_size, 1);
}
|
1577d978396097f57b855de9cec1c1a7a8de89d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// 1-D stencil using a shared-memory staging buffer.
// Expects blockDim.x == 1024. Each block cooperatively stages its tile plus a
// halo from src, then each thread forms a weighted sum over taps
// i in [-raio, raio) with the __constant__ weights const_stencilWeight
// (declared in includes.h).
// FIX 1: the shared buffer was declared [1024+11] while the staging loop below
// writes indices up to 1024+20 -- an out-of-bounds shared-memory write. The
// buffer is now sized to match the loop (valid for raio <= 11; assumes the
// caller keeps raio within that -- TODO confirm).
// FIX 2: __syncthreads() was placed after a divergent early 'return', so not
// all threads of the block reached the barrier; the barrier now precedes the
// bounds check.
// NOTE(review): src[idx+i] near the upper boundary can read past src --
// confirm the caller over-allocates src by the halo width.
__global__ void stencilShared1(float *src, float *dst, int size, int raio)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float buffer[1024+21];
for(int i = threadIdx.x; i < 1024+21; i = i + 1024)
{
buffer[i] = src[idx+i];
}
__syncthreads();
idx += raio+1;
if (idx >= size)
return;
float out = 0;
#pragma unroll
for(int i = -raio;i < raio; i++)
{
out += buffer[threadIdx.x+raio+i] * const_stencilWeight[i+raio];
}
dst[idx] = out;
} | 1577d978396097f57b855de9cec1c1a7a8de89d2.cu | #include "includes.h"
// 1-D stencil using a shared-memory staging buffer.
// Expects blockDim.x == 1024. Each block cooperatively stages its tile plus a
// halo from src, then each thread forms a weighted sum over taps
// i in [-raio, raio) with the __constant__ weights const_stencilWeight
// (declared in includes.h).
// FIX 1: the shared buffer was declared [1024+11] while the staging loop below
// writes indices up to 1024+20 -- an out-of-bounds shared-memory write. The
// buffer is now sized to match the loop (valid for raio <= 11; assumes the
// caller keeps raio within that -- TODO confirm).
// FIX 2: __syncthreads() was placed after a divergent early 'return', so not
// all threads of the block reached the barrier; the barrier now precedes the
// bounds check.
// NOTE(review): src[idx+i] near the upper boundary can read past src --
// confirm the caller over-allocates src by the halo width.
__global__ void stencilShared1(float *src, float *dst, int size, int raio)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float buffer[1024+21];
for(int i = threadIdx.x; i < 1024+21; i = i + 1024)
{
buffer[i] = src[idx+i];
}
__syncthreads();
idx += raio+1;
if (idx >= size)
return;
float out = 0;
#pragma unroll
for(int i = -raio;i < raio; i++)
{
out += buffer[threadIdx.x+raio+i] * const_stencilWeight[i+raio];
}
dst[idx] = out;
} |
53ebb0f819bee995aabdb510558b646576ccb4e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#include <stdbool.h>
#include <malloc.h>
//#include <stdlib.h>
#define nExperiments 5
// Print the HIP error string with its source location and abort the process.
// Invoked through the HANDLE_ERROR macro defined below this function.
static void HandleError(hipError_t err, const char *file, int line)
{
if (err != hipSuccess)
{
printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Allocate and fill a square *MatrixSize x *MatrixSize matrix.
// ZerosMatrix == true  -> every entry is 0
// ZerosMatrix == false -> pseudo-random entries in [-99, 100] via rand()
// The caller owns the returned buffer and must free() it.
int* CreateMatrix(int *MatrixSize, bool ZerosMatrix)
{
const int n = *MatrixSize;
const int total = n * n;
int *matrix = (int*)malloc(total * sizeof(int));
if (ZerosMatrix)
{
for (int idx = 0; idx < total; ++idx)
{
matrix[idx] = 0;
}
}
else
{
for (int idx = 0; idx < total; ++idx)
{
matrix[idx] = rand() % 200 - 99;
}
}
return matrix;
}
// Reference n x n integer matrix multiply on the host: c = a * b (row-major).
// FIX: the accumulator was a float, which silently loses precision for integer
// dot products whose magnitude exceeds 2^24; accumulate in a 64-bit integer
// instead and truncate to int only on store.
void MatrixMultiplicationCPU(int *a, int *b, int *c, int n)
{
for (int i = 0; i < n; ++i)
{
for (int j = 0; j < n; ++j)
{
long long temp = 0;
for (int k = 0; k < n; ++k)
{
temp += (long long)a[i * n + k] * b[k * n + j];
}
c[i * n + j] = (int)temp;
}
}
}
// One thread per output element of c = a * b (n x n, row-major); guarded
// against the grid tail for n not a multiple of the block dimension.
// FIX: the accumulator was a float, which silently loses precision for integer
// dot products above 2^24; accumulate in a 64-bit integer (consistent with the
// CPU reference implementation) and truncate to int only on store.
__global__ void MatrixMultiplicationGPU(int *a, int *b, int *c, int n)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n)
{
long long temp = 0;
for (int i = 0; i < n; ++i)
{
temp += (long long)a[row * n + i] * b[i * n + col];
}
c[row * n + col] = (int)temp;
}
}
// Interactive benchmark: reads a device selector and matrix size from stdin,
// then runs nExperiments rounds of N x N multiplication on GPU and/or CPU with
// fresh random inputs each round, and prints per-round and average timings.
// NOTE(review): both scanf results are unchecked; invalid input leaves
// deviceToComputing/N indeterminate.
int main(void)
{
char deviceToComputing;
printf("Choose the device to computing: only GPU[g], only CPU[c], both[_] ");
scanf("%c", &deviceToComputing);
// printf("Choose the device to computing: only [%c] ", &deviceToComputing);
int N;
printf("Matrix size: ");
scanf("%d", &N);
int *A = NULL, *B = NULL, *C,
//int *A, *B, *C,
*A_device, *B_device, *C_device;
clock_t startCPU, stopCPU;
double timeCPU[nExperiments], resultCPU = 0;
float timeGPU[nExperiments], resultGPU = 0;
// 32 x 32 thread blocks; grid rounded up so the kernel's bounds check
// handles N not divisible by 32.
int threadsPerBlockDim = 32;
dim3 blockDim(threadsPerBlockDim, threadsPerBlockDim, 1);
int blocksPerGridDimX = ceilf(N / (float)threadsPerBlockDim);
int blocksPerGridDimY = ceilf(N / (float)threadsPerBlockDim);
dim3 gridDim(blocksPerGridDimX, blocksPerGridDimY, 1);
for (int i = 0; i < nExperiments; ++i)
{
A = CreateMatrix(&N, false);
B = CreateMatrix(&N, false);
C = CreateMatrix(&N, true);
if (deviceToComputing != 'c' && deviceToComputing != 'C')
{
//Allocate the memory on the GPU.
HANDLE_ERROR(hipMalloc((void**)&A_device, N * N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&B_device, N * N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&C_device, N * N * sizeof(int)));
//Copy the arrays 'A' and 'B' to the GPU.
HANDLE_ERROR(hipMemcpy(A_device, A, N * N * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(B_device, B, N * N * sizeof(int), hipMemcpyHostToDevice));
// Time only the kernel with device events (elapsed time is milliseconds).
hipEvent_t startGPU, stopGPU;
timeGPU[i] = 0.0f;
hipEventCreate(&startGPU);
hipEventCreate(&stopGPU);
hipEventRecord(startGPU, 0);
hipLaunchKernelGGL(( MatrixMultiplicationGPU), dim3(gridDim), dim3(blockDim), 0, 0, A_device, B_device, C_device, N);
hipEventRecord(stopGPU, 0);
hipEventSynchronize(stopGPU);
hipEventElapsedTime(&timeGPU[i], startGPU, stopGPU);
hipEventDestroy(startGPU);
hipEventDestroy(stopGPU);
//Copy the array 'C' back from the GPU to the CPU.
HANDLE_ERROR(hipMemcpy(C, C_device, N * N * sizeof(int), hipMemcpyDeviceToHost));
printf("Time on GPU = %f seconds.\n", timeGPU[i]/1000);
//Free the memory allocated on the GPU.
HANDLE_ERROR(hipFree(A_device));
HANDLE_ERROR(hipFree(B_device));
HANDLE_ERROR(hipFree(C_device));
}
if (deviceToComputing != 'g' && deviceToComputing != 'G')
{
// CPU run overwrites C; results are not compared against the GPU output.
startCPU = clock();
MatrixMultiplicationCPU(A, B, C, N);
stopCPU = clock();
timeCPU[i] = (double)(stopCPU - startCPU) / CLOCKS_PER_SEC;
printf("Time on CPU = %lf seconds.\n", timeCPU[i]);
}
free(A);
free(B);
free(C);
}
if (deviceToComputing != 'c' && deviceToComputing != 'C')
{
for (int i = 0; i < nExperiments; ++i)
{
resultGPU += timeGPU[i];
}
// Event times are in milliseconds; the extra /1000 converts to seconds.
resultGPU /= nExperiments * 1000;
printf("Average execution time on the GPU: %f.\n", resultGPU);
}
if (deviceToComputing != 'g' && deviceToComputing != 'G')
{
for (int i = 0; i < nExperiments; ++i)
{
resultCPU += timeCPU[i];
}
resultCPU /= nExperiments;
printf("Average execution time on the CPU: %lf.\n", resultCPU);
}
}
| 53ebb0f819bee995aabdb510558b646576ccb4e8.cu | #include <stdio.h>
#include <time.h>
#include <stdbool.h>
#include <malloc.h>
//#include <stdlib.h>
#define nExperiments 5
// Print the CUDA error string with its source location and abort the process.
// Invoked through the HANDLE_ERROR macro defined below this function.
static void HandleError(cudaError_t err, const char *file, int line)
{
if (err != cudaSuccess)
{
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Allocate and fill a square *MatrixSize x *MatrixSize matrix.
// ZerosMatrix == true  -> every entry is 0
// ZerosMatrix == false -> pseudo-random entries in [-99, 100] via rand()
// The caller owns the returned buffer and must free() it.
int* CreateMatrix(int *MatrixSize, bool ZerosMatrix)
{
const int n = *MatrixSize;
const int total = n * n;
int *matrix = (int*)malloc(total * sizeof(int));
if (ZerosMatrix)
{
for (int idx = 0; idx < total; ++idx)
{
matrix[idx] = 0;
}
}
else
{
for (int idx = 0; idx < total; ++idx)
{
matrix[idx] = rand() % 200 - 99;
}
}
return matrix;
}
// Reference n x n integer matrix multiply on the host: c = a * b (row-major).
// FIX: the accumulator was a float, which silently loses precision for integer
// dot products whose magnitude exceeds 2^24; accumulate in a 64-bit integer
// instead and truncate to int only on store.
void MatrixMultiplicationCPU(int *a, int *b, int *c, int n)
{
for (int i = 0; i < n; ++i)
{
for (int j = 0; j < n; ++j)
{
long long temp = 0;
for (int k = 0; k < n; ++k)
{
temp += (long long)a[i * n + k] * b[k * n + j];
}
c[i * n + j] = (int)temp;
}
}
}
// One thread per output element of c = a * b (n x n, row-major); guarded
// against the grid tail for n not a multiple of the block dimension.
// FIX: the accumulator was a float, which silently loses precision for integer
// dot products above 2^24; accumulate in a 64-bit integer (consistent with the
// CPU reference implementation) and truncate to int only on store.
__global__ void MatrixMultiplicationGPU(int *a, int *b, int *c, int n)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n && col < n)
{
long long temp = 0;
for (int i = 0; i < n; ++i)
{
temp += (long long)a[row * n + i] * b[i * n + col];
}
c[row * n + col] = (int)temp;
}
}
// Interactive benchmark: reads a device selector and matrix size from stdin,
// then runs nExperiments rounds of N x N multiplication on GPU and/or CPU with
// fresh random inputs each round, and prints per-round and average timings.
// NOTE(review): both scanf results are unchecked; invalid input leaves
// deviceToComputing/N indeterminate.
int main(void)
{
char deviceToComputing;
printf("Choose the device to computing: only GPU[g], only CPU[c], both[_] ");
scanf("%c", &deviceToComputing);
// printf("Choose the device to computing: only [%c] ", &deviceToComputing);
int N;
printf("Matrix size: ");
scanf("%d", &N);
int *A = NULL, *B = NULL, *C,
//int *A, *B, *C,
*A_device, *B_device, *C_device;
clock_t startCPU, stopCPU;
double timeCPU[nExperiments], resultCPU = 0;
float timeGPU[nExperiments], resultGPU = 0;
// 32 x 32 thread blocks; grid rounded up so the kernel's bounds check
// handles N not divisible by 32.
int threadsPerBlockDim = 32;
dim3 blockDim(threadsPerBlockDim, threadsPerBlockDim, 1);
int blocksPerGridDimX = ceilf(N / (float)threadsPerBlockDim);
int blocksPerGridDimY = ceilf(N / (float)threadsPerBlockDim);
dim3 gridDim(blocksPerGridDimX, blocksPerGridDimY, 1);
for (int i = 0; i < nExperiments; ++i)
{
A = CreateMatrix(&N, false);
B = CreateMatrix(&N, false);
C = CreateMatrix(&N, true);
if (deviceToComputing != 'c' && deviceToComputing != 'C')
{
//Allocate the memory on the GPU.
HANDLE_ERROR(cudaMalloc((void**)&A_device, N * N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&B_device, N * N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&C_device, N * N * sizeof(int)));
//Copy the arrays 'A' and 'B' to the GPU.
HANDLE_ERROR(cudaMemcpy(A_device, A, N * N * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(B_device, B, N * N * sizeof(int), cudaMemcpyHostToDevice));
// Time only the kernel with device events (elapsed time is milliseconds).
cudaEvent_t startGPU, stopGPU;
timeGPU[i] = 0.0f;
cudaEventCreate(&startGPU);
cudaEventCreate(&stopGPU);
cudaEventRecord(startGPU, 0);
MatrixMultiplicationGPU<<<gridDim, blockDim>>>(A_device, B_device, C_device, N);
cudaEventRecord(stopGPU, 0);
cudaEventSynchronize(stopGPU);
cudaEventElapsedTime(&timeGPU[i], startGPU, stopGPU);
cudaEventDestroy(startGPU);
cudaEventDestroy(stopGPU);
//Copy the array 'C' back from the GPU to the CPU.
HANDLE_ERROR(cudaMemcpy(C, C_device, N * N * sizeof(int), cudaMemcpyDeviceToHost));
printf("Time on GPU = %f seconds.\n", timeGPU[i]/1000);
//Free the memory allocated on the GPU.
HANDLE_ERROR(cudaFree(A_device));
HANDLE_ERROR(cudaFree(B_device));
HANDLE_ERROR(cudaFree(C_device));
}
if (deviceToComputing != 'g' && deviceToComputing != 'G')
{
// CPU run overwrites C; results are not compared against the GPU output.
startCPU = clock();
MatrixMultiplicationCPU(A, B, C, N);
stopCPU = clock();
timeCPU[i] = (double)(stopCPU - startCPU) / CLOCKS_PER_SEC;
printf("Time on CPU = %lf seconds.\n", timeCPU[i]);
}
free(A);
free(B);
free(C);
}
if (deviceToComputing != 'c' && deviceToComputing != 'C')
{
for (int i = 0; i < nExperiments; ++i)
{
resultGPU += timeGPU[i];
}
// Event times are in milliseconds; the extra /1000 converts to seconds.
resultGPU /= nExperiments * 1000;
printf("Average execution time on the GPU: %f.\n", resultGPU);
}
if (deviceToComputing != 'g' && deviceToComputing != 'G')
{
for (int i = 0; i < nExperiments; ++i)
{
resultCPU += timeCPU[i];
}
resultCPU /= nExperiments;
printf("Average execution time on the CPU: %lf.\n", resultCPU);
}
}
|
f264ef3ae2fe30f411a6f7c6cff1b3202e1aa4b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
// Per-argument [xdim, ydim] sizes of each dat, held in device constant memory;
// the _h array caches the last values uploaded so the host stub can skip
// redundant hipMemcpyToSymbol calls when the dims have not changed.
__constant__ int dims_update_halo_kernel1_fr1 [8][2];
static int dims_update_halo_kernel1_fr1_h [8][2] = {0};
//user function
__device__
// For every field whose flag is set in `fields`, copies the value from the
// neighbouring plane at z-offset -1 into the current point (0,0,0) — i.e.
// fills one halo layer in the z-direction for the selected fields.
inline void update_halo_kernel1_fr1_gpu(ACC<double> &density0,
  ACC<double> &density1,
  ACC<double> &energy0,
  ACC<double> &energy1,
  ACC<double> &pressure,
  ACC<double> &viscosity,
  ACC<double> &soundspeed,
  const int* fields) {
  if(fields[FIELD_DENSITY0] == 1) density0(0,0,0) = density0(0,0,-1);
  if(fields[FIELD_DENSITY1] == 1) density1(0,0,0) = density1(0,0,-1);
  if(fields[FIELD_ENERGY0] == 1) energy0(0,0,0) = energy0(0,0,-1);
  if(fields[FIELD_ENERGY1] == 1) energy1(0,0,0) = energy1(0,0,-1);
  if(fields[FIELD_PRESSURE] == 1) pressure(0,0,0) = pressure(0,0,-1);
  if(fields[FIELD_VISCOSITY] == 1) viscosity(0,0,0) = viscosity(0,0,-1);
  if(fields[FIELD_SOUNDSPEED] == 1) soundspeed(0,0,0) = soundspeed(0,0,-1);
}
// Kernel wrapper generated by ops.py: each thread handles one point of the
// (size0 x size1 x size2) iteration space. Every arg pointer is advanced to
// this thread's element using the per-arg x/y dims from constant memory, then
// wrapped in an ACC<> accessor before calling the user function.
__global__ void ops_update_halo_kernel1_fr1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  // Offset = x + y*xdim + z*xdim*ydim for each dat (unit stride, 1 component).
  arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[0][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[0][0] * dims_update_halo_kernel1_fr1[0][1];
  arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[1][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[1][0] * dims_update_halo_kernel1_fr1[1][1];
  arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[2][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[2][0] * dims_update_halo_kernel1_fr1[2][1];
  arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[3][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[3][0] * dims_update_halo_kernel1_fr1[3][1];
  arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[4][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[4][0] * dims_update_halo_kernel1_fr1[4][1];
  arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[5][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[5][0] * dims_update_halo_kernel1_fr1[5][1];
  arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[6][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[6][0] * dims_update_halo_kernel1_fr1[6][1];
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    ACC<double> argp0(dims_update_halo_kernel1_fr1[0][0], dims_update_halo_kernel1_fr1[0][1], arg0);
    ACC<double> argp1(dims_update_halo_kernel1_fr1[1][0], dims_update_halo_kernel1_fr1[1][1], arg1);
    ACC<double> argp2(dims_update_halo_kernel1_fr1[2][0], dims_update_halo_kernel1_fr1[2][1], arg2);
    ACC<double> argp3(dims_update_halo_kernel1_fr1[3][0], dims_update_halo_kernel1_fr1[3][1], arg3);
    ACC<double> argp4(dims_update_halo_kernel1_fr1[4][0], dims_update_halo_kernel1_fr1[4][1], arg4);
    ACC<double> argp5(dims_update_halo_kernel1_fr1[5][0], dims_update_halo_kernel1_fr1[5][1], arg5);
    ACC<double> argp6(dims_update_halo_kernel1_fr1[6][0], dims_update_halo_kernel1_fr1[6][1], arg6);
    update_halo_kernel1_fr1_gpu(argp0, argp1, argp2, argp3,
                           argp4, argp5, argp6, arg7);
  }
}
// host stub function
// Host stub generated by ops.py. Without OPS_LAZY it is the user-facing loop
// entry point; with OPS_LAZY it is the deferred executor unpacking a kernel
// descriptor. It computes the local range, refreshes constant-memory dims,
// stages the NUM_FIELDS flags on the device, derives base pointers for the
// 7 dats, launches the kernel, and updates halo dirty bits and diagnostics.
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_fr1(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
 ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_fr1_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  #if OPS_MPI
  ops_block block = desc->block;
  #endif
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
  ops_arg arg6 = desc->args[6];
  ops_arg arg7 = desc->args[7];
  #endif
  //Timing
  double t1,t2,c1,c2;
  ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
  #if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args,8,range,22)) return;
  #endif
  if (OPS_diags > 1) {
    ops_timing_realloc(22,"update_halo_kernel1_fr1");
    OPS_kernels[22].count++;
    ops_timers_core(&c1,&t1);
  }
  //compute locally allocated range for the sub-block
  int start[3];
  int end[3];
  #if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  #endif //OPS_MPI
  #ifdef OPS_MPI
  int arg_idx[3];
  #endif
  #ifdef OPS_MPI
  if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return;
  #else //OPS_MPI
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif
  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0];
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0];
  int ydim3 = args[3].dat->size[1];
  int xdim4 = args[4].dat->size[0];
  int ydim4 = args[4].dat->size[1];
  int xdim5 = args[5].dat->size[0];
  int ydim5 = args[5].dat->size[1];
  int xdim6 = args[6].dat->size[0];
  int ydim6 = args[6].dat->size[1];
  // Re-upload the dims to constant memory only when any of them changed.
  if (xdim0 != dims_update_halo_kernel1_fr1_h[0][0] || ydim0 != dims_update_halo_kernel1_fr1_h[0][1] || xdim1 != dims_update_halo_kernel1_fr1_h[1][0] || ydim1 != dims_update_halo_kernel1_fr1_h[1][1] || xdim2 != dims_update_halo_kernel1_fr1_h[2][0] || ydim2 != dims_update_halo_kernel1_fr1_h[2][1] || xdim3 != dims_update_halo_kernel1_fr1_h[3][0] || ydim3 != dims_update_halo_kernel1_fr1_h[3][1] || xdim4 != dims_update_halo_kernel1_fr1_h[4][0] || ydim4 != dims_update_halo_kernel1_fr1_h[4][1] || xdim5 != dims_update_halo_kernel1_fr1_h[5][0] || ydim5 != dims_update_halo_kernel1_fr1_h[5][1] || xdim6 != dims_update_halo_kernel1_fr1_h[6][0] || ydim6 != dims_update_halo_kernel1_fr1_h[6][1]) {
    dims_update_halo_kernel1_fr1_h[0][0] = xdim0;
    dims_update_halo_kernel1_fr1_h[0][1] = ydim0;
    dims_update_halo_kernel1_fr1_h[1][0] = xdim1;
    dims_update_halo_kernel1_fr1_h[1][1] = ydim1;
    dims_update_halo_kernel1_fr1_h[2][0] = xdim2;
    dims_update_halo_kernel1_fr1_h[2][1] = ydim2;
    dims_update_halo_kernel1_fr1_h[3][0] = xdim3;
    dims_update_halo_kernel1_fr1_h[3][1] = ydim3;
    dims_update_halo_kernel1_fr1_h[4][0] = xdim4;
    dims_update_halo_kernel1_fr1_h[4][1] = ydim4;
    dims_update_halo_kernel1_fr1_h[5][0] = xdim5;
    dims_update_halo_kernel1_fr1_h[5][1] = ydim5;
    dims_update_halo_kernel1_fr1_h[6][0] = xdim6;
    dims_update_halo_kernel1_fr1_h[6][1] = ydim6;
    cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel1_fr1, dims_update_halo_kernel1_fr1_h, sizeof(dims_update_halo_kernel1_fr1)));
  }
  int *arg7h = (int *)arg7.data;
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);
  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
  // Stage the NUM_FIELDS flag array into OPS's device constants buffer.
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg7.data = OPS_consts_h + consts_bytes;
  arg7.data_d = OPS_consts_d + consts_bytes;
  for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  mvConstArraysToDevice(consts_bytes);
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
  int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
  char *p_a[8];
  //set up initial pointers
  int base0 = args[0].dat->base_offset +
           dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    (start[1] * args[0].stencil->stride[1]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    args[0].dat->size[1] *
    (start[2] * args[0].stencil->stride[2]);
  p_a[0] = (char *)args[0].data_d + base0;
  int base1 = args[1].dat->base_offset +
           dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    (start[1] * args[1].stencil->stride[1]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    args[1].dat->size[1] *
    (start[2] * args[1].stencil->stride[2]);
  p_a[1] = (char *)args[1].data_d + base1;
  int base2 = args[2].dat->base_offset +
           dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2+ dat2 *
    args[2].dat->size[0] *
    (start[1] * args[2].stencil->stride[1]);
  base2 = base2+ dat2 *
    args[2].dat->size[0] *
    args[2].dat->size[1] *
    (start[2] * args[2].stencil->stride[2]);
  p_a[2] = (char *)args[2].data_d + base2;
  int base3 = args[3].dat->base_offset +
           dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3+ dat3 *
    args[3].dat->size[0] *
    (start[1] * args[3].stencil->stride[1]);
  base3 = base3+ dat3 *
    args[3].dat->size[0] *
    args[3].dat->size[1] *
    (start[2] * args[3].stencil->stride[2]);
  p_a[3] = (char *)args[3].data_d + base3;
  int base4 = args[4].dat->base_offset +
           dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4+ dat4 *
    args[4].dat->size[0] *
    (start[1] * args[4].stencil->stride[1]);
  base4 = base4+ dat4 *
    args[4].dat->size[0] *
    args[4].dat->size[1] *
    (start[2] * args[4].stencil->stride[2]);
  p_a[4] = (char *)args[4].data_d + base4;
  int base5 = args[5].dat->base_offset +
           dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
  base5 = base5+ dat5 *
    args[5].dat->size[0] *
    (start[1] * args[5].stencil->stride[1]);
  base5 = base5+ dat5 *
    args[5].dat->size[0] *
    args[5].dat->size[1] *
    (start[2] * args[5].stencil->stride[2]);
  p_a[5] = (char *)args[5].data_d + base5;
  int base6 = args[6].dat->base_offset +
           dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
  base6 = base6+ dat6 *
    args[6].dat->size[0] *
    (start[1] * args[6].stencil->stride[1]);
  base6 = base6+ dat6 *
    args[6].dat->size[0] *
    args[6].dat->size[1] *
    (start[2] * args[6].stencil->stride[2]);
  p_a[6] = (char *)args[6].data_d + base6;
  #ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 8);
  ops_halo_exchanges(args,8,range);
  #endif
  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[22].mpi_time += t2-t1;
  }
  //call kernel wrapper function, passing in pointers to data
  if (x_size > 0 && y_size > 0 && z_size > 0)
    hipLaunchKernelGGL(( ops_update_halo_kernel1_fr1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
           (double *)p_a[2], (double *)p_a[3],
           (double *)p_a[4], (double *)p_a[5],
           (double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
  cutilSafeCall(hipGetLastError());
  if (OPS_diags>1) {
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[22].time += t1-t2;
  }
  #ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 8);
  ops_set_halo_dirtybit3(&args[0],range);
  ops_set_halo_dirtybit3(&args[1],range);
  ops_set_halo_dirtybit3(&args[2],range);
  ops_set_halo_dirtybit3(&args[3],range);
  ops_set_halo_dirtybit3(&args[4],range);
  ops_set_halo_dirtybit3(&args[5],range);
  ops_set_halo_dirtybit3(&args[6],range);
  #endif
  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[22].mpi_time += t2-t1;
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg4);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg5);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg6);
  }
}
#ifdef OPS_LAZY
// Lazy-execution entry point: packages the loop arguments into a kernel
// descriptor (hashing kernel index, range, and dat indices), deep-copies the
// fields array (deferred execution outlives the caller's buffer), and
// enqueues the descriptor for later execution by ..._execute above.
void ops_par_loop_update_halo_kernel1_fr1(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 22;
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 22;
  for ( int i=0; i<6; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 8;
  desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
  desc->args[6] = arg6;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
  desc->args[7] = arg7;
  char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
  memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
  desc->args[7].data = tmp;
  desc->function = ops_par_loop_update_halo_kernel1_fr1_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(22,"update_halo_kernel1_fr1");
  }
  ops_enqueue_kernel(desc);
}
#endif
| f264ef3ae2fe30f411a6f7c6cff1b3202e1aa4b8.cu | //
// auto-generated by ops.py
//
// Per-argument [xdim, ydim] sizes of each dat, held in device constant memory;
// the _h array caches the last values uploaded so the host stub can skip
// redundant cudaMemcpyToSymbol calls when the dims have not changed.
__constant__ int dims_update_halo_kernel1_fr1 [8][2];
static int dims_update_halo_kernel1_fr1_h [8][2] = {0};
//user function
__device__
// For every field whose flag is set in `fields`, copies the value from the
// neighbouring plane at z-offset -1 into the current point (0,0,0) — i.e.
// fills one halo layer in the z-direction for the selected fields.
inline void update_halo_kernel1_fr1_gpu(ACC<double> &density0,
  ACC<double> &density1,
  ACC<double> &energy0,
  ACC<double> &energy1,
  ACC<double> &pressure,
  ACC<double> &viscosity,
  ACC<double> &soundspeed,
  const int* fields) {
  if(fields[FIELD_DENSITY0] == 1) density0(0,0,0) = density0(0,0,-1);
  if(fields[FIELD_DENSITY1] == 1) density1(0,0,0) = density1(0,0,-1);
  if(fields[FIELD_ENERGY0] == 1) energy0(0,0,0) = energy0(0,0,-1);
  if(fields[FIELD_ENERGY1] == 1) energy1(0,0,0) = energy1(0,0,-1);
  if(fields[FIELD_PRESSURE] == 1) pressure(0,0,0) = pressure(0,0,-1);
  if(fields[FIELD_VISCOSITY] == 1) viscosity(0,0,0) = viscosity(0,0,-1);
  if(fields[FIELD_SOUNDSPEED] == 1) soundspeed(0,0,0) = soundspeed(0,0,-1);
}
// Kernel wrapper generated by ops.py: each thread handles one point of the
// (size0 x size1 x size2) iteration space. Every arg pointer is advanced to
// this thread's element using the per-arg x/y dims from constant memory, then
// wrapped in an ACC<> accessor before calling the user function.
__global__ void ops_update_halo_kernel1_fr1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  // Offset = x + y*xdim + z*xdim*ydim for each dat (unit stride, 1 component).
  arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[0][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[0][0] * dims_update_halo_kernel1_fr1[0][1];
  arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[1][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[1][0] * dims_update_halo_kernel1_fr1[1][1];
  arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[2][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[2][0] * dims_update_halo_kernel1_fr1[2][1];
  arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[3][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[3][0] * dims_update_halo_kernel1_fr1[3][1];
  arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[4][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[4][0] * dims_update_halo_kernel1_fr1[4][1];
  arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[5][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[5][0] * dims_update_halo_kernel1_fr1[5][1];
  arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[6][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[6][0] * dims_update_halo_kernel1_fr1[6][1];
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    ACC<double> argp0(dims_update_halo_kernel1_fr1[0][0], dims_update_halo_kernel1_fr1[0][1], arg0);
    ACC<double> argp1(dims_update_halo_kernel1_fr1[1][0], dims_update_halo_kernel1_fr1[1][1], arg1);
    ACC<double> argp2(dims_update_halo_kernel1_fr1[2][0], dims_update_halo_kernel1_fr1[2][1], arg2);
    ACC<double> argp3(dims_update_halo_kernel1_fr1[3][0], dims_update_halo_kernel1_fr1[3][1], arg3);
    ACC<double> argp4(dims_update_halo_kernel1_fr1[4][0], dims_update_halo_kernel1_fr1[4][1], arg4);
    ACC<double> argp5(dims_update_halo_kernel1_fr1[5][0], dims_update_halo_kernel1_fr1[5][1], arg5);
    ACC<double> argp6(dims_update_halo_kernel1_fr1[6][0], dims_update_halo_kernel1_fr1[6][1], arg6);
    update_halo_kernel1_fr1_gpu(argp0, argp1, argp2, argp3,
                           argp4, argp5, argp6, arg7);
  }
}
// host stub function
// Host stub generated by ops.py (CUDA variant of the HIP stub above). Without
// OPS_LAZY it is the user-facing loop entry point; with OPS_LAZY it is the
// deferred executor unpacking a kernel descriptor. It computes the local
// range, refreshes constant-memory dims, stages the NUM_FIELDS flags on the
// device, derives base pointers for the 7 dats, launches the kernel, and
// updates halo dirty bits and diagnostics.
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_fr1(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
 ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_fr1_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  #if OPS_MPI
  ops_block block = desc->block;
  #endif
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
  ops_arg arg6 = desc->args[6];
  ops_arg arg7 = desc->args[7];
  #endif
  //Timing
  double t1,t2,c1,c2;
  ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
  #if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args,8,range,22)) return;
  #endif
  if (OPS_diags > 1) {
    ops_timing_realloc(22,"update_halo_kernel1_fr1");
    OPS_kernels[22].count++;
    ops_timers_core(&c1,&t1);
  }
  //compute locally allocated range for the sub-block
  int start[3];
  int end[3];
  #if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  #endif //OPS_MPI
  #ifdef OPS_MPI
  int arg_idx[3];
  #endif
  #ifdef OPS_MPI
  if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return;
  #else //OPS_MPI
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif
  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0];
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0];
  int ydim3 = args[3].dat->size[1];
  int xdim4 = args[4].dat->size[0];
  int ydim4 = args[4].dat->size[1];
  int xdim5 = args[5].dat->size[0];
  int ydim5 = args[5].dat->size[1];
  int xdim6 = args[6].dat->size[0];
  int ydim6 = args[6].dat->size[1];
  // Re-upload the dims to constant memory only when any of them changed.
  if (xdim0 != dims_update_halo_kernel1_fr1_h[0][0] || ydim0 != dims_update_halo_kernel1_fr1_h[0][1] || xdim1 != dims_update_halo_kernel1_fr1_h[1][0] || ydim1 != dims_update_halo_kernel1_fr1_h[1][1] || xdim2 != dims_update_halo_kernel1_fr1_h[2][0] || ydim2 != dims_update_halo_kernel1_fr1_h[2][1] || xdim3 != dims_update_halo_kernel1_fr1_h[3][0] || ydim3 != dims_update_halo_kernel1_fr1_h[3][1] || xdim4 != dims_update_halo_kernel1_fr1_h[4][0] || ydim4 != dims_update_halo_kernel1_fr1_h[4][1] || xdim5 != dims_update_halo_kernel1_fr1_h[5][0] || ydim5 != dims_update_halo_kernel1_fr1_h[5][1] || xdim6 != dims_update_halo_kernel1_fr1_h[6][0] || ydim6 != dims_update_halo_kernel1_fr1_h[6][1]) {
    dims_update_halo_kernel1_fr1_h[0][0] = xdim0;
    dims_update_halo_kernel1_fr1_h[0][1] = ydim0;
    dims_update_halo_kernel1_fr1_h[1][0] = xdim1;
    dims_update_halo_kernel1_fr1_h[1][1] = ydim1;
    dims_update_halo_kernel1_fr1_h[2][0] = xdim2;
    dims_update_halo_kernel1_fr1_h[2][1] = ydim2;
    dims_update_halo_kernel1_fr1_h[3][0] = xdim3;
    dims_update_halo_kernel1_fr1_h[3][1] = ydim3;
    dims_update_halo_kernel1_fr1_h[4][0] = xdim4;
    dims_update_halo_kernel1_fr1_h[4][1] = ydim4;
    dims_update_halo_kernel1_fr1_h[5][0] = xdim5;
    dims_update_halo_kernel1_fr1_h[5][1] = ydim5;
    dims_update_halo_kernel1_fr1_h[6][0] = xdim6;
    dims_update_halo_kernel1_fr1_h[6][1] = ydim6;
    cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel1_fr1, dims_update_halo_kernel1_fr1_h, sizeof(dims_update_halo_kernel1_fr1)));
  }
  int *arg7h = (int *)arg7.data;
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);
  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
  // Stage the NUM_FIELDS flag array into OPS's device constants buffer.
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg7.data = OPS_consts_h + consts_bytes;
  arg7.data_d = OPS_consts_d + consts_bytes;
  for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  mvConstArraysToDevice(consts_bytes);
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
  int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
  char *p_a[8];
  //set up initial pointers
  int base0 = args[0].dat->base_offset +
           dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    (start[1] * args[0].stencil->stride[1]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    args[0].dat->size[1] *
    (start[2] * args[0].stencil->stride[2]);
  p_a[0] = (char *)args[0].data_d + base0;
  int base1 = args[1].dat->base_offset +
           dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    (start[1] * args[1].stencil->stride[1]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    args[1].dat->size[1] *
    (start[2] * args[1].stencil->stride[2]);
  p_a[1] = (char *)args[1].data_d + base1;
  int base2 = args[2].dat->base_offset +
           dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2+ dat2 *
    args[2].dat->size[0] *
    (start[1] * args[2].stencil->stride[1]);
  base2 = base2+ dat2 *
    args[2].dat->size[0] *
    args[2].dat->size[1] *
    (start[2] * args[2].stencil->stride[2]);
  p_a[2] = (char *)args[2].data_d + base2;
  int base3 = args[3].dat->base_offset +
           dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3+ dat3 *
    args[3].dat->size[0] *
    (start[1] * args[3].stencil->stride[1]);
  base3 = base3+ dat3 *
    args[3].dat->size[0] *
    args[3].dat->size[1] *
    (start[2] * args[3].stencil->stride[2]);
  p_a[3] = (char *)args[3].data_d + base3;
  int base4 = args[4].dat->base_offset +
           dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4+ dat4 *
    args[4].dat->size[0] *
    (start[1] * args[4].stencil->stride[1]);
  base4 = base4+ dat4 *
    args[4].dat->size[0] *
    args[4].dat->size[1] *
    (start[2] * args[4].stencil->stride[2]);
  p_a[4] = (char *)args[4].data_d + base4;
  int base5 = args[5].dat->base_offset +
           dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
  base5 = base5+ dat5 *
    args[5].dat->size[0] *
    (start[1] * args[5].stencil->stride[1]);
  base5 = base5+ dat5 *
    args[5].dat->size[0] *
    args[5].dat->size[1] *
    (start[2] * args[5].stencil->stride[2]);
  p_a[5] = (char *)args[5].data_d + base5;
  int base6 = args[6].dat->base_offset +
           dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
  base6 = base6+ dat6 *
    args[6].dat->size[0] *
    (start[1] * args[6].stencil->stride[1]);
  base6 = base6+ dat6 *
    args[6].dat->size[0] *
    args[6].dat->size[1] *
    (start[2] * args[6].stencil->stride[2]);
  p_a[6] = (char *)args[6].data_d + base6;
  #ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 8);
  ops_halo_exchanges(args,8,range);
  #endif
  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[22].mpi_time += t2-t1;
  }
  //call kernel wrapper function, passing in pointers to data
  if (x_size > 0 && y_size > 0 && z_size > 0)
    ops_update_halo_kernel1_fr1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
           (double *)p_a[2], (double *)p_a[3],
           (double *)p_a[4], (double *)p_a[5],
           (double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
  cutilSafeCall(cudaGetLastError());
  if (OPS_diags>1) {
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[22].time += t1-t2;
  }
  #ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 8);
  ops_set_halo_dirtybit3(&args[0],range);
  ops_set_halo_dirtybit3(&args[1],range);
  ops_set_halo_dirtybit3(&args[2],range);
  ops_set_halo_dirtybit3(&args[3],range);
  ops_set_halo_dirtybit3(&args[4],range);
  ops_set_halo_dirtybit3(&args[5],range);
  ops_set_halo_dirtybit3(&args[6],range);
  #endif
  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[22].mpi_time += t2-t1;
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg4);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg5);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg6);
  }
}
#ifdef OPS_LAZY
// Lazy-execution entry point: packages the loop arguments into a kernel
// descriptor (hashing kernel index, range, and dat indices), deep-copies the
// fields array (deferred execution outlives the caller's buffer), and
// enqueues the descriptor for later execution by ..._execute above.
void ops_par_loop_update_halo_kernel1_fr1(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 22;
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 22;
  for ( int i=0; i<6; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 8;
  desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
  desc->args[6] = arg6;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
  desc->args[7] = arg7;
  char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
  memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
  desc->args[7].data = tmp;
  desc->function = ops_par_loop_update_halo_kernel1_fr1_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(22,"update_halo_kernel1_fr1");
  }
  ops_enqueue_kernel(desc);
}
#endif
|
d6a5a4afeed782a2423bd3407a43dcae6848deef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include <cfloat>
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/mex_layer.hpp"
#include "math_constants.h"
#include "caffe/util/ggemm.cuh"
#include "caffe/layers/mex_layer_shared.cuh"
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
namespace caffe {
template <typename Dtype>
// Kernel: accumulates into offsets_diff the gradient w.r.t. the linear-space
// offsets given the gradient w.r.t. their log-space form. This matches the
// chain rule for linear = exp(logspace): dL/d(linear) = dL/d(logspace) /
// exp(logspace); the denominator is clamped below by fudge_factor so the
// quotient stays finite for very negative log-space values.
__global__ void linear_offsets_gradient_kernel(const int n, const Dtype* logspace_offsets,
    const Dtype* logspace_offsets_diff, const Dtype fudge_factor, Dtype* offsets_diff) {
  CUDA_KERNEL_LOOP(i, n) {
    offsets_diff[i] += logspace_offsets_diff[i] / max(exp(logspace_offsets[i]), fudge_factor);
  }
}
template <typename Dtype>
// Host wrapper: launches linear_offsets_gradient_kernel over n elements with
// Caffe's standard grid/block configuration (CAFFE_GET_BLOCKS x
// CAFFE_CUDA_NUM_THREADS) on the default stream.
void linear_offsets_gradient(const int n, const Dtype* logspace_offsets,
    const Dtype* logspace_offsets_diff, const Dtype fudge_factor, Dtype* offsets_diff) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( linear_offsets_gradient_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, 
      n, logspace_offsets, logspace_offsets_diff, fudge_factor, offsets_diff);
}
template <typename Dtype>
// Evaluates the unsupervised-initialization objective without updating the
// learner. All input patches are extracted (via im2col, with a 1x1 shortcut)
// and transposed into input_for_learner_[0]; when labels are expected, the
// odd-numbered bottom blobs supply per-image labels that are expanded across
// the N_ spatial locations into input_for_learner_[1]. Returns +infinity when
// no unsupervised init is needed.
Dtype MEXLayer<Dtype>::test_init_step_objective_gpu(const vector<Blob<Dtype>*>& bottom) {
  if (!needs_unsupervised_init()) {
    return INFINITY;
  }
  // Total patch count: N_ patches per image over all data-carrying bottoms
  // (odd-indexed bottoms hold labels when expects_labels_ is set).
  int batch_size = 0;
  for (int i = 0; i < bottom.size(); ++i) {
    if (expects_labels_ && i % 2 == 1) continue;
    batch_size += N_ * bottom[i]->num();
  }
  input_for_learner_[0]->Reshape(batch_size, K_, 1, 1);
  if (expects_labels_) {
    input_for_learner_[1]->Reshape(batch_size, 1, 1, 1);
  }
  Dtype* patches_data = input_for_learner_[0]->mutable_gpu_data();
  for (int bottom_idx = 0; bottom_idx < bottom.size(); ++bottom_idx) {
    if (expects_labels_ && bottom_idx % 2 == 1) continue;
    const Dtype* bottom_data = bottom[bottom_idx]->gpu_data();
    Dtype* col_buff = NULL;
    if (!is_1x1_ || normalize_patches_) {
      col_buff = col_buffer_.mutable_gpu_data();
    }
    // NOTE(review): the destination offset uses bottom_idx directly, while the
    // labels below use the compacted index (bottom_idx - 1) / 2. With
    // expects_labels_ and more than one data bottom this appears to leave
    // gaps / overrun the reshaped blob — verify against the caller.
    for (int n = 0; n < num_; ++n) {
      // im2col transformation: unroll input regions for filtering
      // into column matrix for multplication.
      if (!is_1x1_) {
        im2col_3d_gpu(
            bottom_data + bottom[bottom_idx]->offset(n),
            channels_, height_, width_,
            block_c_, block_h_, block_w_,
            pad_c_, pad_h_, pad_w_,
            stride_c_, stride_h_, stride_w_,
            col_buff,
            blocks_round_down_, blocks_out_of_bounds_value_);
      } else { // special case for 1x1 convolution
        if (!normalize_patches_) {
          col_buff = bottom[bottom_idx]->mutable_gpu_data() + bottom[bottom_idx]->offset(n);
        } else {
          caffe_copy(N_ * K_, bottom[bottom_idx]->gpu_data() + bottom[bottom_idx]->offset(n), col_buff);
        }
      }
      // Transpose K_ x N_ columns into N_ x K_ patch rows; optionally
      // normalize each patch row in place.
      if (normalize_patches_) {
        caffe_gpu_transpose(K_, N_,
          col_buff, patches_data + (bottom_idx * num_ + n) * K_ * N_);
        caffe_gpu_normalize_patches_rows_forward(K_, N_,
          normalization_fudge_factor_, patches_data + (bottom_idx * num_ + n) * K_ * N_, normalize_variance_);
      } else {
        caffe_gpu_transpose(K_, N_,
          col_buff, patches_data + (bottom_idx * num_ + n) * K_ * N_);
      }
    }
  }
  if (expects_labels_) {
    Dtype* labels_data = input_for_learner_[1]->mutable_gpu_data();
    for (int bottom_idx = 1; bottom_idx < bottom.size(); bottom_idx += 2) {
      const Dtype* labels = bottom[bottom_idx]->gpu_data();
      // Rank-1 GEMM broadcasts each image's label across its N_ locations
      // (one_zero_vec_ presumably holds ones here — confirm).
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, N_, 1,
          Dtype(1), labels, one_zero_vec_.gpu_data(),
          Dtype(0), labels_data + ((bottom_idx - 1) / 2) * num_ * N_);
    }
  }
  return unsupervised_learner_->objective_gpu(input_for_learner_);
}
// Runs one optimization step of the unsupervised initialization learner over
// the patches extracted from `bottom` (same extraction as
// test_init_step_objective_gpu), writing the step's objective into
// *objective. Returns true while more steps are needed; on the final step it
// copies the learned parameters into blobs_[1], releases the learner and its
// buffers, and marks the layer parameters as initialized.
template <typename Dtype>
bool MEXLayer<Dtype>::init_step_gpu(const vector<Blob<Dtype>*>& bottom, Dtype* objective) {
if (!needs_unsupervised_init()) {
return false;
}
// Count patches: odd bottoms are labels when expects_labels_ is set.
int batch_size = 0;
for (int i = 0; i < bottom.size(); ++i) {
if (expects_labels_ && i % 2 == 1) continue;
batch_size += N_ * bottom[i]->num();
}
input_for_learner_[0]->Reshape(batch_size, K_, 1, 1);
if (expects_labels_) {
input_for_learner_[1]->Reshape(batch_size, 1, 1, 1);
}
Dtype* patches_data = input_for_learner_[0]->mutable_gpu_data();
for (int bottom_idx = 0; bottom_idx < bottom.size(); ++bottom_idx) {
if (expects_labels_ && bottom_idx % 2 == 1) continue;
const Dtype* bottom_data = bottom[bottom_idx]->gpu_data();
Dtype* col_buff = NULL;
if (!is_1x1_ || normalize_patches_) {
col_buff = col_buffer_.mutable_gpu_data();
}
for (int n = 0; n < num_; ++n) {
// im2col transformation: unroll input regions for filtering
// into column matrix for multiplication.
if (!is_1x1_) {
im2col_3d_gpu(
bottom_data + bottom[bottom_idx]->offset(n),
channels_, height_, width_,
block_c_, block_h_, block_w_,
pad_c_, pad_h_, pad_w_,
stride_c_, stride_h_, stride_w_,
col_buff,
blocks_round_down_, blocks_out_of_bounds_value_);
} else { // special case for 1x1 convolution
if (!normalize_patches_) {
col_buff = bottom[bottom_idx]->mutable_gpu_data() + bottom[bottom_idx]->offset(n);
} else {
// Normalization mutates the buffer, so work on a copy.
caffe_copy(N_ * K_, bottom[bottom_idx]->gpu_data() + bottom[bottom_idx]->offset(n), col_buff);
}
}
// Store one patch per row at this image's slot in the learner input.
if (normalize_patches_) {
caffe_gpu_transpose(K_, N_,
col_buff, patches_data + (bottom_idx * num_ + n) * K_ * N_);
caffe_gpu_normalize_patches_rows_forward(K_, N_,
normalization_fudge_factor_, patches_data + (bottom_idx * num_ + n) * K_ * N_, normalize_variance_);
} else {
caffe_gpu_transpose(K_, N_,
col_buff, patches_data + (bottom_idx * num_ + n) * K_ * N_);
}
}
}
if (expects_labels_) {
// Replicate each image label across its N_ patches.
Dtype* labels_data = input_for_learner_[1]->mutable_gpu_data();
for (int bottom_idx = 1; bottom_idx < bottom.size(); bottom_idx += 2) {
const Dtype* labels = bottom[bottom_idx]->gpu_data();
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, N_, 1,
Dtype(1), labels, one_zero_vec_.gpu_data(),
Dtype(0), labels_data + ((bottom_idx - 1) / 2) * num_ * N_);
}
}
bool not_finished = unsupervised_learner_->step_gpu(input_for_learner_, objective);
if (!not_finished) {
// Learner converged: copy its result into the offsets blob and tear down
// all initialization-only state.
const vector<shared_ptr<Blob<Dtype> > > blobs(1, this->blobs_[1]);
unsupervised_learner_->fill_gpu(blobs);
for (int i = 0; i < input_for_learner_.size(); ++i) {
input_for_learner_[i].reset();
}
input_for_learner_.clear();
unsupervised_learner_.reset();
param_initialized_ = true;
}
return not_finished;
}
// Scatters (REVERSE = false) or gathers (REVERSE = true) Dim-channel patch
// columns between the full W x H x C output layout (`in`, leading dim N) and
// a per-region packed layout (`out`, one contiguous step_out * Dim slab per
// offsets region). One thread handles one (step cell, region) pair; the flat
// index is decomposed innermost-first as i (W_Step), j (H_Step), l (C_Step),
// then w_g/h_g/c_g (region coordinates). With use_unshared_regions_ the
// group/region strides are swapped so that regions interleave instead of
// tiling. Note: `in` is written when REVERSE is true, hence the conditional
// constness of its type.
template <typename Dtype, bool REVERSE>
__global__ void split_patches_kernel(const int num_kernels, const int N, const int Dim,
const int W, const int H, const int C,
const int W_Gs, const int H_Gs, const int C_Gs,
const int W_Step, const int H_Step, const int C_Step,
typename std::conditional<REVERSE, Dtype*, const Dtype*>::type in,
Dtype* out, const bool use_unshared_regions_) {
const int step_out = C_Step * H_Step * W_Step;
// Tiled regions step by the region size; unshared regions step by 1 and
// stride by the number of groups instead.
const int group_step_w = !use_unshared_regions_ ? W_Step : 1;
const int group_step_h = !use_unshared_regions_ ? H_Step : 1;
const int group_step_c = !use_unshared_regions_ ? C_Step : 1;
const int region_step_w = !use_unshared_regions_ ? 1 : W_Gs;
const int region_step_h = !use_unshared_regions_ ? 1 : H_Gs;
const int region_step_c = !use_unshared_regions_ ? 1 : C_Gs;
// Writable alias of `in`, only valid (and only used) in the REVERSE case.
Dtype* in_unconst = NULL;
if (REVERSE) {
in_unconst = (Dtype*)in;
}
CUDA_KERNEL_LOOP(index, num_kernels) {
// Decompose the flat thread index into step-cell (i, j, l) and region
// (w_g, h_g, c_g) coordinates.
const int i = index % W_Step;
const int i_index = index / W_Step;
const int j = i_index % H_Step;
const int j_index = i_index / H_Step;
const int l = j_index % C_Step;
const int l_index = j_index / C_Step;
const int w_g = l_index % W_Gs;
const int w_index = l_index / W_Gs;
const int h_g = w_index % H_Gs;
const int h_index = w_index / H_Gs;
const int c_g = h_index;
// "inner loop"
// Base of this region's packed slab in `out`.
Dtype* o = out + ((c_g * H_Gs + h_g) * W_Gs + w_g) * step_out * Dim;
const int group_addr = (c_g * group_step_c * H + h_g * group_step_h) * W + w_g * group_step_w;
const int base_addr_out = (l * H_Step + j) * W_Step + i;
const int base_addr_in = group_addr + (l * region_step_c * H + j * region_step_h) * W + i * region_step_w;
// Guard the ragged edge: the last region in each dimension may be partial.
if (w_g * W_Step + i < W &&
h_g * H_Step + j < H &&
c_g * C_Step + l < C) {
for (int k = 0; k < Dim; ++k) {
if (!REVERSE) {
o[base_addr_out + k * step_out] = in[base_addr_in + k * N];
} else {
in_unconst[base_addr_in + k * N] = o[base_addr_out + k * step_out];
}
}
}
}
}
// Host-side launcher for split_patches_kernel: moves Dim-channel patch data
// between the full output layout and the per-region packed layout (direction
// selected by the REVERSE template flag). Launches one thread per
// (step cell, region) pair and checks for launch errors afterwards.
template <typename Dtype, bool REVERSE>
void split_patches_gpu(const int N, const int Dim,
const int W, const int H, const int C,
const int W_Gs, const int H_Gs, const int C_Gs,
const int W_Step, const int H_Step, const int C_Step,
typename std::conditional<REVERSE, Dtype*, const Dtype*>::type in,
Dtype* out, const bool use_unshared_regions) {
const int cells_per_region = W_Step * H_Step * C_Step;
const int region_count = W_Gs * H_Gs * C_Gs;
const int total_threads = cells_per_region * region_count;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( split_patches_kernel<Dtype, REVERSE>), dim3(CAFFE_GET_BLOCKS(total_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
total_threads, N, Dim, W, H, C, W_Gs, H_Gs, C_Gs, W_Step, H_Step, C_Step, in, out, use_unshared_regions);
CUDA_POST_KERNEL_CHECK;
}
// MEX forward computation on GPU over `batch_size` independent regions:
// out (M x N) is computed from offsets (M x K) and in (K x N).
// Pass 1 computes, per output element, the max (epsilon > 0) or min
// (epsilon < 0) of offsets + in over K via a generalized GEMM.
// Pass 2 (only for finite epsilon) refines that extremum into the smooth
// MEX value using exponentials scaled by epsilon; the second element of the
// vec2 parameter is 0 in softmax mode and -log(K) otherwise (averaging
// term). NOTE(review): the exact elementwise formulas live in
// mex_forward_exp / mex_forward_out, defined elsewhere — the above follows
// the combiner choices visible here.
template <typename Dtype>
void mex_forward_gpu(const int M, const int N, const int K, const bool softmax_mode,
const Dtype epsilon, const Dtype* offsets, const Dtype* in, Dtype* out, const int batch_size = 1) {
// Identity element for the extremum: -inf for max, +inf for min.
const Dtype init_value = epsilon > 0 ? -INFINITY : INFINITY;
if (epsilon > 0) {
ggemm_gpu
<Dtype, Dtype, Dtype, uint8_t,
ggemm_add<Dtype, uint8_t>, ggemm_max<Dtype>, false,
true, true, true>
(M, N, K, offsets, in, out,
init_value, init_value, init_value, 0, batch_size);
} else {
ggemm_gpu
<Dtype, Dtype, Dtype, uint8_t,
ggemm_add<Dtype, uint8_t>, ggemm_min<Dtype>, false,
true, true, true>
(M, N, K, offsets, in, out,
init_value, init_value, init_value, 0, batch_size);
}
// Infinite epsilon degenerates to the pure max/min computed above.
if (std::isfinite(epsilon)) {
ggemm_readc_gpu
<false, false, Dtype, Dtype, Dtype, typename vec<Dtype>::vec2,
mex_forward_exp<Dtype>, ggemm_add<Dtype>, true, mex_forward_out<Dtype>, true,
true, true, true>
(M, N, K, offsets, in, out, out,
init_value, init_value, 0, make_vec2<Dtype>(epsilon, softmax_mode ? Dtype(0) : (Dtype)-::log(K)), batch_size);
}
}
// GPU forward pass of the MEX layer: extracts patches (im2col), optionally
// normalizes them, optionally normalizes the offsets, and applies the MEX
// operator per offsets region. When parameters are stored in linear space
// (use_log_space_parameters_ == false) the offsets blob is temporarily
// mapped to log space around the computation and mapped back at the end.
template <typename Dtype>
void MEXLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* col_buff = NULL;
if (!is_1x1_ || normalize_patches_) {
col_buff = col_buffer_.mutable_gpu_data();
}
// epsilon is a single learned scalar controlling the MEX smoothness.
const Dtype epsilon = this->blobs_[0]->cpu_data()[0];
Dtype* split_patches_in = NULL;
Dtype* split_patches_out = NULL;
const Dtype* offsets = this->blobs_[1]->gpu_data();
if (!use_log_space_parameters_) {
// Map linear-space offsets into log space in place (clip first so the
// log is finite).
// BUG FIX: these GPU ops previously received cpu_data() (a host pointer)
// as their input; Backward_gpu performs the identical transforms with
// device pointers, so use gpu_data() here as well.
caffe_gpu_clip_min<Dtype>(num_regions_ * M_ * K_, this->blobs_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_data(), linear_space_min_value_);
caffe_gpu_log<Dtype>(num_regions_ * M_ * K_, this->blobs_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_data());
}
if (normalize_offsets_) {
// Compute a per-row MEX normalization factor, then subtract it from the
// offsets (projected: in place on the blob; otherwise: on a copy).
mex_forward_gpu<Dtype>(M_ * num_regions_, 1, K_, softmax_mode_, epsilon,
offsets, one_zero_vec_.gpu_diff(), offsets_norm_factor_.mutable_gpu_data());
Dtype* offsets_mutable = NULL;
if (!normalize_offsets_projected_) {
caffe_copy<Dtype>(num_regions_ * M_ * K_, offsets, normed_offsets_.mutable_gpu_data());
offsets = normed_offsets_.gpu_data();
offsets_mutable = normed_offsets_.mutable_gpu_data();
} else {
offsets_mutable = this->blobs_[1]->mutable_gpu_data();
}
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_ * num_regions_, K_, 1,
-1, offsets_norm_factor_.gpu_data(), one_zero_vec_.gpu_data(),
1, offsets_mutable);
}
for (int bottom_idx = 0; bottom_idx < bottom.size(); ++bottom_idx) {
// Odd bottoms are labels when expects_labels_ is set; they share the top
// of the preceding data blob.
const int top_idx = expects_labels_ ? bottom_idx / 2 : bottom_idx;
const Dtype* bottom_data = bottom[bottom_idx]->gpu_data();
Dtype* top_data = top[top_idx]->mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
// im2col transformation: unroll input regions for filtering
// into column matrix for multiplication.
if (!is_1x1_) {
im2col_3d_gpu(
bottom_data + bottom[bottom_idx]->offset(n),
channels_, height_, width_,
block_c_, block_h_, block_w_,
pad_c_, pad_h_, pad_w_,
stride_c_, stride_h_, stride_w_,
col_buff,
blocks_round_down_, blocks_out_of_bounds_value_);
} else { // special case for 1x1 convolution
if (!normalize_patches_) {
col_buff = bottom[bottom_idx]->mutable_gpu_data() + bottom[bottom_idx]->offset(n);
} else {
// Normalization mutates the buffer, so work on a copy.
caffe_copy(N_ * K_, bottom[bottom_idx]->gpu_data() + bottom[bottom_idx]->offset(n), col_buff);
}
}
if (normalize_patches_) {
// Normalize patch rows: transpose to row-major patches, normalize,
// transpose back.
caffe_gpu_transpose(K_, N_,
col_buff,
row_buffer_.mutable_gpu_data());
caffe_gpu_normalize_patches_rows_forward(K_, N_, normalization_fudge_factor_,
row_buffer_.mutable_gpu_data(), normalize_variance_);
caffe_gpu_transpose(N_, K_,
row_buffer_.gpu_data(),
col_buff);
}
// Prepare input
Dtype* current_top = top_data + top[top_idx]->offset(n);
if (num_regions_ > 1) {
// Repack columns per offsets region so each region's MEX is a
// contiguous batched computation.
split_patches_in = split_patches_in_.mutable_gpu_data();
split_patches_out = split_patches_out_.mutable_gpu_data();
split_patches_gpu<Dtype, false>(N_, K_,
width_out_, height_out_, channels_out_,
offsets_w_, offsets_h_, offsets_c_,
shared_offsets_region_w_, shared_offsets_region_h_, shared_offsets_region_c_,
col_buff, split_patches_in, use_unshared_regions_);
} else {
split_patches_in = col_buff;
split_patches_out = current_top;
}
// Calculate
mex_forward_gpu<Dtype>(M_, region_size_, K_, softmax_mode_, epsilon,
offsets, split_patches_in, split_patches_out, num_regions_);
// Copy to output if needed
if (num_regions_ > 1) {
split_patches_gpu<Dtype, true>(N_, M_,
width_out_, height_out_, channels_out_,
offsets_w_, offsets_h_, offsets_c_,
shared_offsets_region_w_, shared_offsets_region_h_, shared_offsets_region_c_,
current_top, split_patches_out, use_unshared_regions_);
}
}
}
if (!use_log_space_parameters_) {
// Undo the log-space mapping: exponentiate back to linear space and
// re-clip. BUG FIX: use gpu_data() inputs (was cpu_data()), matching
// the identical epilogue in Backward_gpu.
caffe_gpu_exp<Dtype>(num_regions_ * M_ * K_, this->blobs_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_data());
caffe_gpu_clip_min<Dtype>(num_regions_ * M_ * K_, this->blobs_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_data(), linear_space_min_value_);
}
}
// GPU backward pass of the MEX layer. Recomputes the forward-side offset
// normalization / log-space mapping, then for every image accumulates:
//   - epsilon gradient (blobs_[0]) via a ggemm reduction + thrust::reduce,
//   - offsets gradient (blobs_[1]) via ggemm,
//   - bottom gradient via ggemm with transposed offsets, followed by
//     patch-normalization backprop and col2im.
// The four-way template dispatch below selects the elementwise backward
// functors for (normalized vs. plain offsets) x (finite vs. infinite
// epsilon).
template <typename Dtype>
void MEXLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
Dtype* split_patches_in = NULL;
Dtype* split_patches_in_diff = NULL;
Dtype* split_patches_out = NULL;
Dtype* split_patches_out_diff = NULL;
// Interlaced (data, diff) pairs of the top, consumed by the backward ggemms.
typename vec<Dtype>::vec2* split_patches_out_inter = NULL;
const Dtype epsilon = this->blobs_[0]->cpu_data()[0];
Dtype epsilon_diff = 0;
Dtype* epsilon_helper = NULL;
if (this->param_propagate_down_[0]) {
epsilon_helper = static_cast<Dtype*>(epsilon_helper_->mutable_gpu_data());
}
const Dtype* offsets = this->blobs_[1]->gpu_data();
if (!use_log_space_parameters_) {
// Map linear-space offsets to log space in place (clip first so the log
// is finite); undone at the end of this function.
caffe_gpu_clip_min<Dtype>(num_regions_ * M_ * K_, offsets, this->blobs_[1]->mutable_gpu_data(), linear_space_min_value_);
caffe_gpu_log<Dtype>(num_regions_ * M_ * K_, offsets, this->blobs_[1]->mutable_gpu_data());
}
if (normalize_offsets_) {
// Recompute the same offset normalization as in the forward pass.
mex_forward_gpu<Dtype>(M_ * num_regions_, 1, K_, softmax_mode_, epsilon,
offsets, one_zero_vec_.gpu_diff(), offsets_norm_factor_.mutable_gpu_data());
Dtype* offsets_mutable = NULL;
if (!normalize_offsets_projected_) {
caffe_copy<Dtype>(num_regions_ * M_ * K_, offsets, normed_offsets_.mutable_gpu_data());
offsets = normed_offsets_.gpu_data();
offsets_mutable = normed_offsets_.mutable_gpu_data();
} else {
offsets_mutable = this->blobs_[1]->mutable_gpu_data();
}
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_ * num_regions_, K_, 1,
-1, offsets_norm_factor_.gpu_data(), one_zero_vec_.gpu_data(),
1, offsets_mutable);
}
// In linear-space mode the raw gradient is staged in normed_offsets_' diff
// and converted to a linear-space gradient at the end of the n loop.
Dtype* offsets_diff = NULL;
if (this->param_propagate_down_[1]) {
if (use_log_space_parameters_) {
offsets_diff = this->blobs_[1]->mutable_gpu_diff();
} else {
offsets_diff = normed_offsets_.mutable_gpu_diff();
}
}
bool propagate_down_any = false;
for (int top_idx = 0; top_idx < top.size(); ++top_idx) {
if (propagate_down[top_idx]) {
propagate_down_any = true;
break;
}
}
// The bottom-gradient ggemm consumes K x M offsets, so transpose each
// region's M x K offsets block once up front.
const Dtype* transposed_offsets = NULL;
if (propagate_down_any) {
transposed_offsets = static_cast<const Dtype*>(transposed_offsets_->gpu_data());
for (int r = 0; r < num_regions_; ++r) {
const int offsets_idx = r * M_ * K_;
caffe_gpu_transpose(M_, K_,
offsets + offsets_idx,
static_cast<Dtype*>(transposed_offsets_->mutable_gpu_data()) + offsets_idx);
}
}
for (int top_idx = 0; top_idx < top.size(); ++top_idx) {
const int bottom_idx = expects_labels_ ? top_idx * 2 : top_idx;
if (this->param_propagate_down_[0] ||
this->param_propagate_down_[1] ||
propagate_down[top_idx]) {
const Dtype* top_diff = top[top_idx]->gpu_diff();
const Dtype* top_data = top[top_idx]->gpu_data();
Dtype* col_buff = NULL;
Dtype* col_diff = NULL;
if (!is_1x1_ || normalize_patches_) {
col_buff = col_buffer_.mutable_gpu_data();
}
if (!is_1x1_) {
col_diff = col_buffer_.mutable_gpu_diff();
}
const Dtype* bottom_data = bottom[bottom_idx]->gpu_data();
Dtype* bottom_diff = bottom[bottom_idx]->mutable_gpu_diff();
for (int n = 0; n < num_; ++n) {
// Since we saved memory in the forward pass by not storing all col
// data, we will need to recompute them.
if (!is_1x1_) {
im2col_3d_gpu(
bottom_data + bottom[bottom_idx]->offset(n),
channels_, height_, width_,
block_c_, block_h_, block_w_,
pad_c_, pad_h_, pad_w_,
stride_c_, stride_h_, stride_w_,
col_buff,
blocks_round_down_, blocks_out_of_bounds_value_);
} else { // special case for 1x1 convolution
// 1x1: gradients go straight into the bottom diff buffer.
col_diff = bottom_diff + bottom[bottom_idx]->offset(n);
if (!normalize_patches_) {
col_buff = bottom[bottom_idx]->mutable_gpu_data() + bottom[bottom_idx]->offset(n);
} else {
caffe_copy(N_ * K_, bottom[bottom_idx]->mutable_gpu_data() + bottom[bottom_idx]->offset(n), col_buff);
}
}
if (normalize_patches_) {
// Keep the pre-normalization patches in row_buffer_'s diff — the
// normalization backward below needs both pre- and post- values.
caffe_gpu_transpose(K_, N_,
col_buff,
row_buffer_.mutable_gpu_data());
caffe_copy(K_ * N_,
row_buffer_.gpu_data(),
row_buffer_.mutable_gpu_diff());
caffe_gpu_normalize_patches_rows_forward(K_, N_, normalization_fudge_factor_,
row_buffer_.mutable_gpu_data(), normalize_variance_);
caffe_gpu_transpose(N_, K_,
row_buffer_.gpu_data(),
col_buff);
}
// Prepare input for backprop
const Dtype* current_top_data = top_data + n * M_ * N_;
const Dtype* current_top_diff = top_diff + n * M_ * N_;
if (num_regions_ > 1) {
// Repack patches, top data and top diff per offsets region.
split_patches_in = split_patches_in_.mutable_gpu_data();
split_patches_in_diff = split_patches_in_.mutable_gpu_diff();
split_patches_out = split_patches_out_.mutable_gpu_data();
split_patches_out_diff = split_patches_out_.mutable_gpu_diff();
split_patches_gpu<Dtype, false>(N_, K_,
width_out_, height_out_, channels_out_,
offsets_w_, offsets_h_, offsets_c_,
shared_offsets_region_w_, shared_offsets_region_h_, shared_offsets_region_c_,
col_buff, split_patches_in, use_unshared_regions_);
split_patches_gpu<Dtype, false>(N_, M_,
width_out_, height_out_, channels_out_,
offsets_w_, offsets_h_, offsets_c_,
shared_offsets_region_w_, shared_offsets_region_h_, shared_offsets_region_c_,
current_top_data, split_patches_out, use_unshared_regions_);
split_patches_gpu<Dtype, false>(N_, M_,
width_out_, height_out_, channels_out_,
offsets_w_, offsets_h_, offsets_c_,
shared_offsets_region_w_, shared_offsets_region_h_, shared_offsets_region_c_,
current_top_diff, split_patches_out_diff, use_unshared_regions_);
} else {
split_patches_in = col_buff;
split_patches_in_diff = col_diff;
// Casting away const here: these are only read below in the
// single-region case.
split_patches_out = (Dtype*)current_top_data;
split_patches_out_diff = (Dtype*)current_top_diff;
}
// Interlace (top data, top diff) into vec2 pairs for the ggemms.
split_patches_out_inter = static_cast<typename vec<Dtype>::vec2 *>(
split_patches_out_inter_->mutable_gpu_data());
interlace_gpu(num_regions_ * M_ * region_size_, split_patches_out, split_patches_out_diff,
split_patches_out_inter);
// Calculate backprop
if (this->param_propagate_down_[0] && std::isfinite(epsilon)) { // epsilon = inf => epsilon_diff = 0
if (!normalize_offsets_ || normalize_offsets_projected_) {
ggemm_readc_gpu
<false, true, typename vec<Dtype>::vec2, Dtype, Dtype, Dtype,
mex_backward_epsilon<Dtype>, ggemm_add<Dtype>, false, no_op<Dtype, Dtype>, false,
true, true, true>
(M_, K_, region_size_, split_patches_out_inter, split_patches_in,
offsets, epsilon_helper,
make_vec2<Dtype>(0, 0), 0, 0, epsilon, num_regions_);
} else {
ggemm_readc_gpu
<false, true, typename vec<Dtype>::vec2, Dtype, Dtype, Dtype,
mex_backward_epsilon_with_normalized_offsets<Dtype>, ggemm_add<Dtype>, false,
no_op<Dtype, Dtype>, false,
true, true, true>
(M_, K_, region_size_, split_patches_out_inter, split_patches_in,
offsets, epsilon_helper,
make_vec2<Dtype>(0, 0), 0, 0, epsilon, num_regions_);
}
// Reduce the per-element helper buffer into the scalar epsilon
// gradient; accumulated across images and tops into epsilon_diff.
thrust::device_ptr<Dtype> cptr = thrust::device_pointer_cast(epsilon_helper);
const Dtype sum_offsets_diff = thrust::reduce(cptr, cptr + num_regions_ * M_ * K_);
epsilon_diff += sum_offsets_diff / (epsilon * K_);
}
if (this->param_propagate_down_[1]) {
if (!use_log_space_parameters_) {
// NOTE(review): this zeroes the staged gradient every (top, n)
// iteration; the linear-space conversion below runs inside the same
// loop and accumulates into the blob's diff — verify this is the
// intended accumulation scheme.
caffe_gpu_set(M_ * K_ * num_regions_, Dtype(0), offsets_diff);
}
if (!normalize_offsets_ || normalize_offsets_projected_) {
if (std::isfinite(epsilon)) {
ggemm_readc_gpu
<false, true, typename vec<Dtype>::vec2, Dtype, Dtype, typename vec<Dtype>::vec2,
mex_backward_offsets_finite<Dtype>, ggemm_add<Dtype>, true, no_op<Dtype, typename vec<Dtype>::vec2>, false,
true, true, true>
(M_, K_, region_size_, split_patches_out_inter, split_patches_in,
offsets, offsets_diff,
make_vec2<Dtype>(epsilon > 0 ? INFINITY : -INFINITY, 0), 0, 0,
make_vec2<Dtype>(epsilon, softmax_mode_ ? Dtype(0) : (Dtype)-::log(K_)), num_regions_);
} else {
ggemm_readc_gpu
<false, true, typename vec<Dtype>::vec2, Dtype, Dtype, uint8_t,
mex_backward_offsets_infinite<Dtype>, ggemm_add<Dtype>, true, no_op<Dtype, uint8_t>, false,
true, true, true>
(M_, K_, region_size_, split_patches_out_inter, split_patches_in,
offsets, offsets_diff,
make_vec2<Dtype>(0, 0), 0, 0, 0, num_regions_);
}
} else {
if (std::isfinite(epsilon)) {
ggemm_readc_gpu
<false, true, typename vec<Dtype>::vec2, Dtype, Dtype, typename vec<Dtype>::vec2,
mex_backward_normalized_offsets_finite<Dtype>, ggemm_add<Dtype>, true,
no_op<Dtype, typename vec<Dtype>::vec2>, false,
true, true, true>
(M_, K_, region_size_, split_patches_out_inter, split_patches_in,
offsets, offsets_diff,
make_vec2<Dtype>(epsilon > 0 ? INFINITY : -INFINITY, 0), 0, 0,
make_vec2<Dtype>(epsilon, softmax_mode_ ? Dtype(0) : (Dtype)-::log(K_)), num_regions_);
} else {
ggemm_readc_gpu
<false, true, typename vec<Dtype>::vec2, Dtype, Dtype, uint8_t,
mex_backward_normalized_offsets_infinite<Dtype>, ggemm_add<Dtype>, true,
no_op<Dtype, uint8_t>, false,
true, true, true>
(M_, K_, region_size_, split_patches_out_inter, split_patches_in,
offsets, offsets_diff,
make_vec2<Dtype>(0, 0), 0, 0, 0, num_regions_);
}
}
}
if (propagate_down[top_idx]) {
// Bottom gradient: K x region_size_ result from transposed offsets and
// the interlaced top (data, diff) pairs.
if (std::isfinite(epsilon)) {
ggemm_readc_gpu
<false, false, Dtype, typename vec<Dtype>::vec2, Dtype, typename vec<Dtype>::vec2,
mex_backward_bottom_finite<Dtype>, ggemm_add<Dtype>, false, no_op<Dtype, typename vec<Dtype>::vec2>, false,
true, true, true>
(K_, region_size_, M_, transposed_offsets, split_patches_out_inter,
split_patches_in, split_patches_in_diff, 0, make_vec2<Dtype>(epsilon > 0 ? INFINITY : -INFINITY, 0), 0,
make_vec2<Dtype>(epsilon, softmax_mode_ ? Dtype(0) : (Dtype)-::log(K_)), num_regions_);
} else {
ggemm_readc_gpu
<false, false, Dtype, typename vec<Dtype>::vec2, Dtype, uint8_t,
mex_backward_bottom_infinite<Dtype>, ggemm_add<Dtype>, false, no_op<Dtype, uint8_t>, false,
true, true, true>
(K_, region_size_, M_, transposed_offsets, split_patches_out_inter,
split_patches_in, split_patches_in_diff, 0, make_vec2<Dtype>(0, 0), 0, 0, num_regions_);
}
}
// Copy to bottom if needed
if (num_regions_ > 1) {
split_patches_gpu<Dtype, true>(N_, K_,
width_out_, height_out_, channels_out_,
offsets_w_, offsets_h_, offsets_c_,
shared_offsets_region_w_, shared_offsets_region_h_, shared_offsets_region_c_,
col_diff, split_patches_in_diff, use_unshared_regions_);
}
// Backprop for patch normalization
if (normalize_patches_ && propagate_down[top_idx]) {
// col_buff is used as scratch for the transposed diff here.
caffe_gpu_transpose(K_, N_, col_diff, col_buff);
caffe_gpu_normalize_patches_rows_backward(K_, N_, normalization_fudge_factor_,
row_buffer_.gpu_diff(), row_buffer_.gpu_data(), col_buff, normalize_variance_);
caffe_gpu_transpose(N_, K_, col_buff, col_diff);
}
if (propagate_down[top_idx] && !is_1x1_) {
// Fold the column gradient back into the image-shaped bottom diff.
col2im_3d_gpu(
col_diff,
channels_, height_, width_,
block_c_, block_h_, block_w_,
pad_c_, pad_h_, pad_w_,
stride_c_, stride_h_, stride_w_,
bottom_diff + bottom[bottom_idx]->offset(n),
blocks_round_down_);
}
if (!use_log_space_parameters_ && this->param_propagate_down_[1]) {
// Chain rule through the log mapping: d/dx log(x) = 1/x (clipped),
// accumulating into the blob's linear-space gradient.
const Dtype* original_logspace_offsets = this->blobs_[1]->gpu_data();
Dtype* original_offsets_diff = this->blobs_[1]->mutable_gpu_diff();
linear_offsets_gradient<Dtype>(num_regions_ * M_ * K_, original_logspace_offsets,
offsets_diff, linear_space_min_value_, original_offsets_diff);
}
}
}
}
if (this->param_propagate_down_[0]) {
this->blobs_[0]->mutable_cpu_diff()[0] = epsilon_diff;
}
if (use_log_space_parameters_ && this->param_propagate_down_[1] && this->maximum_entropy_regularization_coeff_ > Dtype(0)) {
// Add the maximum-entropy regularizer's gradient to the offsets gradient.
caffe_gpu_maximum_entropy_regularization(num_regions_ * M_, K_, offsets, normed_offsets_.mutable_gpu_diff());
caffe_gpu_axpy(num_regions_ * M_ * K_, maximum_entropy_regularization_coeff_, normed_offsets_.gpu_diff(), offsets_diff);
}
if (!use_log_space_parameters_) {
// Undo the log-space mapping: back to linear space, then re-clip.
caffe_gpu_exp<Dtype>(num_regions_ * M_ * K_, this->blobs_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_data());
caffe_gpu_clip_min<Dtype>(num_regions_ * M_ * K_, this->blobs_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_data(), linear_space_min_value_);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(MEXLayer);
INSTANTIATE_LAYER_GPU_INIT_STEP(MEXLayer);
} // namespace caffe
| d6a5a4afeed782a2423bd3407a43dcae6848deef.cu | #include <algorithm>
#include <vector>
#include <cfloat>
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/mex_layer.hpp"
#include "math_constants.h"
#include "caffe/util/ggemm.cuh"
#include "caffe/layers/mex_layer_shared.cuh"
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
namespace caffe {
// Elementwise chain rule through the log-space mapping of the offsets:
// given the gradient w.r.t. log-space offsets, accumulates (+=) the gradient
// w.r.t. the linear-space offsets using d/dx log(x) = 1/x, where
// x = exp(logspace_offsets[i]) is clamped below by fudge_factor to avoid
// division blow-up near zero.
template <typename Dtype>
__global__ void linear_offsets_gradient_kernel(const int n, const Dtype* logspace_offsets,
const Dtype* logspace_offsets_diff, const Dtype fudge_factor, Dtype* offsets_diff) {
CUDA_KERNEL_LOOP(i, n) {
offsets_diff[i] += logspace_offsets_diff[i] / max(exp(logspace_offsets[i]), fudge_factor);
}
}
// Host-side launcher for linear_offsets_gradient_kernel: accumulates the
// linear-space offsets gradient from the log-space gradient over n elements.
// All pointers must be device pointers.
template <typename Dtype>
void linear_offsets_gradient(const int n, const Dtype* logspace_offsets,
const Dtype* logspace_offsets_diff, const Dtype fudge_factor, Dtype* offsets_diff) {
// NOLINT_NEXT_LINE(whitespace/operators)
linear_offsets_gradient_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, logspace_offsets, logspace_offsets_diff, fudge_factor, offsets_diff);
// FIX: surface launch-configuration errors immediately, consistent with the
// other launch wrappers in this file (e.g. split_patches_gpu).
CUDA_POST_KERNEL_CHECK;
}
// Evaluates the current objective of the unsupervised initialization learner
// over all patches extracted from `bottom`, WITHOUT taking an optimization
// step. Returns INFINITY when no unsupervised initialization is pending.
// Side effect: reshapes and fills input_for_learner_[0] (patches, batch x K_)
// and, when labels are expected, input_for_learner_[1] (replicated labels).
template <typename Dtype>
Dtype MEXLayer<Dtype>::test_init_step_objective_gpu(const vector<Blob<Dtype>*>& bottom) {
if (!needs_unsupervised_init()) {
return INFINITY;
}
// Odd bottoms are labels when expects_labels_ is set; each data blob
// contributes N_ patches per image.
int batch_size = 0;
for (int i = 0; i < bottom.size(); ++i) {
if (expects_labels_ && i % 2 == 1) continue;
batch_size += N_ * bottom[i]->num();
}
input_for_learner_[0]->Reshape(batch_size, K_, 1, 1);
if (expects_labels_) {
input_for_learner_[1]->Reshape(batch_size, 1, 1, 1);
}
Dtype* patches_data = input_for_learner_[0]->mutable_gpu_data();
for (int bottom_idx = 0; bottom_idx < bottom.size(); ++bottom_idx) {
if (expects_labels_ && bottom_idx % 2 == 1) continue;
const Dtype* bottom_data = bottom[bottom_idx]->gpu_data();
Dtype* col_buff = NULL;
// Scratch needed unless the 1x1 fast path can read the input in place.
if (!is_1x1_ || normalize_patches_) {
col_buff = col_buffer_.mutable_gpu_data();
}
for (int n = 0; n < num_; ++n) {
// im2col transformation: unroll input regions for filtering
// into column matrix for multiplication.
if (!is_1x1_) {
im2col_3d_gpu(
bottom_data + bottom[bottom_idx]->offset(n),
channels_, height_, width_,
block_c_, block_h_, block_w_,
pad_c_, pad_h_, pad_w_,
stride_c_, stride_h_, stride_w_,
col_buff,
blocks_round_down_, blocks_out_of_bounds_value_);
} else { // special case for 1x1 convolution
if (!normalize_patches_) {
col_buff = bottom[bottom_idx]->mutable_gpu_data() + bottom[bottom_idx]->offset(n);
} else {
// Normalization mutates the buffer, so work on a copy.
caffe_copy(N_ * K_, bottom[bottom_idx]->gpu_data() + bottom[bottom_idx]->offset(n), col_buff);
}
}
// Transpose columns into one-patch-per-row layout at this image's slot;
// optionally normalize each patch row.
if (normalize_patches_) {
caffe_gpu_transpose(K_, N_,
col_buff, patches_data + (bottom_idx * num_ + n) * K_ * N_);
caffe_gpu_normalize_patches_rows_forward(K_, N_,
normalization_fudge_factor_, patches_data + (bottom_idx * num_ + n) * K_ * N_, normalize_variance_);
} else {
caffe_gpu_transpose(K_, N_,
col_buff, patches_data + (bottom_idx * num_ + n) * K_ * N_);
}
}
}
if (expects_labels_) {
// Replicate each image label across its N_ patches via a rank-1 GEMM.
Dtype* labels_data = input_for_learner_[1]->mutable_gpu_data();
for (int bottom_idx = 1; bottom_idx < bottom.size(); bottom_idx += 2) {
const Dtype* labels = bottom[bottom_idx]->gpu_data();
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, N_, 1,
Dtype(1), labels, one_zero_vec_.gpu_data(),
Dtype(0), labels_data + ((bottom_idx - 1) / 2) * num_ * N_);
}
}
return unsupervised_learner_->objective_gpu(input_for_learner_);
}
// Runs one optimization step of the unsupervised initialization learner over
// the patches extracted from `bottom` (same extraction as
// test_init_step_objective_gpu), writing the step's objective into
// *objective. Returns true while more steps are needed; on the final step it
// copies the learned parameters into blobs_[1], releases the learner and its
// buffers, and marks the layer parameters as initialized.
template <typename Dtype>
bool MEXLayer<Dtype>::init_step_gpu(const vector<Blob<Dtype>*>& bottom, Dtype* objective) {
if (!needs_unsupervised_init()) {
return false;
}
// Count patches: odd bottoms are labels when expects_labels_ is set.
int batch_size = 0;
for (int i = 0; i < bottom.size(); ++i) {
if (expects_labels_ && i % 2 == 1) continue;
batch_size += N_ * bottom[i]->num();
}
input_for_learner_[0]->Reshape(batch_size, K_, 1, 1);
if (expects_labels_) {
input_for_learner_[1]->Reshape(batch_size, 1, 1, 1);
}
Dtype* patches_data = input_for_learner_[0]->mutable_gpu_data();
for (int bottom_idx = 0; bottom_idx < bottom.size(); ++bottom_idx) {
if (expects_labels_ && bottom_idx % 2 == 1) continue;
const Dtype* bottom_data = bottom[bottom_idx]->gpu_data();
Dtype* col_buff = NULL;
if (!is_1x1_ || normalize_patches_) {
col_buff = col_buffer_.mutable_gpu_data();
}
for (int n = 0; n < num_; ++n) {
// im2col transformation: unroll input regions for filtering
// into column matrix for multiplication.
if (!is_1x1_) {
im2col_3d_gpu(
bottom_data + bottom[bottom_idx]->offset(n),
channels_, height_, width_,
block_c_, block_h_, block_w_,
pad_c_, pad_h_, pad_w_,
stride_c_, stride_h_, stride_w_,
col_buff,
blocks_round_down_, blocks_out_of_bounds_value_);
} else { // special case for 1x1 convolution
if (!normalize_patches_) {
col_buff = bottom[bottom_idx]->mutable_gpu_data() + bottom[bottom_idx]->offset(n);
} else {
// Normalization mutates the buffer, so work on a copy.
caffe_copy(N_ * K_, bottom[bottom_idx]->gpu_data() + bottom[bottom_idx]->offset(n), col_buff);
}
}
// Store one patch per row at this image's slot in the learner input.
if (normalize_patches_) {
caffe_gpu_transpose(K_, N_,
col_buff, patches_data + (bottom_idx * num_ + n) * K_ * N_);
caffe_gpu_normalize_patches_rows_forward(K_, N_,
normalization_fudge_factor_, patches_data + (bottom_idx * num_ + n) * K_ * N_, normalize_variance_);
} else {
caffe_gpu_transpose(K_, N_,
col_buff, patches_data + (bottom_idx * num_ + n) * K_ * N_);
}
}
}
if (expects_labels_) {
// Replicate each image label across its N_ patches.
Dtype* labels_data = input_for_learner_[1]->mutable_gpu_data();
for (int bottom_idx = 1; bottom_idx < bottom.size(); bottom_idx += 2) {
const Dtype* labels = bottom[bottom_idx]->gpu_data();
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, N_, 1,
Dtype(1), labels, one_zero_vec_.gpu_data(),
Dtype(0), labels_data + ((bottom_idx - 1) / 2) * num_ * N_);
}
}
bool not_finished = unsupervised_learner_->step_gpu(input_for_learner_, objective);
if (!not_finished) {
// Learner converged: copy its result into the offsets blob and tear down
// all initialization-only state.
const vector<shared_ptr<Blob<Dtype> > > blobs(1, this->blobs_[1]);
unsupervised_learner_->fill_gpu(blobs);
for (int i = 0; i < input_for_learner_.size(); ++i) {
input_for_learner_[i].reset();
}
input_for_learner_.clear();
unsupervised_learner_.reset();
param_initialized_ = true;
}
return not_finished;
}
// Scatters (REVERSE = false) or gathers (REVERSE = true) Dim-channel patch
// columns between the full W x H x C output layout (`in`, leading dim N) and
// a per-region packed layout (`out`, one contiguous step_out * Dim slab per
// offsets region). One thread handles one (step cell, region) pair; the flat
// index is decomposed innermost-first as i (W_Step), j (H_Step), l (C_Step),
// then w_g/h_g/c_g (region coordinates). With use_unshared_regions_ the
// group/region strides are swapped so that regions interleave instead of
// tiling. Note: `in` is written when REVERSE is true, hence the conditional
// constness of its type.
template <typename Dtype, bool REVERSE>
__global__ void split_patches_kernel(const int num_kernels, const int N, const int Dim,
const int W, const int H, const int C,
const int W_Gs, const int H_Gs, const int C_Gs,
const int W_Step, const int H_Step, const int C_Step,
typename std::conditional<REVERSE, Dtype*, const Dtype*>::type in,
Dtype* out, const bool use_unshared_regions_) {
const int step_out = C_Step * H_Step * W_Step;
// Tiled regions step by the region size; unshared regions step by 1 and
// stride by the number of groups instead.
const int group_step_w = !use_unshared_regions_ ? W_Step : 1;
const int group_step_h = !use_unshared_regions_ ? H_Step : 1;
const int group_step_c = !use_unshared_regions_ ? C_Step : 1;
const int region_step_w = !use_unshared_regions_ ? 1 : W_Gs;
const int region_step_h = !use_unshared_regions_ ? 1 : H_Gs;
const int region_step_c = !use_unshared_regions_ ? 1 : C_Gs;
// Writable alias of `in`, only valid (and only used) in the REVERSE case.
Dtype* in_unconst = NULL;
if (REVERSE) {
in_unconst = (Dtype*)in;
}
CUDA_KERNEL_LOOP(index, num_kernels) {
// Decompose the flat thread index into step-cell (i, j, l) and region
// (w_g, h_g, c_g) coordinates.
const int i = index % W_Step;
const int i_index = index / W_Step;
const int j = i_index % H_Step;
const int j_index = i_index / H_Step;
const int l = j_index % C_Step;
const int l_index = j_index / C_Step;
const int w_g = l_index % W_Gs;
const int w_index = l_index / W_Gs;
const int h_g = w_index % H_Gs;
const int h_index = w_index / H_Gs;
const int c_g = h_index;
// "inner loop"
// Base of this region's packed slab in `out`.
Dtype* o = out + ((c_g * H_Gs + h_g) * W_Gs + w_g) * step_out * Dim;
const int group_addr = (c_g * group_step_c * H + h_g * group_step_h) * W + w_g * group_step_w;
const int base_addr_out = (l * H_Step + j) * W_Step + i;
const int base_addr_in = group_addr + (l * region_step_c * H + j * region_step_h) * W + i * region_step_w;
// Guard the ragged edge: the last region in each dimension may be partial.
if (w_g * W_Step + i < W &&
h_g * H_Step + j < H &&
c_g * C_Step + l < C) {
for (int k = 0; k < Dim; ++k) {
if (!REVERSE) {
o[base_addr_out + k * step_out] = in[base_addr_in + k * N];
} else {
in_unconst[base_addr_in + k * N] = o[base_addr_out + k * step_out];
}
}
}
}
}
// Host-side launcher for split_patches_kernel: moves Dim-channel patch data
// between the full output layout and the per-region packed layout (direction
// selected by the REVERSE template flag). Launches one thread per
// (step cell, region) pair and checks for launch errors afterwards.
template <typename Dtype, bool REVERSE>
void split_patches_gpu(const int N, const int Dim,
const int W, const int H, const int C,
const int W_Gs, const int H_Gs, const int C_Gs,
const int W_Step, const int H_Step, const int C_Step,
typename std::conditional<REVERSE, Dtype*, const Dtype*>::type in,
Dtype* out, const bool use_unshared_regions) {
const int cells_per_region = W_Step * H_Step * C_Step;
const int region_count = W_Gs * H_Gs * C_Gs;
const int total_threads = cells_per_region * region_count;
// NOLINT_NEXT_LINE(whitespace/operators)
split_patches_kernel<Dtype, REVERSE><<<CAFFE_GET_BLOCKS(total_threads), CAFFE_CUDA_NUM_THREADS>>>(
total_threads, N, Dim, W, H, C, W_Gs, H_Gs, C_Gs, W_Step, H_Step, C_Step, in, out, use_unshared_regions);
CUDA_POST_KERNEL_CHECK;
}
// MEX forward computation on GPU over `batch_size` independent regions:
// out (M x N) is computed from offsets (M x K) and in (K x N).
// Pass 1 computes, per output element, the max (epsilon > 0) or min
// (epsilon < 0) of offsets + in over K via a generalized GEMM.
// Pass 2 (only for finite epsilon) refines that extremum into the smooth
// MEX value using exponentials scaled by epsilon; the second element of the
// vec2 parameter is 0 in softmax mode and -log(K) otherwise (averaging
// term). NOTE(review): the exact elementwise formulas live in
// mex_forward_exp / mex_forward_out, defined elsewhere — the above follows
// the combiner choices visible here.
template <typename Dtype>
void mex_forward_gpu(const int M, const int N, const int K, const bool softmax_mode,
const Dtype epsilon, const Dtype* offsets, const Dtype* in, Dtype* out, const int batch_size = 1) {
// Identity element for the extremum: -inf for max, +inf for min.
const Dtype init_value = epsilon > 0 ? -INFINITY : INFINITY;
if (epsilon > 0) {
ggemm_gpu
<Dtype, Dtype, Dtype, uint8_t,
ggemm_add<Dtype, uint8_t>, ggemm_max<Dtype>, false,
true, true, true>
(M, N, K, offsets, in, out,
init_value, init_value, init_value, 0, batch_size);
} else {
ggemm_gpu
<Dtype, Dtype, Dtype, uint8_t,
ggemm_add<Dtype, uint8_t>, ggemm_min<Dtype>, false,
true, true, true>
(M, N, K, offsets, in, out,
init_value, init_value, init_value, 0, batch_size);
}
// Infinite epsilon degenerates to the pure max/min computed above.
if (std::isfinite(epsilon)) {
ggemm_readc_gpu
<false, false, Dtype, Dtype, Dtype, typename vec<Dtype>::vec2,
mex_forward_exp<Dtype>, ggemm_add<Dtype>, true, mex_forward_out<Dtype>, true,
true, true, true>
(M, N, K, offsets, in, out, out,
init_value, init_value, 0, make_vec2<Dtype>(epsilon, softmax_mode ? Dtype(0) : (Dtype)-std::log(K)), batch_size);
}
}
// Forward pass of the MEX layer:
//  1. optionally convert linear-space offsets to log space (clip + log),
//  2. optionally normalize the offsets via mex_forward_gpu + gemm,
//  3. per sample: materialize patches (im2col, unless 1x1), optionally
//     normalize them, optionally split them into offset-sharing regions,
//     run mex_forward_gpu, and scatter the result back to the top blob,
//  4. optionally convert the offsets back to linear space (exp + clip).
template <typename Dtype>
void MEXLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Column buffer is only needed when patches must be materialized
  // (non-1x1 blocks) or normalized in place.
  Dtype* col_buff = NULL;
  if (!is_1x1_ || normalize_patches_) {
    col_buff = col_buffer_.mutable_gpu_data();
  }
  const Dtype epsilon = this->blobs_[0]->cpu_data()[0];
  Dtype* split_patches_in = NULL;
  Dtype* split_patches_out = NULL;
  const Dtype* offsets = this->blobs_[1]->gpu_data();
  if (!use_log_space_parameters_) {
    // BUG FIX: these GPU kernels were previously fed cpu_data() (a host
    // pointer) as their input. Backward_gpu performs the identical in-place
    // clip+log using the device pointer; do the same here.
    caffe_gpu_clip_min<Dtype>(num_regions_ * M_ * K_, this->blobs_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_data(), linear_space_min_value_);
    caffe_gpu_log<Dtype>(num_regions_ * M_ * K_, this->blobs_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_data());
  }
  if (normalize_offsets_) {
    // Per-row MEX of the offsets against a zero vector gives the
    // normalization factor, which is then subtracted (gemm with alpha = -1).
    mex_forward_gpu<Dtype>(M_ * num_regions_, 1, K_, softmax_mode_, epsilon,
        offsets, one_zero_vec_.gpu_diff(), offsets_norm_factor_.mutable_gpu_data());
    Dtype* offsets_mutable = NULL;
    if (!normalize_offsets_projected_) {
      // Keep the stored parameters untouched; normalize a scratch copy.
      caffe_copy<Dtype>(num_regions_ * M_ * K_, offsets, normed_offsets_.mutable_gpu_data());
      offsets = normed_offsets_.gpu_data();
      offsets_mutable = normed_offsets_.mutable_gpu_data();
    } else {
      // Projected mode: normalize the parameters themselves.
      offsets_mutable = this->blobs_[1]->mutable_gpu_data();
    }
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_ * num_regions_, K_, 1,
        -1, offsets_norm_factor_.gpu_data(), one_zero_vec_.gpu_data(),
        1, offsets_mutable);
  }
  for (int bottom_idx = 0; bottom_idx < bottom.size(); ++bottom_idx) {
    // When labels are interleaved in bottom, every second blob maps to one top.
    const int top_idx = expects_labels_ ? bottom_idx / 2 : bottom_idx;
    const Dtype* bottom_data = bottom[bottom_idx]->gpu_data();
    Dtype* top_data = top[top_idx]->mutable_gpu_data();
    for (int n = 0; n < num_; ++n) {
      // im2col transformation: unroll input regions for filtering
      // into column matrix for multplication.
      if (!is_1x1_) {
        im2col_3d_gpu(
            bottom_data + bottom[bottom_idx]->offset(n),
            channels_, height_, width_,
            block_c_, block_h_, block_w_,
            pad_c_, pad_h_, pad_w_,
            stride_c_, stride_h_, stride_w_,
            col_buff,
            blocks_round_down_, blocks_out_of_bounds_value_);
      } else {  // special case for 1x1 convolution
        if (!normalize_patches_) {
          col_buff = bottom[bottom_idx]->mutable_gpu_data() + bottom[bottom_idx]->offset(n);
        } else {
          // Normalization mutates the patches, so work on a copy.
          caffe_copy(N_ * K_, bottom[bottom_idx]->gpu_data() + bottom[bottom_idx]->offset(n), col_buff);
        }
      }
      if (normalize_patches_) {
        // Row-wise normalization kernel expects patches as rows; transpose,
        // normalize, transpose back.
        caffe_gpu_transpose(K_, N_,
            col_buff,
            row_buffer_.mutable_gpu_data());
        caffe_gpu_normalize_patches_rows_forward(K_, N_, normalization_fudge_factor_,
            row_buffer_.mutable_gpu_data(), normalize_variance_);
        caffe_gpu_transpose(N_, K_,
            row_buffer_.gpu_data(),
            col_buff);
      }
      // Prepare input
      Dtype* current_top = top_data + top[top_idx]->offset(n);
      if (num_regions_ > 1) {
        // Gather each offset-sharing region contiguously before the ggemm.
        split_patches_in = split_patches_in_.mutable_gpu_data();
        split_patches_out = split_patches_out_.mutable_gpu_data();
        split_patches_gpu<Dtype, false>(N_, K_,
            width_out_, height_out_, channels_out_,
            offsets_w_, offsets_h_, offsets_c_,
            shared_offsets_region_w_, shared_offsets_region_h_, shared_offsets_region_c_,
            col_buff, split_patches_in, use_unshared_regions_);
      } else {
        split_patches_in = col_buff;
        split_patches_out = current_top;
      }
      // Calculate
      mex_forward_gpu<Dtype>(M_, region_size_, K_, softmax_mode_, epsilon,
          offsets, split_patches_in, split_patches_out, num_regions_);
      // Copy to output if needed
      if (num_regions_ > 1) {
        split_patches_gpu<Dtype, true>(N_, M_,
            width_out_, height_out_, channels_out_,
            offsets_w_, offsets_h_, offsets_c_,
            shared_offsets_region_w_, shared_offsets_region_h_, shared_offsets_region_c_,
            current_top, split_patches_out, use_unshared_regions_);
      }
    }
  }
  if (!use_log_space_parameters_) {
    // BUG FIX: same host/device pointer mixup as above — convert the
    // parameters back to linear space using the device pointer (matches
    // the equivalent epilogue in Backward_gpu).
    caffe_gpu_exp<Dtype>(num_regions_ * M_ * K_, this->blobs_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_data());
    caffe_gpu_clip_min<Dtype>(num_regions_ * M_ * K_, this->blobs_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_data(), linear_space_min_value_);
  }
}
// Backward pass of the MEX layer. Depending on param_propagate_down_ /
// propagate_down it computes:
//   * the epsilon gradient (blobs_[0]) — mex_backward_epsilon* ggemm passes,
//     reduced with thrust::reduce and scaled by 1/(epsilon * K_);
//   * the offsets gradient (blobs_[1]) — mex_backward_offsets* or
//     mex_backward_normalized_offsets* ggemm passes (finite vs. infinite
//     epsilon variants);
//   * the bottom gradient — mex_backward_bottom* ggemm passes followed by
//     optional patch-normalization backprop and col2im_3d_gpu (non-1x1).
// The offsets preprocessing (linear->log conversion, normalization) mirrors
// Forward_gpu so the gradients are taken w.r.t. the same effective offsets.
template <typename Dtype>
void MEXLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  Dtype* split_patches_in = NULL;
  Dtype* split_patches_in_diff = NULL;
  Dtype* split_patches_out = NULL;
  Dtype* split_patches_out_diff = NULL;
  // Interlaced (data, diff) pairs of the top, consumed by the backward ggemms.
  typename vec<Dtype>::vec2* split_patches_out_inter = NULL;
  const Dtype epsilon = this->blobs_[0]->cpu_data()[0];
  Dtype epsilon_diff = 0;
  Dtype* epsilon_helper = NULL;
  if (this->param_propagate_down_[0]) {
    epsilon_helper = static_cast<Dtype*>(epsilon_helper_->mutable_gpu_data());
  }
  const Dtype* offsets = this->blobs_[1]->gpu_data();
  if (!use_log_space_parameters_) {
    // In-place conversion of linear-space parameters to log space.
    caffe_gpu_clip_min<Dtype>(num_regions_ * M_ * K_, offsets, this->blobs_[1]->mutable_gpu_data(), linear_space_min_value_);
    caffe_gpu_log<Dtype>(num_regions_ * M_ * K_, offsets, this->blobs_[1]->mutable_gpu_data());
  }
  if (normalize_offsets_) {
    // Same normalization as Forward_gpu: compute the per-row MEX norm
    // factor and subtract it (gemm with alpha = -1).
    mex_forward_gpu<Dtype>(M_ * num_regions_, 1, K_, softmax_mode_, epsilon,
        offsets, one_zero_vec_.gpu_diff(), offsets_norm_factor_.mutable_gpu_data());
    Dtype* offsets_mutable = NULL;
    if (!normalize_offsets_projected_) {
      caffe_copy<Dtype>(num_regions_ * M_ * K_, offsets, normed_offsets_.mutable_gpu_data());
      offsets = normed_offsets_.gpu_data();
      offsets_mutable = normed_offsets_.mutable_gpu_data();
    } else {
      offsets_mutable = this->blobs_[1]->mutable_gpu_data();
    }
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_ * num_regions_, K_, 1,
        -1, offsets_norm_factor_.gpu_data(), one_zero_vec_.gpu_data(),
        1, offsets_mutable);
  }
  Dtype* offsets_diff = NULL;
  if (this->param_propagate_down_[1]) {
    if (use_log_space_parameters_) {
      offsets_diff = this->blobs_[1]->mutable_gpu_diff();
    } else {
      // Linear-space mode accumulates into a scratch diff first; it is
      // mapped back through linear_offsets_gradient below.
      offsets_diff = normed_offsets_.mutable_gpu_diff();
    }
  }
  bool propagate_down_any = false;
  for (int top_idx = 0; top_idx < top.size(); ++top_idx) {
    if (propagate_down[top_idx]) {
      propagate_down_any = true;
      break;
    }
  }
  const Dtype* transposed_offsets = NULL;
  if (propagate_down_any) {
    // Bottom backprop multiplies by offsets^T; transpose each region once.
    transposed_offsets = static_cast<const Dtype*>(transposed_offsets_->gpu_data());
    for (int r = 0; r < num_regions_; ++r) {
      const int offsets_idx = r * M_ * K_;
      caffe_gpu_transpose(M_, K_,
          offsets + offsets_idx,
          static_cast<Dtype*>(transposed_offsets_->mutable_gpu_data()) + offsets_idx);
    }
  }
  for (int top_idx = 0; top_idx < top.size(); ++top_idx) {
    // With interleaved labels every top blob maps to every second bottom blob.
    const int bottom_idx = expects_labels_ ? top_idx * 2 : top_idx;
    if (this->param_propagate_down_[0] ||
        this->param_propagate_down_[1] ||
        propagate_down[top_idx]) {
      const Dtype* top_diff = top[top_idx]->gpu_diff();
      const Dtype* top_data = top[top_idx]->gpu_data();
      Dtype* col_buff = NULL;
      Dtype* col_diff = NULL;
      if (!is_1x1_ || normalize_patches_) {
        col_buff = col_buffer_.mutable_gpu_data();
      }
      if (!is_1x1_) {
        col_diff = col_buffer_.mutable_gpu_diff();
      }
      const Dtype* bottom_data = bottom[bottom_idx]->gpu_data();
      Dtype* bottom_diff = bottom[bottom_idx]->mutable_gpu_diff();
      for (int n = 0; n < num_; ++n) {
        // Since we saved memory in the forward pass by not storing all col
        // data, we will need to recompute them.
        if (!is_1x1_) {
          im2col_3d_gpu(
              bottom_data + bottom[bottom_idx]->offset(n),
              channels_, height_, width_,
              block_c_, block_h_, block_w_,
              pad_c_, pad_h_, pad_w_,
              stride_c_, stride_h_, stride_w_,
              col_buff,
              blocks_round_down_, blocks_out_of_bounds_value_);
        } else {  // special case for 1x1 convolution
          col_diff = bottom_diff + bottom[bottom_idx]->offset(n);
          if (!normalize_patches_) {
            col_buff = bottom[bottom_idx]->mutable_gpu_data() + bottom[bottom_idx]->offset(n);
          } else {
            caffe_copy(N_ * K_, bottom[bottom_idx]->mutable_gpu_data() + bottom[bottom_idx]->offset(n), col_buff);
          }
        }
        if (normalize_patches_) {
          // Keep the pre-normalization patches in row_buffer_'s diff slot —
          // the normalization backward below reads both versions.
          caffe_gpu_transpose(K_, N_,
              col_buff,
              row_buffer_.mutable_gpu_data());
          caffe_copy(K_ * N_,
              row_buffer_.gpu_data(),
              row_buffer_.mutable_gpu_diff());
          caffe_gpu_normalize_patches_rows_forward(K_, N_, normalization_fudge_factor_,
              row_buffer_.mutable_gpu_data(), normalize_variance_);
          caffe_gpu_transpose(N_, K_,
              row_buffer_.gpu_data(),
              col_buff);
        }
        // Prepare input for backprop
        const Dtype* current_top_data = top_data + n * M_ * N_;
        const Dtype* current_top_diff = top_diff + n * M_ * N_;
        if (num_regions_ > 1) {
          // Gather input patches, top data and top diff per region.
          split_patches_in = split_patches_in_.mutable_gpu_data();
          split_patches_in_diff = split_patches_in_.mutable_gpu_diff();
          split_patches_out = split_patches_out_.mutable_gpu_data();
          split_patches_out_diff = split_patches_out_.mutable_gpu_diff();
          split_patches_gpu<Dtype, false>(N_, K_,
              width_out_, height_out_, channels_out_,
              offsets_w_, offsets_h_, offsets_c_,
              shared_offsets_region_w_, shared_offsets_region_h_, shared_offsets_region_c_,
              col_buff, split_patches_in, use_unshared_regions_);
          split_patches_gpu<Dtype, false>(N_, M_,
              width_out_, height_out_, channels_out_,
              offsets_w_, offsets_h_, offsets_c_,
              shared_offsets_region_w_, shared_offsets_region_h_, shared_offsets_region_c_,
              current_top_data, split_patches_out, use_unshared_regions_);
          split_patches_gpu<Dtype, false>(N_, M_,
              width_out_, height_out_, channels_out_,
              offsets_w_, offsets_h_, offsets_c_,
              shared_offsets_region_w_, shared_offsets_region_h_, shared_offsets_region_c_,
              current_top_diff, split_patches_out_diff, use_unshared_regions_);
        } else {
          split_patches_in = col_buff;
          split_patches_in_diff = col_diff;
          split_patches_out = (Dtype*)current_top_data;
          split_patches_out_diff = (Dtype*)current_top_diff;
        }
        // Pack (top data, top diff) into vec2 pairs for the backward ggemms.
        split_patches_out_inter = static_cast<typename vec<Dtype>::vec2 *>(
            split_patches_out_inter_->mutable_gpu_data());
        interlace_gpu(num_regions_ * M_ * region_size_, split_patches_out, split_patches_out_diff,
            split_patches_out_inter);
        // Caculate backprop
        if (this->param_propagate_down_[0] && std::isfinite(epsilon)) {  // epsilon = ±inf => epsilon_diff = 0
          if (!normalize_offsets_ || normalize_offsets_projected_) {
            ggemm_readc_gpu
              <false, true, typename vec<Dtype>::vec2, Dtype, Dtype, Dtype,
               mex_backward_epsilon<Dtype>, ggemm_add<Dtype>, false, no_op<Dtype, Dtype>, false,
               true, true, true>
              (M_, K_, region_size_, split_patches_out_inter, split_patches_in,
               offsets, epsilon_helper,
               make_vec2<Dtype>(0, 0), 0, 0, epsilon, num_regions_);
          } else {
            ggemm_readc_gpu
              <false, true, typename vec<Dtype>::vec2, Dtype, Dtype, Dtype,
               mex_backward_epsilon_with_normalized_offsets<Dtype>, ggemm_add<Dtype>, false,
               no_op<Dtype, Dtype>, false,
               true, true, true>
              (M_, K_, region_size_, split_patches_out_inter, split_patches_in,
               offsets, epsilon_helper,
               make_vec2<Dtype>(0, 0), 0, 0, epsilon, num_regions_);
          }
          // Sum the per-entry contributions on the device, then scale.
          thrust::device_ptr<Dtype> cptr = thrust::device_pointer_cast(epsilon_helper);
          const Dtype sum_offsets_diff = thrust::reduce(cptr, cptr + num_regions_ * M_ * K_);
          epsilon_diff += sum_offsets_diff / (epsilon * K_);
        }
        if (this->param_propagate_down_[1]) {
          if (!use_log_space_parameters_) {
            // Scratch diff is overwritten (not accumulated) each iteration.
            caffe_gpu_set(M_ * K_ * num_regions_, Dtype(0), offsets_diff);
          }
          if (!normalize_offsets_ || normalize_offsets_projected_) {
            if (std::isfinite(epsilon)) {
              ggemm_readc_gpu
                <false, true, typename vec<Dtype>::vec2, Dtype, Dtype, typename vec<Dtype>::vec2,
                 mex_backward_offsets_finite<Dtype>, ggemm_add<Dtype>, true, no_op<Dtype, typename vec<Dtype>::vec2>, false,
                 true, true, true>
                (M_, K_, region_size_, split_patches_out_inter, split_patches_in,
                 offsets, offsets_diff,
                 make_vec2<Dtype>(epsilon > 0 ? INFINITY : -INFINITY, 0), 0, 0,
                 make_vec2<Dtype>(epsilon, softmax_mode_ ? Dtype(0) : (Dtype)-std::log(K_)), num_regions_);
            } else {
              ggemm_readc_gpu
                <false, true, typename vec<Dtype>::vec2, Dtype, Dtype, uint8_t,
                 mex_backward_offsets_infinite<Dtype>, ggemm_add<Dtype>, true, no_op<Dtype, uint8_t>, false,
                 true, true, true>
                (M_, K_, region_size_, split_patches_out_inter, split_patches_in,
                 offsets, offsets_diff,
                 make_vec2<Dtype>(0, 0), 0, 0, 0, num_regions_);
            }
          } else {
            if (std::isfinite(epsilon)) {
              ggemm_readc_gpu
                <false, true, typename vec<Dtype>::vec2, Dtype, Dtype, typename vec<Dtype>::vec2,
                 mex_backward_normalized_offsets_finite<Dtype>, ggemm_add<Dtype>, true,
                 no_op<Dtype, typename vec<Dtype>::vec2>, false,
                 true, true, true>
                (M_, K_, region_size_, split_patches_out_inter, split_patches_in,
                 offsets, offsets_diff,
                 make_vec2<Dtype>(epsilon > 0 ? INFINITY : -INFINITY, 0), 0, 0,
                 make_vec2<Dtype>(epsilon, softmax_mode_ ? Dtype(0) : (Dtype)-std::log(K_)), num_regions_);
            } else {
              ggemm_readc_gpu
                <false, true, typename vec<Dtype>::vec2, Dtype, Dtype, uint8_t,
                 mex_backward_normalized_offsets_infinite<Dtype>, ggemm_add<Dtype>, true,
                 no_op<Dtype, uint8_t>, false,
                 true, true, true>
                (M_, K_, region_size_, split_patches_out_inter, split_patches_in,
                 offsets, offsets_diff,
                 make_vec2<Dtype>(0, 0), 0, 0, 0, num_regions_);
            }
          }
        }
        if (propagate_down[top_idx]) {
          // Bottom gradient: offsets^T (K x M) times the interlaced top pairs.
          if (std::isfinite(epsilon)) {
            ggemm_readc_gpu
              <false, false, Dtype, typename vec<Dtype>::vec2, Dtype, typename vec<Dtype>::vec2,
               mex_backward_bottom_finite<Dtype>, ggemm_add<Dtype>, false, no_op<Dtype, typename vec<Dtype>::vec2>, false,
               true, true, true>
              (K_, region_size_, M_, transposed_offsets, split_patches_out_inter,
               split_patches_in, split_patches_in_diff, 0, make_vec2<Dtype>(epsilon > 0 ? INFINITY : -INFINITY, 0), 0,
               make_vec2<Dtype>(epsilon, softmax_mode_ ? Dtype(0) : (Dtype)-std::log(K_)), num_regions_);
          } else {
            ggemm_readc_gpu
              <false, false, Dtype, typename vec<Dtype>::vec2, Dtype, uint8_t,
               mex_backward_bottom_infinite<Dtype>, ggemm_add<Dtype>, false, no_op<Dtype, uint8_t>, false,
               true, true, true>
              (K_, region_size_, M_, transposed_offsets, split_patches_out_inter,
               split_patches_in, split_patches_in_diff, 0, make_vec2<Dtype>(0, 0), 0, 0, num_regions_);
          }
        }
        // Copy to bottom if needed
        if (num_regions_ > 1) {
          split_patches_gpu<Dtype, true>(N_, K_,
              width_out_, height_out_, channels_out_,
              offsets_w_, offsets_h_, offsets_c_,
              shared_offsets_region_w_, shared_offsets_region_h_, shared_offsets_region_c_,
              col_diff, split_patches_in_diff, use_unshared_regions_);
        }
        // Backprop for patch normalization
        if (normalize_patches_ && propagate_down[top_idx]) {
          caffe_gpu_transpose(K_, N_, col_diff, col_buff);
          caffe_gpu_normalize_patches_rows_backward(K_, N_, normalization_fudge_factor_,
              row_buffer_.gpu_diff(), row_buffer_.gpu_data(), col_buff, normalize_variance_);
          caffe_gpu_transpose(N_, K_, col_buff, col_diff);
        }
        if (propagate_down[top_idx] && !is_1x1_) {
          col2im_3d_gpu(
              col_diff,
              channels_, height_, width_,
              block_c_, block_h_, block_w_,
              pad_c_, pad_h_, pad_w_,
              stride_c_, stride_h_, stride_w_,
              bottom_diff + bottom[bottom_idx]->offset(n),
              blocks_round_down_);
        }
        // NOTE(review): this linear-space gradient conversion runs inside
        // the per-sample loop, so original_offsets_diff is re-derived from
        // the current scratch offsets_diff on every iteration rather than
        // once after all samples are accumulated — confirm this is intended.
        if (!use_log_space_parameters_ && this->param_propagate_down_[1]) {
          const Dtype* original_logspace_offsets = this->blobs_[1]->gpu_data();
          Dtype* original_offsets_diff = this->blobs_[1]->mutable_gpu_diff();
          linear_offsets_gradient<Dtype>(num_regions_ * M_ * K_, original_logspace_offsets,
              offsets_diff, linear_space_min_value_, original_offsets_diff);
        }
      }
    }
  }
  if (this->param_propagate_down_[0]) {
    this->blobs_[0]->mutable_cpu_diff()[0] = epsilon_diff;
  }
  if (use_log_space_parameters_ && this->param_propagate_down_[1] && this->maximum_entropy_regularization_coeff_ > Dtype(0)) {
    // Add the maximum-entropy regularization term to the offsets gradient.
    caffe_gpu_maximum_entropy_regularization(num_regions_ * M_, K_, offsets, normed_offsets_.mutable_gpu_diff());
    caffe_gpu_axpy(num_regions_ * M_ * K_, maximum_entropy_regularization_coeff_, normed_offsets_.gpu_diff(), offsets_diff);
  }
  if (!use_log_space_parameters_) {
    // Restore the parameters to linear space (inverse of the prologue).
    caffe_gpu_exp<Dtype>(num_regions_ * M_ * K_, this->blobs_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_data());
    caffe_gpu_clip_min<Dtype>(num_regions_ * M_ * K_, this->blobs_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_data(), linear_space_min_value_);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(MEXLayer);
INSTANTIATE_LAYER_GPU_INIT_STEP(MEXLayer);
} // namespace caffe
|
d57a84be45ee46eb889d9e4ca672e8af87e6909b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
tdo.cu
Developed for the master thesis project: GPU-accelerated Thermodynamic Topology Optimization
Author: Wan Arif bin Wan Abhar
Institution: Ruhr Universitaet Bochum
*/
#include <iostream>
#include <cmath>
#include "../include/assemble.h"
#include "../include/cudakernels.h"
#include "../include/tdo.h"
#include <fstream>
#include <stdexcept>
#include <sstream>
#include <string>
using namespace std;
// Writes the per-element driving force `df` and the nodal displacement field
// `u` to a legacy-VTK (v2.0) STRUCTURED_GRID ASCII file.
//   df              - one scalar per element (CELL_DATA)
//   u               - dim components per node (POINT_DATA; z padded with 0 in 2D)
//   filename        - output path; throws std::runtime_error if it cannot be opened
//   dim             - 2 or 3; for 2D a third dimension of size 1 is appended
//   numNodesPerDim  - nodes per dimension (passed by value; safe to extend)
//   h               - uniform grid spacing
//   numElements     - number of elements (size of df)
//   numNodes        - number of nodes (u has dim*numNodes entries)
// NOTE: in the 2D branch the grid is written with the y/z coordinates swapped
// (points at (x, 0, y)); this mirrors the original behavior and is preserved.
void WriteVectorToVTK_df(vector<double> &df, vector<double> &u, const std::string& filename, size_t dim, vector<size_t> numNodesPerDim, double h, size_t numElements, size_t numNodes)
{
    std::ofstream ofs(filename, std::ios::out);

    // BUG FIX: a failed open sets failbit, not badbit, so the original
    // `ofs.bad()` check never fired and the function silently wrote nothing.
    // Test the whole stream state instead.
    if (!ofs)
    {
        std::ostringstream oss;
        oss << "File '" << filename << "' could not be opened for writing.";
        throw std::runtime_error(oss.str());
    }

    // VTK legacy header.
    ofs << "# vtk DataFile Version 2.0" << std::endl;
    ofs << "Thermodynamics Topology Optimzation" << std::endl;
    ofs << "ASCII" << std::endl;
    ofs << endl;
    ofs << "DATASET STRUCTURED_GRID" << std::endl;

    // VTK always expects three dimensions; pad 2D with a single layer.
    if ( dim == 2 )
        numNodesPerDim.push_back(1);

    // specify number of nodes in each dimension
    ofs << "DIMENSIONS";
    for (std::size_t i = 0; i < 3; ++i)
        ofs << " " << numNodesPerDim[i];
    ofs << std::endl;

    // specify the coordinates of all points
    ofs << "POINTS ";
    ofs << numNodes << " float" << endl;

    if ( dim == 2)
    {
        // 2D: original output order (x, z, y) with z == 0 is kept.
        for (std::size_t z = 0; z < numNodesPerDim[2]; ++z)
        {
            for (std::size_t y = 0; y < numNodesPerDim[1]; ++y)
            {
                for (std::size_t x = 0; x < numNodesPerDim[0]; ++x)
                    ofs << " " << h*x << " " << h*z << " " << h*y << endl;
            }
        }
    }
    else
    {
        for (std::size_t z = 0; z < numNodesPerDim[2]; ++z)
        {
            for (std::size_t y = 0; y < numNodesPerDim[1]; ++y)
            {
                for (std::size_t x = 0; x < numNodesPerDim[0]; ++x)
                    ofs << " " << h*x << " " << h*y << " " << h*z << endl;
            }
        }
    }

    ofs << endl;

    // specifying the design variable in each element
    ofs << "CELL_DATA " << numElements << endl;
    ofs << "SCALARS df double" << endl;
    ofs << "LOOKUP_TABLE default" << endl;
    for (std::size_t i = 0 ; i < numElements ; i++)
        ofs << " " << df[i] << endl;

    ofs << endl;

    // specifying the displacements for all dimensions in each point
    ofs << "POINT_DATA " << numNodes << std::endl;
    ofs << "VECTORS displacements double" << std::endl;
    for ( std::size_t i = 0 ; i < numNodes ; i++ )
    {
        if ( dim == 2 )
        {
            for ( std::size_t j = 0 ; j < 2 ; j++ )
                ofs << " " << u[dim*i + j];
            // setting displacement in z-dimension to zero
            ofs << " 0";
        }
        else
        {
            for ( std::size_t j = 0 ; j < dim ; j++ )
                ofs << " " << u[dim*i + j];
        }
        ofs << endl;
    }
}
// Writes the per-element Laplacian `laplacian` and the nodal displacement
// field `u` to a legacy-VTK (v2.0) STRUCTURED_GRID ASCII file.
// Same conventions as WriteVectorToVTK_df; note that here the point
// coordinates are always written in (x, z, y) order, as in the original.
// Throws std::runtime_error if `filename` cannot be opened for writing.
void WriteVectorToVTK_laplacian(vector<double> &laplacian, vector<double> &u, const std::string& filename, size_t dim, vector<size_t> numNodesPerDim, double h, size_t numElements, size_t numNodes)
{
    std::ofstream ofs(filename, std::ios::out);

    // BUG FIX: a failed open sets failbit, not badbit, so the original
    // `ofs.bad()` check never fired. Test the whole stream state instead.
    if (!ofs)
    {
        std::ostringstream oss;
        oss << "File '" << filename << "' could not be opened for writing.";
        throw std::runtime_error(oss.str());
    }

    // VTK legacy header.
    ofs << "# vtk DataFile Version 2.0" << std::endl;
    ofs << "Thermodynamics Topology Optimzation" << std::endl;
    ofs << "ASCII" << std::endl;
    ofs << endl;
    ofs << "DATASET STRUCTURED_GRID" << std::endl;

    // VTK always expects three dimensions; pad 2D with a single layer.
    if ( dim == 2 )
        numNodesPerDim.push_back(1);

    // specify number of nodes in each dimension
    ofs << "DIMENSIONS";
    for (std::size_t i = 0; i < 3; ++i)
        ofs << " " << numNodesPerDim[i];
    ofs << std::endl;

    // specify the coordinates of all points
    ofs << "POINTS ";
    ofs << numNodes << " float" << endl;
    for (std::size_t z = 0; z < numNodesPerDim[2]; ++z)
    {
        for (std::size_t y = 0; y < numNodesPerDim[1]; ++y)
        {
            for (std::size_t x = 0; x < numNodesPerDim[0]; ++x)
                ofs << " " << h*x << " " << h*z << " " << h*y << endl;
        }
    }

    ofs << endl;

    // specifying the laplacian in each element
    ofs << "CELL_DATA " << numElements << endl;
    ofs << "SCALARS lp double" << endl;
    ofs << "LOOKUP_TABLE default" << endl;
    for (std::size_t i = 0 ; i < numElements ; i++)
        ofs << " " << laplacian[i] << endl;

    ofs << endl;

    // specifying the displacements for all dimensions in each point
    ofs << "POINT_DATA " << numNodes << std::endl;
    ofs << "VECTORS displacements double" << std::endl;
    for ( std::size_t i = 0 ; i < numNodes ; i++ )
    {
        if ( dim == 2 )
        {
            for ( std::size_t j = 0 ; j < 2 ; j++ )
                ofs << " " << u[dim*i + j];
            // setting displacement in z-dimension to zero
            ofs << " 0";
        }
        else
        {
            for ( std::size_t j = 0 ; j < dim ; j++ )
                ofs << " " << u[dim*i + j];
        }
        ofs << endl;
    }
}
// Benchmark-output switch: while the flag is false (0), innerloop() writes
// its bisection timing summary to the benchmark stream; setting it true
// suppresses that summary.
void TDO::setBM(bool enabled)
{
    bm_switch = enabled;
}

// Number of bisection steps performed by the most recent innerloop() call.
int TDO::getCounter()
{
    return m_counter;
}

// Accumulated bisection wall time in milliseconds from the most recent
// innerloop() call.
float TDO::getSum()
{
    return m_sum_it;
}
// Constructs the TDO (thermodynamic topology optimization) driver.
// Stores the device pointers and problem sizes, then derives:
//   m_n            - inner-loop frequency n = (6/eta*) * (beta*/h^2)
//   m_del_t        - pseudo time step 1/n
//   m_local_volume - element volume h^dim
// N holds the element counts per dimension; m_Nz is 0 for 2D problems.
// Device memory is allocated separately in init(), not here.
TDO::TDO(double* d_u, double* d_chi, double h, size_t dim, double betastar, double etastar, size_t numElements, size_t num_rows, double* d_A_local, vector<size_t*> d_node_index, vector<size_t> N, double rho, size_t numLevels, size_t p, size_t* &d_node_index_)
 : m_d_u(d_u), m_d_chi(d_chi), m_h(h), m_dim(dim), m_numElements(numElements), m_num_rows(num_rows), m_d_A_local(d_A_local), m_d_node_index(d_node_index), m_rho(rho), m_etastar(etastar), m_betastar(betastar), m_numLevels(numLevels), m_p(p), m_d_node_index_(d_node_index_)
{
    // inner loop frequency, n
    m_n = (6 / m_etastar) * ( m_betastar / (m_h*m_h) );
    m_del_t = 1.0 / m_n;

    m_Nx = N[0];
    m_Ny = N[1];

    if (N.size() == 3)
        m_Nz = N[2];

    else
        m_Nz = 0;

    // local volume
    m_local_volume = pow(m_h, m_dim);

    // benchmark summary enabled by default (see setBM)
    bm_switch = 0;
}
// destructor performs device memory deallocation
// Frees every device buffer allocated in init().
// NOTE(review): these member pointers are only assigned in init(); if init()
// was never called they are uninitialized and these hipFree calls would
// operate on garbage pointers — confirm init() is always invoked before
// destruction, or initialize the pointers to nullptr in the constructor.
TDO::~TDO()
{
    CUDA_CALL( hipFree(m_d_df) );
    CUDA_CALL( hipFree(m_d_beta) );
    CUDA_CALL( hipFree(m_d_eta) );
    CUDA_CALL( hipFree(m_d_mutex) );
    CUDA_CALL( hipFree(m_d_lambda_tr) );
    CUDA_CALL( hipFree(m_d_lambda_l) );
    CUDA_CALL( hipFree(m_d_lambda_u) );
    CUDA_CALL( hipFree(m_d_chi_tr) );
    CUDA_CALL( hipFree(m_d_rho_tr) );
    CUDA_CALL( hipFree(m_d_p_w) );
    CUDA_CALL( hipFree(m_d_tdo_foo) );
    CUDA_CALL( hipFree(m_d_sum_g) );
    CUDA_CALL( hipFree(m_d_sum_df_g) );
}
// Allocates and zero-initializes all device buffers used by innerloop()
// and computes the launch configuration for element-wise kernels.
// Returns true on success (CUDA_CALL aborts/reports on failure).
bool TDO::init()
{
    calculateDimensions(m_numElements, m_gridDim, m_blockDim);

    // Driving force per element.
    CUDA_CALL( hipMalloc( (void**)&m_d_df, sizeof(double) * m_numElements ) );
    CUDA_CALL( hipMemset( m_d_df, 0, sizeof(double) * m_numElements) );

    CUDA_CALL( hipMalloc( (void**)&m_d_beta, sizeof(double) ) );
    CUDA_CALL( hipMemset( m_d_beta, 0, sizeof(double)) );

    CUDA_CALL( hipMalloc( (void**)&m_d_eta, sizeof(double) ) );
    CUDA_CALL( hipMemset( m_d_eta, 0, sizeof(double)) );

    // BUG FIX: the mutex flag used by the lambda reduction kernels was
    // allocated but never initialized — every other buffer here is zeroed.
    // An uninitialized lock word would make the reductions undefined.
    CUDA_CALL( hipMalloc( (void**)&m_d_mutex, sizeof(int) ) );
    CUDA_CALL( hipMemset( m_d_mutex, 0, sizeof(int) ) );

    CUDA_CALL( hipMalloc( (void**)&m_d_lambda_tr, sizeof(double) ) );
    CUDA_CALL( hipMalloc( (void**)&m_d_lambda_l, sizeof(double) ) );
    CUDA_CALL( hipMalloc( (void**)&m_d_lambda_u, sizeof(double) ) );
    CUDA_CALL( hipMalloc( (void**)&m_d_chi_tr, sizeof(double) * m_numElements) );
    CUDA_CALL( hipMalloc( (void**)&m_d_rho_tr, sizeof(double) ) );
    CUDA_CALL( hipMalloc( (void**)&m_d_p_w, sizeof(double) ) );

    CUDA_CALL( hipMemset( m_d_lambda_l, 0, sizeof(double) ) );
    CUDA_CALL( hipMemset( m_d_lambda_tr, 0, sizeof(double) ) );
    CUDA_CALL( hipMemset( m_d_lambda_u, 0, sizeof(double) ) );
    CUDA_CALL( hipMemset( m_d_chi_tr, 0, sizeof(double) * m_numElements) );
    CUDA_CALL( hipMemset( m_d_rho_tr, 0, sizeof(double) ) );
    CUDA_CALL( hipMemset( m_d_p_w, 0, sizeof(double) ) );

    // Device-side convergence flag, seeded from the host value.
    CUDA_CALL( hipMalloc( (void**)&m_d_tdo_foo, sizeof(bool) ) );
    CUDA_CALL( hipMemcpy( m_d_tdo_foo, &m_tdo_foo, sizeof(bool), hipMemcpyHostToDevice) );

    CUDA_CALL( hipMalloc( (void**)&m_d_sum_g, sizeof(double) ) );
    CUDA_CALL( hipMalloc( (void**)&m_d_sum_df_g, sizeof(double) ) );

    return true;
}
// Stores the verbosity flag (its consumers are outside the code visible in
// this translation unit).
void TDO::set_verbose(bool verbose)
{
    m_verbose = verbose;
}

// Stores the VTK-output flag (its consumers are outside the code visible in
// this translation unit).
void TDO::print_VTK(bool enable)
{
    m_printVTK = enable;
}
// Runs n inner optimization steps of the thermodynamic topology optimization:
// for each step it computes the per-element driving force, the weighted
// average p_w, eta/beta, and then bisects on the Lagrange multiplier lambda
// until the trial density rho_tr matches the target rho. Finally it computes
// compliance c and the measure-of-discreteness MOD, and commits chi <- chi_tr.
// Benchmark timings are written to `ofssbm` (first call only for per-kernel
// timings; summary controlled by bm_switch).
// Fixes vs. the original:
//  * the per-step timer reset zeroed `milliseconds` instead of
//    `milliseconds_it` (the variable actually used for the bisection timing);
//  * the four hipEvent_t objects were created on every call and never
//    destroyed, leaking events — they are now released before returning.
bool TDO::innerloop(double* &d_u, double* &d_chi, double* &d_c, double* &d_MOD, ofstream& ofssbm)
{
    // benchmark output
    // ofstream ofssbm(filename, ios::out);
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    float milliseconds = 0;

    static int inner_counter = 0;

    // per iteration
    hipEvent_t start_it, stop_it;
    hipEventCreate(&start_it);
    hipEventCreate(&stop_it);
    float milliseconds_it = 0;

    m_d_u = d_u;
    m_d_chi = d_chi;
    m_tdo_foo = true;

    // Reset device-side accumulators and the convergence flag.
    hipLaunchKernelGGL(( setToTrue), dim3(1),dim3(1), 0, 0,  m_d_tdo_foo );
    hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0,  m_d_p_w, 1 );
    hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0,  m_d_sum_g, 1 );
    hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0,  m_d_sum_df_g, 1 );
    hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0,  m_d_df, m_numElements );

    //// loop n times
    for ( int j = 0 ; j < m_n ; j++ )
    {
        // calculating the driving force of each element
        // df[] = ( 1 / 2*local_volume ) * ( p * pow(chi[], p - 1 ) ) * ( u^T * A_local * u )
        hipEventRecord(start);
        hipLaunchKernelGGL(( calcDrivingForce), dim3(m_gridDim), dim3(m_blockDim), 0, 0, m_d_df, m_d_u, m_d_chi, m_p, m_d_node_index_, m_d_A_local, m_num_rows, m_dim, m_local_volume, m_numElements);
        hipEventRecord(stop);
        hipEventSynchronize(stop);
        milliseconds = 0;
        hipEventElapsedTime(&milliseconds, start, stop);
        if ( inner_counter == 0 ) ofssbm << "calcDrivingForce()\t\t" << milliseconds << endl;

        // calculating average weighted driving force, p_w
        hipEventRecord(start);
        calcP_w_(m_d_p_w, m_d_sum_g, m_d_sum_df_g, m_d_df, m_d_chi, m_numElements, m_local_volume);
        hipEventRecord(stop);
        hipEventSynchronize(stop);
        milliseconds = 0;
        hipEventElapsedTime(&milliseconds, start, stop);
        if ( inner_counter == 0 ) ofssbm << "calcP_w()\t\t\t" << milliseconds << endl;

        // calculating eta and beta
        hipEventRecord(start);
        hipLaunchKernelGGL(( calcEtaBeta), dim3(1),dim3(2), 0, 0,  m_d_eta, m_d_beta, m_etastar, m_betastar, m_d_p_w );
        hipEventRecord(stop);
        hipEventSynchronize(stop);
        milliseconds = 0;
        hipEventElapsedTime(&milliseconds, start, stop);
        if ( inner_counter == 0 ) ofssbm << "calcEtaBeta()\t\t\t" << milliseconds << endl;

        // bisection algo:
        hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0,  m_d_lambda_tr, 1);

        hipEventRecord(start);
        // computing lambda lower
        hipLaunchKernelGGL(( calcLambdaLower), dim3(m_gridDim), dim3(m_blockDim) , 0, 0, m_d_df, m_d_lambda_l, m_d_mutex, m_d_beta, m_d_chi, m_d_eta, m_Nx, m_Ny, m_Nz, m_numElements, m_h);
        hipEventRecord(stop);
        hipEventSynchronize(stop);
        milliseconds = 0;
        hipEventElapsedTime(&milliseconds, start, stop);
        if ( inner_counter == 0 ) ofssbm << "calcLambdaLower()\t\t" << milliseconds << endl;

        // computing lambda upper
        hipEventRecord(start);
        hipLaunchKernelGGL(( calcLambdaUpper), dim3(m_gridDim), dim3(m_blockDim) , 0, 0, m_d_df, m_d_lambda_u, m_d_mutex, m_d_beta, m_d_chi, m_d_eta, m_Nx, m_Ny, m_Nz, m_numElements, m_h);
        hipEventRecord(stop);
        hipEventSynchronize(stop);
        milliseconds = 0;
        hipEventElapsedTime(&milliseconds, start, stop);
        if ( inner_counter == 0 ) ofssbm << "calcLambdaLower()\t\t" << milliseconds << endl;

        // remaining operations on lambda lower and upper
        hipLaunchKernelGGL(( minus_GPU), dim3(1),dim3(1), 0, 0,  m_d_lambda_l, m_d_eta);
        hipLaunchKernelGGL(( add_GPU), dim3(1),dim3(1), 0, 0,  m_d_lambda_u, m_d_eta);

        m_sum_it = 0;
        m_counter = 0;

        while(m_tdo_foo)
        {
            hipEventRecord(start_it);

            // computing chi_trial
            hipEventRecord(start);
            hipLaunchKernelGGL(( calcChiTrial), dim3(m_gridDim),dim3(m_blockDim), 0, 0,  m_d_chi, m_d_df, m_d_lambda_tr, m_del_t, m_d_eta, m_d_beta, m_d_chi_tr, m_Nx, m_Ny, m_Nz, m_numElements, m_h);
            hipEventRecord(stop);
            hipEventSynchronize(stop);
            milliseconds = 0;
            hipEventElapsedTime(&milliseconds, start, stop);
            if ( inner_counter == 0 ) ofssbm << "calcChiTrial()\t\t\t" << milliseconds << endl;

            // computing rho_trial
            hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0,  m_d_rho_tr, 1);
            hipEventRecord(start);
            hipLaunchKernelGGL(( sumOfVector_GPU) , dim3(m_gridDim), dim3(m_blockDim) , 0, 0, m_d_rho_tr, m_d_chi_tr, m_numElements);
            hipLaunchKernelGGL(( calcRhoTrial), dim3(1),dim3(1), 0, 0, m_d_rho_tr, m_local_volume, m_numElements);
            hipEventRecord(stop);
            hipEventSynchronize(stop);
            milliseconds = 0;
            hipEventElapsedTime(&milliseconds, start, stop);
            if ( inner_counter == 0 ) ofssbm << "calcRhoTrial()\t\t\t" << milliseconds << endl;

            // bisection update of lambda from (rho_tr - rho)
            hipEventRecord(start);
            hipLaunchKernelGGL(( calcLambdaTrial), dim3(1),dim3(1), 0, 0,  m_d_rho_tr, m_rho, m_d_lambda_l, m_d_lambda_u, m_d_lambda_tr);
            hipEventRecord(stop);
            hipEventSynchronize(stop);
            milliseconds = 0;
            hipEventElapsedTime(&milliseconds, start, stop);
            if ( inner_counter == 0 ) ofssbm << "calcLambdaTrial()\t\t" << milliseconds << endl;

            hipLaunchKernelGGL(( checkTDOConvergence), dim3(1),dim3(1), 0, 0,  m_d_tdo_foo, m_rho, m_d_rho_tr);
            CUDA_CALL( hipMemcpy( &m_tdo_foo, m_d_tdo_foo, sizeof(bool), hipMemcpyDeviceToHost) );

            hipEventRecord(stop_it);
            hipEventSynchronize(stop_it);
            // BUG FIX: the original reset `milliseconds` here; the elapsed
            // time of this bisection step is stored in `milliseconds_it`.
            milliseconds_it = 0;
            hipEventElapsedTime(&milliseconds_it, start_it, stop_it);
            m_sum_it += milliseconds_it;
            inner_counter++;
            m_counter++;
        }

        // computing compliance, c = 0.5 * sum( u^T * K * u )
        hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0,  d_c, 1 );
        hipLaunchKernelGGL(( calcCompliance), dim3(m_gridDim), dim3(m_blockDim) , 0, 0, d_c, m_d_u, d_chi, m_d_node_index_, m_d_A_local, m_local_volume, m_num_rows, m_dim, m_numElements);

        // computing MOD
        hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0,  d_MOD, 1 );
        hipLaunchKernelGGL(( calcMOD), dim3(m_gridDim), dim3(m_blockDim) , 0, 0, d_MOD, d_chi, m_local_volume, m_numElements);

        if ( bm_switch == 0) ofssbm << "Total time of bisection algo \t" << m_sum_it << endl;
        if ( bm_switch == 0) ofssbm << "Number of steps \t\t" << inner_counter << endl;
        if ( bm_switch == 0) ofssbm << "Average time per bisection step " << m_sum_it/inner_counter << endl;

        // chi(j) = chi(j+1)
        hipLaunchKernelGGL(( vectorEquals_GPU), dim3(m_gridDim),dim3(m_blockDim), 0, 0,  m_d_chi, m_d_chi_tr, m_numElements );
    }

    // BUG FIX: release the timing events — they were created on every call
    // and never destroyed, leaking a handle per event per invocation.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipEventDestroy(start_it);
    hipEventDestroy(stop_it);

    return true;
}
| d57a84be45ee46eb889d9e4ca672e8af87e6909b.cu | /*
tdo.cu
Developed for the master thesis project: GPU-accelerated Thermodynamic Topology Optimization
Author: Wan Arif bin Wan Abhar
Institution: Ruhr Universitaet Bochum
*/
#include <iostream>
#include <cmath>
#include "../include/assemble.h"
#include "../include/cudakernels.h"
#include "../include/tdo.h"
#include <fstream>
#include <stdexcept>
#include <sstream>
#include <string>
using namespace std;
// Writes the per-element driving force `df` and the nodal displacement field
// `u` to a legacy-VTK (v2.0) STRUCTURED_GRID ASCII file.
//   df              - one scalar per element (CELL_DATA)
//   u               - dim components per node (POINT_DATA; z padded with 0 in 2D)
//   filename        - output path; throws std::runtime_error if it cannot be opened
//   dim             - 2 or 3; for 2D a third dimension of size 1 is appended
//   numNodesPerDim  - nodes per dimension (passed by value; safe to extend)
//   h               - uniform grid spacing
//   numElements     - number of elements (size of df)
//   numNodes        - number of nodes (u has dim*numNodes entries)
// NOTE: in the 2D branch the grid is written with the y/z coordinates swapped
// (points at (x, 0, y)); this mirrors the original behavior and is preserved.
void WriteVectorToVTK_df(vector<double> &df, vector<double> &u, const std::string& filename, size_t dim, vector<size_t> numNodesPerDim, double h, size_t numElements, size_t numNodes)
{
    std::ofstream ofs(filename, std::ios::out);

    // BUG FIX: a failed open sets failbit, not badbit, so the original
    // `ofs.bad()` check never fired and the function silently wrote nothing.
    // Test the whole stream state instead.
    if (!ofs)
    {
        std::ostringstream oss;
        oss << "File '" << filename << "' could not be opened for writing.";
        throw std::runtime_error(oss.str());
    }

    // VTK legacy header.
    ofs << "# vtk DataFile Version 2.0" << std::endl;
    ofs << "Thermodynamics Topology Optimzation" << std::endl;
    ofs << "ASCII" << std::endl;
    ofs << endl;
    ofs << "DATASET STRUCTURED_GRID" << std::endl;

    // VTK always expects three dimensions; pad 2D with a single layer.
    if ( dim == 2 )
        numNodesPerDim.push_back(1);

    // specify number of nodes in each dimension
    ofs << "DIMENSIONS";
    for (std::size_t i = 0; i < 3; ++i)
        ofs << " " << numNodesPerDim[i];
    ofs << std::endl;

    // specify the coordinates of all points
    ofs << "POINTS ";
    ofs << numNodes << " float" << endl;

    if ( dim == 2)
    {
        // 2D: original output order (x, z, y) with z == 0 is kept.
        for (std::size_t z = 0; z < numNodesPerDim[2]; ++z)
        {
            for (std::size_t y = 0; y < numNodesPerDim[1]; ++y)
            {
                for (std::size_t x = 0; x < numNodesPerDim[0]; ++x)
                    ofs << " " << h*x << " " << h*z << " " << h*y << endl;
            }
        }
    }
    else
    {
        for (std::size_t z = 0; z < numNodesPerDim[2]; ++z)
        {
            for (std::size_t y = 0; y < numNodesPerDim[1]; ++y)
            {
                for (std::size_t x = 0; x < numNodesPerDim[0]; ++x)
                    ofs << " " << h*x << " " << h*y << " " << h*z << endl;
            }
        }
    }

    ofs << endl;

    // specifying the design variable in each element
    ofs << "CELL_DATA " << numElements << endl;
    ofs << "SCALARS df double" << endl;
    ofs << "LOOKUP_TABLE default" << endl;
    for (std::size_t i = 0 ; i < numElements ; i++)
        ofs << " " << df[i] << endl;

    ofs << endl;

    // specifying the displacements for all dimensions in each point
    ofs << "POINT_DATA " << numNodes << std::endl;
    ofs << "VECTORS displacements double" << std::endl;
    for ( std::size_t i = 0 ; i < numNodes ; i++ )
    {
        if ( dim == 2 )
        {
            for ( std::size_t j = 0 ; j < 2 ; j++ )
                ofs << " " << u[dim*i + j];
            // setting displacement in z-dimension to zero
            ofs << " 0";
        }
        else
        {
            for ( std::size_t j = 0 ; j < dim ; j++ )
                ofs << " " << u[dim*i + j];
        }
        ofs << endl;
    }
}
// Writes the per-element Laplacian `laplacian` and the nodal displacement
// field `u` to a legacy-VTK (v2.0) STRUCTURED_GRID ASCII file.
// Same conventions as WriteVectorToVTK_df; note that here the point
// coordinates are always written in (x, z, y) order, as in the original.
// Throws std::runtime_error if `filename` cannot be opened for writing.
void WriteVectorToVTK_laplacian(vector<double> &laplacian, vector<double> &u, const std::string& filename, size_t dim, vector<size_t> numNodesPerDim, double h, size_t numElements, size_t numNodes)
{
    std::ofstream ofs(filename, std::ios::out);

    // BUG FIX: a failed open sets failbit, not badbit, so the original
    // `ofs.bad()` check never fired. Test the whole stream state instead.
    if (!ofs)
    {
        std::ostringstream oss;
        oss << "File '" << filename << "' could not be opened for writing.";
        throw std::runtime_error(oss.str());
    }

    // VTK legacy header.
    ofs << "# vtk DataFile Version 2.0" << std::endl;
    ofs << "Thermodynamics Topology Optimzation" << std::endl;
    ofs << "ASCII" << std::endl;
    ofs << endl;
    ofs << "DATASET STRUCTURED_GRID" << std::endl;

    // VTK always expects three dimensions; pad 2D with a single layer.
    if ( dim == 2 )
        numNodesPerDim.push_back(1);

    // specify number of nodes in each dimension
    ofs << "DIMENSIONS";
    for (std::size_t i = 0; i < 3; ++i)
        ofs << " " << numNodesPerDim[i];
    ofs << std::endl;

    // specify the coordinates of all points
    ofs << "POINTS ";
    ofs << numNodes << " float" << endl;
    for (std::size_t z = 0; z < numNodesPerDim[2]; ++z)
    {
        for (std::size_t y = 0; y < numNodesPerDim[1]; ++y)
        {
            for (std::size_t x = 0; x < numNodesPerDim[0]; ++x)
                ofs << " " << h*x << " " << h*z << " " << h*y << endl;
        }
    }

    ofs << endl;

    // specifying the laplacian in each element
    ofs << "CELL_DATA " << numElements << endl;
    ofs << "SCALARS lp double" << endl;
    ofs << "LOOKUP_TABLE default" << endl;
    for (std::size_t i = 0 ; i < numElements ; i++)
        ofs << " " << laplacian[i] << endl;

    ofs << endl;

    // specifying the displacements for all dimensions in each point
    ofs << "POINT_DATA " << numNodes << std::endl;
    ofs << "VECTORS displacements double" << std::endl;
    for ( std::size_t i = 0 ; i < numNodes ; i++ )
    {
        if ( dim == 2 )
        {
            for ( std::size_t j = 0 ; j < 2 ; j++ )
                ofs << " " << u[dim*i + j];
            // setting displacement in z-dimension to zero
            ofs << " 0";
        }
        else
        {
            for ( std::size_t j = 0 ; j < dim ; j++ )
                ofs << " " << u[dim*i + j];
        }
        ofs << endl;
    }
}
// Sets the benchmark-output switch read by innerloop(); when bm_switch is 0
// the bisection summary lines are written to the benchmark stream.
void TDO::setBM(bool x){ bm_switch = x; }
// Returns the number of bisection steps taken in the most recent
// innerloop() time step (m_counter is reset each pseudo-time step).
int TDO::getCounter(){ return m_counter; }
// Returns the accumulated wall time (ms) of the bisection loop measured in
// the most recent innerloop() time step.
float TDO::getSum(){ return m_sum_it; }
TDO::TDO(double* d_u, double* d_chi, double h, size_t dim, double betastar, double etastar, size_t numElements, size_t num_rows, double* d_A_local, vector<size_t*> d_node_index, vector<size_t> N, double rho, size_t numLevels, size_t p, size_t* &d_node_index_)
: m_d_u(d_u), m_d_chi(d_chi), m_h(h), m_dim(dim), m_numElements(numElements), m_num_rows(num_rows), m_d_A_local(d_A_local), m_d_node_index(d_node_index), m_rho(rho), m_etastar(etastar), m_betastar(betastar), m_numLevels(numLevels), m_p(p), m_d_node_index_(d_node_index_)
{
// inner loop frequency, n
m_n = (6 / m_etastar) * ( m_betastar / (m_h*m_h) );
m_del_t = 1.0 / m_n;
m_Nx = N[0];
m_Ny = N[1];
if (N.size() == 3)
m_Nz = N[2];
else
m_Nz = 0;
// local volume
m_local_volume = pow(m_h, m_dim);
bm_switch = 0;
}
// destructor performs device memory deallocation
// Frees every device buffer allocated in init().
// NOTE(review): if init() was never called these pointers are uninitialized
// when passed to cudaFree -- presumably init() always precedes destruction;
// verify against the call sites.
TDO::~TDO()
{
    CUDA_CALL( cudaFree(m_d_df) );
    CUDA_CALL( cudaFree(m_d_beta) );
    CUDA_CALL( cudaFree(m_d_eta) );
    CUDA_CALL( cudaFree(m_d_mutex) );
    CUDA_CALL( cudaFree(m_d_lambda_tr) );
    CUDA_CALL( cudaFree(m_d_lambda_l) );
    CUDA_CALL( cudaFree(m_d_lambda_u) );
    CUDA_CALL( cudaFree(m_d_chi_tr) );
    CUDA_CALL( cudaFree(m_d_rho_tr) );
    CUDA_CALL( cudaFree(m_d_p_w) );
    CUDA_CALL( cudaFree(m_d_tdo_foo) );
    CUDA_CALL( cudaFree(m_d_sum_g) );
    CUDA_CALL( cudaFree(m_d_sum_df_g) );
}
// Allocates and zero-initializes all device-side work buffers used by the
// inner loop, and computes the element-wise kernel launch configuration.
// Must be called once before innerloop().  Always returns true (errors are
// handled inside CUDA_CALL).
bool TDO::init()
{
    calculateDimensions(m_numElements, m_gridDim, m_blockDim);

    // per-element driving force
    CUDA_CALL( cudaMalloc( (void**)&m_d_df, sizeof(double) * m_numElements ) );
    CUDA_CALL( cudaMemset( m_d_df, 0, sizeof(double) * m_numElements) );

    CUDA_CALL( cudaMalloc( (void**)&m_d_beta, sizeof(double) ) );
    CUDA_CALL( cudaMemset( m_d_beta, 0, sizeof(double)) );

    CUDA_CALL( cudaMalloc( (void**)&m_d_eta, sizeof(double) ) );
    CUDA_CALL( cudaMemset( m_d_eta, 0, sizeof(double)) );

    CUDA_CALL( cudaMalloc( (void**)&m_d_mutex, sizeof(int) ) );
    // BUGFIX: this word is passed as the lock to calcLambdaLower/Upper but
    // was left uninitialized; zero it like every other buffer so the first
    // acquisition does not spin on garbage.
    CUDA_CALL( cudaMemset( m_d_mutex, 0, sizeof(int) ) );

    CUDA_CALL( cudaMalloc( (void**)&m_d_lambda_tr, sizeof(double) ) );
    CUDA_CALL( cudaMalloc( (void**)&m_d_lambda_l, sizeof(double) ) );
    CUDA_CALL( cudaMalloc( (void**)&m_d_lambda_u, sizeof(double) ) );
    CUDA_CALL( cudaMalloc( (void**)&m_d_chi_tr, sizeof(double) * m_numElements) );
    CUDA_CALL( cudaMalloc( (void**)&m_d_rho_tr, sizeof(double) ) );
    CUDA_CALL( cudaMalloc( (void**)&m_d_p_w, sizeof(double) ) );

    CUDA_CALL( cudaMemset( m_d_lambda_l, 0, sizeof(double) ) );
    CUDA_CALL( cudaMemset( m_d_lambda_tr, 0, sizeof(double) ) );
    CUDA_CALL( cudaMemset( m_d_lambda_u, 0, sizeof(double) ) );
    CUDA_CALL( cudaMemset( m_d_chi_tr, 0, sizeof(double) * m_numElements) );
    CUDA_CALL( cudaMemset( m_d_rho_tr, 0, sizeof(double) ) );
    CUDA_CALL( cudaMemset( m_d_p_w, 0, sizeof(double) ) );

    // device-side convergence flag, seeded from the host member
    CUDA_CALL( cudaMalloc( (void**)&m_d_tdo_foo, sizeof(bool) ) );
    CUDA_CALL( cudaMemcpy( m_d_tdo_foo, &m_tdo_foo, sizeof(bool), cudaMemcpyHostToDevice) );

    CUDA_CALL( cudaMalloc( (void**)&m_d_sum_g, sizeof(double) ) );
    CUDA_CALL( cudaMalloc( (void**)&m_d_sum_df_g, sizeof(double) ) );
    // BUGFIX: zero the two accumulators as well (they were the only buffers
    // allocated without an initial memset).
    CUDA_CALL( cudaMemset( m_d_sum_g, 0, sizeof(double) ) );
    CUDA_CALL( cudaMemset( m_d_sum_df_g, 0, sizeof(double) ) );

    return true;
}
// Stores the verbosity flag.  NOTE(review): m_verbose is not read anywhere
// in the visible code -- presumably consumed elsewhere; verify.
void TDO::set_verbose(bool verbose) { m_verbose = verbose; }
// Stores the VTK-output flag.  NOTE(review): m_printVTK is not read in the
// visible code -- presumably checked by the output routines; verify.
void TDO::print_VTK(bool foo) { m_printVTK = foo; }
// Inner pseudo-time loop of the thermodynamic topology optimization.
// For each of the m_n time steps it: computes the per-element driving
// forces, the weighted average p_w, eta and beta, bisects for the Lagrange
// multiplier lambda until the trial density matches the target m_rho, and
// finally updates chi, the compliance d_c and the measure d_MOD.
// Per-kernel timings are streamed to ofssbm during the very first bisection
// pass only (gated by the static inner_counter).  Always returns true.
bool TDO::innerloop(double* &d_u, double* &d_chi, double* &d_c, double* &d_MOD, ofstream& ofssbm)
{
    // benchmark output
    // ofstream ofssbm(filename, ios::out);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds;

    // cumulative bisection-step count over ALL calls; static on purpose so
    // the one-off kernel benchmark prints below fire only once
    static int inner_counter = 0;

    // per-iteration timing of the bisection loop
    cudaEvent_t start_it, stop_it;
    cudaEventCreate(&start_it);
    cudaEventCreate(&stop_it);
    float milliseconds_it;

    m_d_u = d_u;
    m_d_chi = d_chi;

    // reset convergence flag and accumulators
    // NOTE(review): m_tdo_foo is only set true here, before the j-loop; once
    // the first time step converges the bisection while-loop is skipped for
    // every j >= 1 -- confirm this is intended.
    m_tdo_foo = true;
    setToTrue<<<1,1>>>( m_d_tdo_foo );
    setToZero<<<1,1>>>( m_d_p_w, 1 );
    setToZero<<<1,1>>>( m_d_sum_g, 1 );
    setToZero<<<1,1>>>( m_d_sum_df_g, 1 );
    setToZero<<<1,1>>>( m_d_df, m_numElements );

    //// loop n times
    for ( int j = 0 ; j < m_n ; j++ )
    {
        // calculating the driving force of each element
        // df[] = ( 1 / 2*local_volume ) * ( p * pow(chi[], p - 1 ) ) * ( u^T * A_local * u )
        cudaEventRecord(start);
        calcDrivingForce<<<m_gridDim, m_blockDim>>>(m_d_df, m_d_u, m_d_chi, m_p, m_d_node_index_, m_d_A_local, m_num_rows, m_dim, m_local_volume, m_numElements);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        if ( inner_counter == 0 ) ofssbm << "calcDrivingForce()\t\t" << milliseconds << endl;

        // calculating average weighted driving force, p_w
        cudaEventRecord(start);
        calcP_w_(m_d_p_w, m_d_sum_g, m_d_sum_df_g, m_d_df, m_d_chi, m_numElements, m_local_volume);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        if ( inner_counter == 0 ) ofssbm << "calcP_w()\t\t\t" << milliseconds << endl;

        // calculating eta and beta
        cudaEventRecord(start);
        calcEtaBeta<<<1,2>>>( m_d_eta, m_d_beta, m_etastar, m_betastar, m_d_p_w );
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        if ( inner_counter == 0 ) ofssbm << "calcEtaBeta()\t\t\t" << milliseconds << endl;

        // bisection algo:
        setToZero<<<1,1>>>(m_d_lambda_tr, 1);

        cudaEventRecord(start);
        // computing lambda lower
        calcLambdaLower<<< m_gridDim, m_blockDim >>> (m_d_df, m_d_lambda_l, m_d_mutex, m_d_beta, m_d_chi, m_d_eta, m_Nx, m_Ny, m_Nz, m_numElements, m_h);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        if ( inner_counter == 0 ) ofssbm << "calcLambdaLower()\t\t" << milliseconds << endl;

        // computing lambda upper
        cudaEventRecord(start);
        calcLambdaUpper<<< m_gridDim, m_blockDim >>> (m_d_df, m_d_lambda_u, m_d_mutex, m_d_beta, m_d_chi, m_d_eta, m_Nx, m_Ny, m_Nz, m_numElements, m_h);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        // BUGFIX: this benchmark line was mislabelled "calcLambdaLower()"
        if ( inner_counter == 0 ) ofssbm << "calcLambdaUpper()\t\t" << milliseconds << endl;

        // remaining operations on lambda lower and upper
        minus_GPU<<<1,1>>>( m_d_lambda_l, m_d_eta);
        add_GPU<<<1,1>>>( m_d_lambda_u, m_d_eta);

        m_sum_it = 0;
        m_counter = 0;

        // bisect on lambda until the trial density converges to m_rho
        while(m_tdo_foo)
        {
            cudaEventRecord(start_it);

            // computing chi_trial
            cudaEventRecord(start);
            calcChiTrial<<<m_gridDim,m_blockDim>>> ( m_d_chi, m_d_df, m_d_lambda_tr, m_del_t, m_d_eta, m_d_beta, m_d_chi_tr, m_Nx, m_Ny, m_Nz, m_numElements, m_h);
            cudaEventRecord(stop);
            cudaEventSynchronize(stop);
            milliseconds = 0;
            cudaEventElapsedTime(&milliseconds, start, stop);
            if ( inner_counter == 0 ) ofssbm << "calcChiTrial()\t\t\t" << milliseconds << endl;

            // computing rho_trial
            setToZero<<<1,1>>>(m_d_rho_tr, 1);
            cudaEventRecord(start);
            sumOfVector_GPU <<< m_gridDim, m_blockDim >>> (m_d_rho_tr, m_d_chi_tr, m_numElements);
            calcRhoTrial<<<1,1>>>(m_d_rho_tr, m_local_volume, m_numElements);
            cudaEventRecord(stop);
            cudaEventSynchronize(stop);
            milliseconds = 0;
            cudaEventElapsedTime(&milliseconds, start, stop);
            if ( inner_counter == 0 ) ofssbm << "calcRhoTrial()\t\t\t" << milliseconds << endl;

            // narrow the bracket and pick the next lambda
            cudaEventRecord(start);
            calcLambdaTrial<<<1,1>>>( m_d_rho_tr, m_rho, m_d_lambda_l, m_d_lambda_u, m_d_lambda_tr);
            cudaEventRecord(stop);
            cudaEventSynchronize(stop);
            milliseconds = 0;
            cudaEventElapsedTime(&milliseconds, start, stop);
            if ( inner_counter == 0 ) ofssbm << "calcLambdaTrial()\t\t" << milliseconds << endl;

            // device decides convergence; copy the flag back to the host
            checkTDOConvergence<<<1,1>>> ( m_d_tdo_foo, m_rho, m_d_rho_tr);
            CUDA_CALL( cudaMemcpy( &m_tdo_foo, m_d_tdo_foo, sizeof(bool), cudaMemcpyDeviceToHost) );

            cudaEventRecord(stop_it);
            cudaEventSynchronize(stop_it);
            // BUGFIX: `milliseconds` was zeroed here although the value read
            // below is `milliseconds_it`
            milliseconds_it = 0;
            cudaEventElapsedTime(&milliseconds_it, start_it, stop_it);
            m_sum_it += milliseconds_it;

            inner_counter++;
            m_counter++;
        }

        // computing compliance, c = 0.5 * sum( u^T * K * u )
        setToZero<<<1,1>>> ( d_c, 1 );
        calcCompliance<<< m_gridDim, m_blockDim >>> (d_c, m_d_u, d_chi, m_d_node_index_, m_d_A_local, m_local_volume, m_num_rows, m_dim, m_numElements);

        // computing MOD
        setToZero<<<1,1>>> ( d_MOD, 1 );
        calcMOD<<< m_gridDim, m_blockDim >>> (d_MOD, d_chi, m_local_volume, m_numElements);

        // NOTE(review): the step count and average divide by the cumulative
        // static inner_counter, not this call's m_counter -- confirm intended.
        if ( bm_switch == 0) ofssbm << "Total time of bisection algo \t" << m_sum_it << endl;
        if ( bm_switch == 0) ofssbm << "Number of steps \t\t" << inner_counter << endl;
        if ( bm_switch == 0) ofssbm << "Average time per bisection step " << m_sum_it/inner_counter << endl;

        // chi(j) = chi(j+1)
        vectorEquals_GPU<<<m_gridDim,m_blockDim>>>( m_d_chi, m_d_chi_tr, m_numElements );
    }

    // BUGFIX: the benchmark events were never destroyed, leaking device
    // resources on every call
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaEventDestroy(start_it);
    cudaEventDestroy(stop_it);

    return true;
}
|
e5d3abb8f1fe86ef69b0c956ad69f05e61a63da1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
// Auto-generated (generate_kernels.py) -- do not edit by hand.
// Mem-efficient attention backward kernel: f32, aligned, 64x64 tile / k=32,
// with dropout, instantiated for the Sm50 tag; body runs for sm50..sm70 only.
// NOTE(review): after hipification the guards still test __CUDA_ARCH__ --
// verify it is mapped on the HIP toolchain.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::attention_kernel(p);
  return;
#endif
#endif
  // fallback when built for an unsupported architecture
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}
// Auto-generated (generate_kernels.py) -- do not edit by hand.
// Same kernel instantiated for the Sm70 tag; body runs for sm70..sm75 only.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::attention_kernel(p);
  return;
#endif
#endif
  // fallback when built for an unsupported architecture
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}
// Auto-generated (generate_kernels.py) -- do not edit by hand.
// Same kernel instantiated for the Sm75 tag; body runs for sm75..sm80 only.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::attention_kernel(p);
  return;
#endif
#endif
  // fallback when built for an unsupported architecture
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}
// Auto-generated (generate_kernels.py) -- do not edit by hand.
// Same kernel instantiated for the Sm80 tag; body runs for sm80..sm90 only.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::attention_kernel(p);
  return;
#endif
#endif
  // fallback when built for an unsupported architecture
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm80` is for sm80-sm90, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| e5d3abb8f1fe86ef69b0c956ad69f05e61a63da1.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
// Auto-generated (generate_kernels.py) -- do not edit by hand.
// Mem-efficient attention backward kernel: f32, aligned, 64x64 tile / k=32,
// with dropout, instantiated for the Sm50 tag; body runs for sm50..sm70 only.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 32>::attention_kernel(p);
  return;
#endif
#endif
  // fallback when built for an unsupported architecture
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}
// Auto-generated (generate_kernels.py) -- do not edit by hand.
// Same kernel instantiated for the Sm70 tag; body runs for sm70..sm75 only.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 32>::attention_kernel(p);
  return;
#endif
#endif
  // fallback when built for an unsupported architecture
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}
// Auto-generated (generate_kernels.py) -- do not edit by hand.
// Same kernel instantiated for the Sm75 tag; body runs for sm75..sm80 only.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 32>::attention_kernel(p);
  return;
#endif
#endif
  // fallback when built for an unsupported architecture
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}
// Auto-generated (generate_kernels.py) -- do not edit by hand.
// Same kernel instantiated for the Sm80 tag; body runs for sm80..sm90 only.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 32>::attention_kernel(p);
  return;
#endif
#endif
  // fallback when built for an unsupported architecture
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k32_dropout_sm80` is for sm80-sm90, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
ccbe1fd91a16bee826b2fecc4a1f3d296db184e3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Alexander Ocsa <alexander@blazingdb.com>
* Copyright 2018 Felipe Aramburu <felipe@blazingdb.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtest/gtest.h"
#include <iostream>
#include <cudf.h>
#include <cudf/functions.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/execution_policy.h>
#include <hip/hip_runtime.h>
#include "helper/utils.cuh"
#include "tests/utilities/cudf_test_fixtures.h"
using ValueType = int16_t;
// gtest fixture for the gpu_concat tests; presumably GdfTest provides the
// common GPU setup/teardown -- see tests/utilities/cudf_test_fixtures.h.
struct GdfConcat : public GdfTest {};
/*
============================================================================
Description : Compute concat of gdf_columns using Thrust on GPU
============================================================================
*/
// End-to-end example: concatenate a 10- and a 20-element int16 column and
// validate data plus validity bitmask of the result.
TEST_F(GdfConcat, usage_example) {
    const size_t lhs_size = 10;
    const size_t rhs_size = 20;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    // reserve space for gdf_column output
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    //call gpu_concat
    gpu_concat(&lhs, &rhs, &output);
    std::cout << "*****output**************\n";
    print_column(&output);
    // check results
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Edge case: empty left column concatenated with a 2-element right column.
TEST_F(GdfConcat, CaseWithZeroLeft)
{
    // sizes: 0 + 2 -> 2
    const size_t lhs_size = 0;
    const size_t rhs_size = 2;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    gpu_concat(&lhs, &rhs, &output);
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Edge case: 2-element left column concatenated with an empty right column.
TEST_F(GdfConcat, CaseWithZeroRight)
{
    // sizes: 2 + 0 -> 2
    const size_t lhs_size = 2;
    const size_t rhs_size = 0;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    gpu_concat(&lhs, &rhs, &output);
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Output validity bitmask fits in a single byte (3 + 4 = 7 bits).
TEST_F(GdfConcat, CaseWithOutputOfOneByte)
{
    // sizes: 3 + 4 -> bitmask layout 3|4
    const size_t lhs_size = 3;
    const size_t rhs_size = 4;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    gpu_concat(&lhs, &rhs, &output);
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Output validity bitmask spans two bytes (3 + 7 = 10 bits).
TEST_F(GdfConcat, CaseWithOutputOfTwoBytes)
{
    // sizes: 3 + 7 -- special case: right-side bits straddle a byte boundary
    // bitmask layout: 3|5, 3
    const size_t lhs_size = 3;
    const size_t rhs_size = 7;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    gpu_concat(&lhs, &rhs, &output);
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Two-byte inputs producing a three-byte output bitmask.
TEST_F(GdfConcat, CaseWithInput_2_2_Output3)
{
    // bitmask bytes: (8, 3) + (8, 1) -> 8, 3|5, 3|1
    const size_t lhs_size = 8 + 3;
    const size_t rhs_size = 8 + 1;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gpu_concat(&lhs, &rhs, &output);
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Two-byte left + five-byte right producing a five-byte output bitmask.
TEST_F(GdfConcat, CaseWithInput_2_5_Output5)
{
    // bitmask bytes: (8, 2) + (8, 8, 8, 8, 5) -> 8, 2|6, 2|6, 2|6, 2|5
    const size_t lhs_size = 8 + 2;
    const size_t rhs_size = 8 + 8 + 8 + 8 + 5;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    gpu_concat(&lhs, &rhs, &output);
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// One-byte left + four-byte right producing a five-byte output bitmask.
TEST_F(GdfConcat, CaseWithInput_1_4_Output5)
{
    // sizes: 3 + (8, 8, 8, 7) -- special case: every right-side byte is
    // split 3|5 across output byte boundaries
    // 3|5, 3|5, 3|5, 3|5, 2
    // 100
    // 10101111 10101111 10101111 10000 00
    // 100 10101
    //
    // 11110101
    // 11110101
    // 11110000
    // 00
    const size_t lhs_size = 3;
    const size_t rhs_size = 8 + 8 + 8 + 7;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gpu_concat(&lhs, &rhs, &output);
    std::cout << "*****output**************\n";
    print_column(&output);
    std::cout << "*******************\n";
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Empty left + nine-element right producing a two-byte output bitmask.
// NOTE(review): the bit diagrams below were copied from the 1_4 case and do
// not match these sizes -- kept for reference only.
TEST_F(GdfConcat, CaseWithInput_0_9_Output2)
{
    // 3 + 8, 8, 8, 7 // special case
    // 3|5, 3|5, 3|5, 3|5, 2
    // 100
    // 10101111 10101111 10101111 10000 00
    // 100 10101
    //
    // 11110101
    // 11110101
    // 11110000
    // 00
    const size_t lhs_size = 0;
    const size_t rhs_size = 8 + 1;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gpu_concat(&lhs, &rhs, &output);
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Five-element left + thirteen-element right (unaligned bit offsets).
// NOTE(review): the bit diagrams below were copied from the 1_4 case and do
// not match these sizes -- kept for reference only.
TEST_F(GdfConcat, CaseWithInput_5_11_Output22)
{
    // 3 + 8, 8, 8, 7 // special case
    // 3|5, 3|5, 3|5, 3|5, 2
    // 100
    // 10101111 10101111 10101111 10000 00
    // 100 10101
    //
    // 11110101
    // 11110101
    // 11110000
    // 00
    const size_t lhs_size = 5;
    const size_t rhs_size = 8 + 5;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gpu_concat(&lhs, &rhs, &output);
    std::cout << "*****output**************\n";
    print_column(&output);
    std::cout << "*******************\n";
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Exhaustive sweep: every (lhs_size, rhs_size) pair in [0, 100) x [0, 100).
TEST_F(GdfConcat, WithDifferentColumnSizes)
{
    using ValueType = int16_t;
    for (int lhs_size = 0; lhs_size < 100; lhs_size += 1)
    {
        for (int rhs_size = 0; rhs_size < 100; rhs_size += 1)
        {
            gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
            gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
            gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
            // BUGFIX: the returned status was stored but never checked, so a
            // failing gpu_concat could go unnoticed (and `error` was unused).
            gdf_error error = gpu_concat(&lhs, &rhs, &output);
            EXPECT_EQ(GDF_SUCCESS, error);
            check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
            delete_gdf_column(&lhs);
            delete_gdf_column(&rhs);
            delete_gdf_column(&output);
        }
    }
} | ccbe1fd91a16bee826b2fecc4a1f3d296db184e3.cu | /*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Alexander Ocsa <alexander@blazingdb.com>
* Copyright 2018 Felipe Aramburu <felipe@blazingdb.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtest/gtest.h"
#include <iostream>
#include <cudf.h>
#include <cudf/functions.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/execution_policy.h>
#include <cuda_runtime.h>
#include "helper/utils.cuh"
#include "tests/utilities/cudf_test_fixtures.h"
using ValueType = int16_t;
// gtest fixture for the gpu_concat tests; presumably GdfTest provides the
// common GPU setup/teardown -- see tests/utilities/cudf_test_fixtures.h.
struct GdfConcat : public GdfTest {};
/*
============================================================================
Description : Compute concat of gdf_columns using Thrust on GPU
============================================================================
*/
// End-to-end example: concatenate a 10- and a 20-element int16 column and
// validate data plus validity bitmask of the result.
TEST_F(GdfConcat, usage_example) {
    const size_t lhs_size = 10;
    const size_t rhs_size = 20;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    // reserve space for gdf_column output
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    //call gpu_concat
    gpu_concat(&lhs, &rhs, &output);
    std::cout << "*****output**************\n";
    print_column(&output);
    // check results
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Edge case: empty left column concatenated with a 2-element right column.
TEST_F(GdfConcat, CaseWithZeroLeft)
{
    // sizes: 0 + 2 -> 2
    const size_t lhs_size = 0;
    const size_t rhs_size = 2;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    gpu_concat(&lhs, &rhs, &output);
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Edge case: 2-element left column concatenated with an empty right column.
TEST_F(GdfConcat, CaseWithZeroRight)
{
    // sizes: 2 + 0 -> 2
    const size_t lhs_size = 2;
    const size_t rhs_size = 0;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    gpu_concat(&lhs, &rhs, &output);
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Output validity bitmask fits in a single byte (3 + 4 = 7 bits).
TEST_F(GdfConcat, CaseWithOutputOfOneByte)
{
    // sizes: 3 + 4 -> bitmask layout 3|4
    const size_t lhs_size = 3;
    const size_t rhs_size = 4;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    gpu_concat(&lhs, &rhs, &output);
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Output validity bitmask spans two bytes (3 + 7 = 10 bits).
TEST_F(GdfConcat, CaseWithOutputOfTwoBytes)
{
    // sizes: 3 + 7 -- special case: right-side bits straddle a byte boundary
    // bitmask layout: 3|5, 3
    const size_t lhs_size = 3;
    const size_t rhs_size = 7;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    gpu_concat(&lhs, &rhs, &output);
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Two-byte inputs producing a three-byte output bitmask.
TEST_F(GdfConcat, CaseWithInput_2_2_Output3)
{
    // bitmask bytes: (8, 3) + (8, 1) -> 8, 3|5, 3|1
    const size_t lhs_size = 8 + 3;
    const size_t rhs_size = 8 + 1;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gpu_concat(&lhs, &rhs, &output);
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// Two-byte left + five-byte right producing a five-byte output bitmask.
TEST_F(GdfConcat, CaseWithInput_2_5_Output5)
{
    // bitmask bytes: (8, 2) + (8, 8, 8, 8, 5) -> 8, 2|6, 2|6, 2|6, 2|5
    const size_t lhs_size = 8 + 2;
    const size_t rhs_size = 8 + 8 + 8 + 8 + 5;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    gpu_concat(&lhs, &rhs, &output);
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
// One-byte left + four-byte right producing a five-byte output bitmask.
TEST_F(GdfConcat, CaseWithInput_1_4_Output5)
{
    // sizes: 3 + (8, 8, 8, 7) -- special case: every right-side byte is
    // split 3|5 across output byte boundaries
    // 3|5, 3|5, 3|5, 3|5, 2
    // 100
    // 10101111 10101111 10101111 10000 00
    // 100 10101
    //
    // 11110101
    // 11110101
    // 11110000
    // 00
    const size_t lhs_size = 3;
    const size_t rhs_size = 8 + 8 + 8 + 7;
    gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
    gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
    gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
    std::cout << "*****left**************\n";
    print_column(&lhs);
    std::cout << "*****right**************\n";
    print_column(&rhs);
    std::cout << "*******************\n";
    gpu_concat(&lhs, &rhs, &output);
    std::cout << "*****output**************\n";
    print_column(&output);
    std::cout << "*******************\n";
    check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
    delete_gdf_column(&lhs);
    delete_gdf_column(&rhs);
    delete_gdf_column(&output);
}
TEST_F(GdfConcat, CaseWithInput_0_9_Output2)
{
// 3 + 8, 8, 8, 7 // caso especial
// 3|5, 3|5, 3|5, 3|5, 2
// 100
// 10101111 10101111 10101111 10000 00
// 100 10101
//
// 11110101
// 11110101
// 11110000
// 00
const size_t lhs_size = 0;
const size_t rhs_size = 8 + 1;
gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
std::cout << "*****left**************\n";
print_column(&lhs);
std::cout << "*****right**************\n";
print_column(&rhs);
std::cout << "*******************\n";
gpu_concat(&lhs, &rhs, &output);
check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
delete_gdf_column(&lhs);
delete_gdf_column(&rhs);
delete_gdf_column(&output);
}
TEST_F(GdfConcat, CaseWithInput_5_11_Output22)
{
// 3 + 8, 8, 8, 7 // caso especial
// 3|5, 3|5, 3|5, 3|5, 2
// 100
// 10101111 10101111 10101111 10000 00
// 100 10101
//
// 11110101
// 11110101
// 11110000
// 00
const size_t lhs_size = 5;
const size_t rhs_size = 8 + 5;
gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
std::cout << "*****left**************\n";
print_column(&lhs);
std::cout << "*****right**************\n";
print_column(&rhs);
std::cout << "*******************\n";
gpu_concat(&lhs, &rhs, &output);
std::cout << "*****output**************\n";
print_column(&output);
std::cout << "*******************\n";
check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
delete_gdf_column(&lhs);
delete_gdf_column(&rhs);
delete_gdf_column(&output);
}
TEST_F(GdfConcat, WithDifferentColumnSizes)
{
using ValueType = int16_t;
//0, ..., 100,
//100, 10000, 10000, 100000
for (int lhs_size = 0; lhs_size < 100; lhs_size += 1)
{
for (int rhs_size = 0; rhs_size < 100; rhs_size += 1)
{
gdf_column lhs = gen_gdb_column<ValueType>(lhs_size, 2);
gdf_column rhs = gen_gdb_column<ValueType>(rhs_size, 3);
gdf_column output = gen_gdb_column<ValueType>(lhs_size + rhs_size, 0);
gdf_error error = gpu_concat(&lhs, &rhs, &output);
// std::cout << "Output" << std::endl;
// print_column<ValueType>(&output);
check_column_for_concat_operation<ValueType>(&lhs, &rhs, &output);
delete_gdf_column(&lhs);
delete_gdf_column(&rhs);
delete_gdf_column(&output);
}
}
} |
dd2a8e9a9fa4d6dd1c51adabdcda1d6a37368337.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include "P4Stereographic.h"
#include "Coordinate.h"
#include "Math.h"
// from CUDA Toolkit samples
#include <helper_cuda.h>
__device__ P4Stereographic::P4Stereographic() {
init( 0, 90, 1, 1 ) ;
}
__device__ void P4Stereographic::init( double lam0, double phi1, double R, double k0 ) {
this->lam0 = lam0 ;
this->phi1 = phi1 ;
sincospi( phi1/180, &sinphi1, &cosphi1 ) ;
this->R = R ;
this->k0 = k0 ;
if ( phi1 == 90 )
mode = M_NORTH ;
else if ( phi1 == -90 )
mode = M_SOUTH ;
else if ( phi1 == 0 )
mode = M_EQUATOR ;
else
mode = M_OBLIQUE ;
}
__device__ Coordinate& P4Stereographic::forward( const Coordinate& lamphi, Coordinate& xy ) {
double sinlamdif, coslamdif ;
double sinphi, cosphi, k, t ;
sincospi( ( lamphi.x-lam0 )/180, &sinlamdif, &coslamdif ) ;
sincospi( lamphi.y/180, &sinphi, &cosphi ) ;
switch ( mode ) {
case M_NORTH:
t = tan( radians( 45-lamphi.y/2 ) ) ;
xy.x = 2*R*k0*t*sinlamdif ;
xy.y = -2*R*k0*t*coslamdif ;
break ;
case M_SOUTH:
t = tan( radians( 45+lamphi.y/2 ) ) ;
xy.x = 2*R*k0*t*sinlamdif ;
xy.y = 2*R*k0*t*coslamdif ;
break ;
case M_EQUATOR:
k = 2*k0/( 1+cosphi*coslamdif ) ;
xy.x = R*k*cosphi*sinlamdif ;
xy.y = R*k*sinphi ;
break ;
case M_OBLIQUE:
k = 2*k0/( 1+sinphi1*sinphi+cosphi1*cosphi*coslamdif ) ;
xy.x = R*k*cosphi*sinlamdif ;
xy.y = R*k*( cosphi1*sinphi-sinphi1*cosphi*coslamdif ) ;
break ;
}
return xy ;
}
__device__ Coordinate& P4Stereographic::inverse( const Coordinate& xy, Coordinate& lamphi ) {
double p, c, sinc, cosc ;
p = sqrt( xy.x*xy.x+xy.y*xy.y ) ;
c = 2*degrees( atan2( p, 2*R*k0 ) ) ;
sincospi( c/180, &sinc, &cosc ) ;
lamphi.y = degrees( asin( cosc*sinphi1+( xy.y*sinc*cosphi1/p ) ) ) ;
switch ( mode ) {
case M_NORTH:
lamphi.x = lam0+degrees( atan2( xy.x, -xy.y ) ) ;
break ;
case M_SOUTH:
lamphi.x = lam0+degrees( atan2( xy.x, xy.y ) ) ;
break ;
case M_EQUATOR:
case M_OBLIQUE:
lamphi.x = lam0+degrees( atan2( xy.x*sinc, p*cosphi1*cosc-xy.y*sinphi1*sinc ) ) ;
break ;
}
return lamphi ;
}
#ifdef P4STEREOGRAPHIC_MAIN
// kernel
__global__ void p4stereographic( double* buf ) {
P4Stereographic proj ;
Coordinate lamphi, xy, res ;
int i = threadIdx.x ;
lamphi.set( (double) i, (double) ( i%90 ), 0 ) ;
proj.forward( lamphi, xy ) ;
proj.inverse( xy, res ) ;
buf[2*i] = res.x ;
buf[2*i+1] = res.y ;
}
#define NUM_BLOCKS 1
#define NUM_THREADS 360
int main( int argc, char** argv ) {
// host buffer
double buf[2*NUM_THREADS] ;
// device buffer
double* dbuf = NULL ;
hipDeviceProp_t devProp ;
int devID ;
// find device and output compute capability on stderr
devID = gpuGetMaxGflopsDeviceId() ;
checkCudaErrors( hipSetDevice( devID ) ) ;
checkCudaErrors( hipGetDeviceProperties( &devProp, devID ) ) ;
fprintf( stderr, "%d%d\n", devProp.major, devProp.minor ) ;
// allocate device buffer memory
checkCudaErrors( hipMalloc( (void**) &dbuf, sizeof( double )*2*NUM_THREADS ) ) ;
// run kernel
hipLaunchKernelGGL(( p4stereographic), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, dbuf ) ;
// copy kernel results from device buffer to host
checkCudaErrors( hipMemcpy( buf, dbuf, sizeof( double )*2*NUM_THREADS, hipMemcpyDeviceToHost ) ) ;
checkCudaErrors( hipFree( dbuf ) ) ;
// output result on stdout
for ( int i=0 ; NUM_THREADS>i ; i++ )
printf( "%.4f %.4f\n", buf[2*i], buf[2*i+1] ) ;
return EXIT_SUCCESS ;
}
#endif // P4STEREOGRAPHIC_MAIN
| dd2a8e9a9fa4d6dd1c51adabdcda1d6a37368337.cu | #include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include "P4Stereographic.h"
#include "Coordinate.h"
#include "Math.h"
// from CUDA Toolkit samples
#include <helper_cuda.h>
__device__ P4Stereographic::P4Stereographic() {
init( 0, 90, 1, 1 ) ;
}
__device__ void P4Stereographic::init( double lam0, double phi1, double R, double k0 ) {
this->lam0 = lam0 ;
this->phi1 = phi1 ;
sincospi( phi1/180, &sinphi1, &cosphi1 ) ;
this->R = R ;
this->k0 = k0 ;
if ( phi1 == 90 )
mode = M_NORTH ;
else if ( phi1 == -90 )
mode = M_SOUTH ;
else if ( phi1 == 0 )
mode = M_EQUATOR ;
else
mode = M_OBLIQUE ;
}
__device__ Coordinate& P4Stereographic::forward( const Coordinate& lamphi, Coordinate& xy ) {
double sinlamdif, coslamdif ;
double sinphi, cosphi, k, t ;
sincospi( ( lamphi.x-lam0 )/180, &sinlamdif, &coslamdif ) ;
sincospi( lamphi.y/180, &sinphi, &cosphi ) ;
switch ( mode ) {
case M_NORTH:
t = tan( radians( 45-lamphi.y/2 ) ) ;
xy.x = 2*R*k0*t*sinlamdif ;
xy.y = -2*R*k0*t*coslamdif ;
break ;
case M_SOUTH:
t = tan( radians( 45+lamphi.y/2 ) ) ;
xy.x = 2*R*k0*t*sinlamdif ;
xy.y = 2*R*k0*t*coslamdif ;
break ;
case M_EQUATOR:
k = 2*k0/( 1+cosphi*coslamdif ) ;
xy.x = R*k*cosphi*sinlamdif ;
xy.y = R*k*sinphi ;
break ;
case M_OBLIQUE:
k = 2*k0/( 1+sinphi1*sinphi+cosphi1*cosphi*coslamdif ) ;
xy.x = R*k*cosphi*sinlamdif ;
xy.y = R*k*( cosphi1*sinphi-sinphi1*cosphi*coslamdif ) ;
break ;
}
return xy ;
}
__device__ Coordinate& P4Stereographic::inverse( const Coordinate& xy, Coordinate& lamphi ) {
double p, c, sinc, cosc ;
p = sqrt( xy.x*xy.x+xy.y*xy.y ) ;
c = 2*degrees( atan2( p, 2*R*k0 ) ) ;
sincospi( c/180, &sinc, &cosc ) ;
lamphi.y = degrees( asin( cosc*sinphi1+( xy.y*sinc*cosphi1/p ) ) ) ;
switch ( mode ) {
case M_NORTH:
lamphi.x = lam0+degrees( atan2( xy.x, -xy.y ) ) ;
break ;
case M_SOUTH:
lamphi.x = lam0+degrees( atan2( xy.x, xy.y ) ) ;
break ;
case M_EQUATOR:
case M_OBLIQUE:
lamphi.x = lam0+degrees( atan2( xy.x*sinc, p*cosphi1*cosc-xy.y*sinphi1*sinc ) ) ;
break ;
}
return lamphi ;
}
#ifdef P4STEREOGRAPHIC_MAIN
// kernel
__global__ void p4stereographic( double* buf ) {
P4Stereographic proj ;
Coordinate lamphi, xy, res ;
int i = threadIdx.x ;
lamphi.set( (double) i, (double) ( i%90 ), 0 ) ;
proj.forward( lamphi, xy ) ;
proj.inverse( xy, res ) ;
buf[2*i] = res.x ;
buf[2*i+1] = res.y ;
}
#define NUM_BLOCKS 1
#define NUM_THREADS 360
int main( int argc, char** argv ) {
// host buffer
double buf[2*NUM_THREADS] ;
// device buffer
double* dbuf = NULL ;
cudaDeviceProp devProp ;
int devID ;
// find device and output compute capability on stderr
devID = gpuGetMaxGflopsDeviceId() ;
checkCudaErrors( cudaSetDevice( devID ) ) ;
checkCudaErrors( cudaGetDeviceProperties( &devProp, devID ) ) ;
fprintf( stderr, "%d%d\n", devProp.major, devProp.minor ) ;
// allocate device buffer memory
checkCudaErrors( cudaMalloc( (void**) &dbuf, sizeof( double )*2*NUM_THREADS ) ) ;
// run kernel
p4stereographic<<<NUM_BLOCKS, NUM_THREADS>>>( dbuf ) ;
// copy kernel results from device buffer to host
checkCudaErrors( cudaMemcpy( buf, dbuf, sizeof( double )*2*NUM_THREADS, cudaMemcpyDeviceToHost ) ) ;
checkCudaErrors( cudaFree( dbuf ) ) ;
// output result on stdout
for ( int i=0 ; NUM_THREADS>i ; i++ )
printf( "%.4f %.4f\n", buf[2*i], buf[2*i+1] ) ;
return EXIT_SUCCESS ;
}
#endif // P4STEREOGRAPHIC_MAIN
|
a38ccc23e0beb346dae8fd1524d45858a8ca952c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlobpcg_shift.cu normal z -> d, Wed Sep 17 15:08:43 2014
*/
#include "common_magma.h"
__global__ void
magma_dlobpcg_shift_kernel( magma_int_t num_rows, magma_int_t num_vecs,
magma_int_t shift, double *x ){
int idx = threadIdx.x ; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if( row<num_rows){
double tmp = x[idx];
__syncthreads();
if( idx > shift-1 ){
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
shift magma_int_t
shift number
@param
x double*
input/output vector x
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_dlobpcg_shift( magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
double *x ){
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( double );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = sqrt(num_rows);
int dimgrid2 = (num_rows + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
hipLaunchKernelGGL(( magma_dlobpcg_shift_kernel), dim3(grid), dim3(block), Ms, magma_stream ,
num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
| a38ccc23e0beb346dae8fd1524d45858a8ca952c.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlobpcg_shift.cu normal z -> d, Wed Sep 17 15:08:43 2014
*/
#include "common_magma.h"
__global__ void
magma_dlobpcg_shift_kernel( magma_int_t num_rows, magma_int_t num_vecs,
magma_int_t shift, double *x ){
int idx = threadIdx.x ; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if( row<num_rows){
double tmp = x[idx];
__syncthreads();
if( idx > shift-1 ){
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
shift magma_int_t
shift number
@param
x double*
input/output vector x
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_dlobpcg_shift( magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
double *x ){
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( double );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = sqrt(num_rows);
int dimgrid2 = (num_rows + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
magma_dlobpcg_shift_kernel<<< grid, block, Ms, magma_stream >>>
( num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
|
d42f233ef48de48e3a1443f4a0e41124274fb3b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Points to Voxels & Voxels to Points (Modified from SparseConv)
Written by Li Jiang
All Rights Reserved 2020.
*/
#include "voxelize.h"
template <typename T>
__global__ void voxelize_fp_cuda_(Int nOutputRows, Int maxActive, Int nPlanes, T *feats, T *output_feats, Int *rules, bool average){
for(int row = blockIdx.x; row < nOutputRows; row += gridDim.x){
T *out = output_feats + row * nPlanes;
Int *r = rules + row * (maxActive + 1);
Int nActive = r[0];
T multiplier = (average and nActive > 0) ? (T) 1 / nActive : (T) 1;
for(int i = 1; i <= nActive; i++){
T *inp = feats + r[i] * nPlanes;
for(int plane = threadIdx.x; plane < nPlanes; plane += blockDim.x){
atomicAdd(&out[plane], multiplier * inp[plane]);
}
}
}
}
// input: feats N * C
// input: rules M * (1 + maxActive)
// output: output_feats M * C
template <typename T>
void voxelize_fp_cuda(Int nOutputRows, Int maxActive, Int nPlanes, T *feats, T *output_feats, Int *rules, bool average){
hipLaunchKernelGGL(( voxelize_fp_cuda_<T>), dim3(::min(nOutputRows, (Int)32768)), dim3(::min(nPlanes, (Int)32)), 0, 0, nOutputRows, maxActive, nPlanes, feats, output_feats, rules, average);
}
template <typename T>
__global__ void voxelize_bp_cuda_(Int nOutputRows, Int maxActive, Int nPlanes, T *d_output_feats, T *d_feats, Int *rules, bool average){
for(int row = blockIdx.x; row < nOutputRows; row += gridDim.x){
T *out = d_output_feats + row * nPlanes;
Int *r = rules + row * (maxActive + 1);
Int nActive = r[0];
T multiplier = (average and nActive > 0) ? (T) 1 / nActive : (T) 1;
for(int i = 1; i <= nActive; i++){
T *inp = d_feats + r[i] * nPlanes;
for(int plane = threadIdx.x; plane < nPlanes; plane += blockDim.x){
atomicAdd(&inp[plane], multiplier * out[plane]);
}
}
}
}
template <typename T>
void voxelize_bp_cuda(Int nOutputRows, Int maxActive, Int nPlanes, T *d_output_feats, T *d_feats, Int *rules, bool average){
hipLaunchKernelGGL(( voxelize_bp_cuda_<T>), dim3(::min(nOutputRows, (Int)32768)), dim3(::min(nPlanes, (Int)32)), 0, 0, nOutputRows, maxActive, nPlanes, d_output_feats, d_feats, rules, average);
}
| d42f233ef48de48e3a1443f4a0e41124274fb3b8.cu | /*
Points to Voxels & Voxels to Points (Modified from SparseConv)
Written by Li Jiang
All Rights Reserved 2020.
*/
#include "voxelize.h"
template <typename T>
__global__ void voxelize_fp_cuda_(Int nOutputRows, Int maxActive, Int nPlanes, T *feats, T *output_feats, Int *rules, bool average){
for(int row = blockIdx.x; row < nOutputRows; row += gridDim.x){
T *out = output_feats + row * nPlanes;
Int *r = rules + row * (maxActive + 1);
Int nActive = r[0];
T multiplier = (average and nActive > 0) ? (T) 1 / nActive : (T) 1;
for(int i = 1; i <= nActive; i++){
T *inp = feats + r[i] * nPlanes;
for(int plane = threadIdx.x; plane < nPlanes; plane += blockDim.x){
atomicAdd(&out[plane], multiplier * inp[plane]);
}
}
}
}
// input: feats N * C
// input: rules M * (1 + maxActive)
// output: output_feats M * C
template <typename T>
void voxelize_fp_cuda(Int nOutputRows, Int maxActive, Int nPlanes, T *feats, T *output_feats, Int *rules, bool average){
voxelize_fp_cuda_<T><<<std::min(nOutputRows, (Int)32768), std::min(nPlanes, (Int)32)>>>(nOutputRows, maxActive, nPlanes, feats, output_feats, rules, average);
}
template <typename T>
__global__ void voxelize_bp_cuda_(Int nOutputRows, Int maxActive, Int nPlanes, T *d_output_feats, T *d_feats, Int *rules, bool average){
for(int row = blockIdx.x; row < nOutputRows; row += gridDim.x){
T *out = d_output_feats + row * nPlanes;
Int *r = rules + row * (maxActive + 1);
Int nActive = r[0];
T multiplier = (average and nActive > 0) ? (T) 1 / nActive : (T) 1;
for(int i = 1; i <= nActive; i++){
T *inp = d_feats + r[i] * nPlanes;
for(int plane = threadIdx.x; plane < nPlanes; plane += blockDim.x){
atomicAdd(&inp[plane], multiplier * out[plane]);
}
}
}
}
template <typename T>
void voxelize_bp_cuda(Int nOutputRows, Int maxActive, Int nPlanes, T *d_output_feats, T *d_feats, Int *rules, bool average){
voxelize_bp_cuda_<T><<<std::min(nOutputRows, (Int)32768), std::min(nPlanes, (Int)32)>>>(nOutputRows, maxActive, nPlanes, d_output_feats, d_feats, rules, average);
}
|
4ad3eb589d53d4bf492435aa1d88152c547385e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include "yololayer.h"
#include "utils.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel)
{
mClassCount = classCount;
mYoloV5NetWidth = netWidth;
mYoloV5NetHeight = netHeight;
mMaxOutObject = maxOut;
mYoloKernel = vYoloKernel;
mKernelCount = vYoloKernel.size();
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin()
{
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipFree(mAnchor[ii]));
}
CUDA_CHECK(hipHostFree(mAnchor));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
read(d, mYoloV5NetWidth);
read(d, mYoloV5NetHeight);
read(d, mMaxOutObject);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(), d, kernelSize);
d += kernelSize;
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
write(d, mYoloV5NetWidth);
write(d, mYoloV5NetHeight);
write(d, mMaxOutObject);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject);
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel);
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
__global__ void CalDetection(const float *input, float *output, int noElements,
const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid * bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < IGNORE_THRESH) continue;
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float *res_count = output + bnIdx * outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= maxoutobject) return;
char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
// pytorch:
// y = x[i].sigmoid()
// y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
// y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
// X: (sigmoid(tx) + cx)/FeaturemapW * netwidth
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;
// W: (Pw * e^tw) / FeaturemapW * netwidth
// v5: https://github.com/ultralytics/yolov5/issues/471
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize)
{
int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
for (int idx = 0; idx < batchSize; ++idx) {
CUDA_CHECK(hipMemset(output + idx * outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
//printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight);
CalDetection << < (yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount,0,stream >> >
(inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
int class_count = 80;
int input_w = 416;
int input_h = 416;
int max_output_object_count = 1000;
std::vector<Yolo::YoloKernel> yolo_kernels(3);
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; i++) {
if (strcmp(fields[i].name, "netdata") == 0) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int *tmp = (int*)(fields[i].data);
class_count = tmp[0];
input_w = tmp[1];
input_h = tmp[2];
max_output_object_count = tmp[3];
} else if (strstr(fields[i].name, "yolodata") != NULL) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int *tmp = (int*)(fields[i].data);
YoloKernel kernel;
kernel.width = tmp[0];
kernel.height = tmp[1];
for (int j = 0; j < fields[i].length - 2; j++) {
kernel.anchors[j] = tmp[j + 2];
}
yolo_kernels[2 - (fields[i].name[8] - '1')] = kernel;
}
}
YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, yolo_kernels);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| 4ad3eb589d53d4bf492435aa1d88152c547385e6.cu | #include <assert.h>
#include "yololayer.h"
#include "utils.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel)
{
mClassCount = classCount;
mYoloV5NetWidth = netWidth;
mYoloV5NetHeight = netHeight;
mMaxOutObject = maxOut;
mYoloKernel = vYoloKernel;
mKernelCount = vYoloKernel.size();
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin()
{
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaFree(mAnchor[ii]));
}
CUDA_CHECK(cudaFreeHost(mAnchor));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
read(d, mYoloV5NetWidth);
read(d, mYoloV5NetHeight);
read(d, mMaxOutObject);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(), d, kernelSize);
d += kernelSize;
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
for (int ii = 0; ii < mKernelCount; ii++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
write(d, mYoloV5NetWidth);
write(d, mYoloV5NetHeight);
write(d, mMaxOutObject);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject);
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
// Type name used by the plugin registry; must match the creator's
// getPluginName() so serialized engines can be deserialized.
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
// Self-deletes; invoked by TensorRT when the engine/network is destroyed.
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
// Rebuilds an independent plugin from the configuration state (class count,
// net dimensions, max detections, per-scale kernels) and copies the namespace.
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel);
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
// Decodes raw YOLOv5 head activations into Detection records.
// One thread handles one grid cell (over all CHECK_COUNT anchors) of one
// batch image. Detections are appended to the image's output slice, whose
// first float is an atomically-incremented count.
__global__ void CalDetection(const float *input, float *output, int noElements,
    const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= noElements) return;

    int total_grid = yoloWidth * yoloHeight;
    int bnIdx = idx / total_grid;        // image index within the batch
    idx = idx - total_grid * bnIdx;      // cell index within this feature map
    int info_len_i = 5 + classes;        // x, y, w, h, objectness + class scores
    const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);

    // Was a hard-coded 3; use CHECK_COUNT so the loop stays consistent with
    // the anchors[CHECK_COUNT * 2] parameter.
    for (int k = 0; k < CHECK_COUNT; ++k) {
        float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
        if (box_prob < IGNORE_THRESH) continue;

        // Arg-max over the per-class scores.
        int class_id = 0;
        float max_cls_prob = 0.0f;  // was a double literal (0.0)
        for (int i = 5; i < info_len_i; ++i) {
            float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
            if (p > max_cls_prob) {
                max_cls_prob = p;
                class_id = i - 5;
            }
        }

        // Reserve an output slot; the count float may exceed maxoutobject
        // since every thread increments before checking.
        float *res_count = output + bnIdx * outputElem;
        int count = (int)atomicAdd(res_count, 1);
        if (count >= maxoutobject) return;
        char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
        Detection* det = (Detection*)(data);

        int row = idx / yoloWidth;
        int col = idx % yoloWidth;

        //Location
        // pytorch:
        //  y = x[i].sigmoid()
        //  y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
        //  y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
        // X: (sigmoid(tx) + cx)/FeaturemapW * netwidth
        det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
        det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;

        // W: (Pw * e^tw) / FeaturemapW * netwidth
        // v5: https://github.com/ultralytics/yolov5/issues/471
        det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
        det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
        det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
        det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
        det->conf = box_prob * max_cls_prob;
        det->class_id = class_id;
    }
}
// Launches CalDetection once per YOLO scale; all scales append into the same
// per-image output slice via the atomic count in its first float.
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize)
{
// One count slot plus room for mMaxOutObject Detection structs, in floats.
int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
// Zero only the leading count float of each image's slice; detection slots
// are overwritten as they are appended.
for (int idx = 0; idx < batchSize; ++idx) {
CUDA_CHECK(cudaMemset(output + idx * outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
// NOTE(review): this permanently shrinks the mThreadCount member when a
// small feature map is encountered, affecting all later calls — confirm
// this is intended rather than using a local launch width.
if (numElem < mThreadCount)
mThreadCount = numElem;
//printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight);
CalDetection << < (yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount,0,stream >> >
(inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
}
}
// IPluginV2 execution entry point: forwards the bindings to the CUDA
// implementation on the caller-provided stream. Always reports success.
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
// Static registration state shared by all creator instances.
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
// No plugin fields are declared up front; mFC simply mirrors the (empty)
// attribute list. createPlugin() parses whatever fields the caller passes.
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
// Must match YoloLayerPlugin::getPluginType() for registry lookup.
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
// Builds a YoloLayerPlugin from a PluginFieldCollection. Expected fields:
// "netdata" = {class_count, input_w, input_h, max_output_object_count} and
// three "yolodata<N>" entries (one per scale) = {width, height, anchors...}.
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
// Defaults, used when a field is absent.
int class_count = 80;
int input_w = 416;
int input_h = 416;
int max_output_object_count = 1000;
std::vector<Yolo::YoloKernel> yolo_kernels(3);
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; i++) {
if (strcmp(fields[i].name, "netdata") == 0) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
// NOTE(review): the field is declared kFLOAT32 but its payload is read
// as raw ints — confirm this matches how the network builder packs it.
int *tmp = (int*)(fields[i].data);
class_count = tmp[0];
input_w = tmp[1];
input_h = tmp[2];
max_output_object_count = tmp[3];
} else if (strstr(fields[i].name, "yolodata") != NULL) {
assert(fields[i].type == PluginFieldType::kFLOAT32);
int *tmp = (int*)(fields[i].data);
YoloKernel kernel;
kernel.width = tmp[0];
kernel.height = tmp[1];
// Remaining ints are the anchor values for this scale.
// NOTE(review): no bound check against the anchors[] capacity — assumes
// fields[i].length - 2 fits in YoloKernel::anchors.
for (int j = 0; j < fields[i].length - 2; j++) {
kernel.anchors[j] = tmp[j + 2];
}
// Field names are expected to carry the scale digit '1'..'3' at index 8
// (e.g. "yolodata1"); scales are stored in reverse order.
yolo_kernels[2 - (fields[i].name[8] - '1')] = kernel;
}
}
YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, yolo_kernels);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
// Recreates a plugin from the byte stream written by serialize().
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
fb01372acc34763db6a76eed7cb7d62d629bdc21.hip | // !!! This is a file automatically generated by hipify!!!
// Program corresponding to CythonBM.cu that can be run directly from the command lin. For testing purposes.
//Attempt to Parallelize function for crossing time. Slower than other methods.
//#include <cmath>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
// Error handling code used in Nvidia example found here: https://docs.nvidia.com/cuda/hiprand/host-api-overview.html#generator-options
// Evaluates a HIP API call; on failure prints the failing file/line and makes
// the enclosing function return EXIT_FAILURE (only usable where the caller
// returns int).
#define CUDA_CALL(x) do { if((x)!=hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
//Function to generate brownian path, which is stored in results. Executes on the GPU, hence the __global__ identifier
// One thread simulates one independent N-step Brownian path over total time T;
// path i occupies results[i*N .. i*N + N - 1]. Also zeroes crossTimes[i].
__global__ void randomWalk(double *results, int *crossTimes, int T, int N, int numSims) {
    int crossTimeIndex = threadIdx.x + blockIdx.x * blockDim.x;
    if (crossTimeIndex < numSims) {
        hiprandState_t state;
        // Use the thread index as the RNG sequence number. The original passed
        // sequence 0 for every thread, so all threads drew identical normals
        // and every simulated path was the same.
        hiprand_init(1234, crossTimeIndex, 0, &state);
        // 64-bit start offset: crossTimeIndex * N overflows int for the sizes
        // main() uses (100000 paths * 99000 steps).
        long long start = (long long)crossTimeIndex * N;
        crossTimes[crossTimeIndex] = 0;
        results[start] = 0.0;
        // Each step adds an N(0, T/N) Gaussian increment.
        for (long long j = start + 1; j < start + N; j++) {
            double random = hiprand_normal_double(&state);
            results[j] = results[j - 1] + random * sqrt((double) T / N);
        }
    }
    /*
    Generate 2 doubles at once. Test later to see if this is more efficient:
    double hiprand_normal2_double (state);
    */
}
// For each path, records the step index of a sample that leaves the band
// [lowerThreshold, upperThreshold]; 0 means "not crossed". Assumes
// crossTimes[] was zeroed by randomWalk.
// NOTE(review): threads scanning the same path race on crossTimes, so the
// recorded index is not guaranteed to be the *first* crossing.
__global__ void getCrossingTimes(double *results, int *crossTimes, int N, int numSims, int lowerThreshold, int upperThreshold) {
    // 64-bit total: N * numSims overflows int for the sizes used by main().
    long long total = (long long)N * numSims;
    // Grid-stride loop. The original advanced by blockDim.x + gridDim.x,
    // which is not the launch width, so threads rescanned some samples and
    // skipped others; the stride must be blockDim.x * gridDim.x.
    for (long long tid = threadIdx.x + blockIdx.x * (long long)blockDim.x;
         tid < total;
         tid += (long long)blockDim.x * gridDim.x) {
        if (crossTimes[tid / N] == 0) {
            if (results[tid] <= lowerThreshold || results[tid] >= upperThreshold) {
                crossTimes[tid / N] = (int)(tid % N);  // step index within the path
            }
        }
    }
}
// Driver: simulate numSims Brownian paths on the GPU, find threshold
// crossing times, and report the elapsed GPU time.
int main() {
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);

    // Simulation parameters.
    int N = 99000;            // steps per path
    int T = 1;                // total simulated time
    int numSims = 100000;     // number of independent paths
    int numThreads = 128;
    // Ceil-divide the paths over blocks. The original computed
    // (127 + numSims) / numSims, which is 1 for any large numSims and left
    // all but the first 128 paths unsimulated.
    int numBlocks = (numSims + numThreads - 1) / numThreads;
    // NOTE: getCrossingTimes takes these as int, so they are narrowed to
    // -1 and 1 at the launch below.
    double lowerThreshold = -1;
    double upperThreshold = 1;

    // 64-bit element count: N * numSims overflows 32-bit int here.
    size_t totalSamples = (size_t)N * numSims;
    double *results = new double[totalSamples];
    double *dev_results;
    int *crossTimes = new int[numSims];
    int *dev_crossTimes;
    int numBlocks2 = (int)((totalSamples + 511) / 512);  // ceil-div for the scan kernel

    // Allocate space for results array on device
    CUDA_CALL(hipMalloc(&dev_results, totalSamples * sizeof(double)));
    CUDA_CALL(hipMalloc(&dev_crossTimes, numSims * sizeof(int)));

    hipLaunchKernelGGL(randomWalk, dim3(numBlocks), dim3(numThreads), 0, 0, dev_results, dev_crossTimes, T, N, numSims);
    //copy results array from device to host
    CUDA_CALL(hipMemcpy(results, dev_results, totalSamples * sizeof(double), hipMemcpyDeviceToHost));
    hipLaunchKernelGGL(getCrossingTimes, dim3(numBlocks2), dim3(512), 0, 0, dev_results, dev_crossTimes, N, numSims, lowerThreshold, upperThreshold);
    CUDA_CALL(hipMemcpy(crossTimes, dev_crossTimes, numSims * sizeof(int), hipMemcpyDeviceToHost));

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to generate: %3.1f ms\n", elapsedTime);  // was "ms/n" (typo)
    hipEventDestroy(start);
    hipEventDestroy(stop);
    printf("\n");
    /*
    // print out path
    for (int i = 0; i < (N * numSims); i++) {
        printf("%f ", results[i]);
    }
    printf("\n");
    printf("\n");
    printf("\n");
    // print out cross times
    for (int i = 0; i < numSims; i++) {
        printf("%d ", crossTimes[i]);
    }
    printf("\n");
    */
    //clean up (the original leaked dev_crossTimes and both host arrays)
    CUDA_CALL(hipFree(dev_results));
    CUDA_CALL(hipFree(dev_crossTimes));
    delete[] results;
    delete[] crossTimes;
    return 0;
}
| fb01372acc34763db6a76eed7cb7d62d629bdc21.cu | // Program corresponding to CythonBM.cu that can be run directly from the command lin. For testing purposes.
//Attempt to Parallelize function for crossing time. Slower than other methods.
//#include <cmath>
#include <curand_kernel.h>
#include <stdio.h>
#include <cuda.h>
// Error handling code used in Nvidia example found here: https://docs.nvidia.com/cuda/curand/host-api-overview.html#generator-options
// Evaluates a CUDA API call; on failure prints the failing file/line and makes
// the enclosing function return EXIT_FAILURE (only usable where the caller
// returns int).
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
//Function to generate brownian path, which is stored in results. Executes on the GPU, hence the __global__ identifier
// One thread simulates one independent N-step Brownian path over total time T;
// path i occupies results[i*N .. i*N + N - 1]. Also zeroes crossTimes[i].
__global__ void randomWalk(double *results, int *crossTimes, int T, int N, int numSims) {
    int crossTimeIndex = threadIdx.x + blockIdx.x * blockDim.x;
    if (crossTimeIndex < numSims) {
        curandState_t state;
        // Use the thread index as the RNG sequence number. The original passed
        // sequence 0 for every thread, so all threads drew identical normals
        // and every simulated path was the same.
        curand_init(1234, crossTimeIndex, 0, &state);
        // 64-bit start offset: crossTimeIndex * N overflows int for the sizes
        // main() uses (100000 paths * 99000 steps).
        long long start = (long long)crossTimeIndex * N;
        crossTimes[crossTimeIndex] = 0;
        results[start] = 0.0;
        // Each step adds an N(0, T/N) Gaussian increment.
        for (long long j = start + 1; j < start + N; j++) {
            double random = curand_normal_double(&state);
            results[j] = results[j - 1] + random * sqrt((double) T / N);
        }
    }
    /*
    Generate 2 doubles at once. Test later to see if this is more efficient:
    double curand_normal2_double (state);
    */
}
// For each path, records the step index of a sample that leaves the band
// [lowerThreshold, upperThreshold]; 0 means "not crossed". Assumes
// crossTimes[] was zeroed by randomWalk.
// NOTE(review): threads scanning the same path race on crossTimes, so the
// recorded index is not guaranteed to be the *first* crossing.
__global__ void getCrossingTimes(double *results, int *crossTimes, int N, int numSims, int lowerThreshold, int upperThreshold) {
    // 64-bit total: N * numSims overflows int for the sizes used by main().
    long long total = (long long)N * numSims;
    // Grid-stride loop. The original advanced by blockDim.x + gridDim.x,
    // which is not the launch width, so threads rescanned some samples and
    // skipped others; the stride must be blockDim.x * gridDim.x.
    for (long long tid = threadIdx.x + blockIdx.x * (long long)blockDim.x;
         tid < total;
         tid += (long long)blockDim.x * gridDim.x) {
        if (crossTimes[tid / N] == 0) {
            if (results[tid] <= lowerThreshold || results[tid] >= upperThreshold) {
                crossTimes[tid / N] = (int)(tid % N);  // step index within the path
            }
        }
    }
}
// Driver: simulate numSims Brownian paths on the GPU, find threshold
// crossing times, and report the elapsed GPU time.
int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // Simulation parameters.
    int N = 99000;            // steps per path
    int T = 1;                // total simulated time
    int numSims = 100000;     // number of independent paths
    int numThreads = 128;
    // Ceil-divide the paths over blocks. The original computed
    // (127 + numSims) / numSims, which is 1 for any large numSims and left
    // all but the first 128 paths unsimulated.
    int numBlocks = (numSims + numThreads - 1) / numThreads;
    // NOTE: getCrossingTimes takes these as int, so they are narrowed to
    // -1 and 1 at the launch below.
    double lowerThreshold = -1;
    double upperThreshold = 1;

    // 64-bit element count: N * numSims overflows 32-bit int here.
    size_t totalSamples = (size_t)N * numSims;
    double *results = new double[totalSamples];
    double *dev_results;
    int *crossTimes = new int[numSims];
    int *dev_crossTimes;
    int numBlocks2 = (int)((totalSamples + 511) / 512);  // ceil-div for the scan kernel

    // Allocate space for results array on device
    CUDA_CALL(cudaMalloc(&dev_results, totalSamples * sizeof(double)));
    CUDA_CALL(cudaMalloc(&dev_crossTimes, numSims * sizeof(int)));

    randomWalk<<<numBlocks, numThreads>>>(dev_results, dev_crossTimes, T, N, numSims);
    //copy results array from device to host
    CUDA_CALL(cudaMemcpy(results, dev_results, totalSamples * sizeof(double), cudaMemcpyDeviceToHost));
    getCrossingTimes<<<numBlocks2, 512>>>(dev_results, dev_crossTimes, N, numSims, lowerThreshold, upperThreshold);
    CUDA_CALL(cudaMemcpy(crossTimes, dev_crossTimes, numSims * sizeof(int), cudaMemcpyDeviceToHost));

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to generate: %3.1f ms\n", elapsedTime);  // was "ms/n" (typo)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("\n");
    /*
    // print out path
    for (int i = 0; i < (N * numSims); i++) {
        printf("%f ", results[i]);
    }
    printf("\n");
    printf("\n");
    printf("\n");
    // print out cross times
    for (int i = 0; i < numSims; i++) {
        printf("%d ", crossTimes[i]);
    }
    printf("\n");
    */
    //clean up (the original leaked dev_crossTimes and both host arrays)
    CUDA_CALL(cudaFree(dev_results));
    CUDA_CALL(cudaFree(dev_crossTimes));
    delete[] results;
    delete[] crossTimes;
    return 0;
}
|
23288f63b41a794c7f6e919d661104f2210a1b2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFFlatScan.cuh>
#include <faiss/gpu/impl/DistanceUtils.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/PtxUtils.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <thrust/host_vector.h>
namespace faiss { namespace gpu {
namespace {
/// Sort direction per each metric
inline bool metricToSortDirection(MetricType mt) {
    // Inner-product similarity ranks largest values first; L2 distance
    // ranks smallest values first. Anything else is unhandled here.
    if (mt == MetricType::METRIC_INNER_PRODUCT) {
        return true;   // highest
    } else if (mt == MetricType::METRIC_L2) {
        return false;  // lowest
    }

    FAISS_ASSERT(false);  // unhandled metric
    return false;
}
}
// Number of warps we create per block of IVFFlatScan
constexpr int kIVFFlatScanWarps = 4;
// Works for any dimension size
template <typename Codec, typename Metric>
struct IVFFlatScan {
// Computes the metric between `query` and every vector of one inverted
// list, writing one float per vector into distanceOut. The block's
// kIVFFlatScanWarps warps each take a contiguous chunk of vectors; within
// a warp, lanes split the dimensions and the partials are combined with a
// warp-wide sum reduction.
static __device__ void scan(float* query,
bool useResidual,
float* residualBaseSlice,
void* vecData,
const Codec& codec,
const Metric& metric,
int numVecs,
int dim,
float* distanceOut) {
// How many separate loading points are there for the decoder?
int limit = utils::divDown(dim, Codec::kDimPerIter);
// Each warp handles a separate chunk of vectors
int warpId = threadIdx.x / kWarpSize;
// FIXME: why does getLaneId() not work when we write out below!?!?!
int laneId = threadIdx.x % kWarpSize; // getLaneId();
// Divide the set of vectors among the warps
int vecsPerWarp = utils::divUp(numVecs, kIVFFlatScanWarps);
int vecStart = vecsPerWarp * warpId;
int vecEnd = min(vecsPerWarp * (warpId + 1), numVecs);
// Walk the list of vectors for this warp
for (int vec = vecStart; vec < vecEnd; ++vec) {
Metric dist = metric.zero();
// Scan the dimensions available that have whole units for the decoder,
// as the decoder may handle more than one dimension at once (leaving the
// remainder to be handled separately)
for (int d = laneId; d < limit; d += kWarpSize) {
int realDim = d * Codec::kDimPerIter;
float vecVal[Codec::kDimPerIter];
// Decode the kDimPerIter dimensions
codec.decode(vecData, vec, d, vecVal);
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
vecVal[j] += useResidual ? residualBaseSlice[realDim + j] : 0.0f;
}
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
dist.handle(query[realDim + j], vecVal[j]);
}
}
// Handle remainder by a single thread, if any
// Not needed if we decode 1 dim per time
if (Codec::kDimPerIter > 1) {
int realDim = limit * Codec::kDimPerIter;
// Was there any remainder?
if (realDim < dim) {
// Let the first threads in the block sequentially perform it
int remainderDim = realDim + laneId;
if (remainderDim < dim) {
float vecVal =
codec.decodePartial(vecData, vec, limit, laneId);
vecVal += useResidual ? residualBaseSlice[remainderDim] : 0.0f;
dist.handle(query[remainderDim], vecVal);
}
}
}
// Reduce distance within warp
auto warpDist = warpReduceAllSum(dist.reduce());
if (laneId == 0) {
distanceOut[vec] = warpDist;
}
}
}
};
// One block per (query, probe) pair: blockIdx.y selects the query and
// blockIdx.x the probe. The dynamic shared memory allocation is handed to
// the codec (e.g. for dequantization tables) via setSmem.
template <typename Codec, typename Metric>
__global__ void
ivfFlatScan(Tensor<float, 2, true> queries,
bool useResidual,
Tensor<float, 3, true> residualBase,
Tensor<int, 2, true> listIds,
void** allListData,
int* listLengths,
Codec codec,
Metric metric,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<float, 1, true> distance) {
extern __shared__ float smem[];
auto queryId = blockIdx.y;
auto probeId = blockIdx.x;
// This is where we start writing out data
// We ensure that before the array (at offset -1), there is a 0 value
int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
auto listId = listIds[queryId][probeId];
// Safety guard in case NaNs in input cause no list ID to be generated
if (listId == -1) {
return;
}
auto query = queries[queryId].data();
auto vecs = allListData[listId];
auto numVecs = listLengths[listId];
auto dim = queries.getSize(1);
auto distanceOut = distance[outBase].data();
auto residualBaseSlice = residualBase[queryId][probeId].data();
codec.setSmem(smem, dim);
IVFFlatScan<Codec, Metric>::scan(query,
useResidual,
residualBaseSlice,
vecs,
codec,
metric,
numVecs,
dim,
distanceOut);
}
// Scans one tile of queries: computes per-vector distances for every
// (query, probe) pair, then k-selects in two passes into outDistances /
// outIndices. The codec is instantiated to match the scalar quantizer type
// and the metric is dispatched via the HANDLE_METRICS macro below.
void
runIVFFlatScanTile(GpuResources* res,
Tensor<float, 2, true>& queries,
Tensor<int, 2, true>& listIds,
thrust::device_vector<void*>& listData,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
Tensor<char, 1, true>& thrustMem,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<float, 1, true>& allDistances,
Tensor<float, 3, true>& heapDistances,
Tensor<int, 3, true>& heapIndices,
int k,
faiss::MetricType metricType,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices,
hipStream_t stream) {
int dim = queries.getSize(1);
// Check the amount of shared memory per block available based on our type is
// sufficient
if (scalarQ &&
(scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_8bit ||
scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_4bit)) {
int maxDim = getMaxSharedMemPerBlockCurrentDevice() /
(sizeof(float) * 2);
FAISS_THROW_IF_NOT_FMT(dim < maxDim,
"Insufficient shared memory available on the GPU "
"for QT_8bit or QT_4bit with %d dimensions; "
"maximum dimensions possible is %d", dim, maxDim);
}
// Calculate offset lengths, so we know where to write out
// intermediate results
runCalcListOffsets(
res, listIds, listLengths, prefixSumOffsets, thrustMem, stream);
// One block per (query, probe); kIVFFlatScanWarps warps per block.
auto grid = dim3(listIds.getSize(1), listIds.getSize(0));
auto block = dim3(kWarpSize * kIVFFlatScanWarps);
#define RUN_IVF_FLAT \
do { \
hipLaunchKernelGGL(( ivfFlatScan) \
, dim3(grid), dim3(block), codec.getSmemSize(dim), stream, \
queries, \
useResidual, \
residualBase, \
listIds, \
listData.data().get(), \
listLengths.data().get(), \
codec, \
metric, \
prefixSumOffsets, \
allDistances); \
} while (0)
#define HANDLE_METRICS \
do { \
if (metricType == MetricType::METRIC_L2) { \
L2Distance metric; RUN_IVF_FLAT; \
} else { \
IPDistance metric; RUN_IVF_FLAT; \
} \
} while (0)
// Pick the codec matching the quantizer type and launch the scan.
if (!scalarQ) {
CodecFloat codec(dim * sizeof(float));
HANDLE_METRICS;
} else {
switch (scalarQ->qtype) {
case ScalarQuantizer::QuantizerType::QT_8bit:
{
// FIXME: investigate 32 bit load perf issues
// if (dim % 4 == 0) {
if (false) {
Codec<ScalarQuantizer::QuantizerType::QT_8bit, 4>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
} else {
Codec<ScalarQuantizer::QuantizerType::QT_8bit, 1>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
}
}
break;
case ScalarQuantizer::QuantizerType::QT_8bit_uniform:
{
// FIXME: investigate 32 bit load perf issues
if (false) {
// if (dim % 4 == 0) {
Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 4>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
} else {
Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 1>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
}
}
break;
case ScalarQuantizer::QuantizerType::QT_fp16:
{
if (false) {
// FIXME: investigate 32 bit load perf issues
// if (dim % 2 == 0) {
Codec<ScalarQuantizer::QuantizerType::QT_fp16, 2>
codec(scalarQ->code_size);
HANDLE_METRICS;
} else {
Codec<ScalarQuantizer::QuantizerType::QT_fp16, 1>
codec(scalarQ->code_size);
HANDLE_METRICS;
}
}
break;
case ScalarQuantizer::QuantizerType::QT_8bit_direct:
{
Codec<ScalarQuantizer::QuantizerType::QT_8bit_direct, 1>
codec(scalarQ->code_size);
HANDLE_METRICS;
}
break;
case ScalarQuantizer::QuantizerType::QT_4bit:
{
Codec<ScalarQuantizer::QuantizerType::QT_4bit, 1>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
}
break;
case ScalarQuantizer::QuantizerType::QT_4bit_uniform:
{
Codec<ScalarQuantizer::QuantizerType::QT_4bit_uniform, 1>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
}
break;
default:
// unimplemented, should be handled at a higher level
FAISS_ASSERT(false);
}
}
CUDA_TEST_ERROR();
#undef HANDLE_METRICS
#undef RUN_IVF_FLAT
// k-select the output in chunks, to increase parallelism
runPass1SelectLists(prefixSumOffsets,
allDistances,
listIds.getSize(1),
k,
metricToSortDirection(metricType),
heapDistances,
heapIndices,
stream);
// k-select final output
auto flatHeapDistances = heapDistances.downcastInner<2>();
auto flatHeapIndices = heapIndices.downcastInner<2>();
runPass2SelectLists(flatHeapDistances,
flatHeapIndices,
listIndices,
indicesOptions,
prefixSumOffsets,
listIds,
k,
metricToSortDirection(metricType),
outDistances,
outIndices,
stream);
}
// Top-level IVF flat scan: sizes a query tile to fit the available temporary
// memory, allocates double-buffered scratch (one set per alternate stream),
// and processes the queries tile by tile via runIVFFlatScanTile, alternating
// between the two buffer/stream sets so tiles can overlap.
void
runIVFFlatScan(Tensor<float, 2, true>& queries,
Tensor<int, 2, true>& listIds,
thrust::device_vector<void*>& listData,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
int maxListLength,
int k,
faiss::MetricType metric,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<long, 2, true>& outIndices,
GpuResources* res) {
constexpr int kMinQueryTileSize = 8;
constexpr int kMaxQueryTileSize = 128;
constexpr int kThrustMemSize = 16384;
int nprobe = listIds.getSize(1);
auto stream = res->getDefaultStreamCurrentDevice();
// Make a reservation for Thrust to do its dirty work (global memory
// cross-block reduction space); hopefully this is large enough.
DeviceTensor<char, 1, true> thrustMem1(
res, makeTempAlloc(AllocType::Other, stream), {kThrustMemSize});
DeviceTensor<char, 1, true> thrustMem2(
res, makeTempAlloc(AllocType::Other, stream), {kThrustMemSize});
DeviceTensor<char, 1, true>* thrustMem[2] =
{&thrustMem1, &thrustMem2};
// How much temporary storage is available?
// If possible, we'd like to fit within the space available.
size_t sizeAvailable = res->getTempMemoryAvailableCurrentDevice();
// We run two passes of heap selection
// This is the size of the first-level heap passes
constexpr int kNProbeSplit = 8;
int pass2Chunks = ::min(nprobe, kNProbeSplit);
size_t sizeForFirstSelectPass =
pass2Chunks * k * (sizeof(float) + sizeof(int));
// How much temporary storage we need per each query
size_t sizePerQuery =
2 * // # streams
((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
nprobe * maxListLength * sizeof(float) + // allDistances
sizeForFirstSelectPass);
// Clamp the tile size into [kMinQueryTileSize, kMaxQueryTileSize].
int queryTileSize = (int) (sizeAvailable / sizePerQuery);
if (queryTileSize < kMinQueryTileSize) {
queryTileSize = kMinQueryTileSize;
} else if (queryTileSize > kMaxQueryTileSize) {
queryTileSize = kMaxQueryTileSize;
}
// FIXME: we should adjust queryTileSize to deal with this, since
// indexing is in int32
FAISS_ASSERT(queryTileSize * nprobe * maxListLength <
std::numeric_limits<int>::max());
// Temporary memory buffers
// Make sure there is space prior to the start which will be 0, and
// will handle the boundary condition without branches
DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
res, makeTempAlloc(AllocType::Other, stream), {queryTileSize * nprobe + 1});
DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
res, makeTempAlloc(AllocType::Other, stream), {queryTileSize * nprobe + 1});
// Views start at element 1 so element 0 acts as the guaranteed-zero guard.
DeviceTensor<int, 2, true> prefixSumOffsets1(
prefixSumOffsetSpace1[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true> prefixSumOffsets2(
prefixSumOffsetSpace2[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
{&prefixSumOffsets1, &prefixSumOffsets2};
// Make sure the element before prefixSumOffsets is 0, since we
// depend upon simple, boundary-less indexing to get proper results
CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace1.data(),
0,
sizeof(int),
stream));
CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace2.data(),
0,
sizeof(int),
stream));
DeviceTensor<float, 1, true> allDistances1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe * maxListLength});
DeviceTensor<float, 1, true> allDistances2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize * nprobe * maxListLength});
DeviceTensor<float, 1, true>* allDistances[2] =
{&allDistances1, &allDistances2};
DeviceTensor<float, 3, true> heapDistances1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<float, 3, true> heapDistances2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<float, 3, true>* heapDistances[2] =
{&heapDistances1, &heapDistances2};
DeviceTensor<int, 3, true> heapIndices1(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<int, 3, true> heapIndices2(
res, makeTempAlloc(AllocType::Other, stream),
{queryTileSize, pass2Chunks, k});
DeviceTensor<int, 3, true>* heapIndices[2] =
{&heapIndices1, &heapIndices2};
// Alternate streams must not run ahead of work already queued on `stream`.
auto streams = res->getAlternateStreamsCurrentDevice();
streamWait(streams, {stream});
int curStream = 0;
for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
int numQueriesInTile =
::min(queryTileSize, queries.getSize(0) - query);
auto prefixSumOffsetsView =
prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);
auto listIdsView =
listIds.narrowOutermost(query, numQueriesInTile);
auto queryView =
queries.narrowOutermost(query, numQueriesInTile);
auto residualBaseView =
residualBase.narrowOutermost(query, numQueriesInTile);
auto heapDistancesView =
heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
auto heapIndicesView =
heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
auto outDistanceView =
outDistances.narrowOutermost(query, numQueriesInTile);
auto outIndicesView =
outIndices.narrowOutermost(query, numQueriesInTile);
runIVFFlatScanTile(res,
queryView,
listIdsView,
listData,
listIndices,
indicesOptions,
listLengths,
*thrustMem[curStream],
prefixSumOffsetsView,
*allDistances[curStream],
heapDistancesView,
heapIndicesView,
k,
metric,
useResidual,
residualBaseView,
scalarQ,
outDistanceView,
outIndicesView,
streams[curStream]);
curStream = (curStream + 1) % 2;
}
// Re-join the alternate streams with the caller's stream before returning.
streamWait({stream}, streams);
}
} } // namespace
| 23288f63b41a794c7f6e919d661104f2210a1b2b.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFFlatScan.cuh>
#include <faiss/gpu/impl/DistanceUtils.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/PtxUtils.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <thrust/host_vector.h>
namespace faiss { namespace gpu {
namespace {
/// Sort direction per each metric
inline bool metricToSortDirection(MetricType mt) {
    // Inner-product similarity ranks largest values first; L2 distance
    // ranks smallest values first. Anything else is unhandled here.
    if (mt == MetricType::METRIC_INNER_PRODUCT) {
        return true;   // highest
    } else if (mt == MetricType::METRIC_L2) {
        return false;  // lowest
    }

    FAISS_ASSERT(false);  // unhandled metric
    return false;
}
}
// Number of warps we create per block of IVFFlatScan
constexpr int kIVFFlatScanWarps = 4;
// Works for any dimension size
template <typename Codec, typename Metric>
struct IVFFlatScan {
// Computes the metric between `query` and every vector of one inverted
// list, writing one float per vector into distanceOut. The block's
// kIVFFlatScanWarps warps each take a contiguous chunk of vectors; within
// a warp, lanes split the dimensions and the partials are combined with a
// warp-wide sum reduction.
static __device__ void scan(float* query,
bool useResidual,
float* residualBaseSlice,
void* vecData,
const Codec& codec,
const Metric& metric,
int numVecs,
int dim,
float* distanceOut) {
// How many separate loading points are there for the decoder?
int limit = utils::divDown(dim, Codec::kDimPerIter);
// Each warp handles a separate chunk of vectors
int warpId = threadIdx.x / kWarpSize;
// FIXME: why does getLaneId() not work when we write out below!?!?!
int laneId = threadIdx.x % kWarpSize; // getLaneId();
// Divide the set of vectors among the warps
int vecsPerWarp = utils::divUp(numVecs, kIVFFlatScanWarps);
int vecStart = vecsPerWarp * warpId;
int vecEnd = min(vecsPerWarp * (warpId + 1), numVecs);
// Walk the list of vectors for this warp
for (int vec = vecStart; vec < vecEnd; ++vec) {
Metric dist = metric.zero();
// Scan the dimensions available that have whole units for the decoder,
// as the decoder may handle more than one dimension at once (leaving the
// remainder to be handled separately)
for (int d = laneId; d < limit; d += kWarpSize) {
int realDim = d * Codec::kDimPerIter;
float vecVal[Codec::kDimPerIter];
// Decode the kDimPerIter dimensions
codec.decode(vecData, vec, d, vecVal);
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
vecVal[j] += useResidual ? residualBaseSlice[realDim + j] : 0.0f;
}
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
dist.handle(query[realDim + j], vecVal[j]);
}
}
// Handle remainder by a single thread, if any
// Not needed if we decode 1 dim per time
if (Codec::kDimPerIter > 1) {
int realDim = limit * Codec::kDimPerIter;
// Was there any remainder?
if (realDim < dim) {
// Let the first threads in the block sequentially perform it
int remainderDim = realDim + laneId;
if (remainderDim < dim) {
float vecVal =
codec.decodePartial(vecData, vec, limit, laneId);
vecVal += useResidual ? residualBaseSlice[remainderDim] : 0.0f;
dist.handle(query[remainderDim], vecVal);
}
}
}
// Reduce distance within warp
auto warpDist = warpReduceAllSum(dist.reduce());
if (laneId == 0) {
distanceOut[vec] = warpDist;
}
}
}
};
// One block per (query, probe) pair: blockIdx.y selects the query and
// blockIdx.x the probe. The dynamic shared memory allocation is handed to
// the codec (e.g. for dequantization tables) via setSmem.
template <typename Codec, typename Metric>
__global__ void
ivfFlatScan(Tensor<float, 2, true> queries,
bool useResidual,
Tensor<float, 3, true> residualBase,
Tensor<int, 2, true> listIds,
void** allListData,
int* listLengths,
Codec codec,
Metric metric,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<float, 1, true> distance) {
extern __shared__ float smem[];
auto queryId = blockIdx.y;
auto probeId = blockIdx.x;
// This is where we start writing out data
// We ensure that before the array (at offset -1), there is a 0 value
int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
auto listId = listIds[queryId][probeId];
// Safety guard in case NaNs in input cause no list ID to be generated
if (listId == -1) {
return;
}
auto query = queries[queryId].data();
auto vecs = allListData[listId];
auto numVecs = listLengths[listId];
auto dim = queries.getSize(1);
auto distanceOut = distance[outBase].data();
auto residualBaseSlice = residualBase[queryId][probeId].data();
codec.setSmem(smem, dim);
IVFFlatScan<Codec, Metric>::scan(query,
useResidual,
residualBaseSlice,
vecs,
codec,
metric,
numVecs,
dim,
distanceOut);
}
// Scans one tile of queries against their assigned IVF-flat lists and
// k-selects the results:
//   1) runCalcListOffsets computes, per (query, probe), where that list's
//      distances will be written within the flat allDistances buffer;
//   2) ivfFlatScan computes all distances, instantiated for the concrete
//      codec (float / scalar-quantized variants) and metric (L2 / IP);
//   3) two k-selection passes reduce allDistances to the final top-k.
// All work is enqueued asynchronously on `stream`; nothing here synchronizes.
void
runIVFFlatScanTile(GpuResources* res,
                   Tensor<float, 2, true>& queries,
                   Tensor<int, 2, true>& listIds,
                   thrust::device_vector<void*>& listData,
                   thrust::device_vector<void*>& listIndices,
                   IndicesOptions indicesOptions,
                   thrust::device_vector<int>& listLengths,
                   Tensor<char, 1, true>& thrustMem,
                   Tensor<int, 2, true>& prefixSumOffsets,
                   Tensor<float, 1, true>& allDistances,
                   Tensor<float, 3, true>& heapDistances,
                   Tensor<int, 3, true>& heapIndices,
                   int k,
                   faiss::MetricType metricType,
                   bool useResidual,
                   Tensor<float, 3, true>& residualBase,
                   GpuScalarQuantizer* scalarQ,
                   Tensor<float, 2, true>& outDistances,
                   Tensor<long, 2, true>& outIndices,
                   cudaStream_t stream) {
  int dim = queries.getSize(1);

  // Check the amount of shared memory per block available based on our type is
  // sufficient
  if (scalarQ &&
      (scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_8bit ||
       scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_4bit)) {
    // These codecs stage two float tables of `dim` entries in shared memory,
    // hence the divisor of (sizeof(float) * 2).
    int maxDim = getMaxSharedMemPerBlockCurrentDevice() /
      (sizeof(float) * 2);

    FAISS_THROW_IF_NOT_FMT(dim < maxDim,
                           "Insufficient shared memory available on the GPU "
                           "for QT_8bit or QT_4bit with %d dimensions; "
                           "maximum dimensions possible is %d", dim, maxDim);
  }

  // Calculate offset lengths, so we know where to write out
  // intermediate results
  runCalcListOffsets(
    res, listIds, listLengths, prefixSumOffsets, thrustMem, stream);

  // One block per (probe, query) pair; kIVFFlatScanWarps warps per block.
  auto grid = dim3(listIds.getSize(1), listIds.getSize(0));
  auto block = dim3(kWarpSize * kIVFFlatScanWarps);

  // Launches the scan kernel for the `codec` and `metric` objects currently
  // in scope; dynamic shared memory is whatever the codec asks for.
#define RUN_IVF_FLAT                                          \
  do {                                                        \
    ivfFlatScan                                               \
      <<<grid, block, codec.getSmemSize(dim), stream>>>(      \
        queries,                                              \
        useResidual,                                          \
        residualBase,                                         \
        listIds,                                              \
        listData.data().get(),                                \
        listLengths.data().get(),                             \
        codec,                                                \
        metric,                                               \
        prefixSumOffsets,                                     \
        allDistances);                                        \
  } while (0)

  // Instantiates the metric functor for the `codec` in scope and launches.
#define HANDLE_METRICS                                  \
  do {                                                  \
    if (metricType == MetricType::METRIC_L2) {          \
      L2Distance metric; RUN_IVF_FLAT;                  \
    } else {                                            \
      IPDistance metric; RUN_IVF_FLAT;                  \
    }                                                   \
  } while (0)

  // Select the codec: raw float when no scalar quantizer, otherwise one
  // instantiation per quantizer type (and per dims-decoded-per-iteration).
  if (!scalarQ) {
    CodecFloat codec(dim * sizeof(float));
    HANDLE_METRICS;
  } else {
    switch (scalarQ->qtype) {
      case ScalarQuantizer::QuantizerType::QT_8bit:
      {
        // FIXME: investigate 32 bit load perf issues
        // if (dim % 4 == 0) {
        if (false) {
          Codec<ScalarQuantizer::QuantizerType::QT_8bit, 4>
            codec(scalarQ->code_size,
                  scalarQ->gpuTrained.data(),
                  scalarQ->gpuTrained.data() + dim);
          HANDLE_METRICS;
        } else {
          Codec<ScalarQuantizer::QuantizerType::QT_8bit, 1>
            codec(scalarQ->code_size,
                  scalarQ->gpuTrained.data(),
                  scalarQ->gpuTrained.data() + dim);
          HANDLE_METRICS;
        }
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_8bit_uniform:
      {
        // FIXME: investigate 32 bit load perf issues
        if (false) {
          // if (dim % 4 == 0) {
          Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 4>
            codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
          HANDLE_METRICS;
        } else {
          Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 1>
            codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
          HANDLE_METRICS;
        }
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_fp16:
      {
        if (false) {
          // FIXME: investigate 32 bit load perf issues
          // if (dim % 2 == 0) {
          Codec<ScalarQuantizer::QuantizerType::QT_fp16, 2>
            codec(scalarQ->code_size);
          HANDLE_METRICS;
        } else {
          Codec<ScalarQuantizer::QuantizerType::QT_fp16, 1>
            codec(scalarQ->code_size);
          HANDLE_METRICS;
        }
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_8bit_direct:
      {
        Codec<ScalarQuantizer::QuantizerType::QT_8bit_direct, 1>
          codec(scalarQ->code_size);
        HANDLE_METRICS;
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_4bit:
      {
        Codec<ScalarQuantizer::QuantizerType::QT_4bit, 1>
          codec(scalarQ->code_size,
                scalarQ->gpuTrained.data(),
                scalarQ->gpuTrained.data() + dim);
        HANDLE_METRICS;
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_4bit_uniform:
      {
        Codec<ScalarQuantizer::QuantizerType::QT_4bit_uniform, 1>
          codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
        HANDLE_METRICS;
      }
      break;
      default:
        // unimplemented, should be handled at a higher level
        FAISS_ASSERT(false);
    }
  }

  CUDA_TEST_ERROR();

#undef HANDLE_METRICS
#undef RUN_IVF_FLAT

  // k-select the output in chunks, to increase parallelism
  runPass1SelectLists(prefixSumOffsets,
                      allDistances,
                      listIds.getSize(1),
                      k,
                      metricToSortDirection(metricType),
                      heapDistances,
                      heapIndices,
                      stream);

  // k-select final output
  auto flatHeapDistances = heapDistances.downcastInner<2>();
  auto flatHeapIndices = heapIndices.downcastInner<2>();

  runPass2SelectLists(flatHeapDistances,
                      flatHeapIndices,
                      listIndices,
                      indicesOptions,
                      prefixSumOffsets,
                      listIds,
                      k,
                      metricToSortDirection(metricType),
                      outDistances,
                      outIndices,
                      stream);
}
// Top-level IVF-flat scan entry point. Queries are processed in tiles sized
// to fit the available temporary GPU memory; consecutive tiles ping-pong
// between two alternate streams with fully double-buffered scratch space
// (thrustMem, prefixSumOffsets, allDistances, heap buffers) so work from
// adjacent tiles can overlap.
void
runIVFFlatScan(Tensor<float, 2, true>& queries,
               Tensor<int, 2, true>& listIds,
               thrust::device_vector<void*>& listData,
               thrust::device_vector<void*>& listIndices,
               IndicesOptions indicesOptions,
               thrust::device_vector<int>& listLengths,
               int maxListLength,
               int k,
               faiss::MetricType metric,
               bool useResidual,
               Tensor<float, 3, true>& residualBase,
               GpuScalarQuantizer* scalarQ,
               // output
               Tensor<float, 2, true>& outDistances,
               // output
               Tensor<long, 2, true>& outIndices,
               GpuResources* res) {
  constexpr int kMinQueryTileSize = 8;
  constexpr int kMaxQueryTileSize = 128;
  constexpr int kThrustMemSize = 16384;

  int nprobe = listIds.getSize(1);

  auto stream = res->getDefaultStreamCurrentDevice();

  // Make a reservation for Thrust to do its dirty work (global memory
  // cross-block reduction space); hopefully this is large enough.
  DeviceTensor<char, 1, true> thrustMem1(
    res, makeTempAlloc(AllocType::Other, stream), {kThrustMemSize});
  DeviceTensor<char, 1, true> thrustMem2(
    res, makeTempAlloc(AllocType::Other, stream), {kThrustMemSize});
  DeviceTensor<char, 1, true>* thrustMem[2] =
    {&thrustMem1, &thrustMem2};

  // How much temporary storage is available?
  // If possible, we'd like to fit within the space available.
  size_t sizeAvailable = res->getTempMemoryAvailableCurrentDevice();

  // We run two passes of heap selection
  // This is the size of the first-level heap passes
  constexpr int kNProbeSplit = 8;
  int pass2Chunks = std::min(nprobe, kNProbeSplit);

  // Pass 1 emits pass2Chunks (distance, index) heaps of size k per query.
  size_t sizeForFirstSelectPass =
    pass2Chunks * k * (sizeof(float) + sizeof(int));

  // How much temporary storage we need per each query
  size_t sizePerQuery =
    2 * // # streams
    ((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
     nprobe * maxListLength * sizeof(float) + // allDistances
     sizeForFirstSelectPass);

  // Clamp the tile size into [kMinQueryTileSize, kMaxQueryTileSize].
  int queryTileSize = (int) (sizeAvailable / sizePerQuery);

  if (queryTileSize < kMinQueryTileSize) {
    queryTileSize = kMinQueryTileSize;
  } else if (queryTileSize > kMaxQueryTileSize) {
    queryTileSize = kMaxQueryTileSize;
  }

  // FIXME: we should adjust queryTileSize to deal with this, since
  // indexing is in int32
  FAISS_ASSERT(queryTileSize * nprobe * maxListLength <
         std::numeric_limits<int>::max());

  // Temporary memory buffers
  // Make sure there is space prior to the start which will be 0, and
  // will handle the boundary condition without branches
  DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
    res, makeTempAlloc(AllocType::Other, stream), {queryTileSize * nprobe + 1});
  DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
    res, makeTempAlloc(AllocType::Other, stream), {queryTileSize * nprobe + 1});

  // Views starting one element in, so that index -1 (the zero sentinel) is
  // always a valid read for the scan kernel.
  DeviceTensor<int, 2, true> prefixSumOffsets1(
    prefixSumOffsetSpace1[1].data(),
    {queryTileSize, nprobe});
  DeviceTensor<int, 2, true> prefixSumOffsets2(
    prefixSumOffsetSpace2[1].data(),
    {queryTileSize, nprobe});
  DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
    {&prefixSumOffsets1, &prefixSumOffsets2};

  // Make sure the element before prefixSumOffsets is 0, since we
  // depend upon simple, boundary-less indexing to get proper results
  CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace1.data(),
                              0,
                              sizeof(int),
                              stream));
  CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace2.data(),
                              0,
                              sizeof(int),
                              stream));

  DeviceTensor<float, 1, true> allDistances1(
    res, makeTempAlloc(AllocType::Other, stream),
    {queryTileSize * nprobe * maxListLength});
  DeviceTensor<float, 1, true> allDistances2(
    res, makeTempAlloc(AllocType::Other, stream),
    {queryTileSize * nprobe * maxListLength});
  DeviceTensor<float, 1, true>* allDistances[2] =
    {&allDistances1, &allDistances2};

  DeviceTensor<float, 3, true> heapDistances1(
    res, makeTempAlloc(AllocType::Other, stream),
    {queryTileSize, pass2Chunks, k});
  DeviceTensor<float, 3, true> heapDistances2(
    res, makeTempAlloc(AllocType::Other, stream),
    {queryTileSize, pass2Chunks, k});
  DeviceTensor<float, 3, true>* heapDistances[2] =
    {&heapDistances1, &heapDistances2};

  DeviceTensor<int, 3, true> heapIndices1(
    res, makeTempAlloc(AllocType::Other, stream),
    {queryTileSize, pass2Chunks, k});
  DeviceTensor<int, 3, true> heapIndices2(
    res, makeTempAlloc(AllocType::Other, stream),
    {queryTileSize, pass2Chunks, k});
  DeviceTensor<int, 3, true>* heapIndices[2] =
    {&heapIndices1, &heapIndices2};

  auto streams = res->getAlternateStreamsCurrentDevice();
  // The alternate streams must not start before work queued on the default
  // stream (e.g. the memsets above) has been issued.
  streamWait(streams, {stream});

  int curStream = 0;

  for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
    // The final tile may be smaller than queryTileSize.
    int numQueriesInTile =
      std::min(queryTileSize, queries.getSize(0) - query);

    auto prefixSumOffsetsView =
      prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);

    auto listIdsView =
      listIds.narrowOutermost(query, numQueriesInTile);
    auto queryView =
      queries.narrowOutermost(query, numQueriesInTile);
    auto residualBaseView =
      residualBase.narrowOutermost(query, numQueriesInTile);

    auto heapDistancesView =
      heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
    auto heapIndicesView =
      heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);

    auto outDistanceView =
      outDistances.narrowOutermost(query, numQueriesInTile);
    auto outIndicesView =
      outIndices.narrowOutermost(query, numQueriesInTile);

    runIVFFlatScanTile(res,
                       queryView,
                       listIdsView,
                       listData,
                       listIndices,
                       indicesOptions,
                       listLengths,
                       *thrustMem[curStream],
                       prefixSumOffsetsView,
                       *allDistances[curStream],
                       heapDistancesView,
                       heapIndicesView,
                       k,
                       metric,
                       useResidual,
                       residualBaseView,
                       scalarQ,
                       outDistanceView,
                       outIndicesView,
                       streams[curStream]);

    // Ping-pong to the other scratch buffer set / stream for the next tile.
    curStream = (curStream + 1) % 2;
  }

  // Rejoin: the default stream waits for both alternate streams to finish.
  streamWait({stream}, streams);
}
} } // namespace
|
13c069e44015628f40bba742c353264d14ea7d21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "newtimer.h"
#define BLOCK_SIZE 256
#define SOFTENING 1e-9f
// Two pointers into one interleaved buffer of 2*n float4 entries:
// pos covers the first n (xyz position), vel the following n (xyz velocity).
// The .w component is never read by this program.
typedef struct { float4 *pos, *vel; } BodySystem;
/* Fill data[0..n) with pseudo-random floats uniformly drawn from [-1, 1].
 * Uses the C library rand(); call srand() beforehand to vary the sequence. */
void randomizeBodies(float *data, int n) {
  for (int k = 0; k < n; ++k) {
    float u = rand() / (float)RAND_MAX;  /* u in [0, 1] */
    data[k] = 2.0f * u - 1.0f;           /* map to [-1, 1] */
  }
}
__global__
void bodyForce(float4 *p, float4 *v, float dt, int n) {
  // Computes the softened gravitational force on body i from all n bodies,
  // tiling positions through shared memory, then updates v[i].
  // Launch: 1D grid, blockDim.x == BLOCK_SIZE threads per block.
  int i = blockDim.x * blockIdx.x + threadIdx.x;

  float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f;
  // Hoist the loop-invariant load of this thread's own position (the
  // original re-read p[i] from global memory on every inner iteration).
  float4 myPos = (i < n) ? p[i] : make_float4(0.0f, 0.0f, 0.0f, 0.0f);

  for (int tile = 0; tile < gridDim.x; tile++) {
    __shared__ float3 spos[BLOCK_SIZE];
    int src = tile * blockDim.x + threadIdx.x;
    // Guard the staging load: the last tile is partial when n is not a
    // multiple of BLOCK_SIZE (the original read past the end of p here).
    if (src < n) {
      float4 tpos = p[src];
      spos[threadIdx.x] = make_float3(tpos.x, tpos.y, tpos.z);
    }
    // Barriers must be reached by every thread in the block, so they sit
    // OUTSIDE the i < n guard. The original wrapped the whole body in
    // if (i < n), which is undefined behavior for the final partial block.
    __syncthreads();

    // Only accumulate over bodies actually present in this tile; tileCount
    // is <= 0 (loop skipped) if extra blocks were launched.
    int tileStart = tile * blockDim.x;
    int tileCount = min(BLOCK_SIZE, n - tileStart);
    if (i < n) {
      for (int j = 0; j < tileCount; j++) {
        float dx = spos[j].x - myPos.x;
        float dy = spos[j].y - myPos.y;
        float dz = spos[j].z - myPos.z;
        // SOFTENING keeps the self-interaction (j == i) finite and ~zero.
        float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
        float invDist = rsqrtf(distSqr);
        float invDist3 = invDist * invDist * invDist;

        Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3;
      }
    }
    __syncthreads();
  }

  if (i < n) {
    v[i].x += dt*Fx; v[i].y += dt*Fy; v[i].z += dt*Fz;
  }
}
int main(const int argc, const char** argv) {
  StartTimer();

  // Usage: prog [nBodies [nIters]]
  int nBodies = 100000;
  int nIters = 20;
  if (argc > 1) nBodies = atoi(argv[1]);
  if (argc > 2) nIters = atoi(argv[2]);

  const float dt = 0.01f; // time step

  // size_t, not int: nBodies is user-supplied, and 2*nBodies*sizeof(float4)
  // overflows a 32-bit int for nBodies above ~67 million.
  const size_t bytes = 2 * (size_t)nBodies * sizeof(float4);
  float *buf = (float*)malloc(bytes);
  if (buf == NULL) {
    fprintf(stderr, "failed to allocate %zu host bytes\n", bytes);
    return 1;
  }
  // pos occupies the first nBodies float4s, vel the second half.
  BodySystem p = { (float4*)buf, ((float4*)buf) + nBodies };

  randomizeBodies(buf, 8*nBodies); // Init pos / vel data (8 floats per body)

  float *d_buf = NULL;
  hipMalloc(&d_buf, bytes);
  if (d_buf == NULL) {
    fprintf(stderr, "failed to allocate %zu device bytes\n", bytes);
    free(buf);
    return 1;
  }
  BodySystem d_p = { (float4*)d_buf, ((float4*)d_buf) + nBodies };

  int nBlocks = (nBodies + BLOCK_SIZE - 1) / BLOCK_SIZE;
  double tStartLoop = 0.0;
  double tEndLoop = 0.0;
  double loopTime = 0.0;

  for (int iter = 1; iter <= nIters; iter++) {
    tStartLoop = GetTimer() / 1000.0;
    // Full round-trip per iteration: upload pos+vel, update velocities on
    // the device, download everything for host-side position integration.
    hipMemcpy(d_buf, buf, bytes, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( bodyForce), dim3(nBlocks), dim3(BLOCK_SIZE), 0, 0, d_p.pos, d_p.vel, dt, nBodies);
    hipMemcpy(buf, d_buf, bytes, hipMemcpyDeviceToHost);
    tEndLoop = GetTimer() / 1000.0;
    loopTime += tEndLoop - tStartLoop;

    for (int i = 0 ; i < nBodies; i++) { // integrate position
      p.pos[i].x += p.vel[i].x*dt;
      p.pos[i].y += p.vel[i].y*dt;
      p.pos[i].z += p.vel[i].z*dt;
    }
  }

  free(buf);
  hipFree(d_buf);

  const double tEndTime = GetTimer() / 1000.0;
  printf("percent of time in bodyForce: %f \n", loopTime/tEndTime);
  return 0;
}
| 13c069e44015628f40bba742c353264d14ea7d21.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "newtimer.h"
#define BLOCK_SIZE 256
#define SOFTENING 1e-9f
// Two pointers into one interleaved buffer of 2*n float4 entries:
// pos covers the first n (xyz position), vel the following n (xyz velocity).
// The .w component is never read by this program.
typedef struct { float4 *pos, *vel; } BodySystem;
// Populate n floats with independent pseudo-random draws from [-1, 1],
// based on the C library rand() (seed with srand() for different runs).
void randomizeBodies(float *data, int n) {
  float *end = data + n;
  while (data != end) {
    *data++ = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
  }
}
__global__
void bodyForce(float4 *p, float4 *v, float dt, int n) {
  // Computes the softened gravitational force on body i from all n bodies,
  // tiling positions through shared memory, then updates v[i].
  // Launch: 1D grid, blockDim.x == BLOCK_SIZE threads per block.
  int i = blockDim.x * blockIdx.x + threadIdx.x;

  float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f;
  // Hoist the loop-invariant load of this thread's own position (the
  // original re-read p[i] from global memory on every inner iteration).
  float4 myPos = (i < n) ? p[i] : make_float4(0.0f, 0.0f, 0.0f, 0.0f);

  for (int tile = 0; tile < gridDim.x; tile++) {
    __shared__ float3 spos[BLOCK_SIZE];
    int src = tile * blockDim.x + threadIdx.x;
    // Guard the staging load: the last tile is partial when n is not a
    // multiple of BLOCK_SIZE (the original read past the end of p here).
    if (src < n) {
      float4 tpos = p[src];
      spos[threadIdx.x] = make_float3(tpos.x, tpos.y, tpos.z);
    }
    // Barriers must be reached by every thread in the block, so they sit
    // OUTSIDE the i < n guard. The original wrapped the whole body in
    // if (i < n), which is undefined behavior for the final partial block.
    __syncthreads();

    // Only accumulate over bodies actually present in this tile; tileCount
    // is <= 0 (loop skipped) if extra blocks were launched.
    int tileStart = tile * blockDim.x;
    int tileCount = min(BLOCK_SIZE, n - tileStart);
    if (i < n) {
      for (int j = 0; j < tileCount; j++) {
        float dx = spos[j].x - myPos.x;
        float dy = spos[j].y - myPos.y;
        float dz = spos[j].z - myPos.z;
        // SOFTENING keeps the self-interaction (j == i) finite and ~zero.
        float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
        float invDist = rsqrtf(distSqr);
        float invDist3 = invDist * invDist * invDist;

        Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3;
      }
    }
    __syncthreads();
  }

  if (i < n) {
    v[i].x += dt*Fx; v[i].y += dt*Fy; v[i].z += dt*Fz;
  }
}
int main(const int argc, const char** argv) {
  StartTimer();

  // Usage: prog [nBodies [nIters]]
  int nBodies = 100000;
  int nIters = 20;
  if (argc > 1) nBodies = atoi(argv[1]);
  if (argc > 2) nIters = atoi(argv[2]);

  const float dt = 0.01f; // time step

  // size_t, not int: nBodies is user-supplied, and 2*nBodies*sizeof(float4)
  // overflows a 32-bit int for nBodies above ~67 million.
  const size_t bytes = 2 * (size_t)nBodies * sizeof(float4);
  float *buf = (float*)malloc(bytes);
  if (buf == NULL) {
    fprintf(stderr, "failed to allocate %zu host bytes\n", bytes);
    return 1;
  }
  // pos occupies the first nBodies float4s, vel the second half.
  BodySystem p = { (float4*)buf, ((float4*)buf) + nBodies };

  randomizeBodies(buf, 8*nBodies); // Init pos / vel data (8 floats per body)

  float *d_buf = NULL;
  cudaMalloc(&d_buf, bytes);
  if (d_buf == NULL) {
    fprintf(stderr, "failed to allocate %zu device bytes\n", bytes);
    free(buf);
    return 1;
  }
  BodySystem d_p = { (float4*)d_buf, ((float4*)d_buf) + nBodies };

  int nBlocks = (nBodies + BLOCK_SIZE - 1) / BLOCK_SIZE;
  double tStartLoop = 0.0;
  double tEndLoop = 0.0;
  double loopTime = 0.0;

  for (int iter = 1; iter <= nIters; iter++) {
    tStartLoop = GetTimer() / 1000.0;
    // Full round-trip per iteration: upload pos+vel, update velocities on
    // the device, download everything for host-side position integration.
    cudaMemcpy(d_buf, buf, bytes, cudaMemcpyHostToDevice);
    bodyForce<<<nBlocks, BLOCK_SIZE>>>(d_p.pos, d_p.vel, dt, nBodies);
    cudaMemcpy(buf, d_buf, bytes, cudaMemcpyDeviceToHost);
    tEndLoop = GetTimer() / 1000.0;
    loopTime += tEndLoop - tStartLoop;

    for (int i = 0 ; i < nBodies; i++) { // integrate position
      p.pos[i].x += p.vel[i].x*dt;
      p.pos[i].y += p.vel[i].y*dt;
      p.pos[i].z += p.vel[i].z*dt;
    }
  }

  free(buf);
  cudaFree(d_buf);

  const double tEndTime = GetTimer() / 1000.0;
  printf("percent of time in bodyForce: %f \n", loopTime/tEndTime);
  return 0;
}
|
e18d23e12288eb0caa8153523c53477ffff436f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <cudaconv2.cuh>
#include "cudamat.cuh"
using namespace std;
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread.
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numColors, filterPixels, numFilters) if conv
* (numModules, numColors, filterPixels, numFilters) otherwise
* targets: (numColors, imgPixels, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* Number of filters must be divisible by 16.
* Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*/
template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_color(const float* hidActs, const float* filters, float* targets,
                                   const int numModulesX, const int numImages, const int numFilters,
                                   const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
                                   const float scaleTargets, const float scaleOutputs) {
    // Stages 16 filters x numColors colors; the +1 column pads each row's
    // stride, presumably to avoid shared-memory bank conflicts on the
    // column-wise reads in the compute loop.
    __shared__ float shFilters[numColors*16][16 + 1];
    __shared__ float shHidActs[16][16*imgsPerThread];

    const int blockCaseIdx = blockIdx.x * 16*imgsPerThread;
    const int numRegionsX = DIVUP(imgSize, 4);
    const int blockRegionIdx = blockIdx.y;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    // Each of the 16 threadIdx.y values owns one pixel of the 4x4 region.
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSize + pxX;
    // Edge regions can extend past the image; such threads still participate
    // in the cooperative loads below, but skip compute and the final write.
    const bool isPxInImg = pxY < imgSize && pxX < imgSize;
    const uint numModules = numModulesX * numModulesX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSize * imgSize;
    // Re-tile the 16x16 thread block as 8 rows of 32 threads so the hidActs
    // loads are 32-wide and fully coalesced (see the kernel header comment).
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    const int loadY = tidx / 32, loadX = tidx % 32;

    hidActs += blockCaseIdx + loadY * numImages * numModulesX * numModulesX + loadX;
    filters += threadIdx.x;
    targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial image activation per (color, case).
    float prod[numColors][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < numColors; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }
    // Range of module rows/columns whose receptive field can overlap this
    // 4x4 output region (the +3 covers the far edge of the region).
    const int startY = blockRegionTop - paddingStart < filterSize ? 0
                        : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesX, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0
                        : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x]; // (sic: "shilterLoad" typo kept from upstream)
    float* shHidActLoad = &shHidActs[loadY][loadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }

                // Barrier sits outside the guards above, so every thread in
                // the block reaches it regardless of isPxInImg/isPxInModule.
                __syncthreads();
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                __syncthreads();
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            // scale == true: blend into the existing targets values.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            // scale == false: overwrite targets.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numImageColors/numGroups must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets,
                                       const int numModulesX, const int numImages, const int numFilters,
                                       const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
                                       const int numImgColors, const int numGroups,
                                       const float scaleTargets, const float scaleOutputs) {
    // Stages 16 filters x colorsPerThread colors; the +1 column pads each
    // row's stride, presumably to avoid shared-memory bank conflicts.
    __shared__ float shFilters[colorsPerThread*16][16 + 1];
    __shared__ float shHidActs[16][16*imgsPerThread];

    // blockIdx.x packs both the image batch and the color batch (see header).
    const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    const int numRegionsX = DIVUP(imgSize, 4);
    const int blockRegionIdx = blockIdx.y;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    // Each of the 16 threadIdx.y values owns one pixel of the 4x4 region.
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSize + pxX;
    // Out-of-image threads still take part in cooperative loads and barriers.
    const bool isPxInImg = pxY < imgSize && pxX < imgSize;
//    const uint numModules = numModulesX * numModulesX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSize * imgSize;
    // Re-tile the 16x16 thread block as 8 rows of 32 threads for coalesced
    // 32-wide hidActs loads.
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    const int loadY = tidx / 32, loadX = tidx % 32;

    hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModulesX * numModulesX + loadX;
    filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
    targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial activation per (color, case).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of modules whose receptive field can overlap this 4x4 region.
    const int startY = blockRegionTop - paddingStart < filterSize ? 0
                        : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesX, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0
                        : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
    float* shHidActLoad = &shHidActs[loadY][loadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multipply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModulesX * numModulesX) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModulesX * numModulesX * numImages + i];
                        }
                    } else {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }

                // Barrier is outside the guards above, so every thread in the
                // block reaches it regardless of isPxInImg/isPxInModule.
                __syncthreads();
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                __syncthreads();
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            // scale == true: blend into the existing targets values.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            // scale == false: overwrite targets.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets,
const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSize;
const int blockPixelIdxY = blockPixelIdx / imgSize;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSize * imgSize;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesX * numModulesX;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesX, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * B_X; i += 32) {
if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = 0;
}
}
}
const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image, also sample
* In essence, blockIdx.y.x = 1..numRegions
* blockIdx.y.y = 1..overSample
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* overSample := numFilterColors*numGroups/numImgColors
* ^ this is the number of groups that each color channel is connected to
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* colorIndices: (numGroups, numFilterColors)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
/*
 * Image-activations backward kernel for randomly-sparse color connectivity with
 * 4-16 color channels.  See the block comment above for the grid/block mapping,
 * matrix layouts, and divisibility preconditions.  colorIndices maps each group's
 * filter colors to actual image color channels; each 16x16 block reconstructs a
 * 4x4 pixel region for 16*imgsPerThread cases.
 */
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
                                                   const int numModulesX, const int numImages, const int numFilters,
                                                   const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
                                                   const int numImgColors, const int numFilterColors, const int numGroups,
                                                   const float scaleTargets, const float scaleOutputs) {
    // Staging buffers; "+ 1" pads shFilters rows against shared-memory bank conflicts.
    __shared__ float shFilters[colorsPerThread*16][16 + 1];
    __shared__ float shHidActs[16][16*imgsPerThread];
    __shared__ int shColors[colorsPerThread]; // not really necessary -- can repurpose the other shmems
    const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread; // first image case this block handles
    const int numRegionsX = DIVUP(imgSize, 4); // image is tiled into 4x4 pixel regions
    const int numRegions = numRegionsX * numRegionsX;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    // blockIdx.y encodes (sample, region); overSample = number of groups each
    // color channel connects to (see header comment).
    const int overSample = gridDim.y / numRegions;
    const int blockSample = blockIdx.y / numRegions;
    const int groupsPerSample = numGroups / overSample;
    const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
    const int blockRegionIdx = blockIdx.y % numRegions;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    // threadIdx.y picks this thread's pixel within the 4x4 region.
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSize + pxX;
    const bool isPxInImg = pxY < imgSize && pxX < imgSize; // edge regions may overhang the image
    // const uint numModules = numModulesX * numModulesX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSize * imgSize;
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    // Threads re-linearized so hidActs loads are 32 cases wide (coalesced).
    const int loadY = tidx / 32, loadX = tidx % 32;
    // Advance base pointers to this block's slice of each matrix.
    hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModulesX * numModulesX + loadX;
    filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
    targets += blockSample * numImgColors * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
    // Per-thread accumulators: one partial sum per (color, image) pair.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }
    // Module range whose filter windows overlap this block's 4x4 region
    // (the "+ 3" extends the range to cover the region's far edge).
    const int startY = blockRegionTop - paddingStart < filterSize ? 0
                        : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesX, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0
                        : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
    float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
    float* shHidActLoad = &shHidActs[loadY][loadX];
    // Precompute each color's target-row offset from the sparse connectivity table.
    // NOTE(review): visibility of shColors to the whole block appears to rely on the
    // __syncthreads() calls inside the module loop below executing at least once
    // before the final write-back reads shColors -- confirm the launch guarantees
    // every pixel is covered by at least one module.
    if (tidx < colorsPerThread) {
        shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
    }
    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop; // this pixel's row within the module's filter window
        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;
            // A module that overlaps the region may still miss this particular pixel.
            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModulesX * numModulesX) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModulesX * numModulesX * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zero so the compute loop needs no bounds checks.
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }
                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }
                __syncthreads(); // reached by all threads (the divergent branch above closed first)
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                __syncthreads(); // all reads done before the next iteration overwrites the buffers
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            // Blend into existing targets; shColors[c] routes each color to its
            // sparse image channel.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[shColors[c] + i * 16] = scaleTargets * targets[shColors[c] + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[shColors[c] + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image, sample idx.
* In essence, blockIdx.y.x = 1..imgPixels
* blockIdx.y.y = 1..overSample
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* overSample := numFilterColors*numGroups/numImgColors
* ^ this is the number of groups that each color channel is connected to
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* colorIndices: (numGroups, numFilterColors)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
* numFilterColors*numGroups must be divisible by numImgColors.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
/*
 * Image-activations backward kernel for randomly-sparse color connectivity with
 * >= 16 color channels.  See the block comment above for the grid/block mapping,
 * matrix layouts, and divisibility preconditions.  Structurally this is the
 * many-color kernel with an extra "sample" dimension in blockIdx.y and a
 * colorIndices table routing each group's filter colors to image channels.
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_manycolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
                                                 const int numModulesX, const int numImages, const int numFilters,
                                                 const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
                                                 const int numImgColors, const int numFilterColors, const int numGroups,
                                                 const float scaleTargets, const float scaleOutputs) {
    // Staging buffers; "+ 1" pads shFilters rows against shared-memory bank conflicts.
    __shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
    __shared__ float shHidActs[16][B_X*imgsPerThread];
    __shared__ int shColors[colorsPerThread * B_Y]; // not really necessary -- can repurpose the other shmems
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; // first image case this block handles
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSize * imgSize;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // Threads re-linearized for the staging loads: hidActs 32 cases at a time
    // (coalesced), filter weights 16 filters at a time.
    const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
    const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
    const int numModules = numModulesX * numModulesX;
    // blockIdx.y encodes (sample, pixel); overSample = number of groups each
    // color channel connects to (see header comment).
    const int overSample = gridDim.y / imgPixels;
    const int blockSample = blockIdx.y / imgPixels;
    const int groupsPerSample = numGroups / overSample;
    // const int overSample = (numFilterColors * numGroups) / numImgColors;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y * colorsPerThread; // color idx globally
    const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
    // const int filterColorsPerSample = numFilterColors / overSample;
    const int blockPixelIdx = blockIdx.y % imgPixels;
    const int blockPixelIdxX = blockPixelIdx % imgSize;
    const int blockPixelIdxY = blockPixelIdx / imgSize;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
    // Advance base pointers to this block's slice of each matrix.
    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += blockSample * numImgColors * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
    // Per-thread accumulators: one partial sum per (color, image) pair.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }
    // [startY, endY) x [startX, endX): modules whose filter window covers this pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesX, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
    // Precompute each color's target-row offset from the sparse connectivity table.
    // NOTE(review): visibility of shColors to the whole block appears to rely on the
    // __syncthreads() calls inside the module loop below executing at least once
    // before the final write-back reads shColors -- confirm the launch guarantees
    // every pixel is covered by at least one module.
    if (tidx < colorsPerThread * B_Y) {
        shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
    }
    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop; // pixel's row within this module's filter window
        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;
            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * B_X; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zero so the compute loop needs no bounds checks.
                        #pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = 0;
                        }
                    }
                }
                // Convolutional filters are shared across modules; locally-connected
                // ("otherwise") filters have one weight set per module.
                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
                    // First disjunct is a compile-time constant; the runtime check only
                    // fires when the color rows don't divide evenly among loader threads.
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
                    }
                }
                __syncthreads(); // staging complete before any thread reads shared memory
                // Do some actual computation
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    #pragma unroll
                    for (int w = 0; w < 16; w++) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
                        }
                    }
                }
                __syncthreads(); // all reads done before the next iteration overwrites the buffers
            }
        }
    }
    // Write-back: shColors routes each (color, threadIdx.y) pair to its sparse
    // image channel; either blend into existing targets (scale) or overwrite.
    if (scale) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleTargets * targets[shColors[c * B_Y + threadIdx.y] + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*/
// Host-side dispatcher for the image-gradient ("imgActs") computation: given
// hidden activations and filters (layouts documented in the comment above),
// writes/accumulates image-space results into `targets`. Selects one of many
// statically specialized kernel instantiations along four axes:
//   1. conv (weight-shared) vs. local (unshared) units  -> last template bool
//   2. scaleTargets == 0 (overwrite targets) vs. != 0 (scale-and-accumulate)
//   3. filter color count: manycolor (%8==0) / mediumcolor (>3) / color (1-3)
//   4. checkCaseBounds: whether numImages evenly fills each block's image tile
void _imgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
int numFiltersPerGroup = numFilters / numGroups;
int numModules = hidActs.getNumRows() / numFilters;
// Local (unshared) units carry a separate filter bank per module; conv shares one.
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
// Truncating double->int conversion; the squareness assert below validates it.
int filterSize = sqrt(filterPixels);
int imgPixels = imgSize * imgSize;
// Same truncation story: modules are assumed to form a square grid (asserted below).
int numModulesX = sqrt(numModules);
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs.getNumRows() == numModules * numFilters);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesX * numModulesX);
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize);
assert(moduleStride <= filterSize);
assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads(16,16);
int colorsPerThread;
bool checkCaseBounds;
// Launch-configuration selection mirrors the kernel selection below.
// >= 8 filter colors: "manycolor" kernel; 32x4 threads, 4 images/thread,
// one grid column per (image tile, color slab) pair, one grid row per pixel.
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
int imgsPerThread = 4;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
// 4..7 filter colors: "mediumcolor" kernel; 16x16 threads, 8 images/thread,
// grid rows cover 4x4 pixel regions.
} else if (numFilterColors > 3) {
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,16*8) * (numImgColors / colorsPerThread), DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
// 1-3 filter colors: "color" kernel specialized on the exact color count.
} else {
blocks = dim3(DIVUP(numImages,16*8), DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
}
// When not accumulating, targets is (re)shaped to hold the full output;
// otherwise its existing shape must already match.
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(numImgColors*imgPixels, numImages);
} else {
assert(targets.getNumRows() == numImgColors * imgPixels);
assert(targets.getNumCols() == numImages);
}
// Dispatch tree: every leaf sets the cache preference for the chosen
// instantiation and launches it. Template args for manycolor are
// <colorVecLen?, B_X?, imgsPerThread?, colorsPerThread, scale, checkCaseBounds, conv>
// as instantiated; for mediumcolor/color they are
// <imgsPerThread?, colors, scale, checkCaseBounds, conv>. (Exact parameter
// names are defined at the kernel templates — NOTE(review): confirm against
// the kernel declarations earlier in this file.)
if (conv) { // convolutional units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
// 1-3 colors: note the shorter argument list (no numImgColors/numGroups).
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
// Identical structure to the branch above with the scale template flag true.
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
} else { // local, unshared units
// Same dispatch as the conv case with the conv template flag false.
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
// Surfaces launch-configuration errors and (on a synchronizing build) async
// kernel faults from the launch above.
cutilCheckMsg("imgActs: kernel execution failed");
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*/
extern "C" void _imgActsCu(cudamat* hidActs, cudamat* filters, cudamat* targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs->size[0];
int numFilters = filters->size[0];
int numFiltersPerGroup = numFilters / numGroups;
int numModules = hidActs->size[1] / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters->size[1] / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSize * imgSize;
int numModulesX = sqrt(numModules);
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs->size[1] == numModules * numFilters);
assert(filters->size[1] == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesX * numModulesX);
/*
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());*/
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize);
assert(moduleStride <= filterSize);
//assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads(16,16);
int colorsPerThread;
bool checkCaseBounds;
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
int imgsPerThread = 4;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
} else if (numFilterColors > 3) {
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,16*8) * (numImgColors / colorsPerThread), DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
} else {
blocks = dim3(DIVUP(numImages,16*8), DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
}
//if (scaleTargets == 0) { // do not scale or use targets matrix
// targets.resize(numImgColors*imgPixels, numImages);
//} else {
assert(targets->size[1] == numImgColors * imgPixels);
assert(targets->size[0] == numImages);
//}
if (conv) { // convolutional units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
} else { // local, unshared units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
cutilCheckMsg("imgActs: kernel execution failed");
}
/*
 * Convolutional "image acts" (gradient w.r.t. the input images) with the
 * default scaling policy: the targets matrix is overwritten rather than
 * accumulated into (scaleTargets = 0, scaleOutput = 1).
 *
 * All validation and kernel selection is performed by _imgActs; this wrapper
 * only fixes conv = true and the default scale factors.
 */
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                 int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    // conv = true selects the weight-shared (convolutional) kernel variants.
    _imgActs(hidActs, filters, targets,
             imgSize, paddingStart, moduleStride, numImgColors, numGroups,
             0, 1, true);
}
/*
 * Convolutional "image acts" (gradient w.r.t. the input images) with explicit
 * output scaling: targets := scaleTargets * targets + scaleOutput * result.
 *
 * Thin forwarding wrapper; _imgActs does the shape checks and picks the
 * appropriate kernel, with conv = true for the weight-shared case.
 */
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                 int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
                 float scaleTargets, float scaleOutput) {
    _imgActs(hidActs, filters, targets,
             imgSize, paddingStart, moduleStride, numImgColors, numGroups,
             scaleTargets, scaleOutput, true);
}
/*
 * Locally-connected (unshared-weight) "image acts" with the default scaling
 * policy: the targets matrix is overwritten rather than accumulated into
 * (scaleTargets = 0, scaleOutput = 1).
 *
 * Forwarding wrapper around _imgActs with conv = false, which selects the
 * per-module (unshared) filter kernel variants.
 */
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                  int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    _imgActs(hidActs, filters, targets,
             imgSize, paddingStart, moduleStride, numImgColors, numGroups,
             0, 1, false);
}
/*
 * Locally-connected (unshared-weight) "image acts" with explicit output
 * scaling: targets := scaleTargets * targets + scaleOutput * result.
 *
 * Forwarding wrapper; _imgActs performs validation and kernel dispatch with
 * conv = false for the per-module (unshared) filter case.
 */
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                  int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
                  float scaleTargets, float scaleOutput) {
    _imgActs(hidActs, filters, targets,
             imgSize, paddingStart, moduleStride, numImgColors, numGroups,
             scaleTargets, scaleOutput, false);
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
* colorIndices: (numGroups, numFilterColors)
*
* where overSample := (numFilterColors * numGroups) / numImgColors
*
*/
/*
 * Host-side dispatcher for the sparse (random-connectivity) image-acts
 * (backward-data / "deconvolution") operation.
 *
 * Validates operand shapes, picks a launch configuration based on
 * numFilterColors and the image count, resizes targets when overwriting,
 * and launches the matching template instantiation of
 * img_acts_manycolor_sparse_rand (numFilterColors % 8 == 0) or
 * img_acts_mediumcolor_sparse_rand (4..7 filter colors).
 *
 * scaleTargets == 0 means "overwrite targets"; otherwise the result is
 * accumulated as scaleTargets*targets + scaleOutput*result.
 * conv == true selects the convolutional (shared-filter) weight layout,
 * conv == false the local (per-module filter bank) layout.
 */
void _imgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                    int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
                    float scaleTargets, float scaleOutput, bool conv) {
    int numImages = hidActs.getNumCols();
    int numFilters = filters.getNumCols();
    int numModules = hidActs.getNumRows() / numFilters;
    int filterModuleMult = conv ? 1 : numModules; // local layout stores one filter bank per module
    int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
    int filterSize = sqrt(filterPixels);          // filters are square; checked by assert below
    int imgPixels = imgSize * imgSize;
    int numModulesX = sqrt(numModules);           // module grid is square; checked by assert below
    // How many groups write each image color (sparse connectivity oversamples colors).
    int overSample = (numFilterColors * numGroups) / numImgColors;
    assert(numImgColors % numFilterColors == 0);
    assert(numFilters % (16*numGroups) == 0);
    assert((numFilterColors * numGroups) % numImgColors == 0);
    assert(numGroups > 1);
    assert(numFilterColors > 3 && numFilterColors % 2 == 0);

    assert(filterPixels == filterSize * filterSize);
    assert(hidActs.getNumRows() == numModules * numFilters);
    assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
    assert(numModules == numModulesX * numModulesX);

    assert(hidActs.isContiguous());
    assert(filters.isContiguous());

    assert(!hidActs.isTrans());
    assert(!filters.isTrans());
    assert(!targets.isTrans());
    // These routines don't handle the case when only part of the image is visited in the convolution
    assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize);
    assert(moduleStride <= filterSize);

    assert(targets.isContiguous()); // no stride support here!

    dim3 blocks;
    dim3 threads;
    // Fix: give these defined values.  With NDEBUG the asserts above compile
    // away, and numFilterColors <= 3 would previously have left both
    // variables uninitialized (undefined behavior when read below) while
    // every launch branch was skipped.  The values are never used on valid
    // inputs because one of the two branches below always runs.
    int colorsPerThread = 0;
    bool checkCaseBounds = true;
    if (numFilterColors % 8 == 0) {
        // "manycolor" path: 32x4 threads, 4 images per thread.
        threads = dim3(32, 4);
        colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
        int imgsPerThread = 4;
        assert(numFilterColors % (threads.y * colorsPerThread) == 0);
        // Bounds-checked kernels are only needed when numImages is not a
        // multiple of the per-block image tile.
        checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
        blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), overSample * imgPixels);
    } else if (numFilterColors > 3) {
        // "mediumcolor" path: 16x16 threads, each block reconstructs a 4x4
        // image region for 16*8 cases.
        threads = dim3(16, 16);
        colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
        blocks = dim3(DIVUP(numImages,16*8) * (numImgColors / colorsPerThread), overSample * DIVUP(imgSize,4) * DIVUP(imgSize,4));
        checkCaseBounds = numImages % (16*8) != 0;
    }

    if (scaleTargets == 0) { // do not scale or use targets matrix
        targets.resize(overSample*numImgColors*imgPixels, numImages);
    } else {
        assert(targets.getNumRows() == overSample * numImgColors * imgPixels);
        assert(targets.getNumCols() == numImages);
    }

    // The template parameters <..., scale, checkCaseBounds, conv> must be
    // compile-time constants, hence this exhaustive runtime-to-template
    // dispatch.  Shared-memory cache preference is requested before each
    // launch because the kernels are shared-memory heavy.
    if (conv) {
        if (scaleTargets == 0) { // do not scale or use targets matrix
            if (numFilterColors % 8 == 0) {
                if (checkCaseBounds) {
                    if (numFilterColors % 16 == 0) {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                } else {
                    if (numFilterColors % 16 == 0) {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                }
            } else if (numFilterColors > 3) {
                if (checkCaseBounds) {
                    if (colorsPerThread == 4) {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                } else {
                    if (colorsPerThread == 4) {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                }
            }
        } else { // do scale
            if (numFilterColors % 8 == 0) {
                if (checkCaseBounds) {
                    if (numFilterColors % 16 == 0) {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                } else {
                    if (numFilterColors % 16 == 0) {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                }
            } else if (numFilterColors > 3) {
                if (checkCaseBounds) {
                    if (colorsPerThread == 4) {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                } else {
                    if (colorsPerThread == 4) {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, true>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                }
            }
        }
    } else {
        if (scaleTargets == 0) { // do not scale or use targets matrix
            if (numFilterColors % 8 == 0) {
                if (checkCaseBounds) {
                    if (numFilterColors % 16 == 0) {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                } else {
                    if (numFilterColors % 16 == 0) {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                }
            } else if (numFilterColors > 3) {
                if (checkCaseBounds) {
                    if (colorsPerThread == 4) {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                } else {
                    if (colorsPerThread == 4) {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                }
            }
        } else { // do scale
            if (numFilterColors % 8 == 0) {
                if (checkCaseBounds) {
                    if (numFilterColors % 16 == 0) {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                } else {
                    if (numFilterColors % 16 == 0) {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                }
            } else if (numFilterColors > 3) {
                if (checkCaseBounds) {
                    if (colorsPerThread == 4) {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                } else {
                    if (colorsPerThread == 4) {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    } else {
                        hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, false>, hipFuncCachePreferShared);
                        hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
                                                            numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
                    }
                }
            }
        }
    }

    // Kernel launches are asynchronous; surface any launch/execution error here.
    cutilCheckMsg("imgActsSparse: kernel execution failed");
}
/*
 * Convolutional (shared-filter) sparse image-acts, overwrite variant:
 * targets is resized and overwritten with the raw result.
 */
void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                       int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) {
    // scaleTargets = 0 means "do not read targets"; scaleOutput = 1 keeps the
    // result unscaled; conv = true selects the shared-filter weight layout.
    const float scaleTargets = 0;
    const float scaleOutput = 1;
    _imgActsSparse(hidActs, filters, targets, dColorIndices,
                   imgSize, paddingStart, moduleStride,
                   numImgColors, numFilterColors, numGroups,
                   scaleTargets, scaleOutput, true);
}
/*
 * Convolutional (shared-filter) sparse image-acts, accumulate variant:
 * targets <- scaleTargets * targets + scaleOutput * result.
 */
void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                       int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
                       float scaleTargets, float scaleOutput) {
    // Forward everything verbatim; conv = true selects the shared-filter layout.
    _imgActsSparse(hidActs, filters, targets, dColorIndices,
                   imgSize, paddingStart, moduleStride,
                   numImgColors, numFilterColors, numGroups,
                   scaleTargets, scaleOutput, true);
}
/*
 * Locally-connected (per-module filters) sparse image-acts, overwrite
 * variant: targets is resized and overwritten with the raw result.
 */
void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                        int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) {
    // scaleTargets = 0 means "do not read targets"; scaleOutput = 1 keeps the
    // result unscaled; conv = false selects the per-module weight layout.
    const float scaleTargets = 0;
    const float scaleOutput = 1;
    _imgActsSparse(hidActs, filters, targets, dColorIndices,
                   imgSize, paddingStart, moduleStride,
                   numImgColors, numFilterColors, numGroups,
                   scaleTargets, scaleOutput, false);
}
/*
 * Locally-connected (per-module filters) sparse image-acts, accumulate
 * variant: targets <- scaleTargets * targets + scaleOutput * result.
 */
void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                        int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
                        float scaleTargets, float scaleOutput) {
    // Forward everything verbatim; conv = false selects the per-module layout.
    _imgActsSparse(hidActs, filters, targets, dColorIndices,
                   imgSize, paddingStart, moduleStride,
                   numImgColors, numFilterColors, numGroups,
                   scaleTargets, scaleOutput, false);
}
| e18d23e12288eb0caa8153523c53477ffff436f3.cu | /*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <cudaconv2.cuh>
#include "cudamat.cuh"
using namespace std;
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread.
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numColors, filterPixels, numFilters) if conv
* (numModules, numColors, filterPixels, numFilters) otherwise
* targets: (numColors, imgPixels, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* Number of filters must be divisible by 16.
* Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*/
template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_color(const float* hidActs, const float* filters, float* targets,
                                   const int numModulesX, const int numImages, const int numFilters,
                                   const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
                                   const float scaleTargets, const float scaleOutputs) {
    // 16 filters' weights for this pixel, for each color; the "+ 1" column is
    // presumably padding to avoid shared-memory bank conflicts -- TODO confirm.
    __shared__ float shFilters[numColors*16][16 + 1];
    // 16 filters x (16*imgsPerThread) hidden activations for the current module.
    __shared__ float shHidActs[16][16*imgsPerThread];

    // First image case handled by this block.
    const int blockCaseIdx = blockIdx.x * 16*imgsPerThread;
    const int numRegionsX = DIVUP(imgSize, 4);
    // blockIdx.y selects a 4x4 pixel region of the target image.
    const int blockRegionIdx = blockIdx.y;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    // Each of the 16 threadIdx.y values owns one pixel of the 4x4 region.
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSize + pxX;
    // Edge regions may stick out past the image boundary.
    const bool isPxInImg = pxY < imgSize && pxX < imgSize;
    const uint numModules = numModulesX * numModulesX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSize * imgSize;
    // Remap the 16x16 thread block into 8 rows of 32 lanes so hidActs loads
    // read 32 consecutive image cases at a time.
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    const int loadY = tidx / 32, loadX = tidx % 32;

    hidActs += blockCaseIdx + loadY * numImages * numModulesX * numModulesX + loadX;
    filters += threadIdx.x;
    targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;


    // Per-thread accumulators: one partial sum per (color, image).
    float prod[numColors][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < numColors; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter footprint overlaps this 4x4 region.
    const int startY = blockRegionTop - paddingStart < filterSize ? 0
                        : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesX, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0
                        : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    // NOTE: "shilterLoad" is a historical typo for "shFilterLoad".
    float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x];
    float* shHidActLoad = &shHidActs[loadY][loadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            // Whether this thread's pixel actually lies inside this module's
            // filter footprint (varies per thread).
            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                // All threads cooperate in loading the hidden acts, zero-padding
                // out-of-range cases when checkCaseBounds is on.
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }


                }

                // Barrier outside the divergent branches above: all threads reach it.
                __syncthreads();
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                // Protect the shared buffers before the next 16-filter iteration refills them.
                __syncthreads();
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            // Accumulate: targets = scaleTargets*targets + scaleOutputs*prod.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            // Overwrite: targets = scaleOutputs*prod.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numImageColors/numGroups must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets,
                                       const int numModulesX, const int numImages, const int numFilters,
                                       const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
                                       const int numImgColors, const int numGroups,
                                       const float scaleTargets, const float scaleOutputs) {
    // 16 filters' weights for this pixel, for colorsPerThread colors; the
    // "+ 1" column is presumably bank-conflict padding -- TODO confirm.
    __shared__ float shFilters[colorsPerThread*16][16 + 1];
    // 16 filters x (16*imgsPerThread) hidden activations for the current module.
    __shared__ float shHidActs[16][16*imgsPerThread];

    // blockIdx.x packs both the image-case batch and the color batch.
    const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // blockIdx.y selects a 4x4 pixel region of the target image; each of the
    // 16 threadIdx.y values owns one pixel of the region.
    const int numRegionsX = DIVUP(imgSize, 4);
    const int blockRegionIdx = blockIdx.y;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSize + pxX;
    // Edge regions may stick out past the image boundary.
    const bool isPxInImg = pxY < imgSize && pxX < imgSize;
//    const uint numModules = numModulesX * numModulesX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSize * imgSize;
    // Remap the 16x16 thread block into 8 rows of 32 lanes so hidActs loads
    // read 32 consecutive image cases at a time.
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    const int loadY = tidx / 32, loadX = tidx % 32;

    hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModulesX * numModulesX + loadX;
    filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
    targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of modules whose filter footprint overlaps this 4x4 region.
    const int startY = blockRegionTop - paddingStart < filterSize ? 0
                        : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesX, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0
                        : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
    float* shHidActLoad = &shHidActs[loadY][loadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            // Whether this thread's pixel lies inside this module's filter
            // footprint (varies per thread).
            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multipply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModulesX * numModulesX) * numImages];
                // All threads cooperate in loading the hidden acts, zero-padding
                // out-of-range cases when checkCaseBounds is on.
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModulesX * numModulesX * numImages + i];
                        }
                    } else {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.

                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }

                // Barrier outside the divergent branches above: all threads reach it.
                __syncthreads();
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                // Protect the shared buffers before the next 16-filter iteration refills them.
                __syncthreads();
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            // Accumulate: targets = scaleTargets*targets + scaleOutputs*prod.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            // Overwrite: targets = scaleOutputs*prod.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
// Reconstructs B_Y*colorsPerThread color channels of ONE target-image pixel for
// B_X*imgsPerThread cases, by accumulating filter-weight * hidAct products over
// every module whose filter footprint covers this pixel. Layout/divisibility
// preconditions are listed in the comment block preceding this kernel.
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets,
                                        const int numModulesX, const int numImages, const int numFilters,
                                        const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
                                        const int numImgColors, const int numGroups,
                                        const float scaleTargets, const float scaleOutputs) {
    // 16+1: one column of padding so strided column accesses don't serialize on shared-memory banks.
    __shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
    __shared__ float shHidActs[16][B_X*imgsPerThread];

    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;   // first image case this block handles

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // blockIdx.y selects the target-image pixel this block reconstructs.
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSize;
    const int blockPixelIdxY = blockPixelIdx / imgSize;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSize * imgSize;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // hidActs are loaded 32 cases per row (full coalescing); filters 16 at a time.
    const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
    const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
    const int numModules = numModulesX * numModulesX;

    // Advance the base pointers to this thread's slice of each matrix.
    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image) pair.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of module rows/cols whose filter footprint covers this pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesX, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;   // this pixel's row within the filter
        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;   // this pixel's column within the filter

            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
                // Stage hidAct rows for this module / filter batch into shared memory.
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * B_X; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        #pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = 0;  // out-of-range case: contribute zero
                        }
                    }
                }
                // Stage the weights connecting this filter batch to this pixel.
                // conv: all modules share one weight bank; otherwise each module has its own.
                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
                    // First disjunct is compile-time: when rows divide evenly no bounds check is needed.
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
                    }
                }

                __syncthreads();   // staged tiles must be complete before any thread reads them
                // Do some actual computation
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    #pragma unroll
                    for (int w = 0; w < 16; w++) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
                        }
                    }
                }
                __syncthreads();   // all reads done before the next iteration overwrites the tiles
            }
        }
    }
    // Write accumulated results; optionally blend with the existing targets.
    if (scale) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image, also sample
* In essence, blockIdx.y.x = 1..numRegions
* blockIdx.y.y = 1..overSample
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* overSample := numFilterColors*numGroups/numImgColors
* ^ this is the number of groups that each color channel is connected to
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* colorIndices: (numGroups, numFilterColors)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
// Reconstructs colorsPerThread randomly-connected color channels of a 4x4 pixel
// region for 16*imgsPerThread cases. colorIndices maps each group's filter
// colors to global image color channels; the products are scattered into
// targets via those indices. Preconditions are listed in the comment block
// preceding this kernel.
// Fix vs. original: a __syncthreads() is added after shColors is populated.
// shColors is written by only the first colorsPerThread threads and read by all
// threads in the epilogue; the original relied on barriers inside the module
// loop, which is a data race if that loop executes zero iterations.
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
                                                 const int numModulesX, const int numImages, const int numFilters,
                                                 const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
                                                 const int numImgColors, const int numFilterColors, const int numGroups,
                                                 const float scaleTargets, const float scaleOutputs) {
    // 16+1: padding column so strided column accesses don't serialize on shared-memory banks.
    __shared__ float shFilters[colorsPerThread*16][16 + 1];
    __shared__ float shHidActs[16][16*imgsPerThread];
    __shared__ int shColors[colorsPerThread]; // not really necessary -- can repurpose the other shmems

    const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;   // first image case this block handles

    const int numRegionsX = DIVUP(imgSize, 4);
    const int numRegions = numRegionsX * numRegionsX;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally

    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    // blockIdx.y encodes both the 4x4 region and the oversample index.
    const int overSample = gridDim.y / numRegions;
    const int blockSample = blockIdx.y / numRegions;
    const int groupsPerSample = numGroups / overSample;
    const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    const int blockRegionIdx = blockIdx.y % numRegions;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    // threadIdx.y picks one of the 16 pixels in the 4x4 region.
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSize + pxX;
    const bool isPxInImg = pxY < imgSize && pxX < imgSize;   // region may overhang the image edge
//    const uint numModules = numModulesX * numModulesX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSize * imgSize;
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    const int loadY = tidx / 32, loadX = tidx % 32;   // hidActs are loaded 32 cases per row

    hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModulesX * numModulesX + loadX;
    filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
    // Per-color channel offsets are added later via shColors.
    targets += blockSample * numImgColors * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image) pair.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of module rows/cols whose filter footprint touches this 4x4 region.
    const int startY = blockRegionTop - paddingStart < filterSize ? 0
                        : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesX, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0
                        : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
    float* shHidActLoad = &shHidActs[loadY][loadX];

    if (tidx < colorsPerThread) {
        // Precompute each color's offset into targets from the random connectivity table.
        shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
    }
    __syncthreads(); // publish shColors to the whole block before anyone reads it

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModulesX * numModulesX) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModulesX * numModulesX * numImages + i];
                        }
                    } else {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;   // out-of-range case: contribute zero
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }

                __syncthreads();   // staged tiles must be complete before any thread reads them
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                __syncthreads();   // all reads done before the next iteration overwrites the tiles
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[shColors[c] + i * 16] = scaleTargets * targets[shColors[c] + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[shColors[c] + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image, sample idx.
* In essence, blockIdx.y.x = 1..imgPixels
* blockIdx.y.y = 1..overSample
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* overSample := numFilterColors*numGroups/numImgColors
* ^ this is the number of groups that each color channel is connected to
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* colorIndices: (numGroups, numFilterColors)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
* numFilterColors*numGroups must be divisible by numImgColors.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
// Reconstructs B_Y*colorsPerThread randomly-connected color channels of ONE
// target-image pixel for B_X*imgsPerThread cases. colorIndices maps each
// group's filter colors to global image color channels; results are scattered
// into targets via those indices. Preconditions are listed in the comment
// block preceding this kernel.
// Fix vs. original: a __syncthreads() is added after shColors is populated.
// shColors is written by only the first colorsPerThread*B_Y threads and read
// by all threads in the epilogue; the original relied on barriers inside the
// module loop, which is a data race if that loop executes zero iterations.
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_manycolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
                                               const int numModulesX, const int numImages, const int numFilters,
                                               const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
                                               const int numImgColors, const int numFilterColors, const int numGroups,
                                               const float scaleTargets, const float scaleOutputs) {
    // 16+1: padding column so strided column accesses don't serialize on shared-memory banks.
    __shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
    __shared__ float shHidActs[16][B_X*imgsPerThread];
    __shared__ int shColors[colorsPerThread * B_Y]; // not really necessary -- can repurpose the other shmems

    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;   // first image case this block handles

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSize * imgSize;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // hidActs are loaded 32 cases per row (full coalescing); filters 16 at a time.
    const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
    const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
    const int numModules = numModulesX * numModulesX;

    // blockIdx.y encodes both the target pixel and the oversample index.
    const int overSample = gridDim.y / imgPixels;
    const int blockSample = blockIdx.y / imgPixels;
    const int groupsPerSample = numGroups / overSample;

//    const int overSample = (numFilterColors * numGroups) / numImgColors;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y * colorsPerThread; // color idx globally
    const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;

//    const int filterColorsPerSample = numFilterColors / overSample;

    const int blockPixelIdx = blockIdx.y % imgPixels;
    const int blockPixelIdxX = blockPixelIdx % imgSize;
    const int blockPixelIdxY = blockPixelIdx / imgSize;

    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    // Per-color channel offsets are added later via shColors.
    targets += blockSample * numImgColors * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image) pair.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of module rows/cols whose filter footprint covers this pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesX, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];

    if (tidx < colorsPerThread * B_Y) {
        // Precompute each color's offset into targets from the random connectivity table.
        shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
    }
    __syncthreads(); // publish shColors to the whole block before anyone reads it

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
                // Stage hidAct rows for this module / filter batch into shared memory.
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * B_X; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        #pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = 0;   // out-of-range case: contribute zero
                        }
                    }
                }
                // Stage the weights connecting this filter batch to this pixel.
                // conv: all modules share one weight bank; otherwise each module has its own.
                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
                    // First disjunct is compile-time: when rows divide evenly no bounds check is needed.
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
                    }
                }

                __syncthreads();   // staged tiles must be complete before any thread reads them
                // Do some actual computation
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    #pragma unroll
                    for (int w = 0; w < 16; w++) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
                        }
                    }
                }
                __syncthreads();   // all reads done before the next iteration overwrites the tiles
            }
        }
    }
    // Scatter accumulated results into targets through the random color mapping.
    if (scale) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleTargets * targets[shColors[c * B_Y + threadIdx.y] + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*/
// Host-side dispatcher for the image-activations (gradient-w.r.t.-images) pass.
// Reads runtime shape parameters off the input matrices, validates the supported
// configurations, chooses a launch configuration, and then selects one of many
// compile-time template instantiations of conv_img_acts_manycolor /
// img_acts_mediumcolor / img_acts_color to launch (default stream).
void _imgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
// Derived problem dimensions (matrix layouts are described in the comment above).
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
int numFiltersPerGroup = numFilters / numGroups; // NOTE(review): computed but not used below
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules; // local (unshared) units store one filter set per module
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels); // int truncation; squareness enforced by the assert below
int imgPixels = imgSize * imgSize;
int numModulesX = sqrt(numModules); // modules assumed to form a square grid (asserted below)
// Sanity checks: only these shape/grouping combinations have kernel instantiations.
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs.getNumRows() == numModules * numFilters);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesX * numModulesX);
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize);
assert(moduleStride <= filterSize);
assert(targets.isContiguous()); // no stride support here!
// Launch configuration: three regimes keyed on the per-group color count.
// checkCaseBounds is true when numImages is not a multiple of the cases
// handled per block, so the kernel must bounds-check image indices.
dim3 blocks;
dim3 threads(16,16);
int colorsPerThread;
bool checkCaseBounds;
if (numFilterColors % 8 == 0) {
// "manycolor" path: 32x4 threads, 4 images/thread, one block row per pixel.
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
int imgsPerThread = 4;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
} else if (numFilterColors > 3) {
// "mediumcolor" path: 16x16 threads over 4x4 pixel tiles.
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,16*8) * (numImgColors / colorsPerThread), DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
} else {
// "color" path: 1-3 colors handled entirely by one kernel instantiation.
blocks = dim3(DIVUP(numImages,16*8), DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
}
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(numImgColors*imgPixels, numImages);
} else {
assert(targets.getNumRows() == numImgColors * imgPixels);
assert(targets.getNumCols() == numImages);
}
// Dispatch tree. Reading off the branch conditions, the trailing template
// arguments of each instantiation appear to encode: <..., colorsPerThread,
// scale-into-targets, checkCaseBounds, conv> -- TODO confirm against the
// kernel template declarations, which are outside this view.
if (conv) { // convolutional units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
} else { // local, unshared units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
// Surfaces any launch-configuration or asynchronous kernel error from the
// launches above (legacy cutil error-check macro).
cutilCheckMsg("imgActs: kernel execution failed");
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*/
extern "C" void _imgActsCu(cudamat* hidActs, cudamat* filters, cudamat* targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs->size[0];
int numFilters = filters->size[0];
int numFiltersPerGroup = numFilters / numGroups;
int numModules = hidActs->size[1] / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters->size[1] / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSize * imgSize;
int numModulesX = sqrt(numModules);
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs->size[1] == numModules * numFilters);
assert(filters->size[1] == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesX * numModulesX);
/*
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());*/
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize);
assert(moduleStride <= filterSize);
//assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads(16,16);
int colorsPerThread;
bool checkCaseBounds;
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
int imgsPerThread = 4;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
} else if (numFilterColors > 3) {
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,16*8) * (numImgColors / colorsPerThread), DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
} else {
blocks = dim3(DIVUP(numImages,16*8), DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
}
//if (scaleTargets == 0) { // do not scale or use targets matrix
// targets.resize(numImgColors*imgPixels, numImages);
//} else {
assert(targets->size[1] == numImgColors * imgPixels);
assert(targets->size[0] == numImages);
//}
if (conv) { // convolutional units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
} else { // local, unshared units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
cutilCheckMsg("imgActs: kernel execution failed");
}
/*
 * Convolutional image-acts backward pass (weights shared across modules).
 * Convenience overload: writes targets directly, i.e. scaleTargets = 0,
 * scaleOutput = 1 (no accumulation into an existing targets matrix).
 */
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                 int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    // conv == true dispatches to the shared-weight ("convolutional") kernels.
    _imgActs(hidActs, filters, targets,
             imgSize, paddingStart, moduleStride, numImgColors, numGroups,
             0, 1, true);
}
/*
 * Convolutional image-acts backward pass (weights shared across modules),
 * with explicit scaling: targets <- scaleTargets * targets + scaleOutput * result.
 */
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                 int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
                 float scaleTargets, float scaleOutput) {
    // conv == true dispatches to the shared-weight ("convolutional") kernels.
    _imgActs(hidActs, filters, targets,
             imgSize, paddingStart, moduleStride, numImgColors, numGroups,
             scaleTargets, scaleOutput, true);
}
/*
 * Locally-connected image-acts backward pass (unshared weights: one filter set
 * per module). Convenience overload: writes targets directly, i.e.
 * scaleTargets = 0, scaleOutput = 1.
 */
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                  int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    // conv == false dispatches to the unshared ("local") kernel variants.
    _imgActs(hidActs, filters, targets,
             imgSize, paddingStart, moduleStride, numImgColors, numGroups,
             0, 1, false);
}
/*
 * Locally-connected image-acts backward pass (unshared weights: one filter set
 * per module), with explicit scaling:
 * targets <- scaleTargets * targets + scaleOutput * result.
 */
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                  int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
                  float scaleTargets, float scaleOutput) {
    // conv == false dispatches to the unshared ("local") kernel variants.
    _imgActs(hidActs, filters, targets,
             imgSize, paddingStart, moduleStride, numImgColors, numGroups,
             scaleTargets, scaleOutput, false);
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
* colorIndices: (numGroups, numFilterColors)
*
* where overSample := (numFilterColors * numGroups) / numImgColors
*
*/
void _imgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
// int numFiltersPerGroup = numFilters / numGroups;
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSize * imgSize;
int numModulesX = sqrt(numModules);
int overSample = (numFilterColors * numGroups) / numImgColors;
assert(numImgColors % numFilterColors == 0);
assert(numFilters % (16*numGroups) == 0);
assert((numFilterColors * numGroups) % numImgColors == 0);
assert(numGroups > 1);
assert(numFilterColors > 3 && numFilterColors % 2 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs.getNumRows() == numModules * numFilters);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesX * numModulesX);
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize);
assert(moduleStride <= filterSize);
assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads;
int colorsPerThread;
bool checkCaseBounds;
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
int imgsPerThread = 4;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), overSample * imgPixels);
} else if (numFilterColors > 3) {
threads = dim3(16, 16);
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,16*8) * (numImgColors / colorsPerThread), overSample * DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
}
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(overSample*numImgColors*imgPixels, numImages);
} else {
assert(targets.getNumRows() == overSample * numImgColors * imgPixels);
assert(targets.getNumCols() == numImages);
}
if (conv) {
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
} else {
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
}
cutilCheckMsg("imgActsSparse: kernel execution failed");
}
void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, true);
}
void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSize, paddingStart, moduleStride,
numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, true);
}
void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, false);
}
void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSize, paddingStart, moduleStride,
numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, false);
}
|
340e27988e28da80c8573e29102bf6e330d56102.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cuda_graph_avgpool_bprop.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gradInput = NULL;
hipMalloc(&gradInput, XSIZE*YSIZE);
const float *gradOutput = NULL;
hipMalloc(&gradOutput, XSIZE*YSIZE);
const float *clusters = NULL;
hipMalloc(&clusters, XSIZE*YSIZE);
const int nClusters = 1;
const int poolsize = 1;
const int dim = 1;
const int nClustersPerThread = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cuda_graph_avgpool_bprop), dim3(gridBlock),dim3(threadBlock), 0, 0, gradInput,gradOutput,clusters,nClusters,poolsize,dim,nClustersPerThread);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cuda_graph_avgpool_bprop), dim3(gridBlock),dim3(threadBlock), 0, 0, gradInput,gradOutput,clusters,nClusters,poolsize,dim,nClustersPerThread);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cuda_graph_avgpool_bprop), dim3(gridBlock),dim3(threadBlock), 0, 0, gradInput,gradOutput,clusters,nClusters,poolsize,dim,nClustersPerThread);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 340e27988e28da80c8573e29102bf6e330d56102.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cuda_graph_avgpool_bprop.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gradInput = NULL;
cudaMalloc(&gradInput, XSIZE*YSIZE);
const float *gradOutput = NULL;
cudaMalloc(&gradOutput, XSIZE*YSIZE);
const float *clusters = NULL;
cudaMalloc(&clusters, XSIZE*YSIZE);
const int nClusters = 1;
const int poolsize = 1;
const int dim = 1;
const int nClustersPerThread = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cuda_graph_avgpool_bprop<<<gridBlock,threadBlock>>>(gradInput,gradOutput,clusters,nClusters,poolsize,dim,nClustersPerThread);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cuda_graph_avgpool_bprop<<<gridBlock,threadBlock>>>(gradInput,gradOutput,clusters,nClusters,poolsize,dim,nClustersPerThread);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cuda_graph_avgpool_bprop<<<gridBlock,threadBlock>>>(gradInput,gradOutput,clusters,nClusters,poolsize,dim,nClustersPerThread);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c130b6a25634b8ec346fcfb8a6c902bd01777a55.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Writes Potential energy grid of adsorbate inside unit cell of nanoporous material
*/
#include <stdio.h>
#include <stdlib.h>
#include<cuda.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "datatypes.h"
#include "readsettings.h"
#include "Framework.h"
#include "Forcefield.h"
#include "write_settings_to_outputfile.h"
#include "computegridsheet.h"
#include "load_fast_particle_f_array.h"
#define SQRT_N_THREADS 16 // a block may have a max of 512 threads... so 16x16 is max.
// functions to ensure communication with GPU works ?
#define CUDA_CALL(x) do { hipError_t error = x; \
if (error != hipSuccess) { \
printf("Error at %s:%d - %s \n",__FILE__,__LINE__, hipGetErrorString(error)); \
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
double ReadTimer() {
static bool initialized = false;
static struct timeval start;
struct timeval end;
if( !initialized )
{
gettimeofday( &start, NULL );
initialized = true;
}
gettimeofday( &end, NULL );
return (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
}
void HostFractionalToCartesian(double t_matrix[][3],
double x_f, double y_f,double z_f,
double & x, double & y, double & z) {
// compute Cartesian coordinates from fractional
x = t_matrix[0][0] * x_f + t_matrix[0][1] * y_f + t_matrix[0][2] * z_f;
y = t_matrix[1][0] * x_f + t_matrix[1][1] * y_f + t_matrix[1][2] * z_f;
z = t_matrix[2][0] * x_f + t_matrix[2][1] * y_f + t_matrix[2][2] * z_f;
}
int main(int argc, char *argv[]) {
if (argc != 3) {
printf("Run as:\n./writegrid structure_name AdsorbateID\n");
exit(EXIT_FAILURE);
}
bool accessible_or_not_grid = false;
if (accessible_or_not_grid)
printf("\n\nPriting accessibility grid based on hard-sphere model...\n\n");
//
// Import settings
//
GridParameters parameters;
parameters.frameworkname = argv[1];
parameters.adsorbate = argv[2];
parameters.adsorbateMW = GetAdsorbateMW(parameters.adsorbate);
ReadSimulationInputFile(parameters);
if (parameters.verbose) printf("Read simulation.input\n");
// only need UC to be once the cutoff
TripleInt uc_reps = ReadUnitCellReplicationFile(parameters.frameworkname, "once");
parameters.replication_factor_a = uc_reps.arg1;
parameters.replication_factor_b = uc_reps.arg2;
parameters.replication_factor_c = uc_reps.arg3;
if (parameters.verbose) printf("Read .uc replication file\n");
//
// Construct forcefield and framework objects
//
Forcefield forcefield(parameters.forcefieldname);
if (parameters.verbose) printf("Constructed Forcefield object\n");
Framework framework(parameters.frameworkname);
parameters.N_framework_atoms = framework.noatoms;
if (parameters.verbose) printf("Constructed Framework object\n");
// grab sigma/epsilon of adsorbate
PairDouble eps_sig = GrabGuestForceFieldParams(forcefield, parameters.adsorbate);
parameters.epsilon_guest = eps_sig.arg1;
parameters.sigma_guest = eps_sig.arg2;
if (parameters.verbose) printf("Fetched adsorbate FF parameters\n");
//
// Construct array of framework particles, framework_atoms, for speed in energy computations
//
FrameworkParticle * framework_atoms = (FrameworkParticle *) malloc(framework.noatoms * sizeof(FrameworkParticle));
LoadFastFrameworkParticleArray(framework_atoms, framework, forcefield, parameters.epsilon_guest, parameters.sigma_guest);
if (parameters.verbose) printf("Initialized framework_atoms array in host\n");
//
// Construct grid
//
int N_x, N_y, N_z;
if (accessible_or_not_grid) { // compute grid in fractional space
N_x = static_cast<int>(ceil(1.0 / parameters.grid_resolution)); // size of grid
N_y = static_cast<int>(ceil(1.0 / parameters.grid_resolution));
N_z = static_cast<int>(ceil(1.0 / parameters.grid_resolution));
}
else {
N_x = static_cast<int>(ceil(framework.a / parameters.grid_resolution)); // size of grid
N_y = static_cast<int>(ceil(framework.b / parameters.grid_resolution));
N_z = static_cast<int>(ceil(framework.c / parameters.grid_resolution));
}
parameters.N_x = N_x; parameters.N_y = N_y; parameters.N_z = N_z;
// pointer array of fractional grid points
double * x_f_gridpoints = (double *) malloc(N_x * sizeof(double));
double * y_f_gridpoints = (double *) malloc(N_y * sizeof(double));
double * z_f_gridpoints = (double *) malloc(N_z * sizeof(double));
// fractional coordinate for a unit cell
for (int i = 0; i < N_x; i++)
x_f_gridpoints[i] = 1.0 * i / (N_x - 1);
for (int i = 0; i < N_y; i++)
y_f_gridpoints[i] = 1.0 * i / (N_y - 1);
for (int i = 0; i < N_z; i++)
z_f_gridpoints[i] = 1.0 * i / (N_z - 1);
//
// Write settings to outputfile
//
FILE * outputfile;
char outputfilename[512];
sprintf(outputfilename, "output_files/%s_%s_grid.out", parameters.frameworkname.c_str(), parameters.adsorbate.c_str());
outputfile = fopen(outputfilename, "w");
WriteSettingsToOutputfile(outputfile, parameters, framework, forcefield, framework_atoms);
if (parameters.verbose) printf("Wrote info to outputfile\n");
//
// PREPARE GRID FILE
//
FILE * gridfile;
char gridfilename[512];
if ((!accessible_or_not_grid) & (parameters.gridoutputformat == "txt")) { // format I made up for Henry coefficient and GCMC calcs
sprintf(gridfilename, "data/grids/%s_%s_%s.txt", framework.name.c_str(), parameters.adsorbate.c_str(), forcefield.name.c_str());
gridfile = fopen(gridfilename, "w");
fprintf(gridfile, "%d %d %d = (N_x,N_y,N_z) grid points (grid is in fractional coords). Endpoints included.\n", N_x, N_y, N_z);
}
else if (parameters.gridoutputformat == "cube") { // for visualization with VisIt
if (accessible_or_not_grid)
sprintf(gridfilename, "data/grids/%s_%s_%s_accessibility.cube", framework.name.c_str(), parameters.adsorbate.c_str(), forcefield.name.c_str());
else
sprintf(gridfilename, "data/grids/%s_%s_%s.cube", framework.name.c_str(), parameters.adsorbate.c_str(), forcefield.name.c_str());
gridfile = fopen(gridfilename, "w");
fprintf(gridfile, "\nThis is a grid file.\n");
fprintf(gridfile, "%d % 13.6lf % 13.6lf % 13.6lf\n",
0, 0.0, 0.0, 0.0); // give number of atoms
// give little vectors that form a volume element
fprintf(gridfile, "%d % 13.6lf % 13.6lf % 13.6lf\n",
N_x, framework.t_matrix[0][0] / (N_x - 1), 0.0, 0.0);
fprintf(gridfile, "%d % 13.6lf % 13.6lf % 13.6lf\n",
N_y, framework.t_matrix[0][1] / (N_y - 1), framework.t_matrix[1][1] / (N_y - 1), 0.0);
fprintf(gridfile, "%d % 13.6lf % 13.6lf % 13.6lf\n",
N_z, framework.t_matrix[0][2] / (N_z - 1), framework.t_matrix[1][2] / (N_z - 1), framework.t_matrix[2][2] / (N_z - 1));
}
else {
printf("Grid output format must be txt or cube\n");
exit(EXIT_FAILURE);
}
if (parameters.verbose) printf("Initialized grid file\n");
//
// Parallelization strategy: pass sheets of the grid to the GPU at a time, sheets are defind by x = constant
//
// energies at zy grid sheet. entry k+j*N_z is the energy at point z=k*dz, y = j*dy
double * h_zy_energies = (double *) malloc(N_z * N_y * sizeof(double));
//
// Move data to GPU device; "d_" indicates this is data for the device
//
// Initialize memory for zy_energies on device, to be called and stored bck to zy_energies later
double * d_zy_energies;
CUDA_CALL(hipMalloc((void **) & d_zy_energies, N_z * N_y * sizeof(double)));
// Copy framework_atoms to device. All blocks share this.
FrameworkParticle * d_framework_atoms;
CUDA_CALL(hipMalloc((void **) & d_framework_atoms, framework.noatoms * sizeof(FrameworkParticle)));
CUDA_CALL(hipMemcpy(d_framework_atoms, framework_atoms, framework.noatoms * sizeof(FrameworkParticle), hipMemcpyHostToDevice));
fprintf(outputfile, " Size of framework atoms array = %f MB\n", framework.noatoms * sizeof(FrameworkParticle) / (1024.0 * 1024.0));
// copy z_f and y_f grid points to device. The parallelization strategy is to pass sheets of x = constant, so this is not needed on the device.
double * d_z_f_gridpoints;
double * d_y_f_gridpoints;
CUDA_CALL(hipMalloc((void **) & d_z_f_gridpoints, N_z * sizeof(double)));
CUDA_CALL(hipMalloc((void **) & d_y_f_gridpoints, N_y * sizeof(double)));
CUDA_CALL(hipMemcpy(d_z_f_gridpoints, z_f_gridpoints, N_z * sizeof(double), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_y_f_gridpoints, y_f_gridpoints, N_y * sizeof(double), hipMemcpyHostToDevice));
fprintf(outputfile, " Size of grid sheet = %f MB\n", N_z * N_y * sizeof(double) / (1024.0 * 1024.0));
if (parameters.verbose) printf("Copied framework_atoms, z_f/y_f grid points, and allocated zy_energies to GPU device\n");
//
// Write the grid
//
fprintf(outputfile, " A block is %d by %d threads.\n", SQRT_N_THREADS, SQRT_N_THREADS);
dim3 dimBlock(SQRT_N_THREADS, SQRT_N_THREADS); // size of block. making 2D thread block
dim3 dimGrid(N_z / SQRT_N_THREADS + 1, N_y / SQRT_N_THREADS + 1);
double t0 = ReadTimer();
if (parameters.verbose) printf("Starting loop to write grid...\n# x-grid points: %d\n", N_x);
for (int i=0; i<3; i++) {
for (int j=0; j<3; j++){
parameters.t_matrix[i][j] = framework.t_matrix[i][j];
}
}// TODO: remove and use framework.t_matrix instead. right now it cant pass to cuda kernel without memory error...
int count_grid_pts = 0;
if (!accessible_or_not_grid) {
for (int i = 0; i < N_x; i++) {
// printf("x_F=%f\n", x_f_gridpoints[i]);
hipLaunchKernelGGL(( ComputeGridSheet) , dim3(dimGrid), dim3(dimBlock), 0, 0, d_z_f_gridpoints,
d_y_f_gridpoints,
d_zy_energies,
d_framework_atoms,
parameters,
x_f_gridpoints[i]);
CUDA_CALL( hipPeekAtLastError() );
CUDA_CALL( hipDeviceSynchronize() );
hipDeviceSynchronize();
// get energies from device
CUDA_CALL(hipMemcpy(h_zy_energies, d_zy_energies, N_z * N_y * sizeof(double) , hipMemcpyDeviceToHost));
hipDeviceSynchronize();
// printf("\n\n Host:\n");
// for (int kk = 0; kk < N_z; kk++) {
// for (int jj=0; jj<N_y;jj++){
// printf("E[%d]=%f\n", kk+ jj*N_z, h_zy_energies[kk+ jj*N_z]);
// }
// }
// exit(EXIT_FAILURE);
// write energies to file
if (parameters.gridoutputformat=="cube") {
for (int j = 0; j < N_y; j++) {
int count = 0;
for(int k = 0; k < N_z; k++) {
fprintf(gridfile, "% 13.6E ", h_zy_energies[k + j * N_z] * 8.314 / 1000); // kJ/mol
count ++;
if (count == 6) {
fprintf(gridfile, "\n");
count = 0; // reset counter
}
count_grid_pts ++;
}
fprintf(gridfile, "\n"); //new line after z over
}
}
if (parameters.gridoutputformat=="txt") { // format I made up ^.^ TODO more efficient format?
for (int j = 0; j < N_y; j++) {
for(int k = 0; k < N_z; k++) {
count_grid_pts += 1;
fprintf(gridfile, "% 13.6E ", h_zy_energies[k + j * N_z]);
if ( k == (N_z - 1))
fprintf(gridfile, "\n"); // new line for every pencil of z's
}
}
}
if (parameters.verbose) printf(" Sheet %d out of %d completed.\n", i, N_x);
} // end x loop
}
else {
printf("Starting...\n");
for (int i = 0; i < N_x; i++) {
hipLaunchKernelGGL(( ComputeGridSheetAccessibleOrNot) , dim3(dimGrid), dim3(dimBlock), 0, 0, d_z_f_gridpoints,
d_y_f_gridpoints,
d_zy_energies,
d_framework_atoms,
parameters,
x_f_gridpoints[i]);
CUDA_CALL( hipPeekAtLastError() );
CUDA_CALL( hipDeviceSynchronize() );
hipDeviceSynchronize();
// get energies from device
CUDA_CALL(hipMemcpy(h_zy_energies, d_zy_energies, N_z * N_y * sizeof(double) , hipMemcpyDeviceToHost));
hipDeviceSynchronize();
if (parameters.verbose) printf(" Sheet %d out of %d completed.\n", i, N_x);
// write energies to cube file
for (int j = 0; j < N_y; j++) {
int count = 0;
for(int k = 0; k < N_z; k++) {
fprintf(gridfile, "%d ", int(h_zy_energies[k + j * N_z])); // kJ/mol
count ++;
if (count == 6) {
fprintf(gridfile, "\n");
count = 0; // reset counter
}
count_grid_pts ++;
}
fprintf(gridfile, "\n"); //new line after z over
}
} // end x loop
}
assert(count_grid_pts == (N_x * N_y * N_z));
double sim_time = ReadTimer() - t0;
fprintf(outputfile, " Time to write grid: %f s\n", sim_time);
if (parameters.verbose) printf("Completed grid writing! Freeing up memory in GPU...\n");
//
// Free memory, close files
//
hipFree(d_framework_atoms);
hipFree(d_zy_energies);
hipFree(d_z_f_gridpoints);
hipFree(d_y_f_gridpoints);
free(framework_atoms);
free(x_f_gridpoints); free(y_f_gridpoints); free(z_f_gridpoints);
fclose(outputfile); fclose(gridfile);
}
| c130b6a25634b8ec346fcfb8a6c902bd01777a55.cu | /*
* Writes Potential energy grid of adsorbate inside unit cell of nanoporous material
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "datatypes.h"
#include "readsettings.h"
#include "Framework.h"
#include "Forcefield.h"
#include "write_settings_to_outputfile.h"
#include "computegridsheet.h"
#include "load_fast_particle_f_array.h"
#define SQRT_N_THREADS 16 // a block may have a max of 512 threads... so 16x16 is max.
// functions to ensure communication with GPU works ?
#define CUDA_CALL(x) do { cudaError_t error = x; \
if (error != cudaSuccess) { \
printf("Error at %s:%d - %s \n",__FILE__,__LINE__, cudaGetErrorString(error)); \
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
// Returns wall-clock time in seconds elapsed since the first call to this
// function. The very first call records the reference point and therefore
// returns (approximately) zero. Uses gettimeofday, so microsecond resolution.
double ReadTimer() {
    static bool have_start = false;
    static struct timeval start_time;
    if (!have_start) {
        gettimeofday(&start_time, NULL);
        have_start = true;
    }
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double)(now.tv_sec - start_time.tv_sec);
    double microseconds = (double)(now.tv_usec - start_time.tv_usec);
    return seconds + 1.0e-6 * microseconds;
}
// Converts fractional coordinates (x_f, y_f, z_f) into Cartesian coordinates
// (x, y, z) by applying the 3x3 transformation matrix t_matrix:
//   cart = t_matrix * frac
// Results are written through the reference output parameters.
void HostFractionalToCartesian(double t_matrix[][3],
        double x_f, double y_f, double z_f,
        double & x, double & y, double & z) {
    const double frac[3] = {x_f, y_f, z_f};
    double cart[3] = {0.0, 0.0, 0.0};
    for (int row = 0; row < 3; ++row) {
        for (int col = 0; col < 3; ++col) {
            cart[row] += t_matrix[row][col] * frac[col];
        }
    }
    x = cart[0];
    y = cart[1];
    z = cart[2];
}
// Entry point.  Usage: ./writegrid structure_name AdsorbateID
// Reads simulation settings, builds Framework/Forcefield objects, then computes
// the guest-host potential energy grid on the GPU one x = const sheet at a
// time and writes it to a .txt or .cube file.
// Fixes vs. previous revision: boolean && instead of bitwise & in the grid-file
// format test, removal of redundant bare cudaDeviceSynchronize() calls (the
// CUDA_CALL'd synchronize and the blocking cudaMemcpy already synchronize),
// and freeing of the previously-leaked h_zy_energies buffer.
int main(int argc, char *argv[]) {
    if (argc != 3) {
        printf("Run as:\n./writegrid structure_name AdsorbateID\n");
        exit(EXIT_FAILURE);
    }
    // Compile-time switch: false = energy grid, true = hard-sphere accessibility grid.
    bool accessible_or_not_grid = false;
    if (accessible_or_not_grid)
        printf("\n\nPriting accessibility grid based on hard-sphere model...\n\n");
    //
    // Import settings
    //
    GridParameters parameters;
    parameters.frameworkname = argv[1];
    parameters.adsorbate = argv[2];
    parameters.adsorbateMW = GetAdsorbateMW(parameters.adsorbate);
    ReadSimulationInputFile(parameters);
    if (parameters.verbose) printf("Read simulation.input\n");
    // only need the unit cell to be replicated to once the cutoff
    TripleInt uc_reps = ReadUnitCellReplicationFile(parameters.frameworkname, "once");
    parameters.replication_factor_a = uc_reps.arg1;
    parameters.replication_factor_b = uc_reps.arg2;
    parameters.replication_factor_c = uc_reps.arg3;
    if (parameters.verbose) printf("Read .uc replication file\n");
    //
    // Construct forcefield and framework objects
    //
    Forcefield forcefield(parameters.forcefieldname);
    if (parameters.verbose) printf("Constructed Forcefield object\n");
    Framework framework(parameters.frameworkname);
    parameters.N_framework_atoms = framework.noatoms;
    if (parameters.verbose) printf("Constructed Framework object\n");
    // grab sigma/epsilon of adsorbate
    PairDouble eps_sig = GrabGuestForceFieldParams(forcefield, parameters.adsorbate);
    parameters.epsilon_guest = eps_sig.arg1;
    parameters.sigma_guest = eps_sig.arg2;
    if (parameters.verbose) printf("Fetched adsorbate FF parameters\n");
    //
    // Construct array of framework particles, framework_atoms, for speed in energy computations
    //
    FrameworkParticle * framework_atoms = (FrameworkParticle *) malloc(framework.noatoms * sizeof(FrameworkParticle));
    LoadFastFrameworkParticleArray(framework_atoms, framework, forcefield, parameters.epsilon_guest, parameters.sigma_guest);
    if (parameters.verbose) printf("Initialized framework_atoms array in host\n");
    //
    // Construct grid
    //
    int N_x, N_y, N_z;
    if (accessible_or_not_grid) { // compute grid in fractional space
        N_x = static_cast<int>(ceil(1.0 / parameters.grid_resolution)); // size of grid
        N_y = static_cast<int>(ceil(1.0 / parameters.grid_resolution));
        N_z = static_cast<int>(ceil(1.0 / parameters.grid_resolution));
    }
    else {
        N_x = static_cast<int>(ceil(framework.a / parameters.grid_resolution)); // size of grid
        N_y = static_cast<int>(ceil(framework.b / parameters.grid_resolution));
        N_z = static_cast<int>(ceil(framework.c / parameters.grid_resolution));
    }
    parameters.N_x = N_x; parameters.N_y = N_y; parameters.N_z = N_z;
    // arrays of fractional grid points
    double * x_f_gridpoints = (double *) malloc(N_x * sizeof(double));
    double * y_f_gridpoints = (double *) malloc(N_y * sizeof(double));
    double * z_f_gridpoints = (double *) malloc(N_z * sizeof(double));
    // fractional coordinates for a unit cell; both endpoints 0 and 1 included
    for (int i = 0; i < N_x; i++)
        x_f_gridpoints[i] = 1.0 * i / (N_x - 1);
    for (int i = 0; i < N_y; i++)
        y_f_gridpoints[i] = 1.0 * i / (N_y - 1);
    for (int i = 0; i < N_z; i++)
        z_f_gridpoints[i] = 1.0 * i / (N_z - 1);
    //
    // Write settings to outputfile
    //
    FILE * outputfile;
    char outputfilename[512];
    sprintf(outputfilename, "output_files/%s_%s_grid.out", parameters.frameworkname.c_str(), parameters.adsorbate.c_str());
    outputfile = fopen(outputfilename, "w");
    WriteSettingsToOutputfile(outputfile, parameters, framework, forcefield, framework_atoms);
    if (parameters.verbose) printf("Wrote info to outputfile\n");
    //
    // PREPARE GRID FILE
    //
    FILE * gridfile;
    char gridfilename[512];
    if ((!accessible_or_not_grid) && (parameters.gridoutputformat == "txt")) { // format for Henry coefficient and GCMC calcs
        sprintf(gridfilename, "data/grids/%s_%s_%s.txt", framework.name.c_str(), parameters.adsorbate.c_str(), forcefield.name.c_str());
        gridfile = fopen(gridfilename, "w");
        fprintf(gridfile, "%d %d %d = (N_x,N_y,N_z) grid points (grid is in fractional coords). Endpoints included.\n", N_x, N_y, N_z);
    }
    else if (parameters.gridoutputformat == "cube") { // for visualization with VisIt
        if (accessible_or_not_grid)
            sprintf(gridfilename, "data/grids/%s_%s_%s_accessibility.cube", framework.name.c_str(), parameters.adsorbate.c_str(), forcefield.name.c_str());
        else
            sprintf(gridfilename, "data/grids/%s_%s_%s.cube", framework.name.c_str(), parameters.adsorbate.c_str(), forcefield.name.c_str());
        gridfile = fopen(gridfilename, "w");
        fprintf(gridfile, "\nThis is a grid file.\n");
        fprintf(gridfile, "%d % 13.6lf % 13.6lf % 13.6lf\n",
            0, 0.0, 0.0, 0.0); // give number of atoms
        // give little vectors that form a volume element
        fprintf(gridfile, "%d % 13.6lf % 13.6lf % 13.6lf\n",
            N_x, framework.t_matrix[0][0] / (N_x - 1), 0.0, 0.0);
        fprintf(gridfile, "%d % 13.6lf % 13.6lf % 13.6lf\n",
            N_y, framework.t_matrix[0][1] / (N_y - 1), framework.t_matrix[1][1] / (N_y - 1), 0.0);
        fprintf(gridfile, "%d % 13.6lf % 13.6lf % 13.6lf\n",
            N_z, framework.t_matrix[0][2] / (N_z - 1), framework.t_matrix[1][2] / (N_z - 1), framework.t_matrix[2][2] / (N_z - 1));
    }
    else {
        printf("Grid output format must be txt or cube\n");
        exit(EXIT_FAILURE);
    }
    if (parameters.verbose) printf("Initialized grid file\n");
    //
    // Parallelization strategy: pass sheets of the grid to the GPU at a time, sheets are defined by x = constant
    //
    // energies at zy grid sheet. entry k+j*N_z is the energy at point z=k*dz, y = j*dy
    double * h_zy_energies = (double *) malloc(N_z * N_y * sizeof(double));
    //
    // Move data to GPU device; "d_" indicates this is data for the device
    //
    // Initialize memory for zy_energies on device, copied back to h_zy_energies after each sheet
    double * d_zy_energies;
    CUDA_CALL(cudaMalloc((void **) & d_zy_energies, N_z * N_y * sizeof(double)));
    // Copy framework_atoms to device. All blocks share this.
    FrameworkParticle * d_framework_atoms;
    CUDA_CALL(cudaMalloc((void **) & d_framework_atoms, framework.noatoms * sizeof(FrameworkParticle)));
    CUDA_CALL(cudaMemcpy(d_framework_atoms, framework_atoms, framework.noatoms * sizeof(FrameworkParticle), cudaMemcpyHostToDevice));
    fprintf(outputfile, "    Size of framework atoms array = %f MB\n", framework.noatoms * sizeof(FrameworkParticle) / (1024.0 * 1024.0));
    // copy z_f and y_f grid points to device. x is passed per sheet, so it is not needed on the device.
    double * d_z_f_gridpoints;
    double * d_y_f_gridpoints;
    CUDA_CALL(cudaMalloc((void **) & d_z_f_gridpoints, N_z * sizeof(double)));
    CUDA_CALL(cudaMalloc((void **) & d_y_f_gridpoints, N_y * sizeof(double)));
    CUDA_CALL(cudaMemcpy(d_z_f_gridpoints, z_f_gridpoints, N_z * sizeof(double), cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(d_y_f_gridpoints, y_f_gridpoints, N_y * sizeof(double), cudaMemcpyHostToDevice));
    fprintf(outputfile, "    Size of grid sheet = %f MB\n", N_z * N_y * sizeof(double) / (1024.0 * 1024.0));
    if (parameters.verbose) printf("Copied framework_atoms, z_f/y_f grid points, and allocated zy_energies to GPU device\n");
    //
    // Write the grid
    //
    fprintf(outputfile, "    A block is %d by %d threads.\n", SQRT_N_THREADS, SQRT_N_THREADS);
    dim3 dimBlock(SQRT_N_THREADS, SQRT_N_THREADS); // size of block. making 2D thread block
    // +1 block in each direction covers the remainder when N_z/N_y are not multiples of SQRT_N_THREADS
    dim3 dimGrid(N_z / SQRT_N_THREADS + 1, N_y / SQRT_N_THREADS + 1);
    double t0 = ReadTimer();
    if (parameters.verbose) printf("Starting loop to write grid...\n# x-grid points: %d\n", N_x);
    for (int i=0; i<3; i++) {
        for (int j=0; j<3; j++){
            parameters.t_matrix[i][j] = framework.t_matrix[i][j];
        }
    }// TODO: remove and use framework.t_matrix instead. right now it cant pass to cuda kernel without memory error...
    int count_grid_pts = 0;
    if (!accessible_or_not_grid) {
        for (int i = 0; i < N_x; i++) {
            // compute one x = const sheet of energies on the device
            ComputeGridSheet <<<dimGrid, dimBlock>>> (d_z_f_gridpoints,
                                                      d_y_f_gridpoints,
                                                      d_zy_energies,
                                                      d_framework_atoms,
                                                      parameters,
                                                      x_f_gridpoints[i]);
            CUDA_CALL( cudaPeekAtLastError() );   // catch launch-configuration errors
            CUDA_CALL( cudaDeviceSynchronize() ); // catch async execution errors
            // get energies from device (cudaMemcpy is blocking; no further sync needed)
            CUDA_CALL(cudaMemcpy(h_zy_energies, d_zy_energies, N_z * N_y * sizeof(double) , cudaMemcpyDeviceToHost));
            // write energies to file
            if (parameters.gridoutputformat=="cube") {
                for (int j = 0; j < N_y; j++) {
                    int count = 0;
                    for(int k = 0; k < N_z; k++) {
                        fprintf(gridfile, "% 13.6E ", h_zy_energies[k + j * N_z] * 8.314 / 1000); // kJ/mol
                        count ++;
                        if (count == 6) { // cube convention: at most six values per line
                            fprintf(gridfile, "\n");
                            count = 0; // reset counter
                        }
                        count_grid_pts ++;
                    }
                    fprintf(gridfile, "\n"); //new line after z over
                }
            }
            if (parameters.gridoutputformat=="txt") { // plain-text format; TODO more efficient format?
                for (int j = 0; j < N_y; j++) {
                    for(int k = 0; k < N_z; k++) {
                        count_grid_pts += 1;
                        fprintf(gridfile, "% 13.6E ", h_zy_energies[k + j * N_z]);
                        if ( k == (N_z - 1))
                            fprintf(gridfile, "\n"); // new line for every pencil of z's
                    }
                }
            }
            if (parameters.verbose) printf("   Sheet %d out of %d completed.\n", i, N_x);
        } // end x loop
    }
    else {
        printf("Starting...\n");
        for (int i = 0; i < N_x; i++) {
            ComputeGridSheetAccessibleOrNot <<<dimGrid, dimBlock>>> (d_z_f_gridpoints,
                                                                    d_y_f_gridpoints,
                                                                    d_zy_energies,
                                                                    d_framework_atoms,
                                                                    parameters,
                                                                    x_f_gridpoints[i]);
            CUDA_CALL( cudaPeekAtLastError() );   // catch launch-configuration errors
            CUDA_CALL( cudaDeviceSynchronize() ); // catch async execution errors
            // get energies from device (cudaMemcpy is blocking; no further sync needed)
            CUDA_CALL(cudaMemcpy(h_zy_energies, d_zy_energies, N_z * N_y * sizeof(double) , cudaMemcpyDeviceToHost));
            if (parameters.verbose) printf("   Sheet %d out of %d completed.\n", i, N_x);
            // write accessibility flags (0/1) to cube file
            for (int j = 0; j < N_y; j++) {
                int count = 0;
                for(int k = 0; k < N_z; k++) {
                    fprintf(gridfile, "%d ", int(h_zy_energies[k + j * N_z])); // kJ/mol
                    count ++;
                    if (count == 6) {
                        fprintf(gridfile, "\n");
                        count = 0; // reset counter
                    }
                    count_grid_pts ++;
                }
                fprintf(gridfile, "\n"); //new line after z over
            }
        } // end x loop
    }
    // sanity check: every grid point was written exactly once
    assert(count_grid_pts == (N_x * N_y * N_z));
    double sim_time = ReadTimer() - t0;
    fprintf(outputfile, "    Time to write grid: %f s\n", sim_time);
    if (parameters.verbose) printf("Completed grid writing! Freeing up memory in GPU...\n");
    //
    // Free memory, close files
    //
    cudaFree(d_framework_atoms);
    cudaFree(d_zy_energies);
    cudaFree(d_z_f_gridpoints);
    cudaFree(d_y_f_gridpoints);
    free(framework_atoms);
    free(h_zy_energies); // previously leaked
    free(x_f_gridpoints); free(y_f_gridpoints); free(z_f_gridpoints);
    fclose(outputfile); fclose(gridfile);
    return EXIT_SUCCESS;
}
|
e0866091125f91bea838c0af304cc99391dff389.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#include <cstdlib>
#include <memory>
#include <Eigen/Core>
#include <Eigen/Dense>
#include "DataFormats/SoATemplate/interface/SoALayout.h"
#include "DataFormats/SoATemplate/interface/SoAView.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
// Test SoA stores and view.
// Use cases
// Multiple stores in a buffer
// Scalars, Columns of scalars and of Eigen vectors
// View to each of them, from one and multiple stores.
GENERATE_SOA_LAYOUT(SoAHostDeviceLayoutTemplate,
/*SoAHostDeviceViewTemplate,*/
// predefined static scalars
// size_t size;
// size_t alignment;
// columns: one value per element
SOA_COLUMN(double, x),
SOA_COLUMN(double, y),
SOA_COLUMN(double, z),
SOA_EIGEN_COLUMN(Eigen::Vector3d, a),
SOA_EIGEN_COLUMN(Eigen::Vector3d, b),
SOA_EIGEN_COLUMN(Eigen::Vector3d, r),
// scalars: one value for the whole structure
SOA_SCALAR(const char*, description),
SOA_SCALAR(uint32_t, someNumber))
using SoAHostDeviceLayout = SoAHostDeviceLayoutTemplate<>;
using SoAHostDeviceView = SoAHostDeviceLayout::View;
using SoAHostDeviceConstView = SoAHostDeviceLayout::ConstView;
GENERATE_SOA_LAYOUT(SoADeviceOnlyLayoutTemplate,
/*SoADeviceOnlyViewTemplate,*/
SOA_COLUMN(uint16_t, color),
SOA_COLUMN(double, value),
SOA_COLUMN(double*, py),
SOA_COLUMN(uint32_t, count),
SOA_COLUMN(uint32_t, anotherCount))
using SoADeviceOnlyLayout = SoADeviceOnlyLayoutTemplate<>;
using SoADeviceOnlyView = SoADeviceOnlyLayout::View;
// A 1 to 1 view of the store (except for unsupported types).
GENERATE_SOA_VIEW(SoAFullDeviceConstViewTemplate,
SoAFullDeviceViewTemplate,
SOA_VIEW_LAYOUT_LIST(SOA_VIEW_LAYOUT(SoAHostDeviceLayout, soaHD),
SOA_VIEW_LAYOUT(SoADeviceOnlyLayout, soaDO)),
SOA_VIEW_VALUE_LIST(SOA_VIEW_VALUE(soaHD, x),
SOA_VIEW_VALUE(soaHD, y),
SOA_VIEW_VALUE(soaHD, z),
SOA_VIEW_VALUE(soaDO, color),
SOA_VIEW_VALUE(soaDO, value),
SOA_VIEW_VALUE(soaDO, py),
SOA_VIEW_VALUE(soaDO, count),
SOA_VIEW_VALUE(soaDO, anotherCount),
SOA_VIEW_VALUE(soaHD, description),
SOA_VIEW_VALUE(soaHD, someNumber)))
using SoAFullDeviceView =
SoAFullDeviceViewTemplate<cms::soa::CacheLineSize::NvidiaGPU, cms::soa::AlignmentEnforcement::enforced>;
// Eigen cross product kernel (on store)
// Computes r = a x b (Eigen cross product) for every element of the SoA.
// One thread per element; launch with at least numElements threads — threads
// past the end do nothing.
__global__ void crossProduct(SoAHostDeviceView soa, const unsigned int numElements) {
  const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < numElements) {
    auto element = soa[idx];
    element.r() = element.a().cross(element.b());
  }
}
// Device-only producer kernel
__global__ void producerKernel(SoAFullDeviceView soa, const unsigned int numElements) {
  // One thread per SoA element; threads beyond the range exit.
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= numElements)
    return;
  auto si = soa[i];
  // Mask the color with a shifted 0x55 pattern. NOTE(review): sizeof(si.color())
  // is sizeof(uint16_t) == 2, so the shift amount (i % 1) is always 0; presumably
  // a bit count (sizeof * 8) was intended. The host-side validation loop uses the
  // identical expression, so the test stays self-consistent — confirm intent.
  si.color() &= 0x55 << i % (sizeof(si.color()) - sizeof(char));
  // Euclidean norm of (x, y, z), stored into the device-only value column.
  si.value() = sqrt(si.x() * si.x() + si.y() * si.y() + si.z() * si.z());
}
// Device-only consumer with result in host-device area
// Reads the device-only columns (color, value) and writes their product into
// the host-device column x. One thread per element; out-of-range threads idle.
__global__ void consumerKernel(SoAFullDeviceView soa, const unsigned int numElements) {
  const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < numElements) {
    auto element = soa[idx];
    element.x() = element.color() * element.value();
  }
}
// Get a view like the default, except for range checking
using RangeCheckingHostDeviceView =
SoAHostDeviceLayout::ViewTemplate<SoAHostDeviceView::restrictQualify, cms::soa::RangeChecking::enabled>;
// We expect to just run one thread.
__global__ void rangeCheckKernel(RangeCheckingHostDeviceView soa) {
  // Deliberately index one element past the end: with RangeChecking::enabled
  // this access must abort the kernel, so the second printf ("Fail: ...")
  // should never be reached.
  printf("About to fail range-check (operator[]) in CUDA thread: %d\n", threadIdx.x);
  [[maybe_unused]] auto si = soa[soa.metadata().size()];
  printf("Fail: range-check failure should have stopped the kernel.\n");
}
// Test driver: fills a host-side SoA, runs the cross-product / producer /
// consumer kernels on the device, validates the results on the host, and
// finally verifies that range-checking views throw on the host and trap
// inside a kernel.
int main(void) {
  cms::cudatest::requireDevices();
  hipStream_t stream;
  cudaCheck(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
  // Non-aligned number of elements to check alignment features.
  constexpr unsigned int numElements = 65537;
  // Allocate buffer and store on host
  size_t hostDeviceSize = SoAHostDeviceLayout::computeDataSize(numElements);
  std::byte* h_buf = nullptr;
  cudaCheck(hipHostMalloc(&h_buf, hostDeviceSize));
  SoAHostDeviceLayout h_soahdLayout(h_buf, numElements);
  SoAHostDeviceView h_soahd(h_soahdLayout);
  SoAHostDeviceConstView h_soahd_c(h_soahdLayout);
  // Allocate buffer, stores and views on the device (single, shared buffer).
  // NOTE(review): hipHostMalloc allocates pinned *host* memory here too;
  // presumably the device reaches it via unified addressing — confirm intent.
  size_t deviceOnlySize = SoADeviceOnlyLayout::computeDataSize(numElements);
  std::byte* d_buf = nullptr;
  cudaCheck(hipHostMalloc(&d_buf, hostDeviceSize + deviceOnlySize));
  SoAHostDeviceLayout d_soahdLayout(d_buf, numElements);
  // The device-only layout lives directly after the host-device layout in d_buf.
  SoADeviceOnlyLayout d_soadoLayout(d_soahdLayout.metadata().nextByte(), numElements);
  SoAHostDeviceView d_soahdView(d_soahdLayout);
  SoAFullDeviceView d_soaFullView(d_soahdLayout, d_soadoLayout);
  // Assert column alignments
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_x()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_y()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_z()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_a()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_b()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_r()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_description()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_someNumber()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_x()) % decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_y()) % decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_z()) % decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_a()) % decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_b()) % decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_r()) % decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_description()) %
                  decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_someNumber()) %
                  decltype(d_soahdLayout)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_color()) % decltype(d_soadoLayout)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_value()) % decltype(d_soadoLayout)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_py()) % decltype(d_soadoLayout)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_count()) % decltype(d_soadoLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_anotherCount()) %
                  decltype(d_soadoLayout)::alignment);
  // Views should get the same alignment as the stores they refer to
  assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_x()) % decltype(d_soaFullView)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_y()) % decltype(d_soaFullView)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_z()) % decltype(d_soaFullView)::alignment);
  // Limitation of views: we have to get scalar member addresses via metadata.
  assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_description()) %
                  decltype(d_soaFullView)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_someNumber()) %
                  decltype(d_soaFullView)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_color()) % decltype(d_soaFullView)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_value()) % decltype(d_soaFullView)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_py()) % decltype(d_soaFullView)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_count()) % decltype(d_soaFullView)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_anotherCount()) %
                  decltype(d_soaFullView)::alignment);
  // Initialize and fill the host buffer
  std::memset(h_soahdLayout.metadata().data(), 0, hostDeviceSize);
  for (size_t i = 0; i < numElements; ++i) {
    auto si = h_soahd[i];
    // Odd elements exercise whole-row tuple assignment; even elements exercise
    // per-column accessors (including Eigen component access).
    // elements are: x, y, z, a, b, r
    auto v1 = 1.0 * i + 1.0;
    auto v2 = 2.0 * i;
    auto v3 = 3.0 * i - 1.0;
    if (i % 2) {
      si = {v1, v2, v3, {v1, v2, v3}, {v3, v2, v1}, {0, 0, 0}};
    } else {
      si.x() = si.a()(0) = si.b()(2) = v1;
      si.y() = si.a()(1) = si.b()(1) = v2;
      si.z() = si.a()(2) = si.b()(0) = v3;
    }
  }
  // Exercise scalar-member access through a reference.
  auto& sn = h_soahd.someNumber();
  sn = numElements + 2;
  // Push to device
  cudaCheck(hipMemcpyAsync(d_buf, h_buf, hostDeviceSize, hipMemcpyDefault, stream));
  // Process on device
  hipLaunchKernelGGL(( crossProduct), dim3((numElements + 255) / 256), dim3(256), 0, stream, d_soahdView, numElements);
  // Paint the device only with 0xFF initially
  cudaCheck(hipMemsetAsync(d_soadoLayout.metadata().data(), 0xFF, d_soadoLayout.metadata().byteSize(), stream));
  // Produce to the device only area
  hipLaunchKernelGGL(( producerKernel), dim3((numElements + 255) / 256), dim3(256), 0, stream, d_soaFullView, numElements);
  // Consume the device only area and generate a result on the host-device area
  hipLaunchKernelGGL(( consumerKernel), dim3((numElements + 255) / 256), dim3(256), 0, stream, d_soaFullView, numElements);
  // Get result back
  cudaCheck(hipMemcpyAsync(h_buf, d_buf, hostDeviceSize, hipMemcpyDefault, stream));
  // Wait and validate.
  cudaCheck(hipStreamSynchronize(stream));
  for (size_t i = 0; i < numElements; ++i) {
    auto si = h_soahd_c[i];
    assert(si.r() == si.a().cross(si.b()));
    double initialX = 1.0 * i + 1.0;
    double initialY = 2.0 * i;
    double initialZ = 3.0 * i - 1.0;
    // Mirrors producerKernel's masking expression (sizeof(uint16_t) - sizeof(char)
    // == 1, so the shift amount i % 1 is always 0).
    uint16_t expectedColor = 0x55 << i % (sizeof(uint16_t) - sizeof(char));
    double expectedX = expectedColor * sqrt(initialX * initialX + initialY * initialY + initialZ * initialZ);
    // Tolerance-based comparison: host and device float paths need not agree bit-exactly.
    if (abs(si.x() - expectedX) / expectedX >= 2 * std::numeric_limits<double>::epsilon()) {
      std::cout << "X failed: for i=" << i << std::endl
                << "initialX=" << initialX << " initialY=" << initialY << " initialZ=" << initialZ << std::endl
                << "expectedX=" << expectedX << std::endl
                << "resultX=" << si.x() << " resultY=" << si.y() << " resultZ=" << si.z() << std::endl
                << "relativeDiff=" << abs(si.x() - expectedX) / expectedX
                << " epsilon=" << std::numeric_limits<double>::epsilon() << std::endl;
      assert(false);
    }
  }
  // Validation of range checking
  try {
    // Get a view like the default, except for range checking
    SoAHostDeviceLayout::ViewTemplate<SoAHostDeviceView::restrictQualify, cms::soa::RangeChecking::enabled>
        soa1viewRangeChecking(h_soahdLayout);
    // This should throw an exception
    [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()];
    std::cout << "Fail: expected range-check exception (operator[]) not caught on the host." << std::endl;
    assert(false);
  } catch (const std::out_of_range&) {
    std::cout << "Pass: expected range-check exception (operator[]) successfully caught on the host." << std::endl;
  }
  try {
    // Get a view like the default, except for range checking
    SoAHostDeviceLayout::ViewTemplate<SoAHostDeviceView::restrictQualify, cms::soa::RangeChecking::enabled>
        soa1viewRangeChecking(h_soahdLayout);
    // This should throw an exception
    [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()];
    std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host." << std::endl;
    assert(false);
  } catch (const std::out_of_range&) {
    std::cout << "Pass: expected range-check exception (view-level index access) successfully caught on the host."
              << std::endl;
  }
  // Validation of range checking in a kernel
  // Get a view like the default one, except for range checking
  RangeCheckingHostDeviceView soa1viewRangeChecking(d_soahdLayout);
  // This should throw an exception in the kernel
  hipLaunchKernelGGL(( rangeCheckKernel), dim3(1), dim3(1), 0, stream, soa1viewRangeChecking);
  // Wait and confirm that the CUDA kernel failed
  try {
    cudaCheck(hipStreamSynchronize(stream));
    std::cout << "Fail: expected range-check exception not caught while executing the kernel." << std::endl;
    assert(false);
  } catch (const std::runtime_error&) {
    std::cout << "Pass: expected range-check exception caught while executing the kernel." << std::endl;
  }
  // NOTE(review): h_buf, d_buf and the stream are not released; the process
  // exits here and the device context is expected to be unusable after the
  // deliberate in-kernel range-check failure above.
  std::cout << "OK" << std::endl;
}
| e0866091125f91bea838c0af304cc99391dff389.cu | #include <cassert>
#include <cstdlib>
#include <memory>
#include <Eigen/Core>
#include <Eigen/Dense>
#include "DataFormats/SoATemplate/interface/SoALayout.h"
#include "DataFormats/SoATemplate/interface/SoAView.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
// Test SoA stores and view.
// Use cases
// Multiple stores in a buffer
// Scalars, Columns of scalars and of Eigen vectors
// View to each of them, from one and multiple stores.
GENERATE_SOA_LAYOUT(SoAHostDeviceLayoutTemplate,
/*SoAHostDeviceViewTemplate,*/
// predefined static scalars
// size_t size;
// size_t alignment;
// columns: one value per element
SOA_COLUMN(double, x),
SOA_COLUMN(double, y),
SOA_COLUMN(double, z),
SOA_EIGEN_COLUMN(Eigen::Vector3d, a),
SOA_EIGEN_COLUMN(Eigen::Vector3d, b),
SOA_EIGEN_COLUMN(Eigen::Vector3d, r),
// scalars: one value for the whole structure
SOA_SCALAR(const char*, description),
SOA_SCALAR(uint32_t, someNumber))
using SoAHostDeviceLayout = SoAHostDeviceLayoutTemplate<>;
using SoAHostDeviceView = SoAHostDeviceLayout::View;
using SoAHostDeviceConstView = SoAHostDeviceLayout::ConstView;
GENERATE_SOA_LAYOUT(SoADeviceOnlyLayoutTemplate,
/*SoADeviceOnlyViewTemplate,*/
SOA_COLUMN(uint16_t, color),
SOA_COLUMN(double, value),
SOA_COLUMN(double*, py),
SOA_COLUMN(uint32_t, count),
SOA_COLUMN(uint32_t, anotherCount))
using SoADeviceOnlyLayout = SoADeviceOnlyLayoutTemplate<>;
using SoADeviceOnlyView = SoADeviceOnlyLayout::View;
// A 1 to 1 view of the store (except for unsupported types).
GENERATE_SOA_VIEW(SoAFullDeviceConstViewTemplate,
SoAFullDeviceViewTemplate,
SOA_VIEW_LAYOUT_LIST(SOA_VIEW_LAYOUT(SoAHostDeviceLayout, soaHD),
SOA_VIEW_LAYOUT(SoADeviceOnlyLayout, soaDO)),
SOA_VIEW_VALUE_LIST(SOA_VIEW_VALUE(soaHD, x),
SOA_VIEW_VALUE(soaHD, y),
SOA_VIEW_VALUE(soaHD, z),
SOA_VIEW_VALUE(soaDO, color),
SOA_VIEW_VALUE(soaDO, value),
SOA_VIEW_VALUE(soaDO, py),
SOA_VIEW_VALUE(soaDO, count),
SOA_VIEW_VALUE(soaDO, anotherCount),
SOA_VIEW_VALUE(soaHD, description),
SOA_VIEW_VALUE(soaHD, someNumber)))
using SoAFullDeviceView =
SoAFullDeviceViewTemplate<cms::soa::CacheLineSize::NvidiaGPU, cms::soa::AlignmentEnforcement::enforced>;
// Eigen cross product kernel (on store)
// Computes r = a x b (Eigen cross product) for every element of the SoA.
// One thread per element; launch with at least numElements threads — threads
// past the end do nothing.
__global__ void crossProduct(SoAHostDeviceView soa, const unsigned int numElements) {
  const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < numElements) {
    auto element = soa[idx];
    element.r() = element.a().cross(element.b());
  }
}
// Device-only producer kernel
__global__ void producerKernel(SoAFullDeviceView soa, const unsigned int numElements) {
  // One thread per SoA element; threads beyond the range exit.
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= numElements)
    return;
  auto si = soa[i];
  // Mask the color with a shifted 0x55 pattern. NOTE(review): sizeof(si.color())
  // is sizeof(uint16_t) == 2, so the shift amount (i % 1) is always 0; presumably
  // a bit count (sizeof * 8) was intended. The host-side validation loop uses the
  // identical expression, so the test stays self-consistent — confirm intent.
  si.color() &= 0x55 << i % (sizeof(si.color()) - sizeof(char));
  // Euclidean norm of (x, y, z), stored into the device-only value column.
  si.value() = sqrt(si.x() * si.x() + si.y() * si.y() + si.z() * si.z());
}
// Device-only consumer with result in host-device area
// Reads the device-only columns (color, value) and writes their product into
// the host-device column x. One thread per element; out-of-range threads idle.
__global__ void consumerKernel(SoAFullDeviceView soa, const unsigned int numElements) {
  const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < numElements) {
    auto element = soa[idx];
    element.x() = element.color() * element.value();
  }
}
// Get a view like the default, except for range checking
using RangeCheckingHostDeviceView =
SoAHostDeviceLayout::ViewTemplate<SoAHostDeviceView::restrictQualify, cms::soa::RangeChecking::enabled>;
// We expect to just run one thread.
__global__ void rangeCheckKernel(RangeCheckingHostDeviceView soa) {
  // Deliberately index one element past the end: with RangeChecking::enabled
  // this access must abort the kernel, so the second printf ("Fail: ...")
  // should never be reached.
  printf("About to fail range-check (operator[]) in CUDA thread: %d\n", threadIdx.x);
  [[maybe_unused]] auto si = soa[soa.metadata().size()];
  printf("Fail: range-check failure should have stopped the kernel.\n");
}
// End-to-end exercise of the SoA layout/view machinery:
//  1. fill a host-device SoA on the host,
//  2. process it on the device (cross product, device-only produce/consume),
//  3. validate the round-tripped results on the host,
//  4. verify that out-of-range access is caught on both host and device.
int main(void) {
  cms::cudatest::requireDevices();
  cudaStream_t stream;
  cudaCheck(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
  // Non-aligned number of elements to check alignment features.
  constexpr unsigned int numElements = 65537;
  // Allocate buffer and store on host (pinned, so async copies can be used).
  size_t hostDeviceSize = SoAHostDeviceLayout::computeDataSize(numElements);
  std::byte* h_buf = nullptr;
  cudaCheck(cudaMallocHost(&h_buf, hostDeviceSize));
  SoAHostDeviceLayout h_soahdLayout(h_buf, numElements);
  SoAHostDeviceView h_soahd(h_soahdLayout);
  SoAHostDeviceConstView h_soahd_c(h_soahdLayout);
  // Allocate buffer, stores and views on the device (single, shared buffer).
  size_t deviceOnlySize = SoADeviceOnlyLayout::computeDataSize(numElements);
  std::byte* d_buf = nullptr;
  // Fixed: this buffer is consumed by kernels and device layouts, so it must
  // be device memory (cudaMalloc); it was allocated with cudaMallocHost,
  // which gives pinned *host* memory.
  cudaCheck(cudaMalloc(&d_buf, hostDeviceSize + deviceOnlySize));
  SoAHostDeviceLayout d_soahdLayout(d_buf, numElements);
  SoADeviceOnlyLayout d_soadoLayout(d_soahdLayout.metadata().nextByte(), numElements);
  SoAHostDeviceView d_soahdView(d_soahdLayout);
  SoAFullDeviceView d_soaFullView(d_soahdLayout, d_soadoLayout);
  // Assert column alignments
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_x()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_y()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_z()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_a()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_b()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_r()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_description()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_someNumber()) % decltype(h_soahd)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_x()) % decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_y()) % decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_z()) % decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_a()) % decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_b()) % decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_r()) % decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_description()) %
              decltype(d_soahdLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_someNumber()) %
              decltype(d_soahdLayout)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_color()) % decltype(d_soadoLayout)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_value()) % decltype(d_soadoLayout)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_py()) % decltype(d_soadoLayout)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_count()) % decltype(d_soadoLayout)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_anotherCount()) %
              decltype(d_soadoLayout)::alignment);
  // Views should get the same alignment as the stores they refer to
  assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_x()) % decltype(d_soaFullView)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_y()) % decltype(d_soaFullView)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_z()) % decltype(d_soaFullView)::alignment);
  // Limitation of views: we have to get scalar member addresses via metadata.
  assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_description()) %
              decltype(d_soaFullView)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_someNumber()) %
              decltype(d_soaFullView)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_color()) % decltype(d_soaFullView)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_value()) % decltype(d_soaFullView)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_py()) % decltype(d_soaFullView)::alignment);
  assert(0 ==
         reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_count()) % decltype(d_soaFullView)::alignment);
  assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_anotherCount()) %
              decltype(d_soaFullView)::alignment);
  // Initialize and fill the host buffer
  std::memset(h_soahdLayout.metadata().data(), 0, hostDeviceSize);
  for (size_t i = 0; i < numElements; ++i) {
    auto si = h_soahd[i];
    // Tuple assignment...
    // elements are: x, y, z, a, b, r
    auto v1 = 1.0 * i + 1.0;
    auto v2 = 2.0 * i;
    auto v3 = 3.0 * i - 1.0;
    // Alternate between whole-element tuple assignment and per-column writes
    // so both access paths get exercised.
    if (i % 2) {
      si = {v1, v2, v3, {v1, v2, v3}, {v3, v2, v1}, {0, 0, 0}};
    } else {
      si.x() = si.a()(0) = si.b()(2) = v1;
      si.y() = si.a()(1) = si.b()(1) = v2;
      si.z() = si.a()(2) = si.b()(0) = v3;
    }
  }
  auto& sn = h_soahd.someNumber();
  sn = numElements + 2;
  // Push to device
  cudaCheck(cudaMemcpyAsync(d_buf, h_buf, hostDeviceSize, cudaMemcpyDefault, stream));
  // Process on device
  crossProduct<<<(numElements + 255) / 256, 256, 0, stream>>>(d_soahdView, numElements);
  // Paint the device only with 0xFF initially
  cudaCheck(cudaMemsetAsync(d_soadoLayout.metadata().data(), 0xFF, d_soadoLayout.metadata().byteSize(), stream));
  // Produce to the device only area
  producerKernel<<<(numElements + 255) / 256, 256, 0, stream>>>(d_soaFullView, numElements);
  // Consume the device only area and generate a result on the host-device area
  consumerKernel<<<(numElements + 255) / 256, 256, 0, stream>>>(d_soaFullView, numElements);
  // Get result back
  cudaCheck(cudaMemcpyAsync(h_buf, d_buf, hostDeviceSize, cudaMemcpyDefault, stream));
  // Wait and validate.
  cudaCheck(cudaStreamSynchronize(stream));
  for (size_t i = 0; i < numElements; ++i) {
    auto si = h_soahd_c[i];
    assert(si.r() == si.a().cross(si.b()));
    double initialX = 1.0 * i + 1.0;
    double initialY = 2.0 * i;
    double initialZ = 3.0 * i - 1.0;
    // NOTE(review): the shift amount is i % (sizeof(uint16_t) - sizeof(char))
    // == i % 1 == 0, so this is always 0x55; it matches producerKernel's
    // identical expression, but the intent looks suspicious — confirm.
    uint16_t expectedColor = 0x55 << i % (sizeof(uint16_t) - sizeof(char));
    double expectedX = expectedColor * sqrt(initialX * initialX + initialY * initialY + initialZ * initialZ);
    if (abs(si.x() - expectedX) / expectedX >= 2 * std::numeric_limits<double>::epsilon()) {
      std::cout << "X failed: for i=" << i << std::endl
                << "initialX=" << initialX << " initialY=" << initialY << " initialZ=" << initialZ << std::endl
                << "expectedX=" << expectedX << std::endl
                << "resultX=" << si.x() << " resultY=" << si.y() << " resultZ=" << si.z() << std::endl
                << "relativeDiff=" << abs(si.x() - expectedX) / expectedX
                << " epsilon=" << std::numeric_limits<double>::epsilon() << std::endl;
      assert(false);
    }
  }
  // Validation of range checking
  try {
    // Get a view like the default, except for range checking
    SoAHostDeviceLayout::ViewTemplate<SoAHostDeviceView::restrictQualify, cms::soa::RangeChecking::enabled>
        soa1viewRangeChecking(h_soahdLayout);
    // This should throw an exception
    [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()];
    std::cout << "Fail: expected range-check exception (operator[]) not caught on the host." << std::endl;
    assert(false);
  } catch (const std::out_of_range&) {
    std::cout << "Pass: expected range-check exception (operator[]) successfully caught on the host." << std::endl;
  }
  try {
    // Get a view like the default, except for range checking
    SoAHostDeviceLayout::ViewTemplate<SoAHostDeviceView::restrictQualify, cms::soa::RangeChecking::enabled>
        soa1viewRangeChecking(h_soahdLayout);
    // This should throw an exception
    [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()];
    std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host." << std::endl;
    assert(false);
  } catch (const std::out_of_range&) {
    std::cout << "Pass: expected range-check exception (view-level index access) successfully caught on the host."
              << std::endl;
  }
  // Validation of range checking in a kernel
  // Get a view like the default one, except for range checking
  RangeCheckingHostDeviceView soa1viewRangeChecking(d_soahdLayout);
  // This should throw an exception in the kernel
  rangeCheckKernel<<<1, 1, 0, stream>>>(soa1viewRangeChecking);
  // Wait and confirm that the CUDA kernel failed.  No cleanup is attempted
  // after this point: the device-side failure leaves the CUDA context in an
  // error state, so any further runtime call would fail anyway.
  try {
    cudaCheck(cudaStreamSynchronize(stream));
    std::cout << "Fail: expected range-check exception not caught while executing the kernel." << std::endl;
    assert(false);
  } catch (const std::runtime_error&) {
    std::cout << "Pass: expected range-check exception caught while executing the kernel." << std::endl;
  }
  std::cout << "OK" << std::endl;
}
|
24fb0aadbd9c46e9c2d49d439901ef7a664d74fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDACore/cudaCheck.h"
#include "CUDACore/AtomicPairCounter.h"
#include "CUDACore/cuda_assert.h"
// Each thread reserves between 1 and 6 slots in the shared container via the
// atomic pair counter, records where its range starts in ind[], and tags
// every reserved slot of cont[] with its own thread index.
__global__ void update(AtomicPairCounter *dc, uint32_t *ind, uint32_t *cont, uint32_t n) {
  const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= n)
    return;
  const auto nSlots = tid % 11 % 6 + 1;  // in [1, 6], never 0
  const auto range = dc->add(nSlots);
  assert(range.m < n);
  ind[range.m] = range.n;
  for (auto j = range.n; j < range.n + nSlots; ++j)
    cont[j] = tid;
}
// Single-thread kernel: checks that all n ranges were recorded and writes the
// total number of filled container entries as the end marker ind[n].
__global__ void finalize(AtomicPairCounter const *dc, uint32_t *ind, uint32_t *cont, uint32_t n) {
  auto const counters = dc->get();
  assert(counters.m == n);
  ind[n] = counters.n;
}
// One thread per range: checks global invariants (start marker, counter
// totals) and that every slot of this thread's range carries the same owner.
__global__ void verify(AtomicPairCounter const *dc, uint32_t const *ind, uint32_t const *cont, uint32_t n) {
  const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= n)
    return;
  assert(0 == ind[0]);
  assert(dc->get().m == n);
  assert(ind[n] == dc->get().n);
  auto cursor = ind[tid];
  const auto rangeEnd = ind[tid + 1];
  const auto owner = cont[cursor];
  assert(owner < n);
  while (++cursor < rangeEnd)
    assert(cont[cursor] == owner);
}
#include <iostream>
// Host driver: zero-initializes an AtomicPairCounter on the device, runs the
// update/finalize/verify kernels on 10000 elements, and prints the final
// counter state.
int main() {
  AtomicPairCounter *dc_d;
  cudaCheck(hipMalloc(&dc_d, sizeof(AtomicPairCounter)));
  cudaCheck(hipMemset(dc_d, 0, sizeof(AtomicPairCounter)));
  std::cout << "size " << sizeof(AtomicPairCounter) << std::endl;
  constexpr uint32_t N = 20000;
  constexpr uint32_t M = N * 6;  // worst case: 6 container slots per element
  uint32_t *n_d, *m_d;
  // sizeof(uint32_t) matches the pointee type (was sizeof(int); same size,
  // but now consistent with the declarations).
  cudaCheck(hipMalloc(&n_d, N * sizeof(uint32_t)));
  // hipMemset(n_d, 0, N*sizeof(int));
  cudaCheck(hipMalloc(&m_d, M * sizeof(uint32_t)));
  // Check each launch: kernel launch errors are otherwise silent.
  hipLaunchKernelGGL(( update), dim3(2000), dim3(512), 0, 0, dc_d, n_d, m_d, 10000);
  cudaCheck(hipGetLastError());
  hipLaunchKernelGGL(( finalize), dim3(1), dim3(1), 0, 0, dc_d, n_d, m_d, 10000);
  cudaCheck(hipGetLastError());
  hipLaunchKernelGGL(( verify), dim3(2000), dim3(512), 0, 0, dc_d, n_d, m_d, 10000);
  cudaCheck(hipGetLastError());
  AtomicPairCounter dc;
  // Blocking copy; also synchronizes with the kernels above.
  cudaCheck(hipMemcpy(&dc, dc_d, sizeof(AtomicPairCounter), hipMemcpyDeviceToHost));
  std::cout << dc.get().n << ' ' << dc.get().m << std::endl;
  // Release device allocations (previously leaked).
  cudaCheck(hipFree(dc_d));
  cudaCheck(hipFree(n_d));
  cudaCheck(hipFree(m_d));
  return 0;
}
| 24fb0aadbd9c46e9c2d49d439901ef7a664d74fe.cu | #include "CUDACore/cudaCheck.h"
#include "CUDACore/AtomicPairCounter.h"
#include "CUDACore/cuda_assert.h"
// Each thread reserves between 1 and 6 slots in the shared container via the
// atomic pair counter, records where its range starts in ind[], and tags
// every reserved slot of cont[] with its own thread index.
__global__ void update(AtomicPairCounter *dc, uint32_t *ind, uint32_t *cont, uint32_t n) {
  const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= n)
    return;
  const auto nSlots = tid % 11 % 6 + 1;  // in [1, 6], never 0
  const auto range = dc->add(nSlots);
  assert(range.m < n);
  ind[range.m] = range.n;
  for (auto j = range.n; j < range.n + nSlots; ++j)
    cont[j] = tid;
}
// Single-thread kernel: checks that all n ranges were recorded and writes the
// total number of filled container entries as the end marker ind[n].
__global__ void finalize(AtomicPairCounter const *dc, uint32_t *ind, uint32_t *cont, uint32_t n) {
  auto const counters = dc->get();
  assert(counters.m == n);
  ind[n] = counters.n;
}
// One thread per range: checks global invariants (start marker, counter
// totals) and that every slot of this thread's range carries the same owner.
__global__ void verify(AtomicPairCounter const *dc, uint32_t const *ind, uint32_t const *cont, uint32_t n) {
  const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= n)
    return;
  assert(0 == ind[0]);
  assert(dc->get().m == n);
  assert(ind[n] == dc->get().n);
  auto cursor = ind[tid];
  const auto rangeEnd = ind[tid + 1];
  const auto owner = cont[cursor];
  assert(owner < n);
  while (++cursor < rangeEnd)
    assert(cont[cursor] == owner);
}
#include <iostream>
// Host driver: zero-initializes an AtomicPairCounter on the device, runs the
// update/finalize/verify kernels on 10000 elements, and prints the final
// counter state.
int main() {
  AtomicPairCounter *dc_d;
  cudaCheck(cudaMalloc(&dc_d, sizeof(AtomicPairCounter)));
  cudaCheck(cudaMemset(dc_d, 0, sizeof(AtomicPairCounter)));
  std::cout << "size " << sizeof(AtomicPairCounter) << std::endl;
  constexpr uint32_t N = 20000;
  constexpr uint32_t M = N * 6;  // worst case: 6 container slots per element
  uint32_t *n_d, *m_d;
  // sizeof(uint32_t) matches the pointee type (was sizeof(int); same size,
  // but now consistent with the declarations).
  cudaCheck(cudaMalloc(&n_d, N * sizeof(uint32_t)));
  // cudaMemset(n_d, 0, N*sizeof(int));
  cudaCheck(cudaMalloc(&m_d, M * sizeof(uint32_t)));
  // Check each launch: kernel launch errors are otherwise silent.
  update<<<2000, 512>>>(dc_d, n_d, m_d, 10000);
  cudaCheck(cudaGetLastError());
  finalize<<<1, 1>>>(dc_d, n_d, m_d, 10000);
  cudaCheck(cudaGetLastError());
  verify<<<2000, 512>>>(dc_d, n_d, m_d, 10000);
  cudaCheck(cudaGetLastError());
  AtomicPairCounter dc;
  // Blocking copy; also synchronizes with the kernels above.
  cudaCheck(cudaMemcpy(&dc, dc_d, sizeof(AtomicPairCounter), cudaMemcpyDeviceToHost));
  std::cout << dc.get().n << ' ' << dc.get().m << std::endl;
  // Release device allocations (previously leaked).
  cudaCheck(cudaFree(dc_d));
  cudaCheck(cudaFree(n_d));
  cudaCheck(cudaFree(m_d));
  return 0;
}
|
dc9874e0d2984887c8740fb1d71bacf5fddfc7be.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA runtime
#include <hip/hip_runtime.h>
#include <typedef.hpp>
#include <cuda_macro.hpp>
#include <thrust/device_ptr.h>
#include <thrust/tuple.h>
#include <thrust/for_each.h>
#include <thrust/transform.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/system/hip/execution_policy.h>
#include "PlaneCAvoid_cuda.hpp"
#include <vector>
#include <algorithm>
#include <helperOC/DynSys/DynSys/DynSys.hpp>
#include <Core/UVec.hpp>
#include <Core/CudaStream.hpp>
#if defined(WITH_GPU)
#if defined(USER_DEFINED_GPU_DYNSYS_FUNC)
namespace PlaneCAvoid_CUDA {
// Per-element functor: selects the extremal (bang-bang) controls for
// dimensions 0 and 1 from the sign of two derivative-dependent terms.
// Tuple layout: <0>=uOpt0 (out), <1>=uOpt1 (out), <2>=y0, <3>=y1,
//               <4>=deriv0, <5>=deriv1, <6>=deriv2 (inputs).
struct Get_optCtrl_dim0dim1_D {
public:
  const FLOAT_TYPE vrangeA0;  // control value picked when det0 < 0
  const FLOAT_TYPE vrangeA1;  // control value picked when det0 >= 0
  const FLOAT_TYPE wMax;      // magnitude of the dim-1 control
  Get_optCtrl_dim0dim1_D(
      const FLOAT_TYPE vrangeA0,
      const FLOAT_TYPE vrangeA1,
      const FLOAT_TYPE wMax) :
      vrangeA0(vrangeA0),
      vrangeA1(vrangeA1),
      wMax(wMax) {}
  template<typename Tuple>
  __host__ __device__
  void operator()(Tuple v) const
  {
    const FLOAT_TYPE y0 = thrust::get<2>(v);
    const FLOAT_TYPE y1 = thrust::get<3>(v);
    const FLOAT_TYPE deriv0 = thrust::get<4>(v);
    const FLOAT_TYPE deriv1 = thrust::get<5>(v);
    const FLOAT_TYPE deriv2 = thrust::get<6>(v);
    // Sign-deciding terms for each control dimension.
    const FLOAT_TYPE det0 = -deriv0;
    const FLOAT_TYPE det1 = deriv0 * y1 - deriv1 * y0 - deriv2;
    thrust::get<0>(v) = (det0 >= 0) ? vrangeA1 : vrangeA0;
    thrust::get<1>(v) = (det1 >= 0) ? wMax : -wMax;
  }
};
// Scalar-derivative variant: returns the bang-bang dim-1 control, chosen by
// the sign of d0*y1 - d1*y0 - d2 (the derivatives d0..d2 are fixed scalars).
struct Get_optCtrl_dim1_d {
public:
  const FLOAT_TYPE wMax;
  const FLOAT_TYPE d0;
  const FLOAT_TYPE d1;
  const FLOAT_TYPE d2;
  Get_optCtrl_dim1_d(const FLOAT_TYPE wMax, const FLOAT_TYPE d0, const FLOAT_TYPE d1, const FLOAT_TYPE d2)
      : wMax(wMax), d0(d0), d1(d1), d2(d2) {}
  __host__ __device__
  FLOAT_TYPE operator()(const FLOAT_TYPE y0, const FLOAT_TYPE y1) const {
    if (d0 * y1 - d1 * y0 - d2 >= 0)
      return wMax;
    return -wMax;
  }
};
// Computes the optimal control on the GPU: u_uvecs[0] receives the dim-0
// control, u_uvecs[1] the dim-1 control.  When the derivative vectors live on
// the device, both controls are computed element-wise in one fused
// thrust::for_each; when they are scalars, uOpt0 collapses to a single host
// value and only uOpt1 is computed on the device.
// Returns false (after printing an error) for an unknown uMode.
bool optCtrl_execute_cuda(
    std::vector<beacls::UVec>& u_uvecs,
    const std::vector<beacls::UVec>& x_uvecs,
    const std::vector<beacls::UVec>& deriv_uvecs,
    const FLOAT_TYPE wMaxA,
    const std::vector<FLOAT_TYPE>& vRangeA,
    const helperOC::DynSys_UMode_Type uMode
    )
{
  bool result = true;
  // Output sizes follow their sources: uOpt0 matches the derivative layout
  // (possibly scalar), uOpt1 matches the grid column.
  beacls::reallocateAsSrc(u_uvecs[0], deriv_uvecs[0]);
  beacls::reallocateAsSrc(u_uvecs[1], x_uvecs[0]);
  FLOAT_TYPE* uOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[0]).ptr();
  FLOAT_TYPE* uOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[1]).ptr();
  const FLOAT_TYPE* y0_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[0]).ptr();
  const FLOAT_TYPE* y1_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[1]).ptr();
  const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr();
  const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr();
  const FLOAT_TYPE* deriv2_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[2]).ptr();
  thrust::device_ptr<const FLOAT_TYPE> y0_dev_ptr = thrust::device_pointer_cast(y0_ptr);
  thrust::device_ptr<const FLOAT_TYPE> y1_dev_ptr = thrust::device_pointer_cast(y1_ptr);
  const FLOAT_TYPE vRangeA0 = vRangeA[0];
  const FLOAT_TYPE vRangeA1 = vRangeA[1];
  if ((uMode == helperOC::DynSys_UMode_Max) || (uMode == helperOC::DynSys_UMode_Min)) {
    // Min mode simply flips the extremal choices.
    const FLOAT_TYPE moded_wMaxA = (uMode == helperOC::DynSys_UMode_Max) ? wMaxA : -wMaxA;
    const FLOAT_TYPE moded_vRangeA0 = (uMode == helperOC::DynSys_UMode_Max) ? vRangeA0 : vRangeA1;
    const FLOAT_TYPE moded_vRangeA1 = (uMode == helperOC::DynSys_UMode_Max) ? vRangeA1 : vRangeA0;
    hipStream_t u_stream = beacls::get_stream(u_uvecs[1]);
    thrust::device_ptr<FLOAT_TYPE> uOpt1_dev_ptr = thrust::device_pointer_cast(uOpt1_ptr);
    if(is_cuda(deriv_uvecs[0]) && is_cuda(deriv_uvecs[1]) && is_cuda(deriv_uvecs[2])){
      // Per-element derivatives: fused element-wise computation of both
      // controls on uOpt1's stream; uOpt0 is chained onto the same stream.
      thrust::device_ptr<FLOAT_TYPE> uOpt0_dev_ptr = thrust::device_pointer_cast(uOpt0_ptr);
      u_uvecs[0].set_cudaStream(u_uvecs[1].get_cudaStream());
      thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr);
      thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr);
      thrust::device_ptr<const FLOAT_TYPE> deriv2_dev_ptr = thrust::device_pointer_cast(deriv2_ptr);
      auto src_dst_Tuple = thrust::make_tuple(uOpt0_dev_ptr, uOpt1_dev_ptr,
          y0_dev_ptr, y1_dev_ptr, deriv0_dev_ptr, deriv1_dev_ptr, deriv2_dev_ptr);
      auto src_dst_Iterator = thrust::make_zip_iterator(src_dst_Tuple);
      thrust::for_each(thrust::hip::par.on(u_stream),
          src_dst_Iterator, src_dst_Iterator + x_uvecs[0].size(),
          Get_optCtrl_dim0dim1_D(moded_vRangeA0, moded_vRangeA1, moded_wMaxA));
    }
    else {
      // Scalar derivatives: uOpt0 is a single host-side value; only the
      // state-dependent uOpt1 needs a device pass.
      const FLOAT_TYPE d0 = deriv0_ptr[0];
      const FLOAT_TYPE d1 = deriv1_ptr[0];
      const FLOAT_TYPE d2 = deriv2_ptr[0];
      const FLOAT_TYPE det0 = -d0;
      uOpt0_ptr[0] = (det0 >= 0) ? moded_vRangeA1 : moded_vRangeA0;
      thrust::transform(thrust::hip::par.on(u_stream),
          y0_dev_ptr, y0_dev_ptr + x_uvecs[0].size(), y1_dev_ptr, uOpt1_dev_ptr,
          Get_optCtrl_dim1_d(moded_wMaxA, d0, d1, d2));
    }
  }
  else {
    std::cerr << "Unknown uMode!: " << uMode << std::endl;
    result = false;
  }
  return result;
}
// Scalar-derivative variant: returns the extremal dim-0 disturbance, chosen
// by the sign of d0*cos(y2) + d1*sin(y2).
struct Get_optDstb_dim0_d {
public:
  const FLOAT_TYPE vrangeB0;
  const FLOAT_TYPE vrangeB1;
  const FLOAT_TYPE d0;
  const FLOAT_TYPE d1;
  Get_optDstb_dim0_d(const FLOAT_TYPE vrangeB0, const FLOAT_TYPE vrangeB1, const FLOAT_TYPE d0, const FLOAT_TYPE d1)
      : vrangeB0(vrangeB0), vrangeB1(vrangeB1), d0(d0), d1(d1) {}
  __host__ __device__
  FLOAT_TYPE operator()(const FLOAT_TYPE y2) const {
    FLOAT_TYPE s, c;
    sincos_float_type<FLOAT_TYPE>(y2, s, c);
    return (d0 * c + d1 * s >= 0) ? vrangeB1 : vrangeB0;
  }
};
// Per-element functor: fills all five disturbance components at once.
// Tuple layout: <0>..<4> = dOpt0..dOpt4 (out), <5>=y2, <6>..<8> = deriv0..2.
// dOpt2/dOpt3 follow the direction of (deriv0, deriv1) scaled to the combined
// bound; dOpt1/dOpt4 are bang-bang on the sign of deriv2.
struct Get_optDstb_dim0dim1dim2dim3dim4_D {
public:
  const FLOAT_TYPE vrangeB0;          // dim-0 value when det0 < 0
  const FLOAT_TYPE vrangeB1;          // dim-0 value when det0 >= 0
  const FLOAT_TYPE wMaxB;             // magnitude of the dim-1 disturbance
  const FLOAT_TYPE dMaxA_0_dMaxB_0;   // combined bound for dims 2 and 3
  const FLOAT_TYPE dMaxA_1_dMaxB_1;   // combined bound for dim 4
  Get_optDstb_dim0dim1dim2dim3dim4_D(
      const FLOAT_TYPE vrangeB0,
      const FLOAT_TYPE vrangeB1,
      const FLOAT_TYPE wMaxB,
      const FLOAT_TYPE dMaxA_0_dMaxB_0,
      const FLOAT_TYPE dMaxA_1_dMaxB_1) :
      vrangeB0(vrangeB0),
      vrangeB1(vrangeB1),
      wMaxB(wMaxB),
      dMaxA_0_dMaxB_0(dMaxA_0_dMaxB_0),
      dMaxA_1_dMaxB_1(dMaxA_1_dMaxB_1) {}
  template<typename Tuple>
  __host__ __device__
  void operator()(Tuple v) const
  {
    const FLOAT_TYPE y2 = thrust::get<5>(v);
    const FLOAT_TYPE deriv0 = thrust::get<6>(v);
    const FLOAT_TYPE deriv1 = thrust::get<7>(v);
    const FLOAT_TYPE deriv2 = thrust::get<8>(v);
    FLOAT_TYPE sin_y2;
    FLOAT_TYPE cos_y2;
    sincos_float_type<FLOAT_TYPE>(y2, sin_y2, cos_y2);
    const FLOAT_TYPE normDeriv = sqrt_float_type<FLOAT_TYPE>(deriv0 * deriv0 + deriv1 * deriv1);
    const FLOAT_TYPE det0 = deriv0 * cos_y2 + deriv1 * sin_y2;
    thrust::get<0>(v) = (det0 >= 0) ? vrangeB1 : vrangeB0;
    // Guard against a zero gradient: direction is undefined, output 0.
    if (normDeriv == 0) {
      thrust::get<2>(v) = 0;
      thrust::get<3>(v) = 0;
    } else {
      thrust::get<2>(v) = dMaxA_0_dMaxB_0 * deriv0 / normDeriv;
      thrust::get<3>(v) = dMaxA_0_dMaxB_0 * deriv1 / normDeriv;
    }
    if (deriv2 >= 0) {
      thrust::get<1>(v) = wMaxB;
      thrust::get<4>(v) = dMaxA_1_dMaxB_1;
    } else {
      thrust::get<1>(v) = -wMaxB;
      thrust::get<4>(v) = -dMaxA_1_dMaxB_1;
    }
  }
};
// Computes the optimal disturbance on the GPU: d_uvecs[0..4] receive the five
// disturbance components.  With per-element derivatives everything is
// computed in one fused thrust::for_each; with scalar derivatives only the
// y2-dependent dOpt0 needs a device pass and dOpt1..4 collapse to single
// host-side values.  Returns false for an unknown dMode.
bool optDstb_execute_cuda(
    std::vector<beacls::UVec>& d_uvecs,
    const std::vector<beacls::UVec>& x_uvecs,
    const std::vector<beacls::UVec>& deriv_uvecs,
    const std::vector<FLOAT_TYPE>& dMaxA,
    const std::vector<FLOAT_TYPE>& dMaxB,
    const std::vector<FLOAT_TYPE>& vRangeB,
    const FLOAT_TYPE wMaxB,
    const helperOC::DynSys_DMode_Type dMode
    )
{
  bool result = true;
  // dOpt0 follows the grid column (y2); dOpt1..4 follow the derivative
  // layout (possibly scalar).
  beacls::reallocateAsSrc(d_uvecs[0], x_uvecs[2]);
  beacls::reallocateAsSrc(d_uvecs[1], deriv_uvecs[0]);
  beacls::reallocateAsSrc(d_uvecs[2], deriv_uvecs[0]);
  beacls::reallocateAsSrc(d_uvecs[3], deriv_uvecs[0]);
  beacls::reallocateAsSrc(d_uvecs[4], deriv_uvecs[0]);
  FLOAT_TYPE* dOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[0]).ptr();
  FLOAT_TYPE* dOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[1]).ptr();
  FLOAT_TYPE* dOpt2_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[2]).ptr();
  FLOAT_TYPE* dOpt3_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[3]).ptr();
  FLOAT_TYPE* dOpt4_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[4]).ptr();
  const FLOAT_TYPE* y2_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[2]).ptr();
  const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr();
  const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr();
  const FLOAT_TYPE* deriv2_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[2]).ptr();
  const FLOAT_TYPE dMaxA_0 = dMaxA[0];
  const FLOAT_TYPE dMaxA_1 = dMaxA[1];
  const FLOAT_TYPE dMaxB_0 = dMaxB[0];
  const FLOAT_TYPE dMaxB_1 = dMaxB[1];
  const FLOAT_TYPE vRangeB0 = vRangeB[0];
  const FLOAT_TYPE vRangeB1 = vRangeB[1];
  // The two disturbance bounds act jointly; only their sums are needed.
  const FLOAT_TYPE dMaxA_0_dMaxB_0 = dMaxA_0 + dMaxB_0;
  const FLOAT_TYPE dMaxA_1_dMaxB_1 = dMaxA_1 + dMaxB_1;
  if ((dMode == helperOC::DynSys_DMode_Max) || (dMode == helperOC::DynSys_DMode_Min)) {
    // Min mode simply flips the extremal choices.
    const FLOAT_TYPE moded_wMaxB = (dMode == helperOC::DynSys_DMode_Max) ? wMaxB : -wMaxB;
    const FLOAT_TYPE moded_vRangeB0 = (dMode == helperOC::DynSys_DMode_Max) ? vRangeB0 : vRangeB1;
    const FLOAT_TYPE moded_vRangeB1 = (dMode == helperOC::DynSys_DMode_Max) ? vRangeB1 : vRangeB0;
    const FLOAT_TYPE moded_dMaxA_0_dMaxB_0 = (dMode == helperOC::DynSys_DMode_Max) ? dMaxA_0_dMaxB_0 : -dMaxA_0_dMaxB_0;
    const FLOAT_TYPE moded_dMaxA_1_dMaxB_1 = (dMode == helperOC::DynSys_DMode_Max) ? dMaxA_1_dMaxB_1 : -dMaxA_1_dMaxB_1;
    thrust::device_ptr<FLOAT_TYPE> dOpt0_dev_ptr = thrust::device_pointer_cast(dOpt0_ptr);
    hipStream_t d_stream = beacls::get_stream(d_uvecs[0]);
    thrust::device_ptr<const FLOAT_TYPE> y2_dev_ptr = thrust::device_pointer_cast(y2_ptr);
    if (beacls::is_cuda(deriv_uvecs[0]) && beacls::is_cuda(deriv_uvecs[1]) && beacls::is_cuda(deriv_uvecs[2])){
      // Per-element derivatives: all five outputs in one fused pass, chained
      // onto dOpt0's stream.
      thrust::device_ptr<FLOAT_TYPE> dOpt1_dev_ptr = thrust::device_pointer_cast(dOpt1_ptr);
      thrust::device_ptr<FLOAT_TYPE> dOpt2_dev_ptr = thrust::device_pointer_cast(dOpt2_ptr);
      thrust::device_ptr<FLOAT_TYPE> dOpt3_dev_ptr = thrust::device_pointer_cast(dOpt3_ptr);
      thrust::device_ptr<FLOAT_TYPE> dOpt4_dev_ptr = thrust::device_pointer_cast(dOpt4_ptr);
      thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr);
      thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr);
      thrust::device_ptr<const FLOAT_TYPE> deriv2_dev_ptr = thrust::device_pointer_cast(deriv2_ptr);
      d_uvecs[1].set_cudaStream(d_uvecs[0].get_cudaStream());
      d_uvecs[2].set_cudaStream(d_uvecs[0].get_cudaStream());
      d_uvecs[3].set_cudaStream(d_uvecs[0].get_cudaStream());
      d_uvecs[4].set_cudaStream(d_uvecs[0].get_cudaStream());
      auto dst_src_Tuple = thrust::make_tuple(
          dOpt0_dev_ptr, dOpt1_dev_ptr, dOpt2_dev_ptr, dOpt3_dev_ptr, dOpt4_dev_ptr,
          y2_dev_ptr, deriv0_dev_ptr, deriv1_dev_ptr, deriv2_dev_ptr);
      auto dst_src_Iterator = thrust::make_zip_iterator(dst_src_Tuple);
      thrust::for_each(thrust::hip::par.on(d_stream),
          dst_src_Iterator, dst_src_Iterator + deriv_uvecs[0].size(),
          Get_optDstb_dim0dim1dim2dim3dim4_D(moded_vRangeB0, moded_vRangeB1,
          moded_wMaxB, moded_dMaxA_0_dMaxB_0, moded_dMaxA_1_dMaxB_1));
    }
    else {
      // Scalar derivatives: only dOpt0 depends on y2; dOpt1..4 are scalars
      // computed on the host.
      const FLOAT_TYPE d0 = deriv0_ptr[0];
      const FLOAT_TYPE d1 = deriv1_ptr[0];
      const FLOAT_TYPE d2 = deriv2_ptr[0];
      thrust::transform(thrust::hip::par.on(d_stream),
          y2_dev_ptr, y2_dev_ptr + x_uvecs[2].size(), dOpt0_dev_ptr,
          Get_optDstb_dim0_d(moded_vRangeB0, moded_vRangeB1, d0, d1));
      const FLOAT_TYPE det1 = d2;
      const FLOAT_TYPE det4 = d2;
      dOpt1_ptr[0] = (det1 >= 0) ? moded_wMaxB : -moded_wMaxB;
      dOpt4_ptr[0] = (det4 >= 0) ? moded_dMaxA_1_dMaxB_1 : -moded_dMaxA_1_dMaxB_1;
      const FLOAT_TYPE denom = sqrt_float_type<FLOAT_TYPE>(d0 * d0 + d1 * d1);
      // Zero gradient: direction undefined, output 0.
      dOpt2_ptr[0] = (denom == 0) ? 0 : moded_dMaxA_0_dMaxB_0 * d0 / denom;
      dOpt3_ptr[0] = (denom == 0) ? 0 : moded_dMaxA_0_dMaxB_0 * d1 / denom;
    }
  }
  else {
    std::cerr << "Unknown dMode!: " << dMode << std::endl;
    result = false;
  }
  return result;
}
// Per-element functor computing all three state derivatives with per-element
// controls and disturbances.
// Tuple layout: <0>..<2> = dx0..dx2 (out), <3>..<5> = x0..x2,
//               <6>,<7> = u0,u1, <8>..<12> = d0..d4.
// NOTE(review): this tuple has 13 elements; classic thrust::tuple was limited
// to 10 (see the comment near the fused-call site below which splits work for
// that reason) — confirm this functor is actually instantiated anywhere.
struct Get_dynamics_dimAll_U0_U1_D0_D1_D2_D3_D4
{
  Get_dynamics_dimAll_U0_U1_D0_D1_D2_D3_D4(
  )
  {}
  template<typename Tuple>
  __host__ __device__
  void operator()(Tuple v) const
  {
    const FLOAT_TYPE x0 = thrust::get<3>(v);
    const FLOAT_TYPE x1 = thrust::get<4>(v);
    const FLOAT_TYPE x2 = thrust::get<5>(v);
    const FLOAT_TYPE u0 = thrust::get<6>(v);
    const FLOAT_TYPE u1 = thrust::get<7>(v);
    const FLOAT_TYPE d0 = thrust::get<8>(v);
    const FLOAT_TYPE d1 = thrust::get<9>(v);
    const FLOAT_TYPE d2 = thrust::get<10>(v);
    const FLOAT_TYPE d3 = thrust::get<11>(v);
    const FLOAT_TYPE d4 = thrust::get<12>(v);
    FLOAT_TYPE sin_x;
    FLOAT_TYPE cos_x;
    // Single fused evaluation of sin(x2) and cos(x2).
    sincos_float_type<FLOAT_TYPE>(x2, sin_x, cos_x);
    thrust::get<0>(v) = -u0 + d0 * cos_x + u1 * x1 + d2;
    thrust::get<1>(v) = d0 * sin_x - u1 * x0 + d3;
    thrust::get<2>(v) = d1 - u1 + d4;
  }
};
// Per-element functor for state derivatives 0 and 1 only (dimension 2 is
// handled by a separate pass — thrust::tuple is limited to 10 elements).
// Tuple layout: <0>,<1> = dx0,dx1 (out), <2>..<4> = x0..x2,
//               <5>,<6> = u0,u1, <7>..<9> = d0,d2,d3.
struct Get_dynamics_dim01_U0_U1_D0_D2_D3
{
  Get_dynamics_dim01_U0_U1_D0_D2_D3(
  )
  {}
  template<typename Tuple>
  __host__ __device__
  void operator()(Tuple v) const
  {
    const FLOAT_TYPE x0 = thrust::get<2>(v);
    const FLOAT_TYPE x1 = thrust::get<3>(v);
    const FLOAT_TYPE x2 = thrust::get<4>(v);
    const FLOAT_TYPE u0 = thrust::get<5>(v);
    const FLOAT_TYPE u1 = thrust::get<6>(v);
    const FLOAT_TYPE d0 = thrust::get<7>(v);
    const FLOAT_TYPE d2 = thrust::get<8>(v);
    const FLOAT_TYPE d3 = thrust::get<9>(v);
    FLOAT_TYPE sin_x;
    FLOAT_TYPE cos_x;
    // Single fused evaluation of sin(x2) and cos(x2).
    sincos_float_type<FLOAT_TYPE>(x2, sin_x, cos_x);
    thrust::get<0>(v) = -u0 + d0 * cos_x + u1 * x1 + d2;
    thrust::get<1>(v) = d0 * sin_x - u1 * x0 + d3;
  }
};
// Per-element functor computing all three state derivatives when u0 and
// d1..d4 are scalars (captured at construction) and only u1, d0 vary
// per element.
// Tuple layout: <0>..<2> = dx0..dx2 (out), <3>..<5> = x0..x2,
//               <6> = u1, <7> = d0.
struct Get_dynamics_dimAll_u0_U1_D0_d1_d2_d3_d4
{
  const FLOAT_TYPE u0;
  const FLOAT_TYPE d1;
  const FLOAT_TYPE d2;
  const FLOAT_TYPE d3;
  const FLOAT_TYPE d4;
  Get_dynamics_dimAll_u0_U1_D0_d1_d2_d3_d4(
    const FLOAT_TYPE u0,
    const FLOAT_TYPE d1,
    const FLOAT_TYPE d2,
    const FLOAT_TYPE d3,
    const FLOAT_TYPE d4
  ) : u0(u0), d1(d1), d2(d2), d3(d3), d4(d4) {}
  template<typename Tuple>
  __host__ __device__
  void operator()(Tuple v) const
  {
    const FLOAT_TYPE x0 = thrust::get<3>(v);
    const FLOAT_TYPE x1 = thrust::get<4>(v);
    const FLOAT_TYPE x2 = thrust::get<5>(v);
    const FLOAT_TYPE u1 = thrust::get<6>(v);
    const FLOAT_TYPE d0 = thrust::get<7>(v);
    FLOAT_TYPE sin_x;
    FLOAT_TYPE cos_x;
    // Single fused evaluation of sin(x2) and cos(x2).
    sincos_float_type<FLOAT_TYPE>(x2, sin_x, cos_x);
    thrust::get<0>(v) = -u0 + d0 * cos_x + u1 * x1 + d2;
    thrust::get<1>(v) = d0 * sin_x - u1 * x0 + d3;
    thrust::get<2>(v) = d1 - u1 + d4;
  }
};
// Derivative of state dimension 0, all inputs per-element.
// Tuple layout: <0>=x1, <1>=x2, <2>=u0, <3>=u1, <4>=d0, <5>=d2.
struct Get_dynamics_dim0_U0_U1_D0_D2 {
public:
  template<typename Tuple>
  __host__ __device__
  FLOAT_TYPE operator()(const Tuple &v) const
  {
    const FLOAT_TYPE pos1 = thrust::get<0>(v);
    const FLOAT_TYPE theta = thrust::get<1>(v);
    const FLOAT_TYPE ctrl0 = thrust::get<2>(v);
    const FLOAT_TYPE ctrl1 = thrust::get<3>(v);
    const FLOAT_TYPE dist0 = thrust::get<4>(v);
    const FLOAT_TYPE dist2 = thrust::get<5>(v);
    return -ctrl0 + dist0 * cos_float_type<FLOAT_TYPE>(theta) + ctrl1 * pos1 + dist2;
  }
};
// Derivative of state dimension 0 with scalar u0 and d2 captured at
// construction. Tuple layout: <0>=x1, <1>=x2, <2>=u1, <3>=d0.
struct Get_dynamics_dim0_u0_U1_D0_d2 {
public:
  const FLOAT_TYPE u0;
  const FLOAT_TYPE d2;
  Get_dynamics_dim0_u0_U1_D0_d2(const FLOAT_TYPE u0, const FLOAT_TYPE d2) : u0(u0), d2(d2) {}
  template<typename Tuple>
  __host__ __device__
  FLOAT_TYPE operator()(const Tuple &v) const
  {
    const FLOAT_TYPE pos1 = thrust::get<0>(v);
    const FLOAT_TYPE theta = thrust::get<1>(v);
    const FLOAT_TYPE ctrl1 = thrust::get<2>(v);
    const FLOAT_TYPE dist0 = thrust::get<3>(v);
    return -u0 + dist0 * cos_float_type<FLOAT_TYPE>(theta) + ctrl1 * pos1 + d2;
  }
};
// Derivative of state dimension 1, all inputs per-element.
// Tuple layout: <0>=x0, <1>=x2, <2>=u1, <3>=d0, <4>=d3.
struct Get_dynamics_dim1_U1_D0_D3 {
public:
  template<typename Tuple>
  __host__ __device__
  FLOAT_TYPE operator()(const Tuple &v) const
  {
    const FLOAT_TYPE pos0 = thrust::get<0>(v);
    const FLOAT_TYPE theta = thrust::get<1>(v);
    const FLOAT_TYPE ctrl1 = thrust::get<2>(v);
    const FLOAT_TYPE dist0 = thrust::get<3>(v);
    const FLOAT_TYPE dist3 = thrust::get<4>(v);
    return dist0 * sin_float_type<FLOAT_TYPE>(theta) - ctrl1 * pos0 + dist3;
  }
};
// Derivative of state dimension 1 with scalar d3 captured at construction.
// Tuple layout: <0>=x0, <1>=x2, <2>=u1, <3>=d0.
struct Get_dynamics_dim1_U1_D0_d3 {
public:
  const FLOAT_TYPE d3;
  Get_dynamics_dim1_U1_D0_d3(const FLOAT_TYPE d3) : d3(d3) {}
  template<typename Tuple>
  __host__ __device__
  FLOAT_TYPE operator()(const Tuple &v) const
  {
    const FLOAT_TYPE pos0 = thrust::get<0>(v);
    const FLOAT_TYPE theta = thrust::get<1>(v);
    const FLOAT_TYPE ctrl1 = thrust::get<2>(v);
    const FLOAT_TYPE dist0 = thrust::get<3>(v);
    return dist0 * sin_float_type<FLOAT_TYPE>(theta) - ctrl1 * pos0 + d3;
  }
};
// Derivative of state dimension 2 (d1 - u1 + d4), all inputs per-element.
// Tuple layout: <0>=u1, <1>=d1, <2>=d4.
struct Get_dynamics_dim2_U1_D1_D4 {
public:
  template<typename Tuple>
  __host__ __device__
  FLOAT_TYPE operator()(const Tuple &v) const
  {
    const FLOAT_TYPE ctrl1 = thrust::get<0>(v);
    const FLOAT_TYPE dist1 = thrust::get<1>(v);
    const FLOAT_TYPE dist4 = thrust::get<2>(v);
    const FLOAT_TYPE delta = dist1 - ctrl1;
    return delta + dist4;
  }
};
// Derivative of state dimension 2 with scalar d4 captured at construction.
struct Get_dynamics_dim2_U1_D1_d4 {
public:
  const FLOAT_TYPE d4;
  Get_dynamics_dim2_U1_D1_d4(const FLOAT_TYPE d4) : d4(d4) {}
  __host__ __device__
  FLOAT_TYPE operator()(const FLOAT_TYPE u1, const FLOAT_TYPE d1) const
  {
    const FLOAT_TYPE delta = d1 - u1;
    return delta + d4;
  }
};
// Derivative of state dimension 2 with scalar d1 captured at construction.
struct Get_dynamics_dim2_U1_d1_D4 {
public:
  const FLOAT_TYPE d1;
  Get_dynamics_dim2_U1_d1_D4(const FLOAT_TYPE d1) : d1(d1) {}
  __host__ __device__
  FLOAT_TYPE operator()(const FLOAT_TYPE u1, const FLOAT_TYPE d4) const
  {
    const FLOAT_TYPE delta = d1 - u1;
    return delta + d4;
  }
};
// Derivative of state dimension 2 with scalar d1 and d4 captured at
// construction; only u1 varies per element.
struct Get_dynamics_dim2_U1_d1_d4 {
public:
  const FLOAT_TYPE d1;
  const FLOAT_TYPE d4;
  Get_dynamics_dim2_U1_d1_d4(const FLOAT_TYPE d1, const FLOAT_TYPE d4) : d1(d1), d4(d4) {}
  __host__ __device__
  FLOAT_TYPE operator()(const FLOAT_TYPE u1) const
  {
    const FLOAT_TYPE delta = d1 - u1;
    return delta + d4;
  }
};
// Evaluates all three state derivatives (dx_uvecs[0..2]) on the GPU.
// Requires u_uvecs[1] (u1) and d_uvecs[0] (d0) to be per-element device
// vectors; the remaining control/disturbance components may be either
// per-element (fully fused path) or scalars (captured-constant path).
// Returns false and logs if the required inputs are not on the device.
bool dynamics_cell_helper_execute_cuda_dimAll(
    std::vector<beacls::UVec>& dx_uvecs,
    const std::vector<beacls::UVec>& x_uvecs,
    const std::vector<beacls::UVec>& u_uvecs,
    const std::vector<beacls::UVec>& d_uvecs
    ) {
  bool result = true;
  beacls::reallocateAsSrc(dx_uvecs[0], x_uvecs[2]);
  beacls::reallocateAsSrc(dx_uvecs[1], x_uvecs[2]);
  beacls::reallocateAsSrc(dx_uvecs[2], u_uvecs[1]);
  FLOAT_TYPE* dx_dim0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr();
  FLOAT_TYPE* dx_dim1_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr();
  FLOAT_TYPE* dx_dim2_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[2]).ptr();
  const FLOAT_TYPE* uOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[0]).ptr();
  const FLOAT_TYPE* uOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[1]).ptr();
  const FLOAT_TYPE* dOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[0]).ptr();
  const FLOAT_TYPE* dOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[1]).ptr();
  const FLOAT_TYPE* dOpt2_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[2]).ptr();
  const FLOAT_TYPE* dOpt3_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[3]).ptr();
  const FLOAT_TYPE* dOpt4_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[4]).ptr();
  const FLOAT_TYPE* x0_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[0]).ptr();
  const FLOAT_TYPE* x1_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[1]).ptr();
  const FLOAT_TYPE* x2_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[2]).ptr();
  thrust::device_ptr<FLOAT_TYPE> dx_dim0_dev_ptr = thrust::device_pointer_cast(dx_dim0_ptr);
  thrust::device_ptr<FLOAT_TYPE> dx_dim1_dev_ptr = thrust::device_pointer_cast(dx_dim1_ptr);
  thrust::device_ptr<FLOAT_TYPE> dx_dim2_dev_ptr = thrust::device_pointer_cast(dx_dim2_ptr);
  // All three outputs share dx_uvecs[0]'s stream.
  hipStream_t dx_stream = beacls::get_stream(dx_uvecs[0]);
  dx_uvecs[1].set_cudaStream(dx_uvecs[0].get_cudaStream());
  dx_uvecs[2].set_cudaStream(dx_uvecs[0].get_cudaStream());
  thrust::device_ptr<const FLOAT_TYPE> us_1_dev_ptr = thrust::device_pointer_cast(uOpt1_ptr);
  thrust::device_ptr<const FLOAT_TYPE> ds_0_dev_ptr = thrust::device_pointer_cast(dOpt0_ptr);
  // Inputs may still be in flight on their producers' streams.
  beacls::synchronizeUVec(u_uvecs[1]);
  beacls::synchronizeUVec(d_uvecs[0]);
  thrust::device_ptr<const FLOAT_TYPE> x0_dev_ptr = thrust::device_pointer_cast(x0_ptr);
  thrust::device_ptr<const FLOAT_TYPE> x1_dev_ptr = thrust::device_pointer_cast(x1_ptr);
  thrust::device_ptr<const FLOAT_TYPE> x2_dev_ptr = thrust::device_pointer_cast(x2_ptr);
  if (beacls::is_cuda(u_uvecs[1]) && beacls::is_cuda(d_uvecs[0])) {
    if (beacls::is_cuda(u_uvecs[0]) && beacls::is_cuda(d_uvecs[2]) && beacls::is_cuda(d_uvecs[3]) && beacls::is_cuda(d_uvecs[1]) && beacls::is_cuda(d_uvecs[4])){
      // Fully per-element path: dims 0/1 in one fused pass, dim 2 in a
      // second pass.
      thrust::device_ptr<const FLOAT_TYPE> us_0_dev_ptr = thrust::device_pointer_cast(uOpt0_ptr);
      thrust::device_ptr<const FLOAT_TYPE> ds_1_dev_ptr = thrust::device_pointer_cast(dOpt1_ptr);
      thrust::device_ptr<const FLOAT_TYPE> ds_2_dev_ptr = thrust::device_pointer_cast(dOpt2_ptr);
      thrust::device_ptr<const FLOAT_TYPE> ds_3_dev_ptr = thrust::device_pointer_cast(dOpt3_ptr);
      thrust::device_ptr<const FLOAT_TYPE> ds_4_dev_ptr = thrust::device_pointer_cast(dOpt4_ptr);
      auto dst_src_Tuple = thrust::make_tuple(
          dx_dim0_dev_ptr, dx_dim1_dev_ptr,
          x0_dev_ptr, x1_dev_ptr, x2_dev_ptr,
          us_0_dev_ptr, us_1_dev_ptr,
          ds_0_dev_ptr, ds_2_dev_ptr, ds_3_dev_ptr);
      auto dst_src_Iterator = thrust::make_zip_iterator(dst_src_Tuple);
      thrust::for_each(thrust::hip::par.on(dx_stream),
          dst_src_Iterator, dst_src_Iterator + x_uvecs[0].size(),
          Get_dynamics_dim01_U0_U1_D0_D2_D3());
      //!< limit of template variables of thrust::tuple is 10, therefore devide 2 thrust calls.
      auto src2_Tuple = thrust::make_tuple(us_1_dev_ptr, ds_1_dev_ptr, ds_4_dev_ptr);
      auto src2_Iterator = thrust::make_zip_iterator(src2_Tuple);
      thrust::transform(thrust::hip::par.on(dx_stream),
          src2_Iterator, src2_Iterator + u_uvecs[1].size(), dx_dim2_dev_ptr,
          Get_dynamics_dim2_U1_D1_D4());
    }
    else {
      // Scalar u0/d1..d4 path: capture the scalars and fuse all three
      // output dimensions into a single pass.
      const FLOAT_TYPE u0 = uOpt0_ptr[0];
      const FLOAT_TYPE d1 = dOpt1_ptr[0];
      const FLOAT_TYPE d2 = dOpt2_ptr[0];
      const FLOAT_TYPE d3 = dOpt3_ptr[0];
      const FLOAT_TYPE d4 = dOpt4_ptr[0];
      auto dst_src_Tuple = thrust::make_tuple(
          dx_dim0_dev_ptr, dx_dim1_dev_ptr, dx_dim2_dev_ptr,
          x0_dev_ptr, x1_dev_ptr, x2_dev_ptr,
          us_1_dev_ptr, ds_0_dev_ptr);
      auto dst_src_Iterator = thrust::make_zip_iterator(dst_src_Tuple);
      thrust::for_each(thrust::hip::par.on(dx_stream),
          dst_src_Iterator, dst_src_Iterator + x_uvecs[0].size(),
          Get_dynamics_dimAll_u0_U1_D0_d1_d2_d3_d4(u0, d1, d2, d3, d4));
    }
  }
  else {
    std::cerr << __FILE__ << ":" << __LINE__ << ":" << __FUNCTION__ << " Invalid data size" << std::endl;
    return false;
  }
  return result;
}
// Evaluate one dimension of the PlaneCAvoid dynamics on the GPU (HIP build).
//
// dx_uvec [out] receives the dx/dt samples for the requested dimension.
// x_uvecs [in]  state grids (x0, x1, x2).
// u_uvecs [in]  optimal control samples (u_uvecs[0], u_uvecs[1]).
// d_uvecs [in]  optimal disturbance samples (d_uvecs[0]..d_uvecs[4]).
// dim     [in]  state dimension to evaluate; valid values are 0, 1 and 2.
//
// Returns false when the mandatory inputs are not GPU vectors or dim is out
// of range. Control/disturbance entries that are not GPU vectors are treated
// as scalars: element 0 is read on the host and folded into the functor.
bool dynamics_cell_helper_execute_cuda(
beacls::UVec& dx_uvec,
const std::vector<beacls::UVec>& x_uvecs,
const std::vector<beacls::UVec>& u_uvecs,
const std::vector<beacls::UVec>& d_uvecs,
const size_t dim
) {
bool result = true;
switch (dim) {
case 0:
if (beacls::is_cuda(u_uvecs[1]) && beacls::is_cuda(d_uvecs[0])) {
// Size/place the output like the x2 grid.
beacls::reallocateAsSrc(dx_uvec, x_uvecs[2]);
FLOAT_TYPE* dx_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvec).ptr();
hipStream_t dx_stream = beacls::get_stream(dx_uvec);
const FLOAT_TYPE* x1_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[1]).ptr();
const FLOAT_TYPE* x2_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[2]).ptr();
const FLOAT_TYPE* uOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[0]).ptr();
const FLOAT_TYPE* uOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[1]).ptr();
const FLOAT_TYPE* dOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[0]).ptr();
const FLOAT_TYPE* dOpt2_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[2]).ptr();
thrust::device_ptr<FLOAT_TYPE> dx_dim_dev_ptr = thrust::device_pointer_cast(dx_dim_ptr);
thrust::device_ptr<const FLOAT_TYPE> x1_dev_ptr = thrust::device_pointer_cast(x1_ptr);
thrust::device_ptr<const FLOAT_TYPE> x2_dev_ptr = thrust::device_pointer_cast(x2_ptr);
thrust::device_ptr<const FLOAT_TYPE> us_1_dev_ptr = thrust::device_pointer_cast(uOpt1_ptr);
thrust::device_ptr<const FLOAT_TYPE> ds_0_dev_ptr = thrust::device_pointer_cast(dOpt0_ptr);
// Wait for the asynchronous producers of the GPU inputs before the
// dx stream consumes them.
beacls::synchronizeUVec(u_uvecs[1]);
beacls::synchronizeUVec(d_uvecs[0]);
if (beacls::is_cuda(u_uvecs[0]) && beacls::is_cuda(d_uvecs[2])) {
// Fully grid-valued operands: zip them all into one transform.
thrust::device_ptr<const FLOAT_TYPE> us_0_dev_ptr = thrust::device_pointer_cast(uOpt0_ptr);
thrust::device_ptr<const FLOAT_TYPE> ds_2_dev_ptr = thrust::device_pointer_cast(dOpt2_ptr);
auto src0_Tuple = thrust::make_tuple(
x1_dev_ptr, x2_dev_ptr,
us_0_dev_ptr, us_1_dev_ptr, ds_0_dev_ptr, ds_2_dev_ptr);
auto src0_Iterator = thrust::make_zip_iterator(src0_Tuple);
thrust::transform(thrust::hip::par.on(dx_stream),
src0_Iterator, src0_Iterator + x_uvecs[1].size(), dx_dim_dev_ptr,
Get_dynamics_dim0_U0_U1_D0_D2());
}
else {
// u_uvecs[0]/d_uvecs[2] are host scalars; bake them into the functor.
const FLOAT_TYPE u0 = uOpt0_ptr[0];
const FLOAT_TYPE d2 = dOpt2_ptr[0];
auto src0_Tuple = thrust::make_tuple(
x1_dev_ptr, x2_dev_ptr,
us_1_dev_ptr, ds_0_dev_ptr);
auto src0_Iterator = thrust::make_zip_iterator(src0_Tuple);
thrust::transform(thrust::hip::par.on(dx_stream),
src0_Iterator, src0_Iterator + x_uvecs[1].size(), dx_dim_dev_ptr,
Get_dynamics_dim0_u0_U1_D0_d2(u0, d2));
}
}
else {
std::cerr << __FILE__ << ":" << __LINE__ << ":" << __FUNCTION__ << " Invalid data size" << std::endl;
result = false;
}
break;
case 1:
if (beacls::is_cuda(u_uvecs[1]) && beacls::is_cuda(d_uvecs[0])) {
beacls::reallocateAsSrc(dx_uvec, x_uvecs[2]);
FLOAT_TYPE* dx_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvec).ptr();
hipStream_t dx_stream = beacls::get_stream(dx_uvec);
const FLOAT_TYPE* x0_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[0]).ptr();
const FLOAT_TYPE* x2_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[2]).ptr();
const FLOAT_TYPE* uOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[1]).ptr();
const FLOAT_TYPE* dOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[0]).ptr();
const FLOAT_TYPE* dOpt3_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[3]).ptr();
thrust::device_ptr<FLOAT_TYPE> dx_dim_dev_ptr = thrust::device_pointer_cast(dx_dim_ptr);
thrust::device_ptr<const FLOAT_TYPE> x0_dev_ptr = thrust::device_pointer_cast(x0_ptr);
thrust::device_ptr<const FLOAT_TYPE> x2_dev_ptr = thrust::device_pointer_cast(x2_ptr);
thrust::device_ptr<const FLOAT_TYPE> us_1_dev_ptr = thrust::device_pointer_cast(uOpt1_ptr);
thrust::device_ptr<const FLOAT_TYPE> ds_0_dev_ptr = thrust::device_pointer_cast(dOpt0_ptr);
beacls::synchronizeUVec(u_uvecs[1]);
beacls::synchronizeUVec(d_uvecs[0]);
if (beacls::is_cuda(d_uvecs[3])) {
thrust::device_ptr<const FLOAT_TYPE> ds_3_dev_ptr = thrust::device_pointer_cast(dOpt3_ptr);
auto src1_Tuple = thrust::make_tuple(x0_dev_ptr, x2_dev_ptr, us_1_dev_ptr, ds_0_dev_ptr, ds_3_dev_ptr);
auto src1_Iterator = thrust::make_zip_iterator(src1_Tuple);
thrust::transform(thrust::hip::par.on(dx_stream),
src1_Iterator, src1_Iterator + x_uvecs[0].size(), dx_dim_dev_ptr,
Get_dynamics_dim1_U1_D0_D3());
}
else {
// d_uvecs[3] is a host scalar.
const FLOAT_TYPE d3 = dOpt3_ptr[0];
auto src1_Tuple = thrust::make_tuple(x0_dev_ptr, x2_dev_ptr, us_1_dev_ptr, ds_0_dev_ptr);
auto src1_Iterator = thrust::make_zip_iterator(src1_Tuple);
thrust::transform(thrust::hip::par.on(dx_stream),
src1_Iterator, src1_Iterator + x_uvecs[0].size(), dx_dim_dev_ptr,
Get_dynamics_dim1_U1_D0_d3(d3));
}
}
else {
std::cerr << __FILE__ << ":" << __LINE__ << ":" << __FUNCTION__ << " Invalid data size" << std::endl;
result = false;
}
break;
case 2:
if (beacls::is_cuda(u_uvecs[1])) {
// Dimension 2 depends only on u1, d1 and d4; size output like u1.
beacls::reallocateAsSrc(dx_uvec, u_uvecs[1]);
FLOAT_TYPE* dx_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvec).ptr();
hipStream_t dx_stream = beacls::get_stream(dx_uvec);
const FLOAT_TYPE* uOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[1]).ptr();
const FLOAT_TYPE* dOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[1]).ptr();
const FLOAT_TYPE* dOpt4_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[4]).ptr();
thrust::device_ptr<FLOAT_TYPE> dx_dim_dev_ptr = thrust::device_pointer_cast(dx_dim_ptr);
thrust::device_ptr<const FLOAT_TYPE> us_1_dev_ptr = thrust::device_pointer_cast(uOpt1_ptr);
beacls::synchronizeUVec(u_uvecs[1]);
if (beacls::is_cuda(d_uvecs[1]) && beacls::is_cuda(d_uvecs[4])) {
thrust::device_ptr<const FLOAT_TYPE> ds_1_dev_ptr = thrust::device_pointer_cast(dOpt1_ptr);
thrust::device_ptr<const FLOAT_TYPE> ds_4_dev_ptr = thrust::device_pointer_cast(dOpt4_ptr);
// NOTE(review): only d_uvecs[1] is synchronized here, not d_uvecs[4];
// presumably both share a producer stream -- confirm.
beacls::synchronizeUVec(d_uvecs[1]);
auto src2_Tuple = thrust::make_tuple(us_1_dev_ptr, ds_1_dev_ptr, ds_4_dev_ptr);
auto src2_Iterator = thrust::make_zip_iterator(src2_Tuple);
thrust::transform(thrust::hip::par.on(dx_stream),
src2_Iterator, src2_Iterator + u_uvecs[1].size(), dx_dim_dev_ptr,
Get_dynamics_dim2_U1_D1_D4());
}
else {
// d_uvecs[1]/d_uvecs[4] are host scalars here (host-side reads).
const FLOAT_TYPE d1 = dOpt1_ptr[0];
const FLOAT_TYPE d4 = dOpt4_ptr[0];
thrust::transform(thrust::hip::par.on(dx_stream),
us_1_dev_ptr, us_1_dev_ptr + u_uvecs[1].size(), dx_dim_dev_ptr,
Get_dynamics_dim2_U1_d1_d4(d1, d4));
}
}
else {
std::cerr << __FILE__ << ":" << __LINE__ << ":" << __FUNCTION__ << " Invalid data size" << std::endl;
result = false;
}
break;
default:
// Fixed message: the switch above defines dimensions 0-2 only (the old
// text claimed "1-4", inherited from a different system).
std::cerr << "Only dimensions 0-2 are defined for dynamics of PlaneCAvoid!" << std::endl;
result = false;
break;
}
return result;
}
};
#endif /* defined(USER_DEFINED_GPU_DYNSYS_FUNC) */
#endif /* defined(WITH_GPU) */
| dc9874e0d2984887c8740fb1d71bacf5fddfc7be.cu | // CUDA runtime
#include <cuda_runtime.h>
#include <typedef.hpp>
#include <cuda_macro.hpp>
#include <thrust/device_ptr.h>
#include <thrust/tuple.h>
#include <thrust/for_each.h>
#include <thrust/transform.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/system/cuda/execution_policy.h>
#include "PlaneCAvoid_cuda.hpp"
#include <vector>
#include <algorithm>
#include <helperOC/DynSys/DynSys/DynSys.hpp>
#include <Core/UVec.hpp>
#include <Core/CudaStream.hpp>
#if defined(WITH_GPU)
#if defined(USER_DEFINED_GPU_DYNSYS_FUNC)
namespace PlaneCAvoid_CUDA {
// Functor: pick both optimal controls per grid node.
// Tuple layout: <0> uOpt0 out, <1> uOpt1 out, <2> y0, <3> y1,
// <4> deriv0, <5> deriv1, <6> deriv2.
struct Get_optCtrl_dim0dim1_D {
public:
const FLOAT_TYPE vrangeA0;
const FLOAT_TYPE vrangeA1;
const FLOAT_TYPE wMax;
Get_optCtrl_dim0dim1_D(
const FLOAT_TYPE vrangeA0,
const FLOAT_TYPE vrangeA1,
const FLOAT_TYPE wMax) :
vrangeA0(vrangeA0),
vrangeA1(vrangeA1),
wMax(wMax) {}
template<typename Tuple>
__host__ __device__
void operator()(Tuple v) const
{
// Unpack state and value-function gradient for this node.
const FLOAT_TYPE py0 = thrust::get<2>(v);
const FLOAT_TYPE py1 = thrust::get<3>(v);
const FLOAT_TYPE g0 = thrust::get<4>(v);
const FLOAT_TYPE g1 = thrust::get<5>(v);
const FLOAT_TYPE g2 = thrust::get<6>(v);
// The sign of each determinant term selects the extremizing control.
const FLOAT_TYPE det_speed = -g0;
const FLOAT_TYPE det_turn = g0 * py1 - g1 * py0 - g2;
if (det_speed >= 0) {
thrust::get<0>(v) = vrangeA1;
} else {
thrust::get<0>(v) = vrangeA0;
}
if (det_turn >= 0) {
thrust::get<1>(v) = wMax;
} else {
thrust::get<1>(v) = -wMax;
}
}
};
// Functor: pick the optimal turn-rate control when the gradient components
// are host scalars (d0, d1, d2) and only the state varies per node.
struct Get_optCtrl_dim1_d {
public:
const FLOAT_TYPE wMax;
const FLOAT_TYPE d0;
const FLOAT_TYPE d1;
const FLOAT_TYPE d2;
Get_optCtrl_dim1_d(const FLOAT_TYPE wMax, const FLOAT_TYPE d0, const FLOAT_TYPE d1, const FLOAT_TYPE d2) :
wMax(wMax), d0(d0), d1(d1), d2(d2) {}
__host__ __device__
FLOAT_TYPE operator()(const FLOAT_TYPE y0, const FLOAT_TYPE y1) const
{
// Sign of the determinant selects +wMax or -wMax.
if (d0 * y1 - d1 * y0 - d2 >= 0) {
return wMax;
}
return -wMax;
}
};
// Compute the optimal control pair for PlaneCAvoid on the GPU.
// u_uvecs[0] is resized/placed like deriv_uvecs[0]; u_uvecs[1] like x_uvecs[0].
// uMode selects maximization or minimization; any other mode logs an error
// and returns false. When the derivative vectors are host scalars, uOpt0 is
// computed once on the host and only uOpt1 runs on the GPU.
bool optCtrl_execute_cuda(
std::vector<beacls::UVec>& u_uvecs,
const std::vector<beacls::UVec>& x_uvecs,
const std::vector<beacls::UVec>& deriv_uvecs,
const FLOAT_TYPE wMaxA,
const std::vector<FLOAT_TYPE>& vRangeA,
const helperOC::DynSys_UMode_Type uMode
)
{
bool result = true;
// Match output buffers to the size/placement of their source vectors.
beacls::reallocateAsSrc(u_uvecs[0], deriv_uvecs[0]);
beacls::reallocateAsSrc(u_uvecs[1], x_uvecs[0]);
FLOAT_TYPE* uOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[0]).ptr();
FLOAT_TYPE* uOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[1]).ptr();
const FLOAT_TYPE* y0_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[0]).ptr();
const FLOAT_TYPE* y1_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[1]).ptr();
const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr();
const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr();
const FLOAT_TYPE* deriv2_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[2]).ptr();
thrust::device_ptr<const FLOAT_TYPE> y0_dev_ptr = thrust::device_pointer_cast(y0_ptr);
thrust::device_ptr<const FLOAT_TYPE> y1_dev_ptr = thrust::device_pointer_cast(y1_ptr);
const FLOAT_TYPE vRangeA0 = vRangeA[0];
const FLOAT_TYPE vRangeA1 = vRangeA[1];
if ((uMode == helperOC::DynSys_UMode_Max) || (uMode == helperOC::DynSys_UMode_Min)) {
// For minimization, negate wMax and swap the speed-range endpoints so
// the same ">= 0" sign test inside the functors yields the minimizer.
const FLOAT_TYPE moded_wMaxA = (uMode == helperOC::DynSys_UMode_Max) ? wMaxA : -wMaxA;
const FLOAT_TYPE moded_vRangeA0 = (uMode == helperOC::DynSys_UMode_Max) ? vRangeA0 : vRangeA1;
const FLOAT_TYPE moded_vRangeA1 = (uMode == helperOC::DynSys_UMode_Max) ? vRangeA1 : vRangeA0;
cudaStream_t u_stream = beacls::get_stream(u_uvecs[1]);
thrust::device_ptr<FLOAT_TYPE> uOpt1_dev_ptr = thrust::device_pointer_cast(uOpt1_ptr);
if(is_cuda(deriv_uvecs[0]) && is_cuda(deriv_uvecs[1]) && is_cuda(deriv_uvecs[2])){
// Grid-valued derivatives: compute both controls per node in one pass.
thrust::device_ptr<FLOAT_TYPE> uOpt0_dev_ptr = thrust::device_pointer_cast(uOpt0_ptr);
// Both outputs are produced on u_uvecs[1]'s stream.
u_uvecs[0].set_cudaStream(u_uvecs[1].get_cudaStream())#include "hip/hip_runtime.h"
;
thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr);
thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr);
thrust::device_ptr<const FLOAT_TYPE> deriv2_dev_ptr = thrust::device_pointer_cast(deriv2_ptr);
auto src_dst_Tuple = thrust::make_tuple(uOpt0_dev_ptr, uOpt1_dev_ptr,
y0_dev_ptr, y1_dev_ptr, deriv0_dev_ptr, deriv1_dev_ptr, deriv2_dev_ptr);
auto src_dst_Iterator = thrust::make_zip_iterator(src_dst_Tuple);
thrust::for_each(thrust::cuda::par.on(u_stream),
src_dst_Iterator, src_dst_Iterator + x_uvecs[0].size(),
Get_optCtrl_dim0dim1_D(moded_vRangeA0, moded_vRangeA1, moded_wMaxA));
}
else {
// Scalar derivatives: uOpt0 is a single host value; only uOpt1 varies
// over the grid and is computed on the GPU.
const FLOAT_TYPE d0 = deriv0_ptr[0];
const FLOAT_TYPE d1 = deriv1_ptr[0];
const FLOAT_TYPE d2 = deriv2_ptr[0];
const FLOAT_TYPE det0 = -d0;
uOpt0_ptr[0] = (det0 >= 0) ? moded_vRangeA1 : moded_vRangeA0;
thrust::transform(thrust::cuda::par.on(u_stream),
y0_dev_ptr, y0_dev_ptr + x_uvecs[0].size(), y1_dev_ptr, uOpt1_dev_ptr,
Get_optCtrl_dim1_d(moded_wMaxA, d0, d1, d2));
}
}
else {
std::cerr << "Unknown uMode!: " << uMode << std::endl;
result = false;
}
return result;
}
// Functor: pick the optimal speed disturbance when the gradient components
// d0/d1 are host scalars; only the heading y2 varies per node.
struct Get_optDstb_dim0_d {
public:
const FLOAT_TYPE vrangeB0;
const FLOAT_TYPE vrangeB1;
const FLOAT_TYPE d0;
const FLOAT_TYPE d1;
Get_optDstb_dim0_d(const FLOAT_TYPE vrangeB0, const FLOAT_TYPE vrangeB1, const FLOAT_TYPE d0, const FLOAT_TYPE d1) :
vrangeB0(vrangeB0), vrangeB1(vrangeB1), d0(d0), d1(d1) {}
__host__ __device__
FLOAT_TYPE operator()(const FLOAT_TYPE y2) const
{
FLOAT_TYPE s;
FLOAT_TYPE c;
sincos_float_type<FLOAT_TYPE>(y2, s, c);
// Sign of d0*cos(y2) + d1*sin(y2) selects which range endpoint is optimal.
if (d0 * c + d1 * s >= 0) {
return vrangeB1;
}
return vrangeB0;
}
};
// Functor: choose all five optimal disturbance channels at once.
// Tuple layout:
//   outputs: <0> dOpt0, <1> dOpt1, <2> dOpt2, <3> dOpt3, <4> dOpt4
//   inputs:  <5> y2 (heading), <6> deriv0, <7> deriv1, <8> deriv2
struct Get_optDstb_dim0dim1dim2dim3dim4_D {
public:
const FLOAT_TYPE vrangeB0;
const FLOAT_TYPE vrangeB1;
const FLOAT_TYPE wMaxB;
const FLOAT_TYPE dMaxA_0_dMaxB_0;
const FLOAT_TYPE dMaxA_1_dMaxB_1;
Get_optDstb_dim0dim1dim2dim3dim4_D(
const FLOAT_TYPE vrangeB0,
const FLOAT_TYPE vrangeB1,
const FLOAT_TYPE wMaxB,
const FLOAT_TYPE dMaxA_0_dMaxB_0,
const FLOAT_TYPE dMaxA_1_dMaxB_1) :
vrangeB0(vrangeB0),
vrangeB1(vrangeB1),
wMaxB(wMaxB),
dMaxA_0_dMaxB_0(dMaxA_0_dMaxB_0),
dMaxA_1_dMaxB_1(dMaxA_1_dMaxB_1) {}
template<typename Tuple>
__host__ __device__
void operator()(Tuple v) const
{
const FLOAT_TYPE y2 = thrust::get<5>(v);
const FLOAT_TYPE deriv0 = thrust::get<6>(v);
const FLOAT_TYPE deriv1 = thrust::get<7>(v);
const FLOAT_TYPE deriv2 = thrust::get<8>(v);
FLOAT_TYPE sin_y2;
FLOAT_TYPE cos_y2;
sincos_float_type<FLOAT_TYPE>(y2, sin_y2, cos_y2);
// Magnitude of the (deriv0, deriv1) gradient; used to align (dOpt2, dOpt3)
// with the gradient direction at combined magnitude dMaxA_0 + dMaxB_0.
const FLOAT_TYPE normDeriv = sqrt_float_type<FLOAT_TYPE>(deriv0 * deriv0 + deriv1 * deriv1);
// Sign of deriv0*cos(y2) + deriv1*sin(y2) selects the speed extreme.
const FLOAT_TYPE det0 = deriv0 * cos_y2 + deriv1 * sin_y2;
thrust::get<0>(v) = (det0 >= 0) ? vrangeB1 : vrangeB0;
if (normDeriv == 0) {
// Zero gradient: avoid dividing by zero; no preferred direction.
thrust::get<2>(v) = 0;
thrust::get<3>(v) = 0;
} else {
thrust::get<2>(v) = dMaxA_0_dMaxB_0 * deriv0 / normDeriv;
thrust::get<3>(v) = dMaxA_0_dMaxB_0 * deriv1 / normDeriv;
}
// The sign of deriv2 decides both dOpt1 and dOpt4 together.
if (deriv2 >= 0) {
thrust::get<1>(v) = wMaxB;
thrust::get<4>(v) = dMaxA_1_dMaxB_1;
} else {
thrust::get<1>(v) = -wMaxB;
thrust::get<4>(v) = -dMaxA_1_dMaxB_1;
}
}
};
// Compute the optimal disturbance (five channels) for PlaneCAvoid on the GPU.
// d_uvecs[0] is sized like x_uvecs[2]; d_uvecs[1..4] like deriv_uvecs[0].
// dMode selects maximization or minimization; any other mode logs an error
// and returns false. When the derivatives are host scalars, only dOpt0 runs
// on the GPU and dOpt1..dOpt4 are single host values.
bool optDstb_execute_cuda(
std::vector<beacls::UVec>& d_uvecs,
const std::vector<beacls::UVec>& x_uvecs,
const std::vector<beacls::UVec>& deriv_uvecs,
const std::vector<FLOAT_TYPE>& dMaxA,
const std::vector<FLOAT_TYPE>& dMaxB,
const std::vector<FLOAT_TYPE>& vRangeB,
const FLOAT_TYPE wMaxB,
const helperOC::DynSys_DMode_Type dMode
)
{
bool result = true;
// Match output buffers to the size/placement of their source vectors.
beacls::reallocateAsSrc(d_uvecs[0], x_uvecs[2]);
beacls::reallocateAsSrc(d_uvecs[1], deriv_uvecs[0]);
beacls::reallocateAsSrc(d_uvecs[2], deriv_uvecs[0]);
beacls::reallocateAsSrc(d_uvecs[3], deriv_uvecs[0]);
beacls::reallocateAsSrc(d_uvecs[4], deriv_uvecs[0]);
FLOAT_TYPE* dOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[0]).ptr();
FLOAT_TYPE* dOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[1]).ptr();
FLOAT_TYPE* dOpt2_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[2]).ptr();
FLOAT_TYPE* dOpt3_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[3]).ptr();
FLOAT_TYPE* dOpt4_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[4]).ptr();
const FLOAT_TYPE* y2_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[2]).ptr();
const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr();
const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr();
const FLOAT_TYPE* deriv2_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[2]).ptr();
const FLOAT_TYPE dMaxA_0 = dMaxA[0];
const FLOAT_TYPE dMaxA_1 = dMaxA[1];
const FLOAT_TYPE dMaxB_0 = dMaxB[0];
const FLOAT_TYPE dMaxB_1 = dMaxB[1];
const FLOAT_TYPE vRangeB0 = vRangeB[0];
const FLOAT_TYPE vRangeB1 = vRangeB[1];
// Combined disturbance magnitudes of the two vehicles.
const FLOAT_TYPE dMaxA_0_dMaxB_0 = dMaxA_0 + dMaxB_0;
const FLOAT_TYPE dMaxA_1_dMaxB_1 = dMaxA_1 + dMaxB_1;
if ((dMode == helperOC::DynSys_DMode_Max) || (dMode == helperOC::DynSys_DMode_Min)) {
// For minimization, negate/swap the extremes so the same ">= 0" sign
// tests inside the functors pick the minimizing disturbance instead.
const FLOAT_TYPE moded_wMaxB = (dMode == helperOC::DynSys_DMode_Max) ? wMaxB : -wMaxB;
const FLOAT_TYPE moded_vRangeB0 = (dMode == helperOC::DynSys_DMode_Max) ? vRangeB0 : vRangeB1;
const FLOAT_TYPE moded_vRangeB1 = (dMode == helperOC::DynSys_DMode_Max) ? vRangeB1 : vRangeB0;
const FLOAT_TYPE moded_dMaxA_0_dMaxB_0 = (dMode == helperOC::DynSys_DMode_Max) ? dMaxA_0_dMaxB_0 : -dMaxA_0_dMaxB_0;
const FLOAT_TYPE moded_dMaxA_1_dMaxB_1 = (dMode == helperOC::DynSys_DMode_Max) ? dMaxA_1_dMaxB_1 : -dMaxA_1_dMaxB_1;
thrust::device_ptr<FLOAT_TYPE> dOpt0_dev_ptr = thrust::device_pointer_cast(dOpt0_ptr);
cudaStream_t d_stream = beacls::get_stream(d_uvecs[0]);
thrust::device_ptr<const FLOAT_TYPE> y2_dev_ptr = thrust::device_pointer_cast(y2_ptr);
if (beacls::is_cuda(deriv_uvecs[0]) && beacls::is_cuda(deriv_uvecs[1]) && beacls::is_cuda(deriv_uvecs[2])){
// Grid-valued derivatives: compute all five channels per node in one pass.
thrust::device_ptr<FLOAT_TYPE> dOpt1_dev_ptr = thrust::device_pointer_cast(dOpt1_ptr);
thrust::device_ptr<FLOAT_TYPE> dOpt2_dev_ptr = thrust::device_pointer_cast(dOpt2_ptr);
thrust::device_ptr<FLOAT_TYPE> dOpt3_dev_ptr = thrust::device_pointer_cast(dOpt3_ptr);
thrust::device_ptr<FLOAT_TYPE> dOpt4_dev_ptr = thrust::device_pointer_cast(dOpt4_ptr);
thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr);
thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr);
thrust::device_ptr<const FLOAT_TYPE> deriv2_dev_ptr = thrust::device_pointer_cast(deriv2_ptr);
// All five outputs are produced on d_uvecs[0]'s stream.
d_uvecs[1].set_cudaStream(d_uvecs[0].get_cudaStream())
d_uvecs[2].set_cudaStream(d_uvecs[0].get_cudaStream());
d_uvecs[3].set_cudaStream(d_uvecs[0].get_cudaStream());
d_uvecs[4].set_cudaStream(d_uvecs[0].get_cudaStream());
auto dst_src_Tuple = thrust::make_tuple(
dOpt0_dev_ptr, dOpt1_dev_ptr, dOpt2_dev_ptr, dOpt3_dev_ptr, dOpt4_dev_ptr,
y2_dev_ptr, deriv0_dev_ptr, deriv1_dev_ptr, deriv2_dev_ptr);
auto dst_src_Iterator = thrust::make_zip_iterator(dst_src_Tuple);
thrust::for_each(thrust::cuda::par.on(d_stream),
dst_src_Iterator, dst_src_Iterator + deriv_uvecs[0].size(),
Get_optDstb_dim0dim1dim2dim3dim4_D(moded_vRangeB0, moded_vRangeB1,
moded_wMaxB, moded_dMaxA_0_dMaxB_0, moded_dMaxA_1_dMaxB_1));
}
else {
// Scalar derivatives: dOpt0 still varies with the heading grid (GPU);
// dOpt1..dOpt4 collapse to single values written on the host.
const FLOAT_TYPE d0 = deriv0_ptr[0];
const FLOAT_TYPE d1 = deriv1_ptr[0];
const FLOAT_TYPE d2 = deriv2_ptr[0];
thrust::transform(thrust::cuda::par.on(d_stream),
y2_dev_ptr, y2_dev_ptr + x_uvecs[2].size(), dOpt0_dev_ptr,
Get_optDstb_dim0_d(moded_vRangeB0, moded_vRangeB1, d0, d1));
const FLOAT_TYPE det1 = d2;
const FLOAT_TYPE det4 = d2;
dOpt1_ptr[0] = (det1 >= 0) ? moded_wMaxB : -moded_wMaxB;
dOpt4_ptr[0] = (det4 >= 0) ? moded_dMaxA_1_dMaxB_1 : -moded_dMaxA_1_dMaxB_1;
// Align (dOpt2, dOpt3) with the (d0, d1) gradient; guard zero gradient.
const FLOAT_TYPE denom = sqrt_float_type<FLOAT_TYPE>(d0 * d0 + d1 * d1);
dOpt2_ptr[0] = (denom == 0) ? 0 : moded_dMaxA_0_dMaxB_0 * d0 / denom;
dOpt3_ptr[0] = (denom == 0) ? 0 : moded_dMaxA_0_dMaxB_0 * d1 / denom;
}
}
else {
std::cerr << "Unknown dMode!: " << dMode << std::endl;
result = false;
}
return result;
}
// Functor: evaluate all three dynamics components from fully grid-valued
// controls and disturbances in one pass.
// Tuple layout: outputs <0>..<2> = dx0..dx2, inputs <3>..<5> = x0..x2,
// <6>..<7> = u0..u1, <8>..<12> = d0..d4.
// NOTE(review): this reads tuple slot 12, i.e. a 13-element tuple, while the
// code elsewhere notes the classic thrust::tuple limit of 10 elements and
// splits the all-GPU path into two calls (Get_dynamics_dim01_... plus
// Get_dynamics_dim2_...). This functor appears unused on that path -- confirm
// before relying on it with older thrust versions.
struct Get_dynamics_dimAll_U0_U1_D0_D1_D2_D3_D4
{
Get_dynamics_dimAll_U0_U1_D0_D1_D2_D3_D4(
)
{}
template<typename Tuple>
__host__ __device__
void operator()(Tuple v) const
{
const FLOAT_TYPE x0 = thrust::get<3>(v);
const FLOAT_TYPE x1 = thrust::get<4>(v);
const FLOAT_TYPE x2 = thrust::get<5>(v);
const FLOAT_TYPE u0 = thrust::get<6>(v);
const FLOAT_TYPE u1 = thrust::get<7>(v);
const FLOAT_TYPE d0 = thrust::get<8>(v);
const FLOAT_TYPE d1 = thrust::get<9>(v);
const FLOAT_TYPE d2 = thrust::get<10>(v);
const FLOAT_TYPE d3 = thrust::get<11>(v);
const FLOAT_TYPE d4 = thrust::get<12>(v);
FLOAT_TYPE sin_x;
FLOAT_TYPE cos_x;
sincos_float_type<FLOAT_TYPE>(x2, sin_x, cos_x);
thrust::get<0>(v) = -u0 + d0 * cos_x + u1 * x1 + d2;
thrust::get<1>(v) = d0 * sin_x - u1 * x0 + d3;
thrust::get<2>(v) = d1 - u1 + d4;
}
};
// Functor: evaluate dynamics dimensions 0 and 1 together (dimension 2 is
// handled by a separate call to stay within the 10-element tuple limit).
// Tuple layout: outputs <0>,<1>; state <2>..<4>; controls <5>,<6>;
// disturbances <7>..<9>.
struct Get_dynamics_dim01_U0_U1_D0_D2_D3
{
Get_dynamics_dim01_U0_U1_D0_D2_D3(
)
{}
template<typename Tuple>
__host__ __device__
void operator()(Tuple v) const
{
const FLOAT_TYPE sx = thrust::get<2>(v);
const FLOAT_TYPE sy = thrust::get<3>(v);
const FLOAT_TYPE st = thrust::get<4>(v);
const FLOAT_TYPE u0v = thrust::get<5>(v);
const FLOAT_TYPE u1v = thrust::get<6>(v);
const FLOAT_TYPE d0v = thrust::get<7>(v);
const FLOAT_TYPE d2v = thrust::get<8>(v);
const FLOAT_TYPE d3v = thrust::get<9>(v);
FLOAT_TYPE st_sin;
FLOAT_TYPE st_cos;
sincos_float_type<FLOAT_TYPE>(st, st_sin, st_cos);
// Accumulate left-to-right to keep the original FP evaluation order.
FLOAT_TYPE r0 = -u0v;
r0 += d0v * st_cos;
r0 += u1v * sy;
r0 += d2v;
FLOAT_TYPE r1 = d0v * st_sin;
r1 -= u1v * sx;
r1 += d3v;
thrust::get<0>(v) = r0;
thrust::get<1>(v) = r1;
}
};
// Functor: evaluate all three dynamics components when u0 and d1..d4 are
// host scalars (baked in as constants) and only u1 and d0 are grid-valued.
// Tuple layout: outputs <0>..<2>; state <3>..<5>; <6> u1; <7> d0.
struct Get_dynamics_dimAll_u0_U1_D0_d1_d2_d3_d4
{
const FLOAT_TYPE u0;
const FLOAT_TYPE d1;
const FLOAT_TYPE d2;
const FLOAT_TYPE d3;
const FLOAT_TYPE d4;
Get_dynamics_dimAll_u0_U1_D0_d1_d2_d3_d4(
const FLOAT_TYPE u0,
const FLOAT_TYPE d1,
const FLOAT_TYPE d2,
const FLOAT_TYPE d3,
const FLOAT_TYPE d4
) : u0(u0), d1(d1), d2(d2), d3(d3), d4(d4) {}
template<typename Tuple>
__host__ __device__
void operator()(Tuple v) const
{
const FLOAT_TYPE sx = thrust::get<3>(v);
const FLOAT_TYPE sy = thrust::get<4>(v);
const FLOAT_TYPE st = thrust::get<5>(v);
const FLOAT_TYPE u1v = thrust::get<6>(v);
const FLOAT_TYPE d0v = thrust::get<7>(v);
FLOAT_TYPE st_sin;
FLOAT_TYPE st_cos;
sincos_float_type<FLOAT_TYPE>(st, st_sin, st_cos);
// Accumulate left-to-right to keep the original FP evaluation order.
FLOAT_TYPE r0 = -u0;
r0 += d0v * st_cos;
r0 += u1v * sy;
r0 += d2;
FLOAT_TYPE r1 = d0v * st_sin;
r1 -= u1v * sx;
r1 += d3;
FLOAT_TYPE r2 = d1 - u1v;
r2 += d4;
thrust::get<0>(v) = r0;
thrust::get<1>(v) = r1;
thrust::get<2>(v) = r2;
}
};
// Functor: dynamics dimension 0 with all operands grid-valued.
// Tuple layout: <0> x1, <1> x2, <2> u0, <3> u1, <4> d0, <5> d2.
struct Get_dynamics_dim0_U0_U1_D0_D2 {
public:
template<typename Tuple>
__host__ __device__
FLOAT_TYPE operator()(const Tuple &v) const
{
const FLOAT_TYPE sy = thrust::get<0>(v);
const FLOAT_TYPE st = thrust::get<1>(v);
const FLOAT_TYPE u0v = thrust::get<2>(v);
const FLOAT_TYPE u1v = thrust::get<3>(v);
const FLOAT_TYPE d0v = thrust::get<4>(v);
const FLOAT_TYPE d2v = thrust::get<5>(v);
// Accumulate left-to-right to keep the original FP evaluation order.
FLOAT_TYPE r = -u0v;
r += d0v * cos_float_type<FLOAT_TYPE>(st);
r += u1v * sy;
r += d2v;
return r;
}
};
// Functor: dynamics dimension 0 with u0 and d2 as host scalars.
// Tuple layout: <0> x1, <1> x2, <2> u1, <3> d0.
struct Get_dynamics_dim0_u0_U1_D0_d2 {
public:
const FLOAT_TYPE u0;
const FLOAT_TYPE d2;
Get_dynamics_dim0_u0_U1_D0_d2(const FLOAT_TYPE u0, const FLOAT_TYPE d2) : u0(u0), d2(d2) {}
template<typename Tuple>
__host__ __device__
FLOAT_TYPE operator()(const Tuple &v) const
{
const FLOAT_TYPE sy = thrust::get<0>(v);
const FLOAT_TYPE st = thrust::get<1>(v);
const FLOAT_TYPE u1v = thrust::get<2>(v);
const FLOAT_TYPE d0v = thrust::get<3>(v);
// Accumulate left-to-right to keep the original FP evaluation order.
FLOAT_TYPE r = -u0;
r += d0v * cos_float_type<FLOAT_TYPE>(st);
r += u1v * sy;
r += d2;
return r;
}
};
// Functor: dynamics dimension 1 with all operands grid-valued.
// Tuple layout: <0> x0, <1> x2, <2> u1, <3> d0, <4> d3.
struct Get_dynamics_dim1_U1_D0_D3 {
public:
template<typename Tuple>
__host__ __device__
FLOAT_TYPE operator()(const Tuple &v) const
{
const FLOAT_TYPE sx = thrust::get<0>(v);
const FLOAT_TYPE st = thrust::get<1>(v);
const FLOAT_TYPE u1v = thrust::get<2>(v);
const FLOAT_TYPE d0v = thrust::get<3>(v);
const FLOAT_TYPE d3v = thrust::get<4>(v);
// Accumulate left-to-right to keep the original FP evaluation order.
FLOAT_TYPE r = d0v * sin_float_type<FLOAT_TYPE>(st);
r -= u1v * sx;
r += d3v;
return r;
}
};
// Functor: dynamics dimension 1 with d3 as a host scalar.
// Tuple layout: <0> x0, <1> x2, <2> u1, <3> d0.
struct Get_dynamics_dim1_U1_D0_d3 {
public:
const FLOAT_TYPE d3;
Get_dynamics_dim1_U1_D0_d3(const FLOAT_TYPE d3) : d3(d3) {}
template<typename Tuple>
__host__ __device__
FLOAT_TYPE operator()(const Tuple &v) const
{
const FLOAT_TYPE sx = thrust::get<0>(v);
const FLOAT_TYPE st = thrust::get<1>(v);
const FLOAT_TYPE u1v = thrust::get<2>(v);
const FLOAT_TYPE d0v = thrust::get<3>(v);
// Accumulate left-to-right to keep the original FP evaluation order.
FLOAT_TYPE r = d0v * sin_float_type<FLOAT_TYPE>(st);
r -= u1v * sx;
r += d3;
return r;
}
};
// Functor: dynamics dimension 2 with all operands grid-valued.
// Tuple layout: <0> u1, <1> d1, <2> d4. Result: d1 - u1 + d4.
struct Get_dynamics_dim2_U1_D1_D4 {
public:
template<typename Tuple>
__host__ __device__
FLOAT_TYPE operator()(const Tuple &v) const
{
// Left-to-right accumulation preserves the original FP evaluation order.
FLOAT_TYPE r = thrust::get<1>(v) - thrust::get<0>(v);
r += thrust::get<2>(v);
return r;
}
};
// Functor: dynamics dimension 2 with d4 as a host scalar. Result: d1 - u1 + d4.
struct Get_dynamics_dim2_U1_D1_d4 {
public:
const FLOAT_TYPE d4;
Get_dynamics_dim2_U1_D1_d4(const FLOAT_TYPE d4) : d4(d4) {}
__host__ __device__
FLOAT_TYPE operator()(const FLOAT_TYPE u1, const FLOAT_TYPE d1) const
{
FLOAT_TYPE r = d1 - u1;
r += d4;
return r;
}
};
// Functor: dynamics dimension 2 with d1 as a host scalar. Result: d1 - u1 + d4.
struct Get_dynamics_dim2_U1_d1_D4 {
public:
const FLOAT_TYPE d1;
Get_dynamics_dim2_U1_d1_D4(const FLOAT_TYPE d1) : d1(d1) {}
__host__ __device__
FLOAT_TYPE operator()(const FLOAT_TYPE u1, const FLOAT_TYPE d4) const
{
FLOAT_TYPE r = d1 - u1;
r += d4;
return r;
}
};
// Functor: dynamics dimension 2 with d1 and d4 as host scalars.
// Result: d1 - u1 + d4.
struct Get_dynamics_dim2_U1_d1_d4 {
public:
const FLOAT_TYPE d1;
const FLOAT_TYPE d4;
Get_dynamics_dim2_U1_d1_d4(const FLOAT_TYPE d1, const FLOAT_TYPE d4) : d1(d1), d4(d4) {}
__host__ __device__
FLOAT_TYPE operator()(const FLOAT_TYPE u1) const
{
FLOAT_TYPE r = d1 - u1;
r += d4;
return r;
}
};
// Evaluate all three dimensions of the PlaneCAvoid dynamics in one pass
// (CUDA build).
//
// dx_uvecs [out] dx_uvecs[0..2] receive dx0/dx1/dx2.
// x_uvecs  [in]  state grids (x0, x1, x2).
// u_uvecs  [in]  optimal control samples (u_uvecs[0], u_uvecs[1]).
// d_uvecs  [in]  optimal disturbance samples (d_uvecs[0]..d_uvecs[4]).
//
// Returns false when the mandatory GPU inputs (u_uvecs[1], d_uvecs[0]) are
// not device vectors. Other non-GPU entries are treated as host scalars and
// folded into the functor as constants.
bool dynamics_cell_helper_execute_cuda_dimAll(
std::vector<beacls::UVec>& dx_uvecs,
const std::vector<beacls::UVec>& x_uvecs,
const std::vector<beacls::UVec>& u_uvecs,
const std::vector<beacls::UVec>& d_uvecs
) {
bool result = true;
// Validate the mandatory GPU inputs up front. Previously this function
// device_pointer_cast the buffers and synchronized their streams before
// checking is_cuda; casting a host pointer to a device_ptr and syncing on
// the error path is pointless work, so bail out first.
if (!(beacls::is_cuda(u_uvecs[1]) && beacls::is_cuda(d_uvecs[0]))) {
std::cerr << __FILE__ << ":" << __LINE__ << ":" << __FUNCTION__ << " Invalid data size" << std::endl;
return false;
}
beacls::reallocateAsSrc(dx_uvecs[0], x_uvecs[2]);
beacls::reallocateAsSrc(dx_uvecs[1], x_uvecs[2]);
beacls::reallocateAsSrc(dx_uvecs[2], u_uvecs[1]);
FLOAT_TYPE* dx_dim0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr();
FLOAT_TYPE* dx_dim1_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr();
FLOAT_TYPE* dx_dim2_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[2]).ptr();
const FLOAT_TYPE* uOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[0]).ptr();
const FLOAT_TYPE* uOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[1]).ptr();
const FLOAT_TYPE* dOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[0]).ptr();
const FLOAT_TYPE* dOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[1]).ptr();
const FLOAT_TYPE* dOpt2_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[2]).ptr();
const FLOAT_TYPE* dOpt3_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[3]).ptr();
const FLOAT_TYPE* dOpt4_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[4]).ptr();
const FLOAT_TYPE* x0_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[0]).ptr();
const FLOAT_TYPE* x1_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[1]).ptr();
const FLOAT_TYPE* x2_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[2]).ptr();
thrust::device_ptr<FLOAT_TYPE> dx_dim0_dev_ptr = thrust::device_pointer_cast(dx_dim0_ptr);
thrust::device_ptr<FLOAT_TYPE> dx_dim1_dev_ptr = thrust::device_pointer_cast(dx_dim1_ptr);
thrust::device_ptr<FLOAT_TYPE> dx_dim2_dev_ptr = thrust::device_pointer_cast(dx_dim2_ptr);
// All three outputs are produced on dx_uvecs[0]'s stream.
cudaStream_t dx_stream = beacls::get_stream(dx_uvecs[0]);
dx_uvecs[1].set_cudaStream(dx_uvecs[0].get_cudaStream());
dx_uvecs[2].set_cudaStream(dx_uvecs[0].get_cudaStream());
thrust::device_ptr<const FLOAT_TYPE> us_1_dev_ptr = thrust::device_pointer_cast(uOpt1_ptr);
thrust::device_ptr<const FLOAT_TYPE> ds_0_dev_ptr = thrust::device_pointer_cast(dOpt0_ptr);
// Wait for the asynchronous producers of the GPU inputs before consuming.
beacls::synchronizeUVec(u_uvecs[1]);
beacls::synchronizeUVec(d_uvecs[0]);
thrust::device_ptr<const FLOAT_TYPE> x0_dev_ptr = thrust::device_pointer_cast(x0_ptr);
thrust::device_ptr<const FLOAT_TYPE> x1_dev_ptr = thrust::device_pointer_cast(x1_ptr);
thrust::device_ptr<const FLOAT_TYPE> x2_dev_ptr = thrust::device_pointer_cast(x2_ptr);
if (beacls::is_cuda(u_uvecs[0]) && beacls::is_cuda(d_uvecs[2]) && beacls::is_cuda(d_uvecs[3]) && beacls::is_cuda(d_uvecs[1]) && beacls::is_cuda(d_uvecs[4])){
// Every operand is grid-valued: fuse dims 0 and 1 in one zipped pass,
// then compute dim 2 separately.
thrust::device_ptr<const FLOAT_TYPE> us_0_dev_ptr = thrust::device_pointer_cast(uOpt0_ptr);
thrust::device_ptr<const FLOAT_TYPE> ds_1_dev_ptr = thrust::device_pointer_cast(dOpt1_ptr);
thrust::device_ptr<const FLOAT_TYPE> ds_2_dev_ptr = thrust::device_pointer_cast(dOpt2_ptr);
thrust::device_ptr<const FLOAT_TYPE> ds_3_dev_ptr = thrust::device_pointer_cast(dOpt3_ptr);
thrust::device_ptr<const FLOAT_TYPE> ds_4_dev_ptr = thrust::device_pointer_cast(dOpt4_ptr);
auto dst_src_Tuple = thrust::make_tuple(
dx_dim0_dev_ptr, dx_dim1_dev_ptr,
x0_dev_ptr, x1_dev_ptr, x2_dev_ptr,
us_0_dev_ptr, us_1_dev_ptr,
ds_0_dev_ptr, ds_2_dev_ptr, ds_3_dev_ptr);
auto dst_src_Iterator = thrust::make_zip_iterator(dst_src_Tuple);
thrust::for_each(thrust::cuda::par.on(dx_stream),
dst_src_Iterator, dst_src_Iterator + x_uvecs[0].size(),
Get_dynamics_dim01_U0_U1_D0_D2_D3());
//!< thrust::tuple is limited to 10 template arguments, therefore divide the work into 2 thrust calls.
auto src2_Tuple = thrust::make_tuple(us_1_dev_ptr, ds_1_dev_ptr, ds_4_dev_ptr);
auto src2_Iterator = thrust::make_zip_iterator(src2_Tuple);
thrust::transform(thrust::cuda::par.on(dx_stream),
src2_Iterator, src2_Iterator + u_uvecs[1].size(), dx_dim2_dev_ptr,
Get_dynamics_dim2_U1_D1_D4());
}
else {
// Some operands are host scalars: read element 0 on the host and bake
// them into a single fused functor covering all three dimensions.
const FLOAT_TYPE u0 = uOpt0_ptr[0];
const FLOAT_TYPE d1 = dOpt1_ptr[0];
const FLOAT_TYPE d2 = dOpt2_ptr[0];
const FLOAT_TYPE d3 = dOpt3_ptr[0];
const FLOAT_TYPE d4 = dOpt4_ptr[0];
auto dst_src_Tuple = thrust::make_tuple(
dx_dim0_dev_ptr, dx_dim1_dev_ptr, dx_dim2_dev_ptr,
x0_dev_ptr, x1_dev_ptr, x2_dev_ptr,
us_1_dev_ptr, ds_0_dev_ptr);
auto dst_src_Iterator = thrust::make_zip_iterator(dst_src_Tuple);
thrust::for_each(thrust::cuda::par.on(dx_stream),
dst_src_Iterator, dst_src_Iterator + x_uvecs[0].size(),
Get_dynamics_dimAll_u0_U1_D0_d1_d2_d3_d4(u0, d1, d2, d3, d4));
}
return result;
}
bool dynamics_cell_helper_execute_cuda(
beacls::UVec& dx_uvec,
const std::vector<beacls::UVec>& x_uvecs,
const std::vector<beacls::UVec>& u_uvecs,
const std::vector<beacls::UVec>& d_uvecs,
const size_t dim
) {
bool result = true;
switch (dim) {
case 0:
if (beacls::is_cuda(u_uvecs[1]) && beacls::is_cuda(d_uvecs[0])) {
beacls::reallocateAsSrc(dx_uvec, x_uvecs[2]);
FLOAT_TYPE* dx_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvec).ptr();
cudaStream_t dx_stream = beacls::get_stream(dx_uvec);
const FLOAT_TYPE* x1_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[1]).ptr();
const FLOAT_TYPE* x2_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[2]).ptr();
const FLOAT_TYPE* uOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[0]).ptr();
const FLOAT_TYPE* uOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[1]).ptr();
const FLOAT_TYPE* dOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[0]).ptr();
const FLOAT_TYPE* dOpt2_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[2]).ptr();
thrust::device_ptr<FLOAT_TYPE> dx_dim_dev_ptr = thrust::device_pointer_cast(dx_dim_ptr);
thrust::device_ptr<const FLOAT_TYPE> x1_dev_ptr = thrust::device_pointer_cast(x1_ptr);
thrust::device_ptr<const FLOAT_TYPE> x2_dev_ptr = thrust::device_pointer_cast(x2_ptr);
thrust::device_ptr<const FLOAT_TYPE> us_1_dev_ptr = thrust::device_pointer_cast(uOpt1_ptr);
thrust::device_ptr<const FLOAT_TYPE> ds_0_dev_ptr = thrust::device_pointer_cast(dOpt0_ptr);
beacls::synchronizeUVec(u_uvecs[1]);
beacls::synchronizeUVec(d_uvecs[0]);
if (beacls::is_cuda(u_uvecs[0]) && beacls::is_cuda(d_uvecs[2])) {
thrust::device_ptr<const FLOAT_TYPE> us_0_dev_ptr = thrust::device_pointer_cast(uOpt0_ptr);
thrust::device_ptr<const FLOAT_TYPE> ds_2_dev_ptr = thrust::device_pointer_cast(dOpt2_ptr);
auto src0_Tuple = thrust::make_tuple(
x1_dev_ptr, x2_dev_ptr,
us_0_dev_ptr, us_1_dev_ptr, ds_0_dev_ptr, ds_2_dev_ptr);
auto src0_Iterator = thrust::make_zip_iterator(src0_Tuple);
thrust::transform(thrust::cuda::par.on(dx_stream),
src0_Iterator, src0_Iterator + x_uvecs[1].size(), dx_dim_dev_ptr,
Get_dynamics_dim0_U0_U1_D0_D2());
}
else {
const FLOAT_TYPE u0 = uOpt0_ptr[0];
const FLOAT_TYPE d2 = dOpt2_ptr[0];
auto src0_Tuple = thrust::make_tuple(
x1_dev_ptr, x2_dev_ptr,
us_1_dev_ptr, ds_0_dev_ptr);
auto src0_Iterator = thrust::make_zip_iterator(src0_Tuple);
thrust::transform(thrust::cuda::par.on(dx_stream),
src0_Iterator, src0_Iterator + x_uvecs[1].size(), dx_dim_dev_ptr,
Get_dynamics_dim0_u0_U1_D0_d2(u0, d2));
}
}
else {
std::cerr << __FILE__ << ":" << __LINE__ << ":" << __FUNCTION__ << " Invalid data size" << std::endl;
result = false;
}
break;
case 1:
if (beacls::is_cuda(u_uvecs[1]) && beacls::is_cuda(d_uvecs[0])) {
beacls::reallocateAsSrc(dx_uvec, x_uvecs[2]);
FLOAT_TYPE* dx_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvec).ptr();
cudaStream_t dx_stream = beacls::get_stream(dx_uvec);
const FLOAT_TYPE* x0_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[0]).ptr();
const FLOAT_TYPE* x2_ptr = beacls::UVec_<FLOAT_TYPE>(x_uvecs[2]).ptr();
const FLOAT_TYPE* uOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[1]).ptr();
const FLOAT_TYPE* dOpt0_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[0]).ptr();
const FLOAT_TYPE* dOpt3_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[3]).ptr();
thrust::device_ptr<FLOAT_TYPE> dx_dim_dev_ptr = thrust::device_pointer_cast(dx_dim_ptr);
thrust::device_ptr<const FLOAT_TYPE> x0_dev_ptr = thrust::device_pointer_cast(x0_ptr);
thrust::device_ptr<const FLOAT_TYPE> x2_dev_ptr = thrust::device_pointer_cast(x2_ptr);
thrust::device_ptr<const FLOAT_TYPE> us_1_dev_ptr = thrust::device_pointer_cast(uOpt1_ptr);
thrust::device_ptr<const FLOAT_TYPE> ds_0_dev_ptr = thrust::device_pointer_cast(dOpt0_ptr);
beacls::synchronizeUVec(u_uvecs[1]);
beacls::synchronizeUVec(d_uvecs[0]);
if (beacls::is_cuda(d_uvecs[3])) {
thrust::device_ptr<const FLOAT_TYPE> ds_3_dev_ptr = thrust::device_pointer_cast(dOpt3_ptr);
auto src1_Tuple = thrust::make_tuple(x0_dev_ptr, x2_dev_ptr, us_1_dev_ptr, ds_0_dev_ptr, ds_3_dev_ptr);
auto src1_Iterator = thrust::make_zip_iterator(src1_Tuple);
thrust::transform(thrust::cuda::par.on(dx_stream),
src1_Iterator, src1_Iterator + x_uvecs[0].size(), dx_dim_dev_ptr,
Get_dynamics_dim1_U1_D0_D3());
}
else {
const FLOAT_TYPE d3 = dOpt3_ptr[0];
auto src1_Tuple = thrust::make_tuple(x0_dev_ptr, x2_dev_ptr, us_1_dev_ptr, ds_0_dev_ptr);
auto src1_Iterator = thrust::make_zip_iterator(src1_Tuple);
thrust::transform(thrust::cuda::par.on(dx_stream),
src1_Iterator, src1_Iterator + x_uvecs[0].size(), dx_dim_dev_ptr,
Get_dynamics_dim1_U1_D0_d3(d3));
}
}
else {
std::cerr << __FILE__ << ":" << __LINE__ << ":" << __FUNCTION__ << " Invalid data size" << std::endl;
result = false;
}
break;
case 2:
if (beacls::is_cuda(u_uvecs[1])) {
beacls::reallocateAsSrc(dx_uvec, u_uvecs[1]);
FLOAT_TYPE* dx_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvec).ptr();
cudaStream_t dx_stream = beacls::get_stream(dx_uvec);
const FLOAT_TYPE* uOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(u_uvecs[1]).ptr();
const FLOAT_TYPE* dOpt1_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[1]).ptr();
const FLOAT_TYPE* dOpt4_ptr = beacls::UVec_<FLOAT_TYPE>(d_uvecs[4]).ptr();
thrust::device_ptr<FLOAT_TYPE> dx_dim_dev_ptr = thrust::device_pointer_cast(dx_dim_ptr);
thrust::device_ptr<const FLOAT_TYPE> us_1_dev_ptr = thrust::device_pointer_cast(uOpt1_ptr);
beacls::synchronizeUVec(u_uvecs[1]);
if (beacls::is_cuda(d_uvecs[1]) && beacls::is_cuda(d_uvecs[4])) {
thrust::device_ptr<const FLOAT_TYPE> ds_1_dev_ptr = thrust::device_pointer_cast(dOpt1_ptr);
thrust::device_ptr<const FLOAT_TYPE> ds_4_dev_ptr = thrust::device_pointer_cast(dOpt4_ptr);
beacls::synchronizeUVec(d_uvecs[1]);
auto src2_Tuple = thrust::make_tuple(us_1_dev_ptr, ds_1_dev_ptr, ds_4_dev_ptr);
auto src2_Iterator = thrust::make_zip_iterator(src2_Tuple);
thrust::transform(thrust::cuda::par.on(dx_stream),
src2_Iterator, src2_Iterator + u_uvecs[1].size(), dx_dim_dev_ptr,
Get_dynamics_dim2_U1_D1_D4());
}
else {
const FLOAT_TYPE d1 = dOpt1_ptr[0];
const FLOAT_TYPE d4 = dOpt4_ptr[0];
thrust::transform(thrust::cuda::par.on(dx_stream),
us_1_dev_ptr, us_1_dev_ptr + u_uvecs[1].size(), dx_dim_dev_ptr,
Get_dynamics_dim2_U1_d1_d4(d1, d4));
}
}
else {
std::cerr << __FILE__ << ":" << __LINE__ << ":" << __FUNCTION__ << " Invalid data size" << std::endl;
result = false;
}
break;
default:
std::cerr << "Only dimension 1-4 are defined for dynamics of PlaneCAvoid!" << std::endl;
result = false;
break;
}
return result;
}
};
#endif /* defined(USER_DEFINED_GPU_DYNSYS_FUNC) */
#endif /* defined(WITH_GPU) */
|
row_partitioner.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2022 XGBoost contributors
*/
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/sequence.h>
#include <vector>
#include "../../common/device_helpers.cuh"
#include "row_partitioner_hip.cuh"
namespace xgboost {
namespace tree {
RowPartitioner::RowPartitioner(int device_idx, size_t num_rows)
: device_idx_(device_idx), ridx_(num_rows), ridx_tmp_(num_rows) {
dh::safe_cuda(hipSetDevice(device_idx_));
ridx_segments_.emplace_back(NodePositionInfo{Segment(0, num_rows)});
thrust::sequence(thrust::device, ridx_.data(), ridx_.data() + ridx_.size());
dh::safe_cuda(hipStreamCreate(&stream_));
}
RowPartitioner::~RowPartitioner() {
dh::safe_cuda(hipSetDevice(device_idx_));
dh::safe_cuda(hipStreamDestroy(stream_));
}
common::Span<const RowPartitioner::RowIndexT> RowPartitioner::GetRows(bst_node_t nidx) {
auto segment = ridx_segments_.at(nidx).segment;
return dh::ToSpan(ridx_).subspan(segment.begin, segment.Size());
}
common::Span<const RowPartitioner::RowIndexT> RowPartitioner::GetRows() {
return dh::ToSpan(ridx_);
}
std::vector<RowPartitioner::RowIndexT> RowPartitioner::GetRowsHost(bst_node_t nidx) {
auto span = GetRows(nidx);
std::vector<RowIndexT> rows(span.size());
dh::CopyDeviceSpanToVector(&rows, span);
return rows;
}
}; // namespace tree
}; // namespace xgboost
| row_partitioner.cuh | /*!
* Copyright 2017-2022 XGBoost contributors
*/
#pragma once
#include <thrust/execution_policy.h>
#include <limits>
#include <vector>
#include "../../common/device_helpers.cuh"
#include "xgboost/base.h"
#include "xgboost/context.h"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
/** \brief Used to demarcate a contiguous set of row indices associated with
* some tree node. */
struct Segment {
bst_uint begin{0};
bst_uint end{0};
Segment() = default;
Segment(bst_uint begin, bst_uint end) : begin(begin), end(end) { CHECK_GE(end, begin); }
__host__ __device__ size_t Size() const { return end - begin; }
};
// TODO(Rory): Can be larger. To be tuned alongside other batch operations.
static const int kMaxUpdatePositionBatchSize = 32;
template <typename OpDataT>
struct PerNodeData {
Segment segment;
OpDataT data;
};
template <typename BatchIterT>
__device__ __forceinline__ void AssignBatch(BatchIterT batch_info, std::size_t global_thread_idx,
int* batch_idx, std::size_t* item_idx) {
bst_uint sum = 0;
for (int i = 0; i < kMaxUpdatePositionBatchSize; i++) {
if (sum + batch_info[i].segment.Size() > global_thread_idx) {
*batch_idx = i;
*item_idx = (global_thread_idx - sum) + batch_info[i].segment.begin;
break;
}
sum += batch_info[i].segment.Size();
}
}
template <int kBlockSize, typename RowIndexT, typename OpDataT>
__global__ __launch_bounds__(kBlockSize) void SortPositionCopyKernel(
dh::LDGIterator<PerNodeData<OpDataT>> batch_info, common::Span<RowIndexT> d_ridx,
const common::Span<const RowIndexT> ridx_tmp, std::size_t total_rows) {
for (auto idx : dh::GridStrideRange<std::size_t>(0, total_rows)) {
int batch_idx;
std::size_t item_idx;
AssignBatch(batch_info, idx, &batch_idx, &item_idx);
d_ridx[item_idx] = ridx_tmp[item_idx];
}
}
// We can scan over this tuple, where the scan gives us information on how to partition inputs
// according to the flag
struct IndexFlagTuple {
bst_uint idx; // The location of the item we are working on in ridx_
bst_uint flag_scan; // This gets populated after scanning
int batch_idx; // Which node in the batch does this item belong to
bool flag; // Result of op (is this item going left?)
};
struct IndexFlagOp {
__device__ IndexFlagTuple operator()(const IndexFlagTuple& a, const IndexFlagTuple& b) const {
// Segmented scan - resets if we cross batch boundaries
if (a.batch_idx == b.batch_idx) {
// Accumulate the flags, everything else stays the same
return {b.idx, a.flag_scan + b.flag_scan, b.batch_idx, b.flag};
} else {
return b;
}
}
};
template <typename OpDataT>
struct WriteResultsFunctor {
dh::LDGIterator<PerNodeData<OpDataT>> batch_info;
const bst_uint* ridx_in;
bst_uint* ridx_out;
bst_uint* counts;
__device__ IndexFlagTuple operator()(const IndexFlagTuple& x) {
std::size_t scatter_address;
const Segment& segment = batch_info[x.batch_idx].segment;
if (x.flag) {
bst_uint num_previous_flagged = x.flag_scan - 1; // -1 because inclusive scan
scatter_address = segment.begin + num_previous_flagged;
} else {
bst_uint num_previous_unflagged = (x.idx - segment.begin) - x.flag_scan;
scatter_address = segment.end - num_previous_unflagged - 1;
}
ridx_out[scatter_address] = ridx_in[x.idx];
if (x.idx == (segment.end - 1)) {
// Write out counts
counts[x.batch_idx] = x.flag_scan;
}
// Discard
return {};
}
};
template <typename RowIndexT, typename OpT, typename OpDataT>
void SortPositionBatch(common::Span<const PerNodeData<OpDataT>> d_batch_info,
common::Span<RowIndexT> ridx, common::Span<RowIndexT> ridx_tmp,
common::Span<bst_uint> d_counts, std::size_t total_rows, OpT op,
dh::device_vector<int8_t>* tmp, cudaStream_t stream) {
dh::LDGIterator<PerNodeData<OpDataT>> batch_info_itr(d_batch_info.data());
WriteResultsFunctor<OpDataT> write_results{batch_info_itr, ridx.data(), ridx_tmp.data(),
d_counts.data()};
auto discard_write_iterator =
thrust::make_transform_output_iterator(dh::TypedDiscard<IndexFlagTuple>(), write_results);
auto counting = thrust::make_counting_iterator(0llu);
auto input_iterator =
dh::MakeTransformIterator<IndexFlagTuple>(counting, [=] __device__(size_t idx) {
int batch_idx;
std::size_t item_idx;
AssignBatch(batch_info_itr, idx, &batch_idx, &item_idx);
auto op_res = op(ridx[item_idx], batch_info_itr[batch_idx].data);
return IndexFlagTuple{bst_uint(item_idx), op_res, batch_idx, op_res};
});
size_t temp_bytes = 0;
if (tmp->empty()) {
cub::DeviceScan::InclusiveScan(nullptr, temp_bytes, input_iterator, discard_write_iterator,
IndexFlagOp(), total_rows, stream);
tmp->resize(temp_bytes);
}
temp_bytes = tmp->size();
cub::DeviceScan::InclusiveScan(tmp->data().get(), temp_bytes, input_iterator,
discard_write_iterator, IndexFlagOp(), total_rows, stream);
constexpr int kBlockSize = 256;
// Value found by experimentation
const int kItemsThread = 12;
const int grid_size = xgboost::common::DivRoundUp(total_rows, kBlockSize * kItemsThread);
SortPositionCopyKernel<kBlockSize, RowIndexT, OpDataT>
<<<grid_size, kBlockSize, 0, stream>>>(batch_info_itr, ridx, ridx_tmp, total_rows);
}
struct NodePositionInfo {
Segment segment;
bst_node_t left_child = -1;
bst_node_t right_child = -1;
__device__ bool IsLeaf() { return left_child == -1; }
};
__device__ __forceinline__ int GetPositionFromSegments(std::size_t idx,
const NodePositionInfo* d_node_info) {
int position = 0;
NodePositionInfo node = d_node_info[position];
while (!node.IsLeaf()) {
NodePositionInfo left = d_node_info[node.left_child];
NodePositionInfo right = d_node_info[node.right_child];
if (idx >= left.segment.begin && idx < left.segment.end) {
position = node.left_child;
node = left;
} else if (idx >= right.segment.begin && idx < right.segment.end) {
position = node.right_child;
node = right;
} else {
KERNEL_CHECK(false);
}
}
return position;
}
template <int kBlockSize, typename RowIndexT, typename OpT>
__global__ __launch_bounds__(kBlockSize) void FinalisePositionKernel(
const common::Span<const NodePositionInfo> d_node_info,
const common::Span<const RowIndexT> d_ridx, common::Span<bst_node_t> d_out_position, OpT op) {
for (auto idx : dh::GridStrideRange<std::size_t>(0, d_ridx.size())) {
auto position = GetPositionFromSegments(idx, d_node_info.data());
RowIndexT ridx = d_ridx[idx];
bst_node_t new_position = op(ridx, position);
d_out_position[ridx] = new_position;
}
}
/** \brief Class responsible for tracking subsets of rows as we add splits and
* partition training rows into different leaf nodes. */
class RowPartitioner {
public:
using RowIndexT = bst_uint;
static constexpr bst_node_t kIgnoredTreePosition = -1;
private:
int device_idx_;
/*! \brief In here if you want to find the rows belong to a node nid, first you need to
* get the indices segment from ridx_segments[nid], then get the row index that
* represents position of row in input data X. `RowPartitioner::GetRows` would be a
* good starting place to get a sense what are these vector storing.
*
* node id -> segment -> indices of rows belonging to node
*/
/*! \brief Range of row index for each node, pointers into ridx below. */
std::vector<NodePositionInfo> ridx_segments_;
/*! \brief mapping for node id -> rows.
* This looks like:
* node id | 1 | 2 |
* rows idx | 3, 5, 1 | 13, 31 |
*/
dh::TemporaryArray<RowIndexT> ridx_;
// Staging area for sorting ridx
dh::TemporaryArray<RowIndexT> ridx_tmp_;
dh::device_vector<int8_t> tmp_;
dh::PinnedMemory pinned_;
dh::PinnedMemory pinned2_;
cudaStream_t stream_;
public:
RowPartitioner(int device_idx, size_t num_rows);
~RowPartitioner();
RowPartitioner(const RowPartitioner&) = delete;
RowPartitioner& operator=(const RowPartitioner&) = delete;
/**
* \brief Gets the row indices of training instances in a given node.
*/
common::Span<const RowIndexT> GetRows(bst_node_t nidx);
/**
* \brief Gets all training rows in the set.
*/
common::Span<const RowIndexT> GetRows();
/**
* \brief Convenience method for testing
*/
std::vector<RowIndexT> GetRowsHost(bst_node_t nidx);
/**
* \brief Updates the tree position for set of training instances being split
* into left and right child nodes. Accepts a user-defined lambda specifying
* which branch each training instance should go down.
*
* \tparam UpdatePositionOpT
* \tparam OpDataT
* \param nidx The index of the nodes being split.
* \param left_nidx The left child indices.
* \param right_nidx The right child indices.
* \param op_data User-defined data provided as the second argument to op
* \param op Device lambda with the row index as the first argument and op_data as the
* second. Returns true if this training instance goes on the left partition.
*/
template <typename UpdatePositionOpT, typename OpDataT>
void UpdatePositionBatch(const std::vector<bst_node_t>& nidx,
const std::vector<bst_node_t>& left_nidx,
const std::vector<bst_node_t>& right_nidx,
const std::vector<OpDataT>& op_data, UpdatePositionOpT op) {
if (nidx.empty()) return;
CHECK_EQ(nidx.size(), left_nidx.size());
CHECK_EQ(nidx.size(), right_nidx.size());
CHECK_EQ(nidx.size(), op_data.size());
auto h_batch_info = pinned2_.GetSpan<PerNodeData<OpDataT>>(nidx.size());
dh::TemporaryArray<PerNodeData<OpDataT>> d_batch_info(nidx.size());
std::size_t total_rows = 0;
for (size_t i = 0; i < nidx.size(); i++) {
h_batch_info[i] = {ridx_segments_.at(nidx.at(i)).segment, op_data.at(i)};
total_rows += ridx_segments_.at(nidx.at(i)).segment.Size();
}
dh::safe_cuda(cudaMemcpyAsync(d_batch_info.data().get(), h_batch_info.data(),
h_batch_info.size() * sizeof(PerNodeData<OpDataT>),
cudaMemcpyDefault, stream_));
// Temporary arrays
auto h_counts = pinned_.GetSpan<bst_uint>(nidx.size(), 0);
dh::TemporaryArray<bst_uint> d_counts(nidx.size(), 0);
// Partition the rows according to the operator
SortPositionBatch<RowIndexT, UpdatePositionOpT, OpDataT>(
dh::ToSpan(d_batch_info), dh::ToSpan(ridx_), dh::ToSpan(ridx_tmp_), dh::ToSpan(d_counts),
total_rows, op, &tmp_, stream_);
dh::safe_cuda(cudaMemcpyAsync(h_counts.data(), d_counts.data().get(), h_counts.size_bytes(),
cudaMemcpyDefault, stream_));
// TODO(Rory): this synchronisation hurts performance a lot
// Future optimisation should find a way to skip this
dh::safe_cuda(cudaStreamSynchronize(stream_));
// Update segments
for (size_t i = 0; i < nidx.size(); i++) {
auto segment = ridx_segments_.at(nidx[i]).segment;
auto left_count = h_counts[i];
CHECK_LE(left_count, segment.Size());
ridx_segments_.resize(std::max(static_cast<bst_node_t>(ridx_segments_.size()),
std::max(left_nidx[i], right_nidx[i]) + 1));
ridx_segments_[nidx[i]] = NodePositionInfo{segment, left_nidx[i], right_nidx[i]};
ridx_segments_[left_nidx[i]] =
NodePositionInfo{Segment(segment.begin, segment.begin + left_count)};
ridx_segments_[right_nidx[i]] =
NodePositionInfo{Segment(segment.begin + left_count, segment.end)};
}
}
/**
* \brief Finalise the position of all training instances after tree construction is
* complete. Does not update any other meta information in this data structure, so
* should only be used at the end of training.
*
* When the task requires update leaf, this function will copy the node index into
* p_out_position. The index is negated if it's being sampled in current iteration.
*
* \param p_out_position Node index for each row.
* \param op Device lambda. Should provide the row index and current position as an
* argument and return the new position for this training instance.
* \param sampled A device lambda to inform the partitioner whether a row is sampled.
*/
template <typename FinalisePositionOpT>
void FinalisePosition(common::Span<bst_node_t> d_out_position, FinalisePositionOpT op) {
dh::TemporaryArray<NodePositionInfo> d_node_info_storage(ridx_segments_.size());
dh::safe_cuda(cudaMemcpyAsync(d_node_info_storage.data().get(), ridx_segments_.data(),
sizeof(NodePositionInfo) * ridx_segments_.size(),
cudaMemcpyDefault, stream_));
constexpr int kBlockSize = 512;
const int kItemsThread = 8;
const int grid_size = xgboost::common::DivRoundUp(ridx_.size(), kBlockSize * kItemsThread);
common::Span<const RowIndexT> d_ridx(ridx_.data().get(), ridx_.size());
FinalisePositionKernel<kBlockSize><<<grid_size, kBlockSize, 0, stream_>>>(
dh::ToSpan(d_node_info_storage), d_ridx, d_out_position, op);
}
};
}; // namespace tree
}; // namespace xgboost
|
row_partitioner.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2022 XGBoost contributors
*/
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/sequence.h>
#include <vector>
#include "../../common/device_helpers.cuh"
#include "row_partitioner_hip.cuh"
namespace xgboost {
namespace tree {
RowPartitioner::RowPartitioner(int device_idx, size_t num_rows)
: device_idx_(device_idx), ridx_(num_rows), ridx_tmp_(num_rows) {
dh::safe_cuda(hipSetDevice(device_idx_));
ridx_segments_.emplace_back(NodePositionInfo{Segment(0, num_rows)});
thrust::sequence(thrust::device, ridx_.data(), ridx_.data() + ridx_.size());
dh::safe_cuda(hipStreamCreate(&stream_));
}
RowPartitioner::~RowPartitioner() {
dh::safe_cuda(hipSetDevice(device_idx_));
dh::safe_cuda(hipStreamDestroy(stream_));
}
common::Span<const RowPartitioner::RowIndexT> RowPartitioner::GetRows(bst_node_t nidx) {
auto segment = ridx_segments_.at(nidx).segment;
return dh::ToSpan(ridx_).subspan(segment.begin, segment.Size());
}
common::Span<const RowPartitioner::RowIndexT> RowPartitioner::GetRows() {
return dh::ToSpan(ridx_);
}
std::vector<RowPartitioner::RowIndexT> RowPartitioner::GetRowsHost(bst_node_t nidx) {
auto span = GetRows(nidx);
std::vector<RowIndexT> rows(span.size());
dh::CopyDeviceSpanToVector(&rows, span);
return rows;
}
}; // namespace tree
}; // namespace xgboost
| row_partitioner.cu | /*!
* Copyright 2017-2022 XGBoost contributors
*/
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/sequence.h>
#include <vector>
#include "../../common/device_helpers.cuh"
#include "row_partitioner.cuh"
namespace xgboost {
namespace tree {
RowPartitioner::RowPartitioner(int device_idx, size_t num_rows)
: device_idx_(device_idx), ridx_(num_rows), ridx_tmp_(num_rows) {
dh::safe_cuda(cudaSetDevice(device_idx_));
ridx_segments_.emplace_back(NodePositionInfo{Segment(0, num_rows)});
thrust::sequence(thrust::device, ridx_.data(), ridx_.data() + ridx_.size());
dh::safe_cuda(cudaStreamCreate(&stream_));
}
RowPartitioner::~RowPartitioner() {
dh::safe_cuda(cudaSetDevice(device_idx_));
dh::safe_cuda(cudaStreamDestroy(stream_));
}
common::Span<const RowPartitioner::RowIndexT> RowPartitioner::GetRows(bst_node_t nidx) {
auto segment = ridx_segments_.at(nidx).segment;
return dh::ToSpan(ridx_).subspan(segment.begin, segment.Size());
}
common::Span<const RowPartitioner::RowIndexT> RowPartitioner::GetRows() {
return dh::ToSpan(ridx_);
}
std::vector<RowPartitioner::RowIndexT> RowPartitioner::GetRowsHost(bst_node_t nidx) {
auto span = GetRows(nidx);
std::vector<RowIndexT> rows(span.size());
dh::CopyDeviceSpanToVector(&rows, span);
return rows;
}
}; // namespace tree
}; // namespace xgboost
|
4638f5b12878cc522044736c0b0094184046709f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file optimizer_op.cu
* \brief Optimizer operators
* \author Leonard Lausen
*/
#include "./optimizer_op-inl.h"
#include <hipcub/hipcub.hpp>
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(_contrib_group_adagrad_update)
.set_attr<FComputeEx>("FComputeEx<gpu>", GroupAdagradUpdateEx<gpu>);
} // namespace op
} // namespace mxnet
| 4638f5b12878cc522044736c0b0094184046709f.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file optimizer_op.cu
* \brief Optimizer operators
* \author Leonard Lausen
*/
#include "./optimizer_op-inl.h"
#include <cub/cub.cuh>
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(_contrib_group_adagrad_update)
.set_attr<FComputeEx>("FComputeEx<gpu>", GroupAdagradUpdateEx<gpu>);
} // namespace op
} // namespace mxnet
|
c083153efbb59763259d22aca83dc323d44ee17d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "CUDACore/cudaCheck.h"
#include "CUDACore/prefixScan.h"
#include "CUDACore/requireDevices.h"
using namespace cms::cuda;
template <typename T>
struct format_traits {
public:
static const constexpr char *failed_msg = "failed %d %d %d: %d %d\n";
};
template <>
struct format_traits<float> {
public:
static const constexpr char *failed_msg = "failed %d %d %d: %f %f\n";
};
template <typename T>
__global__ void testPrefixScan(uint32_t size) {
__shared__ T ws[32];
__shared__ T c[1024];
__shared__ T co[1024];
auto first = threadIdx.x;
for (auto i = first; i < size; i += blockDim.x)
c[i] = 1;
__syncthreads();
blockPrefixScan(c, co, size, ws);
blockPrefixScan(c, size, ws);
assert(1 == c[0]);
assert(1 == co[0]);
for (auto i = first + 1; i < size; i += blockDim.x) {
if (c[i] != c[i - 1] + 1)
printf(format_traits<T>::failed_msg, size, i, blockDim.x, c[i], c[i - 1]);
assert(c[i] == c[i - 1] + 1);
assert(c[i] == i + 1);
assert(c[i] = co[i]);
}
}
template <typename T>
__global__ void testWarpPrefixScan(uint32_t size) {
assert(size <= 32);
__shared__ T c[1024];
__shared__ T co[1024];
auto i = threadIdx.x;
c[i] = 1;
__syncthreads();
warpPrefixScan(c, co, i, 0xffffffff);
warpPrefixScan(c, i, 0xffffffff);
__syncthreads();
assert(1 == c[0]);
assert(1 == co[0]);
if (i != 0) {
if (c[i] != c[i - 1] + 1)
printf(format_traits<T>::failed_msg, size, i, blockDim.x, c[i], c[i - 1]);
assert(c[i] == c[i - 1] + 1);
assert(c[i] == i + 1);
assert(c[i] = co[i]);
}
}
__global__ void init(uint32_t *v, uint32_t val, uint32_t n) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
v[i] = val;
if (i == 0)
printf("init\n");
}
__global__ void verify(uint32_t const *v, uint32_t n) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
assert(v[i] == i + 1);
if (i == 0)
printf("verify\n");
}
int main() {
cms::cudatest::requireDevices();
std::cout << "warp level" << std::endl;
// std::cout << "warp 32" << std::endl;
hipLaunchKernelGGL(( testWarpPrefixScan<int>), dim3(1), dim3(32), 0, 0, 32);
hipDeviceSynchronize();
// std::cout << "warp 16" << std::endl;
hipLaunchKernelGGL(( testWarpPrefixScan<int>), dim3(1), dim3(32), 0, 0, 16);
hipDeviceSynchronize();
// std::cout << "warp 5" << std::endl;
hipLaunchKernelGGL(( testWarpPrefixScan<int>), dim3(1), dim3(32), 0, 0, 5);
hipDeviceSynchronize();
std::cout << "block level" << std::endl;
for (int bs = 32; bs <= 1024; bs += 32) {
// std::cout << "bs " << bs << std::endl;
for (int j = 1; j <= 1024; ++j) {
// std::cout << j << std::endl;
hipLaunchKernelGGL(( testPrefixScan<uint16_t>), dim3(1), dim3(bs), 0, 0, j);
hipDeviceSynchronize();
hipLaunchKernelGGL(( testPrefixScan<float>), dim3(1), dim3(bs), 0, 0, j);
hipDeviceSynchronize();
}
}
hipDeviceSynchronize();
int num_items = 200;
for (int ksize = 1; ksize < 4; ++ksize) {
// test multiblock
std::cout << "multiblok" << std::endl;
// Declare, allocate, and initialize device-accessible pointers for input and output
num_items *= 10;
uint32_t *d_in;
uint32_t *d_out1;
uint32_t *d_out2;
cudaCheck(hipMalloc(&d_in, num_items * sizeof(uint32_t)));
cudaCheck(hipMalloc(&d_out1, num_items * sizeof(uint32_t)));
cudaCheck(hipMalloc(&d_out2, num_items * sizeof(uint32_t)));
auto nthreads = 256;
auto nblocks = (num_items + nthreads - 1) / nthreads;
hipLaunchKernelGGL(( init), dim3(nblocks), dim3(nthreads), 0, 0, d_in, 1, num_items);
// the block counter
int32_t *d_pc;
cudaCheck(hipMalloc(&d_pc, sizeof(int32_t)));
cudaCheck(hipMemset(d_pc, 0, sizeof(int32_t)));
nthreads = 1024;
nblocks = (num_items + nthreads - 1) / nthreads;
std::cout << "launch multiBlockPrefixScan " << num_items << ' ' << nblocks << std::endl;
hipLaunchKernelGGL(( multiBlockPrefixScan), dim3(nblocks), dim3(nthreads), 4 * nblocks, 0, d_in, d_out1, num_items, d_pc);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( verify), dim3(nblocks), dim3(nthreads), 0, 0, d_out1, num_items);
cudaCheck(hipGetLastError());
hipDeviceSynchronize();
} // ksize
return 0;
}
| c083153efbb59763259d22aca83dc323d44ee17d.cu | #include <iostream>
#include "CUDACore/cudaCheck.h"
#include "CUDACore/prefixScan.h"
#include "CUDACore/requireDevices.h"
using namespace cms::cuda;
template <typename T>
struct format_traits {
public:
static const constexpr char *failed_msg = "failed %d %d %d: %d %d\n";
};
template <>
struct format_traits<float> {
public:
static const constexpr char *failed_msg = "failed %d %d %d: %f %f\n";
};
template <typename T>
__global__ void testPrefixScan(uint32_t size) {
__shared__ T ws[32];
__shared__ T c[1024];
__shared__ T co[1024];
auto first = threadIdx.x;
for (auto i = first; i < size; i += blockDim.x)
c[i] = 1;
__syncthreads();
blockPrefixScan(c, co, size, ws);
blockPrefixScan(c, size, ws);
assert(1 == c[0]);
assert(1 == co[0]);
for (auto i = first + 1; i < size; i += blockDim.x) {
if (c[i] != c[i - 1] + 1)
printf(format_traits<T>::failed_msg, size, i, blockDim.x, c[i], c[i - 1]);
assert(c[i] == c[i - 1] + 1);
assert(c[i] == i + 1);
assert(c[i] = co[i]);
}
}
template <typename T>
__global__ void testWarpPrefixScan(uint32_t size) {
assert(size <= 32);
__shared__ T c[1024];
__shared__ T co[1024];
auto i = threadIdx.x;
c[i] = 1;
__syncthreads();
warpPrefixScan(c, co, i, 0xffffffff);
warpPrefixScan(c, i, 0xffffffff);
__syncthreads();
assert(1 == c[0]);
assert(1 == co[0]);
if (i != 0) {
if (c[i] != c[i - 1] + 1)
printf(format_traits<T>::failed_msg, size, i, blockDim.x, c[i], c[i - 1]);
assert(c[i] == c[i - 1] + 1);
assert(c[i] == i + 1);
assert(c[i] = co[i]);
}
}
__global__ void init(uint32_t *v, uint32_t val, uint32_t n) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
v[i] = val;
if (i == 0)
printf("init\n");
}
__global__ void verify(uint32_t const *v, uint32_t n) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
assert(v[i] == i + 1);
if (i == 0)
printf("verify\n");
}
int main() {
cms::cudatest::requireDevices();
std::cout << "warp level" << std::endl;
// std::cout << "warp 32" << std::endl;
testWarpPrefixScan<int><<<1, 32>>>(32);
cudaDeviceSynchronize();
// std::cout << "warp 16" << std::endl;
testWarpPrefixScan<int><<<1, 32>>>(16);
cudaDeviceSynchronize();
// std::cout << "warp 5" << std::endl;
testWarpPrefixScan<int><<<1, 32>>>(5);
cudaDeviceSynchronize();
std::cout << "block level" << std::endl;
for (int bs = 32; bs <= 1024; bs += 32) {
// std::cout << "bs " << bs << std::endl;
for (int j = 1; j <= 1024; ++j) {
// std::cout << j << std::endl;
testPrefixScan<uint16_t><<<1, bs>>>(j);
cudaDeviceSynchronize();
testPrefixScan<float><<<1, bs>>>(j);
cudaDeviceSynchronize();
}
}
cudaDeviceSynchronize();
int num_items = 200;
for (int ksize = 1; ksize < 4; ++ksize) {
// test multiblock
std::cout << "multiblok" << std::endl;
// Declare, allocate, and initialize device-accessible pointers for input and output
num_items *= 10;
uint32_t *d_in;
uint32_t *d_out1;
uint32_t *d_out2;
cudaCheck(cudaMalloc(&d_in, num_items * sizeof(uint32_t)));
cudaCheck(cudaMalloc(&d_out1, num_items * sizeof(uint32_t)));
cudaCheck(cudaMalloc(&d_out2, num_items * sizeof(uint32_t)));
auto nthreads = 256;
auto nblocks = (num_items + nthreads - 1) / nthreads;
init<<<nblocks, nthreads, 0>>>(d_in, 1, num_items);
// the block counter
int32_t *d_pc;
cudaCheck(cudaMalloc(&d_pc, sizeof(int32_t)));
cudaCheck(cudaMemset(d_pc, 0, sizeof(int32_t)));
nthreads = 1024;
nblocks = (num_items + nthreads - 1) / nthreads;
std::cout << "launch multiBlockPrefixScan " << num_items << ' ' << nblocks << std::endl;
multiBlockPrefixScan<<<nblocks, nthreads, 4 * nblocks>>>(d_in, d_out1, num_items, d_pc);
cudaCheck(cudaGetLastError());
verify<<<nblocks, nthreads, 0>>>(d_out1, num_items);
cudaCheck(cudaGetLastError());
cudaDeviceSynchronize();
} // ksize
return 0;
}
|
d2ac53425e92323fc7ce8e31dceeacb963528de4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_4_front;
int xdim0_update_halo_kernel5_minus_4_front_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_4_front;
int ydim0_update_halo_kernel5_minus_4_front_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_4_front;
int xdim1_update_halo_kernel5_minus_4_front_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_4_front;
int ydim1_update_halo_kernel5_minus_4_front_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_minus_4_front*(y)+xdim0_update_halo_kernel5_minus_4_front*ydim0_update_halo_kernel5_minus_4_front*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_minus_4_front*(y)+xdim1_update_halo_kernel5_minus_4_front*ydim1_update_halo_kernel5_minus_4_front*(z))
//user function
__device__
inline void update_halo_kernel5_minus_4_front(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = -vol_flux_z[OPS_ACC0(0,0,-4)];
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = -mass_flux_z[OPS_ACC1(0,0,-4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
// Device wrapper: one thread per (x,y,z) point of the clipped halo range.
// The grid is rounded up to block-size multiples, so out-of-range threads
// simply exit; the __constant__ xdim/ydim symbols give each dat's pitch.
__global__ void ops_update_halo_kernel5_minus_4_front(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
  const int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  const int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  const int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  if (idx_x >= size0 || idx_y >= size1 || idx_z >= size2) return;
  // Advance each base pointer to this thread's element (unit stride in x).
  arg0 += idx_x
        + idx_y * xdim0_update_halo_kernel5_minus_4_front
        + idx_z * xdim0_update_halo_kernel5_minus_4_front * ydim0_update_halo_kernel5_minus_4_front;
  arg1 += idx_x
        + idx_y * xdim1_update_halo_kernel5_minus_4_front
        + idx_z * xdim1_update_halo_kernel5_minus_4_front * ydim1_update_halo_kernel5_minus_4_front;
  update_halo_kernel5_minus_4_front(arg0, arg1, arg2);
}
// host stub function
// Host stub for the update_halo_kernel5_minus_4_front OPS parallel loop
// (auto-generated by ops.py; timing record uses fixed kernel index 123).
//   name       - human-readable kernel name (informational)
//   block      - OPS block the loop iterates over
//   dim        - dimensionality of the block (3 here)
//   range      - iteration range packed as [x_lo,x_hi, y_lo,y_hi, z_lo,z_hi]
//   arg0, arg1 - the two flux dats the kernel updates
//   arg2       - global int array selecting which fields to update
void ops_par_loop_update_halo_kernel5_minus_4_front(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_arg args[3] = { arg0, arg1, arg2};
ops_timing_realloc(123,"update_halo_kernel5_minus_4_front");
OPS_kernels[123].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
// Under MPI, clip the requested global range against this rank's share of
// the decomposed block; ranks owning no part of the loop return early.
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
// Edge ranks keep any negative (halo) extent of the requested range.
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
// Leading dimensions of each dat, needed by the device-side index macros.
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
// Refresh the __constant__ copies of the dat dimensions only when they have
// changed since the last call (constant-memory uploads are comparatively slow).
if (xdim0 != xdim0_update_halo_kernel5_minus_4_front_h || ydim0 != ydim0_update_halo_kernel5_minus_4_front_h || xdim1 != xdim1_update_halo_kernel5_minus_4_front_h || ydim1 != ydim1_update_halo_kernel5_minus_4_front_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel5_minus_4_front, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_minus_4_front_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel5_minus_4_front, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_minus_4_front_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel5_minus_4_front, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_minus_4_front_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel5_minus_4_front, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_minus_4_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
// Stage the global `fields` array in the shared constant-argument buffer
// and upload it to the device.
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
//set up initial pointers
// Byte offset of the first iterated element of each dat, accounting for the
// dat base offset, halo depth (d_m) and, under MPI, the intra-rank
// displacement (d_im).
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
// Ensure device copies are current and halos are exchanged before launch.
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
ops_timers_core(&c1,&t1);
OPS_kernels[123].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel5_minus_4_front), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
// Only synchronize (and hence only time accurately) in diagnostic runs.
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[123].time += t2-t1;
// Mark device data dirty so later host accesses trigger a copy-back.
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[123].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[123].transfer += ops_compute_transfer(dim, range, &arg1);
}
| d2ac53425e92323fc7ce8e31dceeacb963528de4.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_4_front;
int xdim0_update_halo_kernel5_minus_4_front_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_4_front;
int ydim0_update_halo_kernel5_minus_4_front_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_4_front;
int xdim1_update_halo_kernel5_minus_4_front_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_4_front;
int ydim1_update_halo_kernel5_minus_4_front_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_minus_4_front*(y)+xdim0_update_halo_kernel5_minus_4_front*ydim0_update_halo_kernel5_minus_4_front*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_minus_4_front*(y)+xdim1_update_halo_kernel5_minus_4_front*ydim1_update_halo_kernel5_minus_4_front*(z))
//user function
// Mirror (with flipped sign) the plane four cells behind the front face into
// the halo cell, for whichever of the two z-flux fields is enabled in `fields`.
__device__
inline void update_halo_kernel5_minus_4_front(double *vol_flux_z, double *mass_flux_z, const int* fields) {
  const bool want_vol  = (fields[FIELD_VOL_FLUX_Z]  == 1);
  const bool want_mass = (fields[FIELD_MASS_FLUX_Z] == 1);
  if (want_vol)  vol_flux_z[OPS_ACC0(0,0,0)]  = -vol_flux_z[OPS_ACC0(0,0,-4)];
  if (want_mass) mass_flux_z[OPS_ACC1(0,0,0)] = -mass_flux_z[OPS_ACC1(0,0,-4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
// Device wrapper: one thread per (x,y,z) point of the clipped halo range.
// The grid is rounded up to block-size multiples, so out-of-range threads
// simply exit; the __constant__ xdim/ydim symbols give each dat's pitch.
__global__ void ops_update_halo_kernel5_minus_4_front(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
  const int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  const int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  const int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  if (idx_x >= size0 || idx_y >= size1 || idx_z >= size2) return;
  // Advance each base pointer to this thread's element (unit stride in x).
  arg0 += idx_x
        + idx_y * xdim0_update_halo_kernel5_minus_4_front
        + idx_z * xdim0_update_halo_kernel5_minus_4_front * ydim0_update_halo_kernel5_minus_4_front;
  arg1 += idx_x
        + idx_y * xdim1_update_halo_kernel5_minus_4_front
        + idx_z * xdim1_update_halo_kernel5_minus_4_front * ydim1_update_halo_kernel5_minus_4_front;
  update_halo_kernel5_minus_4_front(arg0, arg1, arg2);
}
// host stub function
// Host stub for the update_halo_kernel5_minus_4_front OPS parallel loop
// (auto-generated by ops.py; timing record uses fixed kernel index 123).
//   name       - human-readable kernel name (informational)
//   block      - OPS block the loop iterates over
//   dim        - dimensionality of the block (3 here)
//   range      - iteration range packed as [x_lo,x_hi, y_lo,y_hi, z_lo,z_hi]
//   arg0, arg1 - the two flux dats the kernel updates
//   arg2       - global int array selecting which fields to update
void ops_par_loop_update_halo_kernel5_minus_4_front(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_arg args[3] = { arg0, arg1, arg2};
ops_timing_realloc(123,"update_halo_kernel5_minus_4_front");
OPS_kernels[123].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
// Under MPI, clip the requested global range against this rank's share of
// the decomposed block; ranks owning no part of the loop return early.
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
// Edge ranks keep any negative (halo) extent of the requested range.
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
// Leading dimensions of each dat, needed by the device-side index macros.
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
// Refresh the __constant__ copies of the dat dimensions only when they have
// changed since the last call (constant-memory uploads are comparatively slow).
if (xdim0 != xdim0_update_halo_kernel5_minus_4_front_h || ydim0 != ydim0_update_halo_kernel5_minus_4_front_h || xdim1 != xdim1_update_halo_kernel5_minus_4_front_h || ydim1 != ydim1_update_halo_kernel5_minus_4_front_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel5_minus_4_front, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_minus_4_front_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel5_minus_4_front, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_minus_4_front_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel5_minus_4_front, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_minus_4_front_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel5_minus_4_front, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_minus_4_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
// Stage the global `fields` array in the shared constant-argument buffer
// and upload it to the device.
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
//set up initial pointers
// Byte offset of the first iterated element of each dat, accounting for the
// dat base offset, halo depth (d_m) and, under MPI, the intra-rank
// displacement (d_im).
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
// Ensure device copies are current and halos are exchanged before launch.
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
ops_timers_core(&c1,&t1);
OPS_kernels[123].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel5_minus_4_front<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
// Only synchronize (and hence only time accurately) in diagnostic runs.
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[123].time += t2-t1;
// Mark device data dirty so later host accesses trigger a copy-back.
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[123].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[123].transfer += ops_compute_transfer(dim, range, &arg1);
}
|
fa79660ca1571d9e1e0bdfb9612ae481ca65b25f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_tgamma.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_tgamma), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_tgamma), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_tgamma), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | fa79660ca1571d9e1e0bdfb9612ae481ca65b25f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_tgamma.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: times vec_tgamma over a sweep of matrix
// sizes (matrices_) and launch configurations (blocks_).
//   argv[1] - number of entries of matrices_ to sweep (must be <= 7).
// Prints one "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" line per configuration.
int main(int argc, char **argv) {
cudaSetDevice(0);
// Guard against a missing size argument (original dereferenced argv[1] blindly).
if (argc < 2) { fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]); return 1; }
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
// BUG FIX: the original passed XSIZE*YSIZE *bytes* for XSIZE*YSIZE doubles,
// under-allocating by 8x and letting the kernel write out of bounds.
double *result = NULL;
cudaMalloc(&result, (size_t)XSIZE*YSIZE*sizeof(double));
double *x = NULL;
cudaMalloc(&x, (size_t)XSIZE*YSIZE*sizeof(double));
// Round the launch extents up to a multiple of the block size.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0); // no-op; forces lazy context creation before timing
// Warm-up launch plus a short burn-in loop.
vec_tgamma<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_tgamma<<<gridBlock,threadBlock>>>(n,result,x);
}
// BUG FIX: kernel launches are asynchronous -- synchronize around the timed
// region so the clock measures execution, not just launch enqueueing.
cudaDeviceSynchronize();
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_tgamma<<<gridBlock,threadBlock>>>(n,result,x);
}
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// BUG FIX: the original leaked both device buffers on every iteration.
cudaFree(result);
cudaFree(x);
}
}}
2c6b2dcef07f1038bf5fc10fdc18a3de6b535060.hip | // !!! This is a file automatically generated by hipify!!!
#include <Tracer/CUDA/Kernel.cuh>
#include <hip/hip_runtime.h>
#include <surface_functions.h>
#include <cuda_surface_types.h>
#include <device_launch_parameters.h>
#include <Tracer/CUDA/CudaDefinitions.h>
#include <Tracer/CUDA/CudaAssert.h>
#include <Tracer/Core/SceneData.cuh>
#include <Tracer/BVH/MBVHNode.cuh>
#include <Tracer/Core/Random.cuh>
using namespace glm;
#ifdef __HIPCC__
#define LAUNCH_BOUNDS __launch_bounds__(128, 8)
#else
#define LAUNCH_BOUNDS
#endif
#define MAX_DEPTH 16
#define PI glm::pi<float>()
#define INVPI glm::one_over_pi<float>()
__device__ int primary_ray_cnt = 0;
//The index of the ray at which we start generating more rays in ray generation step.
//Effectively is the last index which was previously generated + 1.
__device__ int start_position = 0;
//Ray number incremented by every thread in primary_rays ray generation
__device__ int ray_nr_primary = 0;
//Ray number to fetch different ray from every CUDA thread during the extend step.
__device__ int ray_nr_extend = 0;
//Ray number to fetch different ray from every CUDA thread in the shade step.
__device__ int ray_nr_microfacet = 0;
__device__ int ray_nr_regular = 0;
__device__ int ray_nr_invalid = 0;
//Ray number to fetch different ray from every CUDA thread in the connect step.
__device__ int ray_nr_connect = 0;
//Number of shadow rays generated in shade step, which are placed in connect step.
__device__ int shadow_ray_cnt = 0;
surface<void, cudaSurfaceType2D> framebuffer;
// Write `color` to the surface-bound framebuffer at pixel (x, y).
// The byte offset in x is required by surf2Dwrite; no boundary mode is
// passed here (contrast with draw_unbounded), so callers must pass
// in-range coordinates.
__device__ inline void draw(unsigned int x, unsigned int y, const vec4& color)
{
surf2Dwrite(color, framebuffer, x * sizeof(vec4), y);
}
// Write `color` to the framebuffer at (x, y), silently discarding writes
// that fall outside the surface (hipBoundaryModeZero).
__device__ inline void draw_unbounded(unsigned int x, unsigned int y, const vec4& color)
{
surf2Dwrite(color, framebuffer, x * sizeof(vec4), y, hipBoundaryModeZero);
}
// Multiple-importance-sampling weight from two PDFs:
// 1 / (w1*pdf1 + w2*pdf2) with wi = pdfi / (pdf1 + pdf2), clamped to >= 0.
__device__ inline float balancePDFs(float pdf1, float pdf2)
{
  const float total = pdf1 + pdf2;
  const float w1 = pdf1 / total;
  const float w2 = pdf2 / total;
  const float weighted = w1 * pdf1 + w2 * pdf2;
  return max(0.0f, 1.0f / weighted);
}
// Balance heuristic for MIS with sample counts nf/ng and PDFs fPdf/gPdf,
// clamped to be non-negative.
__device__ inline float balanceHeuristic(float nf, float fPdf, float ng, float gPdf)
{
  const float f = nf * fPdf;
  const float g = ng * gPdf;
  return max(0.0f, f / (f + g));
}
// Power heuristic (exponent 2) for MIS, clamped to be non-negative.
__device__ inline float powerHeuristic(float nf, float fPdf, float ng, float gPdf)
{
  const float f2 = (nf * fPdf) * (nf * fPdf);
  const float g2 = (ng * gPdf) * (ng * gPdf);
  return max(0.0f, f2 / (f2 + g2));
}
// Reset all per-iteration wavefront counters before the next path-tracing
// iteration and advance the pixel cursor (start_position) by the number of
// ray slots that were refilled with fresh primary rays last iteration.
// Intended to be launched with a single thread between iterations.
__global__ void setGlobals(int rayBufferSize, int width, int height)
{
const int maxBuffer = width * height;
// Slots not claimed by surviving (extended) paths received new primary
// rays; advance by that count, wrapping at the framebuffer size.
const unsigned int progress = rayBufferSize - (glm::min(primary_ray_cnt, rayBufferSize));
start_position += progress;
start_position %= maxBuffer;
shadow_ray_cnt = 0;
primary_ray_cnt = 0;
ray_nr_primary = 0;
ray_nr_extend = 0;
ray_nr_microfacet = 0;
ray_nr_regular = 0;
ray_nr_invalid = 0;
ray_nr_connect = 0;
}
// Persistent-threads primary-ray generator: each thread repeatedly claims a
// slot via the global ray_nr_primary counter and fills the ray buffer from
// slot `primary_ray_cnt` (where extended paths left off) to rayBufferSize.
// Pixels are assigned sequentially from start_position, wrapping over the
// w x h framebuffer; sample positions are jittered by +/-0.5 for AA.
//   origin/viewDir/hor/ver - camera basis used by Ray::generate
//   invw/invh              - reciprocal framebuffer dimensions
//   frame                  - frame number, mixed into the RNG seed
__global__ void generatePrimaryRays(
Ray* rays,
vec3 origin,
vec3 viewDir,
vec3 hor,
vec3 ver,
int w,
int h,
float invw,
float invh,
int rayBufferSize,
unsigned int frame
)
{
while (true)
{
const int index = atomicAdd(&ray_nr_primary, 1);
// Start from where extended rays ended
const int rayIdx = index + primary_ray_cnt;
if (rayIdx >= rayBufferSize) return;
// Per-ray RNG seed; mixes slot index and frame number for decorrelation.
unsigned int seed = (index + frame * 147565741) * 720898027 * index;
const int x = (start_position + index) % w;
const int y = ((start_position + index) / w) % h;
// Jittered sub-pixel sample position.
const float px = float(x) + RandomFloat(seed) - 0.5f;
const float py = float(y) + RandomFloat(seed) - 0.5f;
rays[rayIdx] = Ray::generate(origin, viewDir, hor, ver, px, py, invw, invh, x + y * w);
}
}
// Extend step: intersect every ray in the buffer with the scene BVH.
// Work is distributed with a persistent-threads loop over the global
// ray_nr_extend counter; traverseMBVH updates ray.t and ray.hit_idx in place
// (rays that hit nothing keep t == MAX_DISTANCE).
__global__ void LAUNCH_BOUNDS extend(Ray * rays, SceneData scene, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_extend, 1);
if (index >= rayBufferSize) return;
Ray & ray = rays[index];
ray.t = MAX_DISTANCE;
MBVHNode::traverseMBVH(ray.origin, ray.direction, &ray.t, &ray.hit_idx, scene);
}
}
// Shade step for "terminal" rays: rays that escaped the scene (skybox) and
// rays that hit a light source. Accumulates the resulting radiance into the
// per-pixel frame buffer with MIS weighting against the previous bounce's
// sampling PDF. Rays that hit a non-light surface are left for the
// shade_regular / shade_microfacet kernels (skipped here via `continue`).
__global__ void LAUNCH_BOUNDS shade_invalid(Ray * rays, Ray * eRays, ShadowRay * sRays, SceneData scene, unsigned int frame, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_invalid, 1);
if (index >= rayBufferSize) return;
Ray & ray = rays[index];
vec3 color = vec3(0.0f);
if (ray.valid())
{
// Indirect light contribution can be disabled globally.
if (!scene.indirect)
continue;
const Material& mat = scene.gpuMaterials[scene.gpuMatIdxs[ray.hit_idx]];
if (mat.type != Light) continue;
ray.origin = ray.getHitpoint();
// Interpolate the shading normal at the light hit point.
const uvec3 tIdx = scene.indices[ray.hit_idx];
const vec3 cN = scene.centerNormals[ray.hit_idx];
const vec3 bary = triangle::getBaryCoords(ray.origin, cN, scene.vertices[tIdx.x], scene.vertices[tIdx.y], scene.vertices[tIdx.z]);
const vec2 tCoords = triangle::getTexCoords(bary, scene.texCoords[tIdx.x], scene.texCoords[tIdx.y], scene.texCoords[tIdx.z]);
vec3 normal;
if (mat.normalTex >= 0)
{
vec3 T, B;
convertToLocalSpace(cN, &T, &B);
const vec3 n = scene.getTextureNormal(mat.normalTex, tCoords);
normal = normalize(localToWorld(n, T, B, cN));
}
else
normal = triangle::getNormal(bary, scene.normals[tIdx.x], scene.normals[tIdx.y], scene.normals[tIdx.z]);
const bool backFacing = glm::dot(normal, ray.direction) >= 0.0f;
if (backFacing) normal *= -1.0f;
const auto mf = scene.microfacets[scene.gpuMatIdxs[ray.hit_idx]];
if (ray.lastBounceType != Lambertian)
{
// Specular/dielectric bounces hit the light deterministically:
// add the full emission, no MIS weight.
color = ray.throughput * mat.emission;
}
else
{
// Diffuse bounce: weight the BSDF-sampled light hit against the
// light-sampling PDF (next-event estimation) via the balance heuristic.
const float NdotL = dot(ray.lastNormal, ray.direction);
const float LNdotL = dot(normal, -ray.direction);
const float lightPDF = ray.t * ray.t / (LNdotL * triangle::getArea(scene.vertices[tIdx.x], scene.vertices[tIdx.y], scene.vertices[tIdx.z]));
const vec3 wi = glm::reflect(-ray.direction, ray.lastNormal);
const float oPDF = mat.type <= Lambertian ? NdotL * glm::one_over_pi<float>() : mat.evaluate(mf, ray.direction, ray.lastNormal, wi);
const vec3 col = ray.throughput * mat.emission * NdotL;
if (lightPDF > 0 && oPDF > 0)
{
const float pdf = balancePDFs(oPDF, lightPDF);
color = col * pdf;
}
}
}
else if (scene.skyboxEnabled)
{
// Ray escaped: sample the equirectangular skybox.
const vec2 uv = {
1.0f + atan2f(ray.direction.x, -ray.direction.z) * glm::one_over_pi<float>() * 0.5f,
1.0f - acosf(ray.direction.y) * glm::one_over_pi<float>()
};
color = ray.throughput * vec3(scene.getTextureColor(scene.skyboxTexture, uv));
}
ray.throughput = vec3(0.0f);
// Clamp fireflies: limit the contribution's magnitude to 10.
const float length2 = dot(color, color);
if (length2 > 100.0f)
color = color / sqrtf(length2) * 10.0f;
// Accumulate radiance and bump the per-pixel sample count (alpha).
atomicAdd(&scene.currentFrame[ray.index].r, color.r);
atomicAdd(&scene.currentFrame[ray.index].g, color.g);
atomicAdd(&scene.currentFrame[ray.index].b, color.b);
atomicAdd(&scene.currentFrame[ray.index].a, 1.0f);
}
}
// Shade step for diffuse/specular/dielectric materials (microfacet and light
// hits are handled by the other shade kernels). Each persistent thread claims
// rays via ray_nr_regular, evaluates the material at the hit point, emits a
// next-event-estimation shadow ray for Lambertian surfaces, and either
// extends the path into eRays or terminates it (Russian roulette / MAX_DEPTH)
// and bumps the per-pixel sample count.
__global__ void LAUNCH_BOUNDS shade_regular(Ray * rays, Ray * eRays, ShadowRay * sRays, SceneData scene, unsigned int frame, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_regular, 1);
if (index >= rayBufferSize) return;
Ray & ray = rays[index];
if (!ray.valid()) continue;
const Material & mat = scene.gpuMaterials[scene.gpuMatIdxs[ray.hit_idx]];
// Lights and microfacet materials are handled elsewhere.
if (mat.type == Light || mat.type >= Beckmann) continue;
unsigned int seed = (frame * ray.index * 147565741) * 720898027 * index;
ray.origin = ray.getHitpoint();
// Interpolate shading data at the hit point.
const uvec3 tIdx = scene.indices[ray.hit_idx];
const vec3 cN = scene.centerNormals[ray.hit_idx];
const vec3 bary = triangle::getBaryCoords(ray.origin, cN, scene.vertices[tIdx.x], scene.vertices[tIdx.y], scene.vertices[tIdx.z]);
const vec2 tCoords = triangle::getTexCoords(bary, scene.texCoords[tIdx.x], scene.texCoords[tIdx.y], scene.texCoords[tIdx.z]);
vec3 normal = mat.normalTex >= 0
? sampleToWorld(scene.getTextureNormal(mat.normalTex, tCoords), cN)
: triangle::getNormal(bary, scene.normals[tIdx.x], scene.normals[tIdx.y], scene.normals[tIdx.z]);
const bool backFacing = glm::dot(normal, ray.direction) >= 0.0f;
normal *= backFacing ? -1.0f : 1.0f;
const vec3 matColor = mat.diffuseTex < 0 ? mat.albedo : scene.getTextureColor(mat.diffuseTex, tCoords);
switch (mat.type)
{
case Lambertian: {
const vec3 BRDF = matColor * glm::one_over_pi<float>();
if (scene.shadow)
{
// Next-event estimation: sample a point on a uniformly chosen light.
const int light = RandomIntMax(seed, scene.lightCount - 1);
const uvec3 lightIdx = scene.indices[scene.lightIndices[light]];
const vec3 lightPos = triangle::getRandomPointOnSurface(scene.vertices[lightIdx.x], scene.vertices[lightIdx.y], scene.vertices[lightIdx.z], RandomFloat(seed), RandomFloat(seed));
vec3 L = lightPos - ray.origin;
const float squaredDistance = dot(L, L);
const float distance = sqrtf(squaredDistance);
L /= distance;
const vec3 cNormal = scene.centerNormals[scene.lightIndices[light]];
const vec3 baryLight = triangle::getBaryCoords(lightPos, cNormal, scene.vertices[lightIdx.x], scene.vertices[lightIdx.y], scene.vertices[lightIdx.z]);
// BUG FIX: interpolate the light triangle's normal with the light
// sample's barycentrics (baryLight, previously computed but unused),
// not the surface hit's barycentrics (bary).
const vec3 lightNormal = triangle::getNormal(baryLight, scene.normals[lightIdx.x], scene.normals[lightIdx.y], scene.normals[lightIdx.z]);
const float NdotL = dot(normal, L);
const float LNdotL = dot(lightNormal, -L);
if (NdotL > 0 && LNdotL > 0)
{
const float area = triangle::getArea(scene.vertices[lightIdx.x], scene.vertices[lightIdx.y], scene.vertices[lightIdx.z]);
// BUG FIX: `light` indexes the light list, not the triangle list;
// material lookup must go through the light's triangle index, matching
// the gpuMatIdxs[hit_idx] pattern used everywhere else in this file.
const auto emission = scene.gpuMaterials[scene.gpuMatIdxs[scene.lightIndices[light]]].emission;
const vec3 shadowCol = ray.throughput * BRDF * emission * float(scene.lightCount) * NdotL;
const float lambertPDF = NdotL * glm::one_over_pi<float>();
const float lightPDF = squaredDistance / (LNdotL * area);
const unsigned int shadowIdx = atomicAdd(&shadow_ray_cnt, 1);
const float pdf = balancePDFs(lambertPDF, lightPDF);
sRays[shadowIdx] = ShadowRay(
ray.origin + L * scene.normalEpsilon, L, shadowCol * pdf,
distance - scene.distEpsilon, ray.index
);
ray.lastNormal = normal;
}
}
// Continue the path with a cosine-weighted bounce (PDF = NdotR / pi).
ray.reflectCosineWeighted(RandomFloat(seed), RandomFloat(seed));
const float NdotR = dot(normal, ray.direction);
const float PDF = NdotR * glm::one_over_pi<float>();
ray.lastBounceType = Lambertian;
ray.throughput *= BRDF * NdotR / PDF;
break;
}
case Specular: {
ray.throughput *= matColor;
ray.reflect(normal);
ray.lastBounceType = Specular;
break;
}
case Fresnel: {
ray.throughput *= matColor;
const vec3 dir = ray.direction;
// Default to reflection; possibly replaced by refraction below.
ray.reflect(normal);
ray.lastBounceType = Specular;
const float n1 = backFacing ? mat.refractIdx : 1.0f;
const float n2 = backFacing ? 1.0f : mat.refractIdx;
const float n = n1 / n2;
const float cosTheta = dot(normal, -dir);
const float k = 1.0f - (n * n) * (1.0f - cosTheta * cosTheta);
if (k > 0) // no total internal reflection
{
// Schlick's approximation of the Fresnel reflectance.
const float a = n1 - n2;
const float b = n1 + n2;
const float R0 = (a * a) / (b * b);
const float c = 1.0f - cosTheta;
const float Fr = R0 + (1.0f - R0) * (c * c * c * c * c);
if (RandomFloat(seed) > Fr)
{
ray.lastBounceType = Fresnel;
if (backFacing)
ray.throughput *= exp(-mat.absorption * ray.t); // Beer's law
ray.origin -= EPSILON * 2.0f * normal;
ray.direction = normalize(n * dir + normal * (n * cosTheta - sqrtf(k)));
}
}
break;
}
default:
break;
}
ray.throughput = glm::max(vec3(0.0f), ray.throughput);
// Russian roulette after 3 bounces.
// NOTE(review): max(x, min(y, z)) looks like it was meant to be the maximum
// throughput component (max(x, max(y, z))) -- confirm before changing.
const float prob = ray.bounces >= 3 ? min(0.5f, max(ray.throughput.x, min(ray.throughput.y, ray.throughput.z))) : 1.0f;
if (ray.bounces < MAX_DEPTH && prob > EPSILON && prob > RandomFloat(seed))
{
ray.origin += ray.direction * scene.normalEpsilon;
ray.bounces++;
ray.lastNormal = normal;
ray.throughput /= prob;
unsigned int primary_index = atomicAdd(&primary_ray_cnt, 1);
ray.lastBounceType = mat.type;
eRays[primary_index] = ray;
}
else
{
// Path terminated: count the sample for this pixel.
ray.throughput = vec3(0.0f);
atomicAdd(&scene.currentFrame[ray.index].a, 1.0f);
}
}
}
// Shade step for microfacet materials (mat.type >= Beckmann). Samples a
// microfacet normal wm in the local shading frame, reflects (or, for the
// Fresnel* variants, possibly refracts) about it, emits a next-event shadow
// ray for non-refracted bounces, and extends or terminates the path exactly
// like shade_regular.
__global__ void LAUNCH_BOUNDS shade_microfacet(Ray * rays, Ray * eRays, ShadowRay * sRays, SceneData scene, unsigned int frame, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_microfacet, 1);
if (index >= rayBufferSize) return;
Ray & ray = rays[index];
if (!ray.valid()) continue;
const Material & mat = scene.gpuMaterials[scene.gpuMatIdxs[ray.hit_idx]];
// Only microfacet materials are handled here.
if (mat.type == Light || mat.type < Beckmann) continue;
unsigned int seed = (frame * ray.index * 147565741) * 720898027 * index;
ray.origin = ray.getHitpoint();
// Interpolate shading data at the hit point.
const uvec3 tIdx = scene.indices[ray.hit_idx];
const vec3 cN = scene.centerNormals[ray.hit_idx];
const vec3 bary = triangle::getBaryCoords(ray.origin, cN, scene.vertices[tIdx.x], scene.vertices[tIdx.y], scene.vertices[tIdx.z]);
const vec2 tCoords = triangle::getTexCoords(bary, scene.texCoords[tIdx.x], scene.texCoords[tIdx.y], scene.texCoords[tIdx.z]);
vec3 normal = mat.normalTex >= 0
? sampleToWorld(scene.getTextureNormal(mat.normalTex, tCoords), cN)
: triangle::getNormal(bary, scene.normals[tIdx.x], scene.normals[tIdx.y], scene.normals[tIdx.z]);
const bool backFacing = glm::dot(normal, ray.direction) >= 0.0f;
normal *= backFacing ? -1.0f : 1.0f;
const vec3 matColor = mat.diffuseTex < 0 ? mat.albedo : scene.getTextureColor(mat.diffuseTex, tCoords);
const auto mf = scene.microfacets[scene.gpuMatIdxs[ray.hit_idx]];
const vec3 wi = -ray.direction;
// Build a tangent frame and sample a microfacet normal in local space.
vec3 T, B;
convertToLocalSpace(normal, &T, &B);
const vec3 wiLocal = normalize(vec3(dot(T, wi), dot(B, wi), dot(normal, wi)));
const vec3 wmLocal = mat.sample(mf, wiLocal, RandomFloat(seed), RandomFloat(seed));
// Half-way vector
const vec3 wm = T * wmLocal.x + B * wmLocal.y + normal * wmLocal.z;
// Local new ray direction
const vec3 woLocal = glm::reflect(-wiLocal, wmLocal);
// New outgoing ray direction
vec3 wo = localToWorld(woLocal, T, B, wm);
ray.lastBounceType = mat.type;
const float PDF = mat.evaluate(mf, woLocal, wmLocal, wiLocal);
if (ray.lastBounceType >= FresnelBeckmann)
{
// Dielectric microfacet: probabilistically refract through wm.
ray.lastBounceType = Fresnel;
const float n1 = backFacing ? mat.refractIdx : 1.0f;
const float n2 = backFacing ? 1.0f : mat.refractIdx;
const float n = n1 / n2;
const float cosTheta = dot(wm, wi);
const float k = 1.0f - (n * n) * (1.0f - cosTheta * cosTheta);
if (k > 0) // no total internal reflection
{
// Schlick's approximation of the Fresnel reflectance.
const float a = n1 - n2;
const float b = n1 + n2;
const float R0 = (a * a) / (b * b);
const float c = 1.0f - cosTheta;
const float Fr = R0 + (1.0f - R0) * (c * c * c * c * c);
if (RandomFloat(seed) > Fr)
{
ray.lastBounceType = Fresnel;
if (backFacing)
ray.throughput *= exp(-mat.absorption * ray.t); // Beer's law
wo = normalize(n * -wi + wm * (n * cosTheta - sqrtf(k)));
}
}
}
if (ray.lastBounceType != Fresnel && scene.shadow)
{
// Next-event estimation: sample a point on a uniformly chosen light.
const int light = RandomIntMax(seed, scene.lightCount - 1);
const uvec3 lightIdx = scene.indices[scene.lightIndices[light]];
const vec3 lightPos = triangle::getRandomPointOnSurface(scene.vertices[lightIdx.x], scene.vertices[lightIdx.y], scene.vertices[lightIdx.z], RandomFloat(seed), RandomFloat(seed));
vec3 L = lightPos - ray.origin;
const float squaredDistance = dot(L, L);
const float distance = sqrtf(squaredDistance);
L /= distance;
const vec3 cNormal = scene.centerNormals[scene.lightIndices[light]];
const vec3 baryLight = triangle::getBaryCoords(lightPos, cNormal, scene.vertices[lightIdx.x], scene.vertices[lightIdx.y], scene.vertices[lightIdx.z]);
// BUG FIX: interpolate the light triangle's normal with the light
// sample's barycentrics (baryLight, previously computed but unused),
// not the surface hit's barycentrics (bary).
const vec3 lightNormal = triangle::getNormal(baryLight, scene.normals[lightIdx.x], scene.normals[lightIdx.y], scene.normals[lightIdx.z]);
const float NdotL = dot(wm, L);
const float LNdotL = dot(lightNormal, -L);
if (NdotL > 0 && LNdotL > 0)
{
const float area = triangle::getArea(scene.vertices[lightIdx.x], scene.vertices[lightIdx.y], scene.vertices[lightIdx.z]);
// BUG FIX: `light` indexes the light list, not the triangle list;
// material lookup must go through the light's triangle index.
const auto emission = scene.gpuMaterials[scene.gpuMatIdxs[scene.lightIndices[light]]].emission;
const float mfPDF = 1.0f / mat.evaluate(mf, L, wm, wi);
const float lightPDF = squaredDistance / (LNdotL * area);
const vec3 shadowCol = ray.throughput * matColor / mfPDF * emission * NdotL / lightPDF;
const unsigned int shadowIdx = atomicAdd(&shadow_ray_cnt, 1);
// NOTE(review): the original computed balancePDFs(lightPDF, mfPDF) here
// but never applied it, unlike shade_regular which multiplies its shadow
// colour by the MIS weight; the unused value has been removed -- confirm
// whether the weight should in fact be applied to shadowCol.
sRays[shadowIdx] = ShadowRay(
ray.origin + scene.normalEpsilon * L, L, shadowCol,
distance - scene.distEpsilon, ray.index
);
}
}
ray.throughput *= matColor * PDF;
ray.direction = wo;
ray.throughput = glm::max(vec3(0.0f), ray.throughput);
// Russian roulette after 3 bounces.
// NOTE(review): max(x, min(y, z)) looks like it was meant to be the maximum
// throughput component (max(x, max(y, z))) -- confirm before changing.
const float prob = ray.bounces >= 3 ? min(0.5f, max(ray.throughput.x, min(ray.throughput.y, ray.throughput.z))) : 1.0f;
if (ray.bounces < MAX_DEPTH && prob > EPSILON && prob > RandomFloat(seed))
{
ray.origin += ray.direction * scene.normalEpsilon;
ray.bounces++;
ray.throughput /= prob;
unsigned int primary_index = atomicAdd(&primary_ray_cnt, 1);
ray.lastBounceType = mat.type;
ray.lastNormal = wm;
eRays[primary_index] = ray;
}
else
{
// Path terminated: count the sample for this pixel.
ray.throughput = vec3(0.0f);
atomicAdd(&scene.currentFrame[ray.index].a, 1.0f);
}
}
}
__global__ void LAUNCH_BOUNDS shade_invalid_ref(Ray * rays, Ray * eRays, ShadowRay * sRays, SceneData scene, unsigned int frame, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_invalid, 1);
if (index >= rayBufferSize) return;
Ray & ray = rays[index];
vec3 color = vec3(0.0f);
float alpha = 1.0f;
if (ray.valid())
{
const Material& mat = scene.gpuMaterials[scene.gpuMatIdxs[ray.hit_idx]];
if (mat.type != Light) continue;
color = ray.throughput * mat.emission;
}
else if (scene.skyboxEnabled)
{
const vec2 uv = {
1.0f + atan2f(ray.direction.x, -ray.direction.z) * glm::one_over_pi<float>() * 0.5f,
1.0f - acosf(ray.direction.y) * glm::one_over_pi<float>()
};
color = ray.throughput * vec3(scene.getTextureColor(scene.skyboxTexture, uv));
}
ray.throughput = vec3(0.0f);
atomicAdd(&scene.currentFrame[ray.index].r, color.r);
atomicAdd(&scene.currentFrame[ray.index].g, color.g);
atomicAdd(&scene.currentFrame[ray.index].b, color.b);
atomicAdd(&scene.currentFrame[ray.index].a, alpha);
}
}
__global__ void LAUNCH_BOUNDS shade_regular_ref(Ray * rays, Ray * eRays, ShadowRay * sRays, SceneData scene, unsigned int frame, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_regular, 1);
if (index >= rayBufferSize) return;
Ray & ray = rays[index];
if (!ray.valid()) continue;
const Material & mat = scene.gpuMaterials[scene.gpuMatIdxs[ray.hit_idx]];
if (mat.type == Light || mat.type >= Beckmann) continue;
vec3 color = vec3(0.0f);
unsigned int seed = (frame * ray.index * 147565741) * 720898027 * index;
ray.origin = ray.getHitpoint();
const uvec3 tIdx = scene.indices[ray.hit_idx];
const vec3 cN = scene.centerNormals[ray.hit_idx];
const vec3 bary = triangle::getBaryCoords(ray.origin, cN, scene.vertices[tIdx.x], scene.vertices[tIdx.y], scene.vertices[tIdx.z]);
const vec2 tCoords = triangle::getTexCoords(bary, scene.texCoords[tIdx.x], scene.texCoords[tIdx.y], scene.texCoords[tIdx.z]);
vec3 normal = mat.normalTex >= 0
? sampleToWorld(scene.getTextureNormal(mat.normalTex, tCoords), cN)
: triangle::getNormal(bary, scene.normals[tIdx.x], scene.normals[tIdx.y], scene.normals[tIdx.z]);
const bool backFacing = glm::dot(normal, ray.direction) >= 0.0f;
normal *= backFacing ? -1.0f : 1.0f;
const vec3 matColor = mat.diffuseTex < 0 ? mat.albedo : scene.getTextureColor(mat.diffuseTex, tCoords);
ray.origin += normal * EPSILON;
switch (mat.type)
{
case Lambertian: {
const vec3 BRDF = matColor * glm::one_over_pi<float>();
ray.reflectCosineWeighted(RandomFloat(seed), RandomFloat(seed));
const float NdotR = dot(normal, ray.direction);
const float PDF = NdotR * glm::one_over_pi<float>();
ray.lastBounceType = Lambertian;
ray.throughput *= BRDF * NdotR / PDF;
break;
}
case Specular: {
ray.throughput *= matColor;
ray.reflect(normal);
ray.lastBounceType = Specular;
break;
}
case Fresnel: {
ray.throughput *= matColor;
const vec3 dir = ray.direction;
ray.reflect(normal);
ray.lastBounceType = Specular;
const float n1 = backFacing ? mat.refractIdx : 1.0f;
const float n2 = backFacing ? 1.0f : mat.refractIdx;
const float n = n1 / n2;
const float cosTheta = dot(normal, -dir);
const float k = 1.0f - (n * n) * (1.0f - cosTheta * cosTheta);
if (k > 0)
{
const float a = n1 - n2;
const float b = n1 + n2;
const float R0 = (a * a) / (b * b);
const float c = 1.0f - cosTheta;
const float Fr = R0 + (1.0f - R0) * (c * c * c * c * c);
const float r = RandomFloat(seed);
if (r > Fr)
{
ray.lastBounceType = Fresnel;
if (backFacing)
ray.throughput *= exp(-mat.absorption * ray.t);;
ray.origin -= EPSILON * 2.0f * normal;
ray.direction = normalize(n * dir + normal * (n * cosTheta - sqrtf(k)));
}
}
break;
}
default:
break;
}
ray.throughput = glm::max(vec3(0.0f), ray.throughput);
const float prob = ray.bounces >= 3 ? min(0.5f, max(ray.throughput.x, min(ray.throughput.y, ray.throughput.z))) : 1.0f;
if (ray.bounces < MAX_DEPTH && prob > EPSILON && prob > RandomFloat(seed))
{
ray.bounces++;
ray.throughput /= prob;
unsigned int primary_index = atomicAdd(&primary_ray_cnt, 1);
ray.lastBounceType = mat.type;
eRays[primary_index] = ray;
}
else
{
ray.throughput = vec3(0.0f);
atomicAdd(&scene.currentFrame[ray.index].a, 1.0f);
}
}
}
__global__ void LAUNCH_BOUNDS shade_microfacet_ref(Ray * rays, Ray * eRays, ShadowRay * sRays, SceneData scene, unsigned int frame, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_microfacet, 1);
if (index >= rayBufferSize) return;
Ray & ray = rays[index];
if (!ray.valid()) continue;
const Material & mat = scene.gpuMaterials[scene.gpuMatIdxs[ray.hit_idx]];
if (mat.type == Light || mat.type < Beckmann) continue;
vec3 color = vec3(0.0f);
unsigned int seed = (frame * ray.index * 147565741) * 720898027 * index;
ray.origin = ray.getHitpoint();
const uvec3 tIdx = scene.indices[ray.hit_idx];
const vec3 cN = scene.centerNormals[ray.hit_idx];
const vec3 bary = triangle::getBaryCoords(ray.origin, cN, scene.vertices[tIdx.x], scene.vertices[tIdx.y], scene.vertices[tIdx.z]);
const vec2 tCoords = triangle::getTexCoords(bary, scene.texCoords[tIdx.x], scene.texCoords[tIdx.y], scene.texCoords[tIdx.z]);
vec3 normal = mat.normalTex >= 0
? sampleToWorld(scene.getTextureNormal(mat.normalTex, tCoords), cN)
: triangle::getNormal(bary, scene.normals[tIdx.x], scene.normals[tIdx.y], scene.normals[tIdx.z]);
const bool backFacing = glm::dot(normal, ray.direction) >= 0.0f;
normal *= backFacing ? -1.0f : 1.0f;
const vec3 matColor = mat.diffuseTex < 0 ? mat.albedo : scene.getTextureColor(mat.diffuseTex, tCoords);
const auto mf = scene.microfacets[scene.gpuMatIdxs[ray.hit_idx]];
const vec3 wi = -ray.direction;
vec3 T, B;
convertToLocalSpace(normal, &T, &B);
const vec3 wiLocal = normalize(vec3(dot(T, wi), dot(B, wi), dot(normal, wi)));
vec3 wmLocal{};
switch (mat.type)
{
case(Beckmann):
{
wmLocal = mf.sample_beckmann(wiLocal, RandomFloat(seed), RandomFloat(seed));
break;
}
case(GGX):
{
wmLocal = mf.sample_ggx(wiLocal, RandomFloat(seed), RandomFloat(seed));
break;
}
case(Trowbridge):
{
wmLocal = mf.sample_trowbridge_reitz(wiLocal, RandomFloat(seed), RandomFloat(seed));
break;
}
case(FresnelBeckmann):
{
wmLocal = mf.sample_beckmann(wiLocal, RandomFloat(seed), RandomFloat(seed));
break;
}
case(FresnelGGX):
{
wmLocal = mf.sample_ggx(wiLocal, RandomFloat(seed), RandomFloat(seed));
break;
}
case(FresnelTrowbridge):
{
wmLocal = mf.sample_trowbridge_reitz(wiLocal, RandomFloat(seed), RandomFloat(seed));
break;
}
default:
break;
}
// Half-way vector
const vec3 wm = T * wmLocal.x + B * wmLocal.y + normal * wmLocal.z;
// Local new ray direction
const vec3 woLocal = glm::reflect(-wiLocal, wmLocal);
// New outgoing ray direction
vec3 wo = localToWorld(woLocal, T, B, wm);
float PDF = 0.0f;
switch (mat.type)
{
case(Beckmann):
{
PDF = mf.pdf_beckmann(woLocal, wmLocal, wiLocal);
break;
}
case(GGX):
{
PDF = mf.pdf_ggx(woLocal, wmLocal, wiLocal);
break;
}
case(Trowbridge):
{
PDF = mf.pdf_trowbridge_reitz(woLocal, wmLocal, wiLocal);
break;
}
case(FresnelBeckmann):
{
PDF = mf.pdf_beckmann(woLocal, wmLocal, wiLocal);
break;
}
case(FresnelGGX):
{
PDF = mf.pdf_ggx(woLocal, wmLocal, wiLocal);
break;
}
case(FresnelTrowbridge):
{
PDF = mf.pdf_trowbridge_reitz(woLocal, wmLocal, wiLocal);
break;
}
default:
break;
}
ray.origin += wm * scene.normalEpsilon;
if (mat.type >= FresnelBeckmann)
{
ray.lastBounceType = mat.type;
const float n1 = backFacing ? mat.refractIdx : 1.0f;
const float n2 = backFacing ? 1.0f : mat.refractIdx;
const float n = n1 / n2;
const float cosTheta = dot(wm, wi);
const float k = 1.0f - (n * n) * (1.0f - cosTheta * cosTheta);
if (k > 0)
{
const float a = n1 - n2;
const float b = n1 + n2;
const float R0 = (a * a) / (b * b);
const float c = 1.0f - cosTheta;
const float Fr = R0 + (1.0f - R0) * (c * c * c * c * c);
const float r = RandomFloat(seed);
if (r > Fr)
{
ray.lastBounceType = Fresnel;
if (backFacing)
ray.throughput *= exp(-mat.absorption * ray.t);;
ray.origin -= EPSILON * 2.0f * wm;
wo = normalize(n * -wi + wm * (n * cosTheta - sqrtf(k)));
}
}
}
ray.throughput *= matColor * PDF;
ray.direction = wo;
ray.throughput = glm::max(vec3(0.0f), ray.throughput);
const float prob = ray.bounces >= 3 ? min(0.5f, max(ray.throughput.x, min(ray.throughput.y, ray.throughput.z))) : 1.0f;
if (ray.bounces < MAX_DEPTH && prob > EPSILON && prob > RandomFloat(seed))
{
ray.bounces++;
ray.throughput /= prob;
unsigned int primary_index = atomicAdd(&primary_ray_cnt, 1);
ray.lastBounceType = mat.type;
ray.lastNormal = wm;
eRays[primary_index] = ray;
}
else
{
ray.throughput = vec3(0.0f);
atomicAdd(&scene.currentFrame[ray.index].a, 1.0f);
}
}
}
__global__ void LAUNCH_BOUNDS connect(ShadowRay * sRays, SceneData scene, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_connect, 1);
if (index >= shadow_ray_cnt) return;
const ShadowRay & ray = sRays[index];
if (MBVHNode::traverseMBVHShadow(ray.origin, ray.direction, ray.t, scene))
{
atomicAdd(&scene.currentFrame[ray.index].r, ray.color.r);
atomicAdd(&scene.currentFrame[ray.index].g, ray.color.g);
atomicAdd(&scene.currentFrame[ray.index].b, ray.color.b);
}
}
}
__global__ void draw_framebuffer(vec4 * currentBuffer, int width, int height)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int index = x + y * width;
const vec4 & color = currentBuffer[index];
const vec3 col = vec3(color.r, color.g, color.b) / color.a;
const vec3 exponent = vec3(1.0f / 2.2f);
draw(x, y, vec4(glm::pow(col, exponent), 1.0f));
}
__host__ inline void sample(uint &frame, Params& params, int rayBufferSize)
{
const auto* camera = params.camera;
const vec3 w = camera->getForward();
const vec3 up = camera->getUp();
const vec3 u = normalize(cross(w, up));
const vec3 v = normalize(cross(u, w));
vec3 hor, ver;
const float dist = camera->getPlaneDistance();
hor = u * dist;
ver = v * dist;
const float aspectRatio = float(params.width) / float(params.height);
if (params.width > params.height)
hor *= aspectRatio;
else
ver *= aspectRatio;
generatePrimaryRays << <params.smCores * 8, 128 >> > (params.gpuRays, camera->getPosition(), w, hor, ver, params.width, params.height,
1.0f / float(params.width), 1.0f / float(params.height), rayBufferSize, frame);
setGlobals << <1, 1 >> > (rayBufferSize, params.width, params.height);
extend << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuScene, rayBufferSize);
if (params.reference)
{
shade_regular_ref << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuNextRays, params.gpuShadowRays, params.gpuScene, frame, rayBufferSize);
shade_invalid_ref << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuNextRays, params.gpuShadowRays, params.gpuScene, frame, rayBufferSize);
shade_microfacet_ref << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuNextRays, params.gpuShadowRays, params.gpuScene, frame, rayBufferSize);
}
else
{
shade_regular << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuNextRays, params.gpuShadowRays, params.gpuScene, frame, rayBufferSize);
shade_invalid << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuNextRays, params.gpuShadowRays, params.gpuScene, frame, rayBufferSize);
shade_microfacet << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuNextRays, params.gpuShadowRays, params.gpuScene, frame, rayBufferSize);
connect << <params.smCores * 8, 128 >> > (params.gpuShadowRays, params.gpuScene, rayBufferSize);
}
std::swap(params.gpuRays, params.gpuNextRays);
frame++;
}
hipError_t launchKernels(hipArray_const_t array, Params & params, int rayBufferSize)
{
static uint frame = 1;
hipError_t err;
err = cuda(BindSurfaceToArray(framebuffer, array));
if (params.samples == 0)
cuda(MemcpyToSymbol(primary_ray_cnt, ¶ms.samples, sizeof(int)));
sample(frame, params, rayBufferSize);
dim3 dimBlock(16, 16);
dim3 dimGrid((params.width + dimBlock.x - 1) / dimBlock.x, (params.height + dimBlock.y - 1) / dimBlock.y);
draw_framebuffer << <dimGrid, dimBlock >> > (params.gpuScene.currentFrame, params.width, params.height);
cuda(DeviceSynchronize());
params.samples++;
if (frame >= UINT_MAX) frame = 1;
return err;
} | 2c6b2dcef07f1038bf5fc10fdc18a3de6b535060.cu | #include <Tracer/CUDA/Kernel.cuh>
#include <cuda_runtime.h>
#include <surface_functions.h>
#include <cuda_surface_types.h>
#include <device_launch_parameters.h>
#include <Tracer/CUDA/CudaDefinitions.h>
#include <Tracer/CUDA/CudaAssert.h>
#include <Tracer/Core/SceneData.cuh>
#include <Tracer/BVH/MBVHNode.cuh>
#include <Tracer/Core/Random.cuh>
using namespace glm;
#ifdef __CUDACC__
#define LAUNCH_BOUNDS __launch_bounds__(128, 8)
#else
#define LAUNCH_BOUNDS
#endif
#define MAX_DEPTH 16
#define PI glm::pi<float>()
#define INVPI glm::one_over_pi<float>()
__device__ int primary_ray_cnt = 0;
//The index of the ray at which we start generating more rays in ray generation step.
//Effectively is the last index which was previously generated + 1.
__device__ int start_position = 0;
//Ray number incremented by every thread in primary_rays ray generation
__device__ int ray_nr_primary = 0;
//Ray number to fetch different ray from every CUDA thread during the extend step.
__device__ int ray_nr_extend = 0;
//Ray number to fetch different ray from every CUDA thread in the shade step.
__device__ int ray_nr_microfacet = 0;
__device__ int ray_nr_regular = 0;
__device__ int ray_nr_invalid = 0;
//Ray number to fetch different ray from every CUDA thread in the connect step.
__device__ int ray_nr_connect = 0;
//Number of shadow rays generated in shade step, which are placed in connect step.
__device__ int shadow_ray_cnt = 0;
surface<void, cudaSurfaceType2D> framebuffer;
__device__ inline void draw(unsigned int x, unsigned int y, const vec4& color)
{
surf2Dwrite(color, framebuffer, x * sizeof(vec4), y);
}
__device__ inline void draw_unbounded(unsigned int x, unsigned int y, const vec4& color)
{
surf2Dwrite(color, framebuffer, x * sizeof(vec4), y, cudaBoundaryModeZero);
}
__device__ inline float balancePDFs(float pdf1, float pdf2)
{
const float sum = pdf1 + pdf2;
const float w1 = pdf1 / sum;
const float w2 = pdf2 / sum;
return max(0.0f, 1.0f / (w1 * pdf1 + w2 * pdf2));
}
__device__ inline float balanceHeuristic(float nf, float fPdf, float ng, float gPdf)
{
return max(0.0f, (nf * fPdf) / (nf * fPdf + ng * gPdf));
}
__device__ inline float powerHeuristic(float nf, float fPdf, float ng, float gPdf)
{
const float f = nf * fPdf;
const float g = ng * gPdf;
return max(0.0f, (f * f) / (f * f + g * g));
}
__global__ void setGlobals(int rayBufferSize, int width, int height)
{
const int maxBuffer = width * height;
const unsigned int progress = rayBufferSize - (glm::min(primary_ray_cnt, rayBufferSize));
start_position += progress;
start_position %= maxBuffer;
shadow_ray_cnt = 0;
primary_ray_cnt = 0;
ray_nr_primary = 0;
ray_nr_extend = 0;
ray_nr_microfacet = 0;
ray_nr_regular = 0;
ray_nr_invalid = 0;
ray_nr_connect = 0;
}
__global__ void generatePrimaryRays(
Ray* rays,
vec3 origin,
vec3 viewDir,
vec3 hor,
vec3 ver,
int w,
int h,
float invw,
float invh,
int rayBufferSize,
unsigned int frame
)
{
while (true)
{
const int index = atomicAdd(&ray_nr_primary, 1);
// Start from where extended rays ended
const int rayIdx = index + primary_ray_cnt;
if (rayIdx >= rayBufferSize) return;
unsigned int seed = (index + frame * 147565741) * 720898027 * index;
const int x = (start_position + index) % w;
const int y = ((start_position + index) / w) % h;
const float px = float(x) + RandomFloat(seed) - 0.5f;
const float py = float(y) + RandomFloat(seed) - 0.5f;
rays[rayIdx] = Ray::generate(origin, viewDir, hor, ver, px, py, invw, invh, x + y * w);
}
}
__global__ void LAUNCH_BOUNDS extend(Ray * rays, SceneData scene, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_extend, 1);
if (index >= rayBufferSize) return;
Ray & ray = rays[index];
ray.t = MAX_DISTANCE;
MBVHNode::traverseMBVH(ray.origin, ray.direction, &ray.t, &ray.hit_idx, scene);
}
}
__global__ void LAUNCH_BOUNDS shade_invalid(Ray * rays, Ray * eRays, ShadowRay * sRays, SceneData scene, unsigned int frame, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_invalid, 1);
if (index >= rayBufferSize) return;
Ray & ray = rays[index];
vec3 color = vec3(0.0f);
if (ray.valid())
{
if (!scene.indirect)
continue;
const Material& mat = scene.gpuMaterials[scene.gpuMatIdxs[ray.hit_idx]];
if (mat.type != Light) continue;
ray.origin = ray.getHitpoint();
const uvec3 tIdx = scene.indices[ray.hit_idx];
const vec3 cN = scene.centerNormals[ray.hit_idx];
const vec3 bary = triangle::getBaryCoords(ray.origin, cN, scene.vertices[tIdx.x], scene.vertices[tIdx.y], scene.vertices[tIdx.z]);
const vec2 tCoords = triangle::getTexCoords(bary, scene.texCoords[tIdx.x], scene.texCoords[tIdx.y], scene.texCoords[tIdx.z]);
vec3 normal;
if (mat.normalTex >= 0)
{
vec3 T, B;
convertToLocalSpace(cN, &T, &B);
const vec3 n = scene.getTextureNormal(mat.normalTex, tCoords);
normal = normalize(localToWorld(n, T, B, cN));
}
else
normal = triangle::getNormal(bary, scene.normals[tIdx.x], scene.normals[tIdx.y], scene.normals[tIdx.z]);
const bool backFacing = glm::dot(normal, ray.direction) >= 0.0f;
if (backFacing) normal *= -1.0f;
const auto mf = scene.microfacets[scene.gpuMatIdxs[ray.hit_idx]];
if (ray.lastBounceType != Lambertian)
{
color = ray.throughput * mat.emission;
}
else
{
const float NdotL = dot(ray.lastNormal, ray.direction);
const float LNdotL = dot(normal, -ray.direction);
const float lightPDF = ray.t * ray.t / (LNdotL * triangle::getArea(scene.vertices[tIdx.x], scene.vertices[tIdx.y], scene.vertices[tIdx.z]));
const vec3 wi = glm::reflect(-ray.direction, ray.lastNormal);
const float oPDF = mat.type <= Lambertian ? NdotL * glm::one_over_pi<float>() : mat.evaluate(mf, ray.direction, ray.lastNormal, wi);
const vec3 col = ray.throughput * mat.emission * NdotL;
if (lightPDF > 0 && oPDF > 0)
{
const float pdf = balancePDFs(oPDF, lightPDF);
color = col * pdf;
}
}
}
else if (scene.skyboxEnabled)
{
const vec2 uv = {
1.0f + atan2f(ray.direction.x, -ray.direction.z) * glm::one_over_pi<float>() * 0.5f,
1.0f - acosf(ray.direction.y) * glm::one_over_pi<float>()
};
color = ray.throughput * vec3(scene.getTextureColor(scene.skyboxTexture, uv));
}
ray.throughput = vec3(0.0f);
const float length2 = dot(color, color);
if (length2 > 100.0f)
color = color / sqrtf(length2) * 10.0f;
atomicAdd(&scene.currentFrame[ray.index].r, color.r);
atomicAdd(&scene.currentFrame[ray.index].g, color.g);
atomicAdd(&scene.currentFrame[ray.index].b, color.b);
atomicAdd(&scene.currentFrame[ray.index].a, 1.0f);
}
}
__global__ void LAUNCH_BOUNDS shade_regular(Ray * rays, Ray * eRays, ShadowRay * sRays, SceneData scene, unsigned int frame, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_regular, 1);
if (index >= rayBufferSize) return;
Ray & ray = rays[index];
if (!ray.valid()) continue;
const Material & mat = scene.gpuMaterials[scene.gpuMatIdxs[ray.hit_idx]];
if (mat.type == Light || mat.type >= Beckmann) continue;
vec3 color = vec3(0.0f);
unsigned int seed = (frame * ray.index * 147565741) * 720898027 * index;
ray.origin = ray.getHitpoint();
const uvec3 tIdx = scene.indices[ray.hit_idx];
const vec3 cN = scene.centerNormals[ray.hit_idx];
const vec3 bary = triangle::getBaryCoords(ray.origin, cN, scene.vertices[tIdx.x], scene.vertices[tIdx.y], scene.vertices[tIdx.z]);
const vec2 tCoords = triangle::getTexCoords(bary, scene.texCoords[tIdx.x], scene.texCoords[tIdx.y], scene.texCoords[tIdx.z]);
vec3 normal = mat.normalTex >= 0
? sampleToWorld(scene.getTextureNormal(mat.normalTex, tCoords), cN)
: triangle::getNormal(bary, scene.normals[tIdx.x], scene.normals[tIdx.y], scene.normals[tIdx.z]);
const bool backFacing = glm::dot(normal, ray.direction) >= 0.0f;
normal *= backFacing ? -1.0f : 1.0f;
const vec3 matColor = mat.diffuseTex < 0 ? mat.albedo : scene.getTextureColor(mat.diffuseTex, tCoords);
switch (mat.type)
{
case Lambertian: {
const vec3 BRDF = matColor * glm::one_over_pi<float>();
if (scene.shadow)
{
const int light = RandomIntMax(seed, scene.lightCount - 1);
const uvec3 lightIdx = scene.indices[scene.lightIndices[light]];
const vec3 lightPos = triangle::getRandomPointOnSurface(scene.vertices[lightIdx.x], scene.vertices[lightIdx.y], scene.vertices[lightIdx.z], RandomFloat(seed), RandomFloat(seed));
vec3 L = lightPos - ray.origin;
const float squaredDistance = dot(L, L);
const float distance = sqrtf(squaredDistance);
L /= distance;
const vec3 cNormal = scene.centerNormals[scene.lightIndices[light]];
const vec3 baryLight = triangle::getBaryCoords(lightPos, cNormal, scene.vertices[lightIdx.x], scene.vertices[lightIdx.y], scene.vertices[lightIdx.z]);
const vec3 lightNormal = triangle::getNormal(bary, scene.normals[lightIdx.x], scene.normals[lightIdx.y], scene.normals[lightIdx.z]);
const float NdotL = dot(normal, L);
const float LNdotL = dot(lightNormal, -L);
if (NdotL > 0 && LNdotL > 0)
{
const float area = triangle::getArea(scene.vertices[lightIdx.x], scene.vertices[lightIdx.y], scene.vertices[lightIdx.z]);
const auto emission = scene.gpuMaterials[scene.gpuMatIdxs[light]].emission;
const vec3 shadowCol = ray.throughput * BRDF * emission * float(scene.lightCount) * NdotL;
const float lambertPDF = NdotL * glm::one_over_pi<float>();
const float lightPDF = squaredDistance / (LNdotL * area);
const unsigned int shadowIdx = atomicAdd(&shadow_ray_cnt, 1);
const float pdf = balancePDFs(lambertPDF, lightPDF);
sRays[shadowIdx] = ShadowRay(
ray.origin + L * scene.normalEpsilon, L, shadowCol * pdf,
distance - scene.distEpsilon, ray.index
);
ray.lastNormal = normal;
}
}
ray.reflectCosineWeighted(RandomFloat(seed), RandomFloat(seed));
const float NdotR = dot(normal, ray.direction);
const float PDF = NdotR * glm::one_over_pi<float>();
ray.lastBounceType = Lambertian;
ray.throughput *= BRDF * NdotR / PDF;
break;
}
case Specular: {
ray.throughput *= matColor;
ray.reflect(normal);
ray.lastBounceType = Specular;
break;
}
case Fresnel: {
ray.throughput *= matColor;
const vec3 dir = ray.direction;
ray.reflect(normal);
ray.lastBounceType = Specular;
const float n1 = backFacing ? mat.refractIdx : 1.0f;
const float n2 = backFacing ? 1.0f : mat.refractIdx;
const float n = n1 / n2;
const float cosTheta = dot(normal, -dir);
const float k = 1.0f - (n * n) * (1.0f - cosTheta * cosTheta);
if (k > 0)
{
const float a = n1 - n2;
const float b = n1 + n2;
const float R0 = (a * a) / (b * b);
const float c = 1.0f - cosTheta;
const float Fr = R0 + (1.0f - R0) * (c * c * c * c * c);
if (RandomFloat(seed) > Fr)
{
ray.lastBounceType = Fresnel;
if (backFacing)
ray.throughput *= exp(-mat.absorption * ray.t);;
ray.origin -= EPSILON * 2.0f * normal;
ray.direction = normalize(n * dir + normal * (n * cosTheta - sqrtf(k)));
}
}
break;
}
default:
break;
}
ray.throughput = glm::max(vec3(0.0f), ray.throughput);
const float prob = ray.bounces >= 3 ? min(0.5f, max(ray.throughput.x, min(ray.throughput.y, ray.throughput.z))) : 1.0f;
if (ray.bounces < MAX_DEPTH && prob > EPSILON && prob > RandomFloat(seed))
{
ray.origin += ray.direction * scene.normalEpsilon;
ray.bounces++;
ray.lastNormal = normal;
ray.throughput /= prob;
unsigned int primary_index = atomicAdd(&primary_ray_cnt, 1);
ray.lastBounceType = mat.type;
eRays[primary_index] = ray;
}
else
{
ray.throughput = vec3(0.0f);
atomicAdd(&scene.currentFrame[ray.index].a, 1.0f);
}
}
}
__global__ void LAUNCH_BOUNDS shade_microfacet(Ray * rays, Ray * eRays, ShadowRay * sRays, SceneData scene, unsigned int frame, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_microfacet, 1);
if (index >= rayBufferSize) return;
Ray & ray = rays[index];
if (!ray.valid()) continue;
const Material & mat = scene.gpuMaterials[scene.gpuMatIdxs[ray.hit_idx]];
if (mat.type == Light || mat.type < Beckmann) continue;
vec3 color = vec3(0.0f);
unsigned int seed = (frame * ray.index * 147565741) * 720898027 * index;
ray.origin = ray.getHitpoint();
const uvec3 tIdx = scene.indices[ray.hit_idx];
const vec3 cN = scene.centerNormals[ray.hit_idx];
const vec3 bary = triangle::getBaryCoords(ray.origin, cN, scene.vertices[tIdx.x], scene.vertices[tIdx.y], scene.vertices[tIdx.z]);
const vec2 tCoords = triangle::getTexCoords(bary, scene.texCoords[tIdx.x], scene.texCoords[tIdx.y], scene.texCoords[tIdx.z]);
vec3 normal = mat.normalTex >= 0
? sampleToWorld(scene.getTextureNormal(mat.normalTex, tCoords), cN)
: triangle::getNormal(bary, scene.normals[tIdx.x], scene.normals[tIdx.y], scene.normals[tIdx.z]);
const bool backFacing = glm::dot(normal, ray.direction) >= 0.0f;
normal *= backFacing ? -1.0f : 1.0f;
const vec3 matColor = mat.diffuseTex < 0 ? mat.albedo : scene.getTextureColor(mat.diffuseTex, tCoords);
const auto mf = scene.microfacets[scene.gpuMatIdxs[ray.hit_idx]];
const vec3 wi = -ray.direction;
vec3 T, B;
convertToLocalSpace(normal, &T, &B);
const vec3 wiLocal = normalize(vec3(dot(T, wi), dot(B, wi), dot(normal, wi)));
const vec3 wmLocal = mat.sample(mf, wiLocal, RandomFloat(seed), RandomFloat(seed));
// Half-way vector
const vec3 wm = T * wmLocal.x + B * wmLocal.y + normal * wmLocal.z;
// Local new ray direction
const vec3 woLocal = glm::reflect(-wiLocal, wmLocal);
// New outgoing ray direction
vec3 wo = localToWorld(woLocal, T, B, wm);
ray.lastBounceType = mat.type;
const float PDF = mat.evaluate(mf, woLocal, wmLocal, wiLocal);
if (ray.lastBounceType >= FresnelBeckmann)
{
ray.lastBounceType = Fresnel;
const float n1 = backFacing ? mat.refractIdx : 1.0f;
const float n2 = backFacing ? 1.0f : mat.refractIdx;
const float n = n1 / n2;
const float cosTheta = dot(wm, wi);
const float k = 1.0f - (n * n) * (1.0f - cosTheta * cosTheta);
if (k > 0)
{
const float a = n1 - n2;
const float b = n1 + n2;
const float R0 = (a * a) / (b * b);
const float c = 1.0f - cosTheta;
const float Fr = R0 + (1.0f - R0) * (c * c * c * c * c);
if (RandomFloat(seed) > Fr)
{
ray.lastBounceType = Fresnel;
if (backFacing)
ray.throughput *= exp(-mat.absorption * ray.t);;
wo = normalize(n * -wi + wm * (n * cosTheta - sqrtf(k)));
}
}
}
if (ray.lastBounceType != Fresnel && scene.shadow)
{
const int light = RandomIntMax(seed, scene.lightCount - 1);
const uvec3 lightIdx = scene.indices[scene.lightIndices[light]];
const vec3 lightPos = triangle::getRandomPointOnSurface(scene.vertices[lightIdx.x], scene.vertices[lightIdx.y], scene.vertices[lightIdx.z], RandomFloat(seed), RandomFloat(seed));
vec3 L = lightPos - ray.origin;
const float squaredDistance = dot(L, L);
const float distance = sqrtf(squaredDistance);
L /= distance;
const vec3 cNormal = scene.centerNormals[scene.lightIndices[light]];
const vec3 baryLight = triangle::getBaryCoords(lightPos, cNormal, scene.vertices[lightIdx.x], scene.vertices[lightIdx.y], scene.vertices[lightIdx.z]);
const vec3 lightNormal = triangle::getNormal(bary, scene.normals[lightIdx.x], scene.normals[lightIdx.y], scene.normals[lightIdx.z]);
const float NdotL = dot(wm, L);
const float LNdotL = dot(lightNormal, -L);
if (NdotL > 0 && LNdotL > 0)
{
const float area = triangle::getArea(scene.vertices[lightIdx.x], scene.vertices[lightIdx.y], scene.vertices[lightIdx.z]);
const auto emission = scene.gpuMaterials[scene.gpuMatIdxs[light]].emission;
const float mfPDF = 1.0f / mat.evaluate(mf, L, wm, wi);
const float lightPDF = squaredDistance / (LNdotL * area);
const vec3 shadowCol = ray.throughput * matColor / mfPDF * emission * NdotL / lightPDF;
const unsigned int shadowIdx = atomicAdd(&shadow_ray_cnt, 1);
const float pdf = balancePDFs(lightPDF, mfPDF);
sRays[shadowIdx] = ShadowRay(
ray.origin + scene.normalEpsilon * L, L, shadowCol,
distance - scene.distEpsilon, ray.index
);
}
}
ray.throughput *= matColor * PDF;
ray.direction = wo;
ray.throughput = glm::max(vec3(0.0f), ray.throughput);
const float prob = ray.bounces >= 3 ? min(0.5f, max(ray.throughput.x, min(ray.throughput.y, ray.throughput.z))) : 1.0f;
if (ray.bounces < MAX_DEPTH && prob > EPSILON && prob > RandomFloat(seed))
{
ray.origin += ray.direction * scene.normalEpsilon;
ray.bounces++;
ray.throughput /= prob;
unsigned int primary_index = atomicAdd(&primary_ray_cnt, 1);
ray.lastBounceType = mat.type;
ray.lastNormal = wm;
eRays[primary_index] = ray;
}
else
{
ray.throughput = vec3(0.0f);
atomicAdd(&scene.currentFrame[ray.index].a, 1.0f);
}
}
}
__global__ void LAUNCH_BOUNDS shade_invalid_ref(Ray * rays, Ray * eRays, ShadowRay * sRays, SceneData scene, unsigned int frame, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_invalid, 1);
if (index >= rayBufferSize) return;
Ray & ray = rays[index];
vec3 color = vec3(0.0f);
float alpha = 1.0f;
if (ray.valid())
{
const Material& mat = scene.gpuMaterials[scene.gpuMatIdxs[ray.hit_idx]];
if (mat.type != Light) continue;
color = ray.throughput * mat.emission;
}
else if (scene.skyboxEnabled)
{
const vec2 uv = {
1.0f + atan2f(ray.direction.x, -ray.direction.z) * glm::one_over_pi<float>() * 0.5f,
1.0f - acosf(ray.direction.y) * glm::one_over_pi<float>()
};
color = ray.throughput * vec3(scene.getTextureColor(scene.skyboxTexture, uv));
}
ray.throughput = vec3(0.0f);
atomicAdd(&scene.currentFrame[ray.index].r, color.r);
atomicAdd(&scene.currentFrame[ray.index].g, color.g);
atomicAdd(&scene.currentFrame[ray.index].b, color.b);
atomicAdd(&scene.currentFrame[ray.index].a, alpha);
}
}
__global__ void LAUNCH_BOUNDS shade_regular_ref(Ray * rays, Ray * eRays, ShadowRay * sRays, SceneData scene, unsigned int frame, int rayBufferSize)
{
while (true)
{
const int index = atomicAdd(&ray_nr_regular, 1);
if (index >= rayBufferSize) return;
Ray & ray = rays[index];
if (!ray.valid()) continue;
const Material & mat = scene.gpuMaterials[scene.gpuMatIdxs[ray.hit_idx]];
if (mat.type == Light || mat.type >= Beckmann) continue;
vec3 color = vec3(0.0f);
unsigned int seed = (frame * ray.index * 147565741) * 720898027 * index;
ray.origin = ray.getHitpoint();
const uvec3 tIdx = scene.indices[ray.hit_idx];
const vec3 cN = scene.centerNormals[ray.hit_idx];
const vec3 bary = triangle::getBaryCoords(ray.origin, cN, scene.vertices[tIdx.x], scene.vertices[tIdx.y], scene.vertices[tIdx.z]);
const vec2 tCoords = triangle::getTexCoords(bary, scene.texCoords[tIdx.x], scene.texCoords[tIdx.y], scene.texCoords[tIdx.z]);
vec3 normal = mat.normalTex >= 0
? sampleToWorld(scene.getTextureNormal(mat.normalTex, tCoords), cN)
: triangle::getNormal(bary, scene.normals[tIdx.x], scene.normals[tIdx.y], scene.normals[tIdx.z]);
const bool backFacing = glm::dot(normal, ray.direction) >= 0.0f;
normal *= backFacing ? -1.0f : 1.0f;
const vec3 matColor = mat.diffuseTex < 0 ? mat.albedo : scene.getTextureColor(mat.diffuseTex, tCoords);
ray.origin += normal * EPSILON;
switch (mat.type)
{
case Lambertian: {
const vec3 BRDF = matColor * glm::one_over_pi<float>();
ray.reflectCosineWeighted(RandomFloat(seed), RandomFloat(seed));
const float NdotR = dot(normal, ray.direction);
const float PDF = NdotR * glm::one_over_pi<float>();
ray.lastBounceType = Lambertian;
ray.throughput *= BRDF * NdotR / PDF;
break;
}
case Specular: {
ray.throughput *= matColor;
ray.reflect(normal);
ray.lastBounceType = Specular;
break;
}
case Fresnel: {
ray.throughput *= matColor;
const vec3 dir = ray.direction;
ray.reflect(normal);
ray.lastBounceType = Specular;
const float n1 = backFacing ? mat.refractIdx : 1.0f;
const float n2 = backFacing ? 1.0f : mat.refractIdx;
const float n = n1 / n2;
const float cosTheta = dot(normal, -dir);
const float k = 1.0f - (n * n) * (1.0f - cosTheta * cosTheta);
if (k > 0)
{
const float a = n1 - n2;
const float b = n1 + n2;
const float R0 = (a * a) / (b * b);
const float c = 1.0f - cosTheta;
const float Fr = R0 + (1.0f - R0) * (c * c * c * c * c);
const float r = RandomFloat(seed);
if (r > Fr)
{
ray.lastBounceType = Fresnel;
if (backFacing)
ray.throughput *= exp(-mat.absorption * ray.t);;
ray.origin -= EPSILON * 2.0f * normal;
ray.direction = normalize(n * dir + normal * (n * cosTheta - sqrtf(k)));
}
}
break;
}
default:
break;
}
ray.throughput = glm::max(vec3(0.0f), ray.throughput);
const float prob = ray.bounces >= 3 ? min(0.5f, max(ray.throughput.x, min(ray.throughput.y, ray.throughput.z))) : 1.0f;
if (ray.bounces < MAX_DEPTH && prob > EPSILON && prob > RandomFloat(seed))
{
ray.bounces++;
ray.throughput /= prob;
unsigned int primary_index = atomicAdd(&primary_ray_cnt, 1);
ray.lastBounceType = mat.type;
eRays[primary_index] = ray;
}
else
{
ray.throughput = vec3(0.0f);
atomicAdd(&scene.currentFrame[ray.index].a, 1.0f);
}
}
}
// Reference-mode shading for microfacet materials (Beckmann, GGX,
// Trowbridge-Reitz and their Fresnel variants). Persistent-thread kernel:
// threads repeatedly claim ray indices from the device counter
// ray_nr_microfacet until rayBufferSize rays have been processed. Surviving
// (bounced) rays are compacted into eRays via primary_ray_cnt; terminated
// rays bump the per-pixel sample counter (.a) in the accumulation buffer.
__global__ void LAUNCH_BOUNDS shade_microfacet_ref(Ray * rays, Ray * eRays, ShadowRay * sRays, SceneData scene, unsigned int frame, int rayBufferSize)
{
    while (true)
    {
        // Claim the next work item; exit once the buffer is drained.
        const int index = atomicAdd(&ray_nr_microfacet, 1);
        if (index >= rayBufferSize) return;
        Ray & ray = rays[index];
        if (!ray.valid()) continue;
        const Material & mat = scene.gpuMaterials[scene.gpuMatIdxs[ray.hit_idx]];
        // Only microfacet materials are handled here; lights and the simpler
        // material types are shaded by the other shade kernels.
        if (mat.type == Light || mat.type < Beckmann) continue;
        // Per-ray RNG seed mixing frame number, pixel index and queue slot.
        unsigned int seed = (frame * ray.index * 147565741) * 720898027 * index;
        ray.origin = ray.getHitpoint();
        // Interpolated shading attributes at the hit point.
        const uvec3 tIdx = scene.indices[ray.hit_idx];
        const vec3 cN = scene.centerNormals[ray.hit_idx];
        const vec3 bary = triangle::getBaryCoords(ray.origin, cN, scene.vertices[tIdx.x], scene.vertices[tIdx.y], scene.vertices[tIdx.z]);
        const vec2 tCoords = triangle::getTexCoords(bary, scene.texCoords[tIdx.x], scene.texCoords[tIdx.y], scene.texCoords[tIdx.z]);
        vec3 normal = mat.normalTex >= 0
            ? sampleToWorld(scene.getTextureNormal(mat.normalTex, tCoords), cN)
            : triangle::getNormal(bary, scene.normals[tIdx.x], scene.normals[tIdx.y], scene.normals[tIdx.z]);
        // Make the shading normal face the incoming ray.
        const bool backFacing = glm::dot(normal, ray.direction) >= 0.0f;
        normal *= backFacing ? -1.0f : 1.0f;
        const vec3 matColor = mat.diffuseTex < 0 ? mat.albedo : scene.getTextureColor(mat.diffuseTex, tCoords);
        const auto mf = scene.microfacets[scene.gpuMatIdxs[ray.hit_idx]];
        // Build a tangent-space basis around the shading normal and express
        // the direction towards the camera (wi) in that basis.
        const vec3 wi = -ray.direction;
        vec3 T, B;
        convertToLocalSpace(normal, &T, &B);
        const vec3 wiLocal = normalize(vec3(dot(T, wi), dot(B, wi), dot(normal, wi)));
        // Sample a microfacet normal from the material's distribution.
        // The Fresnel variants share the sampler of their base distribution.
        vec3 wmLocal{};
        switch (mat.type)
        {
        case(Beckmann):
        case(FresnelBeckmann):
        {
            wmLocal = mf.sample_beckmann(wiLocal, RandomFloat(seed), RandomFloat(seed));
            break;
        }
        case(GGX):
        case(FresnelGGX):
        {
            wmLocal = mf.sample_ggx(wiLocal, RandomFloat(seed), RandomFloat(seed));
            break;
        }
        case(Trowbridge):
        case(FresnelTrowbridge):
        {
            wmLocal = mf.sample_trowbridge_reitz(wiLocal, RandomFloat(seed), RandomFloat(seed));
            break;
        }
        default:
            break;
        }
        // Half-way vector in world space
        const vec3 wm = T * wmLocal.x + B * wmLocal.y + normal * wmLocal.z;
        // Local new ray direction
        const vec3 woLocal = glm::reflect(-wiLocal, wmLocal);
        // New outgoing ray direction
        vec3 wo = localToWorld(woLocal, T, B, wm);
        // Evaluate the sampling PDF for the chosen distribution (Fresnel
        // variants again share their base distribution's PDF).
        float PDF = 0.0f;
        switch (mat.type)
        {
        case(Beckmann):
        case(FresnelBeckmann):
        {
            PDF = mf.pdf_beckmann(woLocal, wmLocal, wiLocal);
            break;
        }
        case(GGX):
        case(FresnelGGX):
        {
            PDF = mf.pdf_ggx(woLocal, wmLocal, wiLocal);
            break;
        }
        case(Trowbridge):
        case(FresnelTrowbridge):
        {
            PDF = mf.pdf_trowbridge_reitz(woLocal, wmLocal, wiLocal);
            break;
        }
        default:
            break;
        }
        // Offset along the sampled facet normal to avoid self-intersection.
        ray.origin += wm * scene.normalEpsilon;
        if (mat.type >= FresnelBeckmann)
        {
            // Fresnel variants: stochastically pick reflection or refraction
            // using Schlick's approximation of the Fresnel reflectance.
            ray.lastBounceType = mat.type;
            const float n1 = backFacing ? mat.refractIdx : 1.0f;
            const float n2 = backFacing ? 1.0f : mat.refractIdx;
            const float n = n1 / n2;
            const float cosTheta = dot(wm, wi);
            const float k = 1.0f - (n * n) * (1.0f - cosTheta * cosTheta);
            if (k > 0) // k <= 0 means total internal reflection
            {
                const float a = n1 - n2;
                const float b = n1 + n2;
                const float R0 = (a * a) / (b * b);
                const float c = 1.0f - cosTheta;
                const float Fr = R0 + (1.0f - R0) * (c * c * c * c * c);
                const float r = RandomFloat(seed);
                if (r > Fr)
                {
                    // Refraction branch: apply Beer-Lambert absorption when
                    // exiting the medium and bend the ray through the facet.
                    // NOTE(review): this lastBounceType is overwritten with
                    // mat.type again before the ray is emitted below --
                    // confirm whether the Fresnel tag should survive.
                    ray.lastBounceType = Fresnel;
                    if (backFacing)
                        ray.throughput *= exp(-mat.absorption * ray.t);
                    ray.origin -= EPSILON * 2.0f * wm;
                    wo = normalize(n * -wi + wm * (n * cosTheta - sqrtf(k)));
                }
            }
        }
        ray.throughput *= matColor * PDF;
        ray.direction = wo;
        ray.throughput = glm::max(vec3(0.0f), ray.throughput);
        // Russian roulette: after 3 bounces, continue with probability
        // bounded by the throughput (capped at 0.5).
        const float prob = ray.bounces >= 3 ? min(0.5f, max(ray.throughput.x, min(ray.throughput.y, ray.throughput.z))) : 1.0f;
        if (ray.bounces < MAX_DEPTH && prob > EPSILON && prob > RandomFloat(seed))
        {
            ray.bounces++;
            ray.throughput /= prob;
            // Compact surviving rays into the extension-ray buffer.
            unsigned int primary_index = atomicAdd(&primary_ray_cnt, 1);
            ray.lastBounceType = mat.type;
            ray.lastNormal = wm;
            eRays[primary_index] = ray;
        }
        else
        {
            // Path terminated: record one finished sample for this pixel.
            ray.throughput = vec3(0.0f);
            atomicAdd(&scene.currentFrame[ray.index].a, 1.0f);
        }
    }
}
// Shadow-ray connection pass. Persistent threads drain the shadow-ray queue
// (bounded by the device-side counter shadow_ray_cnt; the rayBufferSize
// argument is not consulted here). A ray whose segment to the light is
// unoccluded deposits its contribution into the accumulation buffer.
__global__ void LAUNCH_BOUNDS connect(ShadowRay * sRays, SceneData scene, int rayBufferSize)
{
    for (;;)
    {
        const int slot = atomicAdd(&ray_nr_connect, 1);
        if (slot >= shadow_ray_cnt) return;

        const ShadowRay & sray = sRays[slot];
        // Occlusion test along the shadow segment of length sray.t.
        const bool visible = MBVHNode::traverseMBVHShadow(sray.origin, sray.direction, sray.t, scene);
        if (!visible) continue;

        // Atomic accumulation: several shadow rays may target the same pixel.
        atomicAdd(&scene.currentFrame[sray.index].r, sray.color.r);
        atomicAdd(&scene.currentFrame[sray.index].g, sray.color.g);
        atomicAdd(&scene.currentFrame[sray.index].b, sray.color.b);
    }
}
// Resolve pass: averages the accumulated radiance by the sample count stored
// in the alpha channel and writes a gamma-corrected (1/2.2) color to the
// display surface. One thread per pixel.
// NOTE(review): assumes at least one accumulated sample (color.a > 0).
__global__ void draw_framebuffer(vec4 * currentBuffer, int width, int height)
{
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= width || py >= height) return;

    const vec4 & acc = currentBuffer[px + py * width];
    // Average the summed radiance over the number of samples (.a).
    const vec3 avg = vec3(acc.r, acc.g, acc.b) / acc.a;
    // Approximate sRGB gamma correction.
    const vec3 corrected = glm::pow(avg, vec3(1.0f / 2.2f));
    draw(px, py, vec4(corrected, 1.0f));
}
// Runs one path-tracing iteration ("sample") for the whole frame: builds the
// camera basis, generates primary rays, then runs the wavefront pipeline
// (extend -> shade [-> connect]) and swaps the ray buffers so the extended
// rays become the input of the next iteration.
// NOTE(review): the reference branch never launches `connect`, so shadow
// rays are only traced in the non-reference pipeline -- confirm intended.
__host__ inline void sample(uint &frame, Params& params, int rayBufferSize)
{
// Orthonormal camera basis: w = view direction, u = right, v = up.
const auto* camera = params.camera;
const vec3 w = camera->getForward();
const vec3 up = camera->getUp();
const vec3 u = normalize(cross(w, up));
const vec3 v = normalize(cross(u, w));
// Image-plane spanning vectors, scaled by the focal-plane distance.
vec3 hor, ver;
const float dist = camera->getPlaneDistance();
hor = u * dist;
ver = v * dist;
// Stretch the image plane along the longer screen dimension.
const float aspectRatio = float(params.width) / float(params.height);
if (params.width > params.height)
hor *= aspectRatio;
else
ver *= aspectRatio;
generatePrimaryRays << <params.smCores * 8, 128 >> > (params.gpuRays, camera->getPosition(), w, hor, ver, params.width, params.height,
1.0f / float(params.width), 1.0f / float(params.height), rayBufferSize, frame);
// Reset the device-side work counters for this iteration.
setGlobals << <1, 1 >> > (rayBufferSize, params.width, params.height);
// Intersect all rays with the scene.
extend << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuScene, rayBufferSize);
if (params.reference)
{
// Reference path tracer: brute-force shading kernels, no next-event
// estimation connect pass.
shade_regular_ref << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuNextRays, params.gpuShadowRays, params.gpuScene, frame, rayBufferSize);
shade_invalid_ref << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuNextRays, params.gpuShadowRays, params.gpuScene, frame, rayBufferSize);
shade_microfacet_ref << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuNextRays, params.gpuShadowRays, params.gpuScene, frame, rayBufferSize);
}
else
{
shade_regular << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuNextRays, params.gpuShadowRays, params.gpuScene, frame, rayBufferSize);
shade_invalid << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuNextRays, params.gpuShadowRays, params.gpuScene, frame, rayBufferSize);
shade_microfacet << <params.smCores * 8, 128 >> > (params.gpuRays, params.gpuNextRays, params.gpuShadowRays, params.gpuScene, frame, rayBufferSize);
// Trace the shadow rays produced by the shade kernels.
connect << <params.smCores * 8, 128 >> > (params.gpuShadowRays, params.gpuScene, rayBufferSize);
}
// Extended rays become the primary rays of the next iteration.
std::swap(params.gpuRays, params.gpuNextRays);
frame++;
}
// Per-frame host entry point: binds the CUDA array to the output surface,
// runs one path-tracing sample, then resolves the accumulation buffer to the
// screen with a 16x16-thread-per-block grid covering every pixel.
// Returns the status of the surface binding; kernel launch errors surface
// at the cuda(DeviceSynchronize()) call.
cudaError launchKernels(cudaArray_const_t array, Params & params, int rayBufferSize)
{
    static uint frame = 1;
    cudaError err;
    err = cuda(BindSurfaceToArray(framebuffer, array));
    // On the very first sample, reset the device-side primary-ray counter
    // (params.samples is 0 here, which is exactly the value we want to write).
    // Fixed: the address-of expression was mangled to "&para;ms.samples"
    // by an HTML entity; restored to &params.samples.
    if (params.samples == 0)
        cuda(MemcpyToSymbol(primary_ray_cnt, &params.samples, sizeof(int)));
    sample(frame, params, rayBufferSize);
    dim3 dimBlock(16, 16);
    dim3 dimGrid((params.width + dimBlock.x - 1) / dimBlock.x, (params.height + dimBlock.y - 1) / dimBlock.y);
    draw_framebuffer << <dimGrid, dimBlock >> > (params.gpuScene.currentFrame, params.width, params.height);
    cuda(DeviceSynchronize());
    params.samples++;
    // Keep the RNG-seeding frame counter away from the wrap-around to 0.
    if (frame >= UINT_MAX) frame = 1;
    return err;
}
9495d47e88d634e847c1aec25396dd785d5de3e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cassert>
#include "dali/core/error_handling.h"
#include "dali/core/format.h"
#include "dali/core/static_switch.h"
#include "dali/core/util.h"
#include "dali/kernels/imgproc/color_manipulation/color_space_conversion_kernel.cuh"
#include "dali/operators/decoder/nvjpeg/permute_layout.h"
namespace dali {
// Planar -> interleaved layout conversion for a C-channel image.
// One thread handles one pixel; ConvertSatNorm performs the sample-type
// rescaling/saturation between Input and Output.
template <int C, typename Output, typename Input>
__global__ void planar_to_interleaved(Output *output, const Input *input, int64_t npixels) {
  const int64_t pixel = blockIdx.x * blockDim.x + threadIdx.x;
  if (pixel >= npixels) return;
  Output *dst = output + C * pixel;
  for (int ch = 0; ch < C; ch++) {
    dst[ch] = ConvertSatNorm<Output>(input[ch * npixels + pixel]);
  }
}
// Planar RGB -> interleaved BGR, one thread per pixel.
template <typename Output, typename Input>
__global__ void planar_rgb_to_bgr(Output *output, const Input *input, int64_t npixels) {
  const int64_t pixel = blockIdx.x * blockDim.x + threadIdx.x;
  if (pixel >= npixels) return;
  // Load all three planes first, then store interleaved with R/B swapped.
  const Output red   = ConvertSatNorm<Output>(input[pixel]);
  const Output green = ConvertSatNorm<Output>(input[pixel + npixels]);
  const Output blue  = ConvertSatNorm<Output>(input[pixel + 2 * npixels]);
  Output *dst = output + 3 * pixel;
  dst[0] = blue;
  dst[1] = green;
  dst[2] = red;
}
// Planar RGB -> interleaved YCbCr (ITU-R BT.601 coefficients), one thread
// per pixel. The RGB samples are normalized to float before conversion.
template <typename Output, typename Input>
__global__ void planar_rgb_to_ycbcr(Output *output, const Input *input, int64_t npixels) {
  const int64_t pixel = blockIdx.x * blockDim.x + threadIdx.x;
  if (pixel >= npixels) return;
  const vec<3, float> rgb = {ConvertNorm<float>(input[pixel]),
                             ConvertNorm<float>(input[pixel + npixels]),
                             ConvertNorm<float>(input[pixel + 2 * npixels])};
  Output *dst = output + 3 * pixel;
  dst[0] = kernels::color::itu_r_bt_601::rgb_to_y<Output>(rgb);
  dst[1] = kernels::color::itu_r_bt_601::rgb_to_cb<Output>(rgb);
  dst[2] = kernels::color::itu_r_bt_601::rgb_to_cr<Output>(rgb);
}
// Planar RGB -> single-channel grayscale, one thread per pixel.
template <typename Output, typename Input>
__global__ void planar_rgb_to_gray(Output *output, const Input *input, int64_t npixels) {
  const int64_t pixel = blockIdx.x * blockDim.x + threadIdx.x;
  if (pixel >= npixels) return;
  const vec<3, float> rgb = {ConvertNorm<float>(input[pixel]),
                             ConvertNorm<float>(input[pixel + npixels]),
                             ConvertNorm<float>(input[pixel + 2 * npixels])};
  output[pixel] = kernels::color::rgb_to_gray<Output>(rgb);
}
// Converts a planar device image (comp_count planes of npixels samples each)
// to interleaved layout, optionally remapping RGB to BGR or YCbCr.
// All work is enqueued on `stream`; `pixel_type` is currently unused.
template <typename Output, typename Input>
void PlanarToInterleaved(Output *output, const Input *input, int64_t npixels,
                         int64_t comp_count, DALIImageType out_img_type, DALIDataType pixel_type,
                         hipStream_t stream) {
  if (comp_count < 2) {
    // Single plane (or empty image): planar and interleaved layouts coincide,
    // so a raw device-to-device copy suffices. hipMemcpyAsync takes a size in
    // BYTES; the previous code passed only npixels * comp_count, which
    // truncated any sample type wider than one byte.
    // NOTE(review): this path performs no Output<-Input conversion, so it
    // assumes sizeof(Output) == sizeof(Input) -- verify the instantiations.
    CUDA_CALL(
        hipMemcpyAsync(output, input, npixels * comp_count * sizeof(Output),
                       hipMemcpyDeviceToDevice, stream));
    return;
  }
  int num_blocks = div_ceil(npixels, 1024);
  int block_size = (npixels < 1024) ? npixels : 1024;
  if (out_img_type == DALI_RGB || out_img_type == DALI_ANY_DATA) {
    // Pure layout permutation; the kernel is specialized on the channel count.
    VALUE_SWITCH(comp_count, c_static, (2, 3, 4), (
      hipLaunchKernelGGL(( planar_to_interleaved<c_static>)
          , dim3(num_blocks), dim3(block_size), 0, stream, output, input, npixels);
    ), DALI_FAIL(make_string("Unsupported number of components: ", comp_count)););  // NOLINT
  } else if (out_img_type == DALI_BGR) {
    hipLaunchKernelGGL(( planar_rgb_to_bgr), dim3(num_blocks), dim3(block_size), 0, stream, output, input, npixels);
  } else if (out_img_type == DALI_YCbCr) {
    hipLaunchKernelGGL(( planar_rgb_to_ycbcr), dim3(num_blocks), dim3(block_size), 0, stream, output, input, npixels);
  } else {
    assert(false);  // unsupported output image type
  }
}
// Launches the planar-RGB -> grayscale kernel on `stream`.
// `pixel_type` is currently unused.
template <typename Output, typename Input>
void PlanarRGBToGray(Output *output, const Input *input, int64_t npixels,
DALIDataType pixel_type, hipStream_t stream) {
// Up to 1024 threads per block; shrink the block for small images.
int num_blocks = div_ceil(npixels, 1024);
int block_size = (npixels < 1024) ? npixels : 1024;
hipLaunchKernelGGL(( planar_rgb_to_gray), dim3(num_blocks), dim3(block_size), 0, stream, output, input, npixels);
}
// Thin wrapper around the shared color-space conversion kernel:
// interleaved RGB -> interleaved YCbCr, enqueued on `stream`.
template <typename Output, typename Input>
void Convert_RGB_to_YCbCr(Output *out_data, const Input *in_data, int64_t npixels,
hipStream_t stream) {
kernels::color::RunColorSpaceConversionKernel(out_data, in_data, DALI_YCbCr, DALI_RGB, npixels,
stream);
}
template void PlanarToInterleaved<uint8_t, uint16_t>(uint8_t *, const uint16_t *, int64_t, int64_t,
DALIImageType, DALIDataType, hipStream_t);
template void PlanarToInterleaved<uint8_t, uint8_t>(uint8_t *, const uint8_t *, int64_t, int64_t,
DALIImageType, DALIDataType, hipStream_t);
template void PlanarRGBToGray<uint8_t, uint16_t>(uint8_t *, const uint16_t *, int64_t, DALIDataType,
hipStream_t);
template void PlanarRGBToGray<uint8_t, uint8_t>(uint8_t *, const uint8_t *, int64_t, DALIDataType,
hipStream_t);
template void Convert_RGB_to_YCbCr<uint8_t, uint8_t>(uint8_t *, const uint8_t *, int64_t,
hipStream_t);
} // namespace dali
| 9495d47e88d634e847c1aec25396dd785d5de3e8.cu | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cassert>
#include "dali/core/error_handling.h"
#include "dali/core/format.h"
#include "dali/core/static_switch.h"
#include "dali/core/util.h"
#include "dali/kernels/imgproc/color_manipulation/color_space_conversion_kernel.cuh"
#include "dali/operators/decoder/nvjpeg/permute_layout.h"
namespace dali {
// Planar -> interleaved layout conversion for a C-channel image.
// One thread per pixel; the grid must cover npixels (tail guarded below).
template <int C, typename Output, typename Input>
__global__ void planar_to_interleaved(Output *output, const Input *input, int64_t npixels) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= npixels) return;
Output *out = output + C * tid;
for (int c = 0; c < C; ++c) {
// ConvertSatNorm rescales/saturates between the two sample types.
out[c] = ConvertSatNorm<Output>(input[c * npixels + tid]);
}
}
// Planar RGB -> interleaved BGR, one thread per pixel.
// All three planes are read before any store, so input may not be clobbered
// mid-pixel.
template <typename Output, typename Input>
__global__ void planar_rgb_to_bgr(Output *output, const Input *input, int64_t npixels) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= npixels) return;
Output r = ConvertSatNorm<Output>(input[tid]);
Output g = ConvertSatNorm<Output>(input[tid + npixels]);
Output b = ConvertSatNorm<Output>(input[tid + 2 * npixels]);
Output *out = output + 3 * tid;
// Store interleaved with R and B swapped.
out[0] = b;
out[1] = g;
out[2] = r;
}
// Planar RGB -> interleaved YCbCr (ITU-R BT.601 coefficients), one thread
// per pixel; samples are normalized to float before conversion.
template <typename Output, typename Input>
__global__ void planar_rgb_to_ycbcr(Output *output, const Input *input, int64_t npixels) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= npixels) return;
vec<3, float> rgb = {ConvertNorm<float>(input[tid]), ConvertNorm<float>(input[tid + npixels]),
ConvertNorm<float>(input[tid + 2 * npixels])};
Output *out = output + 3 * tid;
out[0] = kernels::color::itu_r_bt_601::rgb_to_y<Output>(rgb);
out[1] = kernels::color::itu_r_bt_601::rgb_to_cb<Output>(rgb);
out[2] = kernels::color::itu_r_bt_601::rgb_to_cr<Output>(rgb);
}
// Planar RGB -> single-channel grayscale, one thread per pixel.
template <typename Output, typename Input>
__global__ void planar_rgb_to_gray(Output *output, const Input *input, int64_t npixels) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= npixels) return;
vec<3, float> rgb = {ConvertNorm<float>(input[tid]), ConvertNorm<float>(input[tid + npixels]),
ConvertNorm<float>(input[tid + 2 * npixels])};
output[tid] = kernels::color::rgb_to_gray<Output>(rgb);
}
// Converts a planar device image (comp_count planes of npixels samples each)
// to interleaved layout, optionally remapping RGB to BGR or YCbCr.
// All work is enqueued on `stream`; `pixel_type` is currently unused.
template <typename Output, typename Input>
void PlanarToInterleaved(Output *output, const Input *input, int64_t npixels,
                         int64_t comp_count, DALIImageType out_img_type, DALIDataType pixel_type,
                         cudaStream_t stream) {
  if (comp_count < 2) {
    // Single plane (or empty image): planar and interleaved layouts coincide,
    // so a raw device-to-device copy suffices. cudaMemcpyAsync takes a size
    // in BYTES; the previous code passed only npixels * comp_count, which
    // truncated any sample type wider than one byte.
    // NOTE(review): this path performs no Output<-Input conversion, so it
    // assumes sizeof(Output) == sizeof(Input) -- verify the instantiations.
    CUDA_CALL(
        cudaMemcpyAsync(output, input, npixels * comp_count * sizeof(Output),
                        cudaMemcpyDeviceToDevice, stream));
    return;
  }
  int num_blocks = div_ceil(npixels, 1024);
  int block_size = (npixels < 1024) ? npixels : 1024;
  if (out_img_type == DALI_RGB || out_img_type == DALI_ANY_DATA) {
    // Pure layout permutation; the kernel is specialized on the channel count.
    VALUE_SWITCH(comp_count, c_static, (2, 3, 4), (
      planar_to_interleaved<c_static>
          <<<num_blocks, block_size, 0, stream>>>(output, input, npixels);
    ), DALI_FAIL(make_string("Unsupported number of components: ", comp_count)););  // NOLINT
  } else if (out_img_type == DALI_BGR) {
    planar_rgb_to_bgr<<<num_blocks, block_size, 0, stream>>>(output, input, npixels);
  } else if (out_img_type == DALI_YCbCr) {
    planar_rgb_to_ycbcr<<<num_blocks, block_size, 0, stream>>>(output, input, npixels);
  } else {
    assert(false);  // unsupported output image type
  }
}
// Launches the planar-RGB -> grayscale kernel on `stream`.
// `pixel_type` is currently unused.
template <typename Output, typename Input>
void PlanarRGBToGray(Output *output, const Input *input, int64_t npixels,
DALIDataType pixel_type, cudaStream_t stream) {
// Up to 1024 threads per block; shrink the block for small images.
int num_blocks = div_ceil(npixels, 1024);
int block_size = (npixels < 1024) ? npixels : 1024;
planar_rgb_to_gray<<<num_blocks, block_size, 0, stream>>>(output, input, npixels);
}
// Thin wrapper around the shared color-space conversion kernel:
// interleaved RGB -> interleaved YCbCr, enqueued on `stream`.
template <typename Output, typename Input>
void Convert_RGB_to_YCbCr(Output *out_data, const Input *in_data, int64_t npixels,
cudaStream_t stream) {
kernels::color::RunColorSpaceConversionKernel(out_data, in_data, DALI_YCbCr, DALI_RGB, npixels,
stream);
}
template void PlanarToInterleaved<uint8_t, uint16_t>(uint8_t *, const uint16_t *, int64_t, int64_t,
DALIImageType, DALIDataType, cudaStream_t);
template void PlanarToInterleaved<uint8_t, uint8_t>(uint8_t *, const uint8_t *, int64_t, int64_t,
DALIImageType, DALIDataType, cudaStream_t);
template void PlanarRGBToGray<uint8_t, uint16_t>(uint8_t *, const uint16_t *, int64_t, DALIDataType,
cudaStream_t);
template void PlanarRGBToGray<uint8_t, uint8_t>(uint8_t *, const uint8_t *, int64_t, DALIDataType,
cudaStream_t);
template void Convert_RGB_to_YCbCr<uint8_t, uint8_t>(uint8_t *, const uint8_t *, int64_t,
cudaStream_t);
} // namespace dali
|
7448cef6902c980d05b85aef8ca61ccf2bc1a35a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#include <iostream>
#include "../include/list_stockham.h"
#include "../include/defines.h"
#include "../include/inlines.h"
#include "../include/printing.h"
#include "../include/cudautils.h"
#include "../include/fft_aux.h"
#include "../include/rdr_poly.h"
///////////////////////////////////////////////////////////////////////////////
// Reversion history:
//
// File created at Mon Jul 12 EDT 2010, WP
//
// -- list of 1d FFT implemented
// -- 2d FFT implemented
// -- list of 2d FFT implemented
//
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// When the fft size is small, say 256, it is not a good idea to run a single
// fft using GPUs. We implement the formula I_m @ DFT_n with the condition
// m * n large enough. Currently, we assume the n-th primitive root unity w
// is used for all DFTs.
//
// We use Stockham's FFT inside and let m = 2^j, n = 2^k.
//
// I_m @ DFT_n = Prod_{i = 0}^{k - 1}
// (I_m @ DFT2 @ I_{2^{k - 1}})
// (I_m @ D_{2, 2^{k - i - 1}} @ I_{2^i})
// (I_m @ L_2^{2^{k - i}} @ I_{2^i})
//
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// List of stride permutations: (I_m @ L_2^{2^{k - i}} @ I_{2^i})
///////////////////////////////////////////////////////////////////////////////
// each thread block consists of N_THD number of threads
// each thread block use N_THD X sizeof(sfixn) bytes shared memory
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, input array of length m * n
* @Y, output array of length m * n
* @m,
* @k, n = 2^k
* @i, index from 0 to k - 1
*
* Compute
*
* Y = (I_m @ L_2^{2^{k-i}} @ I_{2^i}) X
*
* with the case that s = 2^i >= N_THD, multiple thread blocks move one stride.
*
* The total number of thread blocks required is m * n / N_THD.
*
* Each group of n / N_THD thread blocks handle a subvector of size n.
*
* Requirements:
*
* (1) m >= 1 (2) k > i >= E_THD
*/
__global__
void list_stride_transpose2a_ker(sfixn *Y, const sfixn * const X,
sfixn k, sfixn i)
{
// Per-block staging buffer, one element per thread.
__shared__ sfixn block[N_THD];
// block index in the kernel
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// determine which subvector to work
// squo = bid / (n / N_THD)
// sbid = bid % (n / N_THD)
sfixn squo = (bid >> (k - E_THD));
sfixn sbid = (bid & ((1 << (k - E_THD)) - 1));
// now sbid is the block index inside each group
// delta = s / N_THD;
sfixn edelta = i - E_THD;
// iq = quo(sbid, delta) and ir = rem(sbid, delta)
sfixn iq = (sbid >> edelta);
sfixn ir = (sbid & ((1 << edelta) - 1));
// iqq = quo(iq, 2) and iqr = rem(iq, 2)
sfixn iqq = (iq >> 1);
sfixn iqr = (iq & 1);
// read in data from X
// the input offset for this block is squo * n + iq * s + ir * N_THD
const sfixn *din = X + (squo << k) + (iq << i) + (ir << E_THD);
// each thread read in one element
block[threadIdx.x] = din[threadIdx.x];
// NOTE(review): each thread writes and later reads only its own slot
// block[threadIdx.x], so the shared staging and this barrier appear
// redundant here -- confirm before simplifying.
__syncthreads();
// write data out to Y
// the output offset for this block is
// squo * n + rem(iq, 2) * n / 2 + quo(iq, 2) * s + ir * N_THD
sfixn *dout = Y + (squo << k) + (iqr << (k - 1))
+ (iqq << i) + (ir << E_THD);
dout[threadIdx.x] = block[threadIdx.x];
}
/**
* @X, input array of length m * n
* @Y, output array of length m * n
* @m,
* @k, n = 2^k
* @i, index from 0 to k - 1
*
* Compute
*
* Y = (I_m @ L_2^{2^{k-i}} @ I_{2^i}) X
*
* with the case that s = 2^i < N_THD, one thread block moves at least
* two strides. The total number of thread blocks required is m * n / N_THD.
* Each group of n / N_THD thread blocks handle a subvector of size n.
*
* Requirements:
*
* (1) m >= 1, (2) 0 <= i < E_THD, (3) k >= E_THD
*/
__global__
void list_stride_transpose2b_ker(sfixn *Y, const sfixn * const X,
sfixn k, sfixn i)
{
// Per-block staging buffer, one element per thread.
__shared__ sfixn block[N_THD];
// block index in the kernel
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// read in data from X
// the input offset for this block is bid * N_THD
const sfixn *din = X + (bid << E_THD);
block[threadIdx.x] = din[threadIdx.x];
// Barrier is required here: each thread below reads slot block[fi],
// which in general was written by a different thread.
__syncthreads();
// determine which subvector to work
// squo = bid / (n / N_THD)
// sbid = bid / (n / N_THD)
sfixn squo = (bid >> (k - E_THD));
sfixn sbid = (bid & ((1 << (k - E_THD)) - 1));
// now sbid is the block index inside each group
// the following code is to the in-block shuffle,
// hard to explain and check the note
// offset0 = squo * n + sbid * N_THD / 2
// offset1 = squo * n + sbid * N_THD / 2 + n / 2
// base = Y + offset0
sfixn *base = Y + (squo << k) + (sbid << (E_THD - 1));
sfixn tid = threadIdx.x;
// iq = quo(tid, s) and ir = rem(tid, s);
sfixn iq = (tid >> i);
sfixn ir = tid & ((1 << i) - 1);
// f(i) = (rem(2iq, N_THD/s) + quo(2iq, N_THD/s)) * s + ir
sfixn fi = (iq << 1) >> (E_THD - i);
fi += ((iq << 1) & ((1 << (E_THD - i)) - 1));
fi <<= i;
fi += ir;
// replace the following code by the branch-free code
// if (tid < N_THD/2)
// dout[tid] = block[fi];
// else
// dout[tid - N_THD / 2 + (1 << (k-1))] = block[fi];
sfixn *dout = base + (tid >> (E_THD - 1))
* ((1 << (k - 1)) - (1 << (E_THD - 1)));
dout[tid] = block[fi];
}
/**
* @X, device array of length m * n (input)
* @Y, device array of length m * n (output)
* @m,
* @k, n = 2^k
* @i, index from 0 to k - 2
*
* Compute
*
* Y = (I_m @ L_2^{2^{k-i}} @ I_{2^i}) X
*
* Requirememts:
*
* (1) m >= 1, (2) 0 <= i <= k - 2, (3) k >= N_THD
*
* TESTED
*
*/
void list_stride_transpose2_dev(sfixn *Y, const sfixn * const X, sfixn m,
sfixn k, sfixn i)
{
if (DEBUG) assert((m >= 1) && (i >= 0) && (k >= E_THD) && (i < k - 1));
// One thread block of N_THD threads per N_THD elements: nb = m * n / N_THD.
sfixn nb = (m << (k - E_THD));
dim3 nBlk(nb, 1, 1);
// the maximal possible dimension is 2^15 = 32768 < 65535
// this requires nb <= 2^30, OK for now.
// NOTE(review): when nb > 2^15 and nb is not a multiple of 2^15 (possible
// since m need not be a power of 2), nb >> 15 truncates and some blocks
// would never be launched -- confirm callers guarantee divisibility.
if (nb > (1 << 15)) { nBlk.x = (1 << 15); nBlk.y = (nb >> 15); }
// Kernel choice: a stride spans multiple blocks (i >= E_THD) vs.
// a single block covers multiple strides (i < E_THD).
if (i >= E_THD) {
// printf("Calling transpose2a_ker k = %d, i = %d\n", k, i);
hipLaunchKernelGGL(( list_stride_transpose2a_ker), dim3(nBlk), dim3(N_THD), 0, 0, Y, X, k, i);
} else {
// printf("Calling transpose2b_ker k = %d, i = %d\n", k, i);
hipLaunchKernelGGL(( list_stride_transpose2b_ker), dim3(nBlk), dim3(N_THD), 0, 0, Y, X, k, i);
}
hipDeviceSynchronize();
}
#undef E_THD
#undef N_THD
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// (I_m @ D_{2, 2^{k-i-1}} @ I_{2^i}) //
///////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* D_{2, 2^{k - i - 1}} is a matrix of size 2^{k-i} X 2^{k-i}
*
* For example, let j = 0, k = 4, i = 1 and w^8 = -1
*
* [ 1 ] 0
* [ 1 ] 1
* [ 1 ] 2
* [ 1 ] 3
* D_{2, 4} = [ 1 ] 4
* [ w^2 ] 5
* [ w^4 ] 6
* [ w^6 ] 7
*
* [ 1 ] 0
* [ 1 ] 1
* [ 1 ] 2
* [ 1 ] 3
* [ 1 ] 4
* [ 1 ] 5
* [ 1 ] 6
* D_{2, 4} @ I_2 = [ 1 ] 7
* [ 1 ] 8
* [ 1 ] 9
* [ w^2 ] 10
* [ w^2 ] 11
* [ w^4 ] 12
* [ w^4 ] 13
* [ w^6 ] 14
* [ w^6 ] 15
*
* If N_THD = 4, then 2 blocks are needed. Block 0 handles [1, 1, w^2, w^2]
* and block 1 handle [w^4, w^4, w^6, w^6].
*
* For each group, only half of the elements needs to be modified,
* and the index range is from n / 2 to n - 1.
*
* Hence the number of thread blocks for each group is n / (2 * N_THD).
* The total number of thread blocks is m * n / (2 * N_THD).
*
**/
/**
* @X, input/output array of length m * n
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @m,
* @k, n = 2^k
* @i, index from 0 to k - 1
*
* Multiple thread blocks (>1) handle a stride, (s = 2^i is big)
*
* Requirements: m >= 1, k > i > E_THD
*
*/
__global__ void
list_stride_twiddle2a_ker(sfixn *X, const sfixn * const W, sfixn k, sfixn i,
sfixn p, double pinv)
{
// block index
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// determine which subvector to work
// squo = bid / (n / (2 * N_THD))
// sbid = bid % (n / (2 * N_THD))
// sbid is the block index inside each group
sfixn squo = (bid >> (k - 1 - E_THD));
sfixn sbid = (bid & ((1 << (k - 1 - E_THD)) - 1));
// all threads in a thread block are using the same power!
// This power is w^(s*e), with e = quo(sbid, s/N_THD)
sfixn w = W[(sbid >> (i - E_THD)) << i];
// starting position for the block, the first n / 2 elements unchanged
sfixn *base = X + (squo << k) + ((sfixn)1 << (k - 1)) + (sbid << E_THD);
// in-place modular twiddle multiplication (mod p, pinv = 1/p as double).
base[threadIdx.x] = mul_mod(w, base[threadIdx.x], p, pinv);
}
/**
* @X, input/output array of length m * n
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @m,
* @k, n = 2^k
* @i, index from 0 to k - 1
*
* A thread block handle multiple strides (s = 2^i is small)
*
* Requirements:
*
* (1) m >= 1 (2) k > E_THD >= i >= 0
*
*/
__global__ void
list_stride_twiddle2b_ker(sfixn *X, const sfixn * const W, sfixn k, sfixn i,
sfixn p, double pinv)
{
// block index
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// determine which subvector to work
// squo = bid / (n / (2 * N_THD))
// sbid = bid % (n / (2 * N_THD))
// sbid is the block index inside each group
sfixn squo = (bid >> (k - 1 - E_THD));
sfixn sbid = (bid & ((1 << (k - 1 - E_THD)) - 1));
// starting position for this block
// the first n / 2 elements will be unchanged.
sfixn *base = X + (squo << k) + ((sfixn)1 << (k - 1)) + (sbid << E_THD);
// the starting root for the thread block is w^(e*s)
// with e = sbid * (N_THD / s). Thus e*s = sbid * N_THD.
sfixn tid = threadIdx.x;
// the power for the thread e * s + s * quo(tid, s)
sfixn iq = (sbid << E_THD) + ((tid >> i) << i);
// in-place modular twiddle multiplication (mod p, pinv = 1/p as double).
base[tid] = mul_mod(W[iq], base[tid], p, pinv);
}
/**
* @X, input/output array of length m * n = 2^{k+j}
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @m,
* @k, n = 2^k
* @i, index from 0 to k - 1
*
* Requirements: 0 <= i <= k - 2, n > N_THD, m >= 1
*
* TESTED
*
*/
void list_stride_twiddle2_dev(sfixn *X, const sfixn * const W, sfixn m,
sfixn k, sfixn i, sfixn p)
{
if (DEBUG) assert((i >= 0) && (i < k - 1));
if (DEBUG) assert((m >= 1) && (k > E_THD));
// number of blocks is m * (n / 2) / N_THD
sfixn nb = (m << ( k - 1 - E_THD));
dim3 nBlk(nb, 1, 1);
// NOTE(review): when nb > 2^15 and nb is not a multiple of 2^15 (possible
// since m need not be a power of 2), nb >> 15 truncates and some blocks
// would never be launched -- confirm callers guarantee divisibility.
if (nb > (1 << 15)) { nBlk.x = (1 << 15); nBlk.y = (nb >> 15); }
double pinv = 1 / (double)p;
// Kernel choice: stride larger than a block (i > E_THD) vs. one block
// covering several strides.
if (i > E_THD) {
// printf("Calling twiddle2a_ker m = %d, k = %d, i = %d\n", m, k, i);
// printf("nBlk = %d, nThd = %d\n", nBlk.x, N_THD);
hipLaunchKernelGGL(( list_stride_twiddle2a_ker), dim3(nBlk), dim3(N_THD), 0, 0, X, W, k, i, p, pinv);
} else {
// printf("Calling twiddle2b_ker m = %d, k = %d, i = %d\n", m, k, i);
// printf("nBlk = %d, nThd = %d\n", nBlk.x, N_THD);
hipLaunchKernelGGL(( list_stride_twiddle2b_ker), dim3(nBlk), dim3(N_THD), 0, 0, X, W, k, i, p, pinv);
}
hipDeviceSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// List of butterflies (I_m @ DFT2 @ I_{n/2}) //
////////////////////////////////////////////////////////////////////////////////
//
// a butterfly operation is defined as
//
// x0 y0
// \/
// /\
// xs ys
//
// with y0 = x0 + xs and ys = x0 - xs.
// In total, 2 + 2 elements are involved for each butterfly
////////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, device array of length m * n (input)
* @Y, device array of length m * n (output)
* @m,
* @k, n = 2^k
* @p, prime number
*
* Implements I_m @ DFT2 @ I_{n/2}
*
* Requires m >= 1, k > E_THD
*
*/
__global__ void
list_butterfly_ker(sfixn *Y, const sfixn * const X, sfixn k, sfixn p)
{
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// determine which subvector to work
// squo = bid / ((n / 2) / N_THD)
// sbid = bid % ((n / 2) / N_THD)
// sbid is the block index inside each group
sfixn squo = (bid >> (k - 1 - E_THD));
sfixn sbid = (bid & ((1 << (k - 1 - E_THD)) - 1));
sfixn *B = Y + (squo << k) + (sbid << E_THD);
const sfixn *A = X + (squo << k) + (sbid << E_THD);
sfixn tid = threadIdx.x;
sfixn halfn = ((sfixn )1 << (k - 1));
// Modular butterfly: y0 = x0 + xs, ys = x0 - xs (mod p), stride n/2.
B[tid] = add_mod(A[tid], A[tid + halfn], p);
B[tid + halfn] = sub_mod(A[tid], A[tid + halfn], p);
}
/**
* @X, device array of length m * n (input)
* @Y, device array of length m * n (output)
* @m,
* @k, n = 2^k
* @p, prime number
*
* I_m @ DFT2 @ I_{n/2}
*
* Requirements: m >= 1, k > E_THD
*
* TESTED
*
*/
void list_butterfly_dev(sfixn *Y, const sfixn *X, sfixn m, sfixn k, sfixn p)
{
if (DEBUG) assert(m >= 1 && k > E_THD);
// Each thread performs one butterfly, hence m * n / 2 / N_THD blocks.
sfixn nb = (m << (k - E_THD - 1));
dim3 nBlk(nb, 1, 1);
// NOTE(review): nb >> 15 truncates when nb > 2^15 is not a multiple of
// 2^15 (m need not be a power of 2) -- confirm callers guarantee this.
if (nb > (1 << 15)) { nBlk.x = (1 << 15); nBlk.y = (nb >> 15); }
hipLaunchKernelGGL(( list_butterfly_ker), dim3(nBlk), dim3(N_THD), 0, 0, Y, X, k, p);
hipDeviceSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
// List of FFTs: I_m @ DFT_n, Main Program //
////////////////////////////////////////////////////////////////////////////////
/**
* @X, input / output device array of length m * n
* @m,
* @k, n = 2^k
* @w, n-th primitive root of unity
* @W, [1, w, w^2, ..., w^{n/2-1}]
* @p, fourier prime number
*
* X will be filled by I_m @ DFT_n(X, w)
*
* :::Warning::: m could be a non-power of 2
*
*/
void list_stockham_dev(sfixn *X, sfixn m, sfixn k, const sfixn *W, sfixn p)
{
    // TODO: check the size of k
    if (DEBUG) assert((m >= 1));
    // scratch buffer holding m subvectors of n = 2^k elements each
    const size_t nbytes = sizeof(sfixn) * (m << k);
    sfixn *work;
    hipMalloc((void**)&work, nbytes);
    // stage i = k - 1: transpose and twiddle are trivial, butterflies only
    list_butterfly_dev(work, X, m, k, p);
    // remaining Stockham stages, ping-ponging between X and the scratch
    for (sfixn i = k - 2; i >= 0; --i) {
        list_stride_transpose2_dev(X, work, m, k, i);
        list_stride_twiddle2_dev(X, W, m, k, i, p);
        list_butterfly_dev(work, X, m, k, p);
    }
    // every pass above ends with the result in the scratch; move it to X
    hipMemcpy(X, work, nbytes, hipMemcpyDeviceToDevice);
    hipFree(work);
    if (DEBUG) checkCudaError("error found in list_stockham_dev");
}
void list_stockham_dev(sfixn *X, sfixn m, sfixn k, sfixn w, sfixn p)
{
    // Convenience overload: build the twiddle table [1, w, ..., w^{n/2-1}]
    // on the device, then delegate to the table-based version.
    sfixn *roots;
    hipMalloc((void**)&roots, sizeof(sfixn) << (k - 1));
    get_powers_binary(k - 1, roots, w, p);
    list_stockham_dev(X, m, k, roots, p);
    hipFree(roots);
    if (DEBUG) checkCudaError("error found in list_stockham_dev");
}
/**
* @X, input / output host array of length m * n
* @m,
* @k, n = 2^k
* @w, n-th primitive root of unity
* @p, fourier prime number
*
* X will be filled by I_m @ DFT_n(X, w)
*/
void list_stockham_host(sfixn *X, sfixn m, sfixn k, sfixn w, sfixn p) {
    // Host wrapper: stage the data on the device, transform, copy back.
    const size_t nbytes = sizeof(sfixn) * (m << k);
    sfixn *d_X;
    hipMalloc((void**)&d_X, nbytes);
    hipMemcpy(d_X, X, nbytes, hipMemcpyHostToDevice);
    list_stockham_dev(d_X, m, k, w, p);
    hipMemcpy(X, d_X, nbytes, hipMemcpyDeviceToHost);
    hipFree(d_X);
}
////////////////////////////////////////////////////////////////////////////////
// The goal is to implement DFT_n @ I_m. We use the Stockham FFT, that is
//
// DFT_n @ I_m = Prod_{i = 0}^{k - 1}
// DFT2 @ I_{2^{k - 1}} @ I_{2^j}
// D_{2, 2^{k - i - 1}} @ I_{2^i} @ I_{2^j}
// L_2^{2^{k - i}} @ I_{2^i} @ I_{2^j}
//
// = DFT2 @ I_{2^{k - 1 + j}} (1)
// D_{2, 2^{k - i - 1}} @ I_{2^{i+^j}} (2)
// L_2^{2^{k - i}} @ I_{2^{i+j}} (3)
//
// Note that (1) has been implemented by stockham.cu; however, both (2) and (3)
// invalidate its assumptions. We now relax them.
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Implementation of L_2^{n} @ I_s, extended version, that is, it works
// for any input such that n >= 4 and s >= 1 and n * s >= N_THD.
////////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, input array of length n * s = 2^{u + v}
* @Y, output array of length n * s = 2^{u + v}
* @u, n = 2^u
* @v, s = 2^v
*
* Compute the general stride transposition
*
* Y = (L_2^n @ I_s) X
*
* Multiple thread blocks (>= 1) move a stride of s elements
*
* Requirements: u >= 2 and v >= E_THD
*
 * If u = 1, then do nothing and the transposition is trivial.
*
* TESTED
*/
__global__
void ext_stride_transpose2a_ker(sfixn *Y, const sfixn *X, sfixn u, sfixn v)
{
// staging tile: one load and one store per thread, both coalesced
__shared__ sfixn block[N_THD];
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// delta = s / N_THD;
// the number of thread block needed for each stride
sfixn edelta = v - E_THD;
// iq = quo(bid, delta) and ir = rem(bid, delta)
// iq tells which stride the block is working on
// ir tells which portion of the stride the block is working on
sfixn iq = bid >> edelta;
sfixn ir = bid & ((1 << edelta) - 1);
// iqq = quo(iq, 2) and iqr = rem(iq, 2)
sfixn iqq = (iq >> 1);
sfixn iqr = (iq & 1);
// read in data from X
// the input offset for this block is iq * s + ir * N_THD
const sfixn *din = X + (iq << v) + (ir << E_THD);
block[threadIdx.x] = din[threadIdx.x];
__syncthreads();
// write data out to Y
//
// if iqr = 0 (even), write to Y + iqq * s + ir * N_THD
// if iqr = 1 (odd), write to Y + (n / 2) * s + iqq * s + ir * N_THD
// that is, iqr * (n / 2) * s + iqq * s + ir * N_THD
// (all offsets expressed as shifts of the exponents u, v, E_THD)
sfixn *dout = Y + (iqr << (u + v - 1)) + (iqq << v) + (ir << E_THD);
dout[threadIdx.x] = block[threadIdx.x];
__syncthreads();
}
/**
* @X, input array of length n * s = 2^{u + v}
* @Y, output array of length n * s = 2^{u + v}
* @u, n = 2^u
* @v, s = 2^v
*
* Compute the general stride transposition
*
* Y = (L_2^n @ I_s) X
*
* A thread block moves multiple strides
*
* Requirements: u >= 2 and v < E_THD
*
 * If u = 1, then do nothing and the transposition is trivial.
*
* TESTED
*/
__global__
void ext_stride_transpose2b_ker(sfixn *Y, const sfixn *X, sfixn u, sfixn v) {
// staging tile: each block permutes N_THD elements locally
__shared__ sfixn block[N_THD];
// block index in the kernel
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// read in data from X
// the input offset for this block is bid * N_THD
const sfixn *din = X + (bid << E_THD);
block[threadIdx.x] = din[threadIdx.x];
__syncthreads();
// offset0 = bid * N_THD / 2
// offset1 = bid * N_THD / 2 + (n / 2) * s
// base = Y + offset0
sfixn *base = Y + (bid << (E_THD - 1));
sfixn tid = threadIdx.x;
// iq = quo(tid, s) and ir = rem(tid, s);
sfixn iq = (tid >> v);
sfixn ir = tid & ((1 << v) - 1);
// the following code is to the in-block shuffle
// f(i) = (rem(2iq, N_THD/s) + quo(2iq, N_THD/s)) * s + ir
// (quo term first, rem term added next; the sum is order-independent)
sfixn fi = (iq << 1) >> (E_THD - v);
fi += ((iq << 1) & ((1 << (E_THD - v)) - 1));
fi <<= v;
fi += ir;
// replace the following code by the branch-free code
// if (tid < N_THD/2)
// dout[tid] = block[fi];
// else
// dout[tid - N_THD / 2 + (1 << (u + v - 1))] = block[fi];
// (tid >> (E_THD - 1)) is 0 for the low half-warp, 1 for the high half,
// selecting between the two output offsets without divergence
sfixn *dout = base + (tid >> (E_THD - 1))
* ((1 << (u + v - 1)) - (1 << (E_THD - 1)));
dout[tid] = block[fi];
}
void ext_stride_transpose2_dev(sfixn *Y, const sfixn *X, sfixn u, sfixn v)
{
    // Launch Y = (L_2^m @ I_s) X with m = 2^u, s = 2^v.
    if (DEBUG) assert((u >= 2) && (v >= 0) && (u + v >= E_THD));
    // one block per N_THD elements: m * s / N_THD blocks in total
    sfixn nblocks = ((sfixn)1 << (u + v - E_THD));
    dim3 grid(nblocks, 1, 1);
    if (nblocks > (1 << 15)) {
        grid.x = (1 << 15);
        grid.y = (nblocks >> 15);
    }
    // pick the variant by stride size: a large stride (s >= N_THD) spans
    // several blocks; a small one packs several strides into one block
    if (v >= E_THD) {
        hipLaunchKernelGGL(( ext_stride_transpose2a_ker), dim3(grid), dim3(N_THD), 0, 0, Y, X, u, v);
    } else {
        hipLaunchKernelGGL(( ext_stride_transpose2b_ker), dim3(grid), dim3(N_THD), 0, 0, Y, X, u, v);
    }
    hipDeviceSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Implementation of D_{2, m} @ I_{s}, extended version, that is, it works
// for any input such that m >= 2 and s >= 1 and m * s >= N_THD. Note that
// the FFT size n is still in use and we also require m <= n / 2.
//
// For example, let n = 16 be the FFT size. We have
//
// [ 1 ] 0
// [ 1 ] 1
// D_{2, 2} = [ 1 ] 2
// [ w^4 ] 3
//
// [ 1 ] 0
// [ 1 ] 1
// [ 1 ] 2
// [ 1 ] 3
// D_{2, 2} @ I_4 = [ 1 ] 4
// [ 1 ] 5
// [ 1 ] 6
// [ 1 ] 7
// [ 1 ] 8
// [ 1 ] 9
// [ 1 ] 10
// [ 1 ] 11
// [ w^4 ] 12
// [ w^4 ] 13
// [ w^4 ] 14
// [ w^4 ] 15
//
// [ 1 ] 0
// [ 1 ] 1
// [ 1 ] 2
// [ 1 ] 3
// [ 1 ] 4
// [ 1 ] 5
// [ 1 ] 6
// D_{2, 4} @ I_2 = [ 1 ] 7
// [ 1 ] 8
// [ 1 ] 9
// [ w^2 ] 10
// [ w^2 ] 11
// [ w^4 ] 12
// [ w^4 ] 13
// [ w^6 ] 14
// [ w^6 ] 15
//
// Warning:
//
// The purpose is to handle the case 2 * m * s >= n. If this does not hold,
// be aware of the meaning of the computation result.
//
// For example, let n = 16, s = 1 and m = 4, (2 * m * s = 8 < 16)
// Its matrix representation is
//
// [ 1 ]
// [ 1 ]
// [ 1 ]
// [ 1 ]
// [ 1 ]
// [ w ]
// [ w^2 ]
// [ w^3 ]
//
// where w is a 16-primitive root of unity.
//
// The usual D_{2, 4} @ I_1 is represented by
//
// [ 1 ]
// [ 1 ]
// [ 1 ]
// [ 1 ]
// [ 1 ]
// [ x ]
// [ x^2 ]
// [ x^3 ]
//
// with x being a 8-th primitive root of unity.
//
// For the above reason, we require the condition 2 * m * s >= n, for safety.
//
////////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, input/output array of length 2 * m * s = 2^{u + v + 1}
* @n, FFT size n = 2^e
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @u, m = 2^u
* @v, s = 2^v
*
* Implements D_{2, m} @ I_{s} with FFT size n
*
* Multiple thread blocks handle a stride, (s = 2^v is big)
*
* Requirements: (1) v >= E_THD, (2) 2 <= m < n
*
* TESTED
*/
__global__ void
ext_stride_twiddle2a_ker(sfixn *X, const sfixn * const W, sfixn e, sfixn u,
sfixn v, sfixn p, double pinv)
{
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// all threads in a thread block are using the same power!
// the base power for the grid is exp = (n / 2) / m
// The power for the block is w^(exp * e), with e = quo(bid, s / N_THD)
sfixn w = W[(bid >> (v - E_THD)) << (e - 1 - u)];
// starting position for the block, the first m * s elements unchanged
// (the diagonal D_{2, m} is the identity on its upper half)
sfixn *base = X + ((sfixn)1 << (u + v)) + (bid << E_THD);
// in-place modular scaling; pinv = 1/p precomputed for mul_mod
base[threadIdx.x] = mul_mod(w, base[threadIdx.x], p, pinv);
}
/**
* @X, input/output array of length 2 * m * s = 2^{u + v + 1}
* @n, FFT size n = 2^e
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @u, m = 2^u
* @v, s = 2^v
*
* Implements: D_{2, m} @ I_{s} with FFT size n
*
* A thread block handles multiple strides, (s = 2^v is small)
*
* Requirements: (1) v < E_THD, (2) 2 <= m < n
*
* TESTED
*/
__global__ void
ext_stride_twiddle2b_ker(sfixn *X, const sfixn * const W, sfixn e, sfixn u,
sfixn v, sfixn p, double pinv)
{
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// the first m * s elements will be unchanged.
sfixn *base = X + ((sfixn)1 << (u + v)) + (bid << E_THD);
// threads in a thread block use different powers.
// the base power for the grid is egrid = (n / 2) / m.
// the base power for the block is eblock = bid * (N_THD / s) * egrid.
sfixn eblock = bid << (E_THD - v + e - 1 - u);
sfixn tid = threadIdx.x;
// the power for the thread eblock + s * quo(tid, s)
// ((tid >> v) << v) clears the low v bits, i.e. s * quo(tid, s)
sfixn iq = eblock + ((tid >> v) << v);
base[tid] = mul_mod(W[iq], base[tid], p, pinv);
}
/**
* @X, input/output array of length 2 * m * s = 2^{u + v + 1}
* @n, FFT size n = 2^e
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @u, m = 2^u
* @v, s = 2^v
*
* Implements: D_{2, m} @ I_{s} with FFT size n
*
* Requirements: 2 <= m < n, s >= 1, 2 * m * s >= n and m * s >= N_THD
*
* TESTED
*/
void ext_stride_twiddle2_dev(sfixn *X, const sfixn * const W, sfixn e, sfixn u,
sfixn v, sfixn p)
{
    // Launch D_{2, m} @ I_s (FFT size n = 2^e) with m = 2^u, s = 2^v.
    if (DEBUG) assert((v >= 0) && (u > 0) && (u < e));
    if (DEBUG) assert((u + v >= E_THD) && (1 + u + v >= e));
    // only the lower m * s elements are scaled => m * s / N_THD blocks
    sfixn nblocks = (1 << (u + v - E_THD));
    dim3 grid(nblocks, 1, 1);
    if (nblocks > (1 << 15)) {
        grid.x = (1 << 15);
        grid.y = (nblocks >> 15);
    }
    // precomputed 1/p for the double-based modular multiplication
    double invp = 1 / (double)p;
    // variant a: one power per block (big stride); variant b: per-thread powers
    if (v >= E_THD) {
        hipLaunchKernelGGL(( ext_stride_twiddle2a_ker), dim3(grid), dim3(N_THD), 0, 0, X, W, e, u, v, p, invp);
    } else {
        hipLaunchKernelGGL(( ext_stride_twiddle2b_ker), dim3(grid), dim3(N_THD), 0, 0, X, W, e, u, v, p, invp);
    }
    hipDeviceSynchronize();
}
/**
 * @X, device array of length 2 * s = 2^{v + 1} (input)
 * @Y, device array of length 2 * s = 2^{v + 1} (output)
* @v, s = 2^v
* @p, prime number
*
* Implements: DFT2 @ I_{s}
*
* Requires: s >= N_THD
*
*/
__global__
void ext_butterfly_ker(sfixn *Y, const sfixn * const X, sfixn v, sfixn p)
{
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// this block handles N_THD butterfly "upper" elements starting at bid * N_THD
sfixn *B = Y + (bid << E_THD);
const sfixn *A = X + (bid << E_THD);
sfixn s = ((sfixn)1 << v);
// butterfly across the stride s: y0 = x0 + xs, ys = x0 - xs (mod p)
B[threadIdx.x] = add_mod(A[threadIdx.x], A[threadIdx.x + s], p);
B[threadIdx.x + s] = sub_mod(A[threadIdx.x], A[threadIdx.x + s], p);
}
/**
* @X, device array of length 2 * s = 2^{v + 1)
* @Y, device array of length 2 * s = 2^(v + 1) (output)
* @v, s = 2^v
* @p, prime number
*
* Implements: DFT2 @ I_{s}
*
* Requires: s >= N_THD
*
* TESTED
*/
void ext_butterfly_dev(sfixn *Y, const sfixn * const X, sfixn v, sfixn p)
{
    // Launch DFT2 @ I_s with s = 2^v; requires s >= N_THD.
    if (DEBUG) assert(v >= E_THD);
    // s / N_THD blocks, each producing N_THD butterfly pairs
    sfixn nblocks = ((sfixn)1 << (v - E_THD));
    dim3 grid(nblocks, 1, 1);
    if (nblocks > (1 << 15)) {
        grid.x = (1 << 15);
        grid.y = (nblocks >> 15);
    }
    hipLaunchKernelGGL(( ext_butterfly_ker), dim3(grid), dim3(N_THD), 0, 0, Y, X, v, p);
    hipDeviceSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
// The main program of
//
// DFT_n @ I_m = Prod_{i = 0}^{k - 1}
// DFT2 @ I_{2^{k - 1 + j}} (1)
// D_{2, 2^{k - i - 1}} @ I_{2^{i+j}} (2)
// L_2^{2^{k - i}} @ I_{2^{i+j}} (3)
////////////////////////////////////////////////////////////////////////////////
/**
* @X, input / output device array of length n * m
* @k, n = 2^k
* @j, m = 2^j
* @w, n-th primitive root of unity
* @W, [1, w, w^2, ..., w^{n/2-1}]
* @p, fourier prime number
*
* X will be filled by DFT_n @ I_m(X, w)
*
*/
void ext_stockham_dev(sfixn *X, sfixn k, sfixn j, const sfixn *W, sfixn p) {
    // Compute X <- (DFT_n @ I_m) X with n = 2^k, m = 2^j, twiddle table W.
    const size_t nbytes = sizeof(sfixn) << (k + j);
    sfixn *work;
    hipMalloc((void**)&work, nbytes);
    // stage i = k - 1: transpose and twiddle degenerate to the identity
    ext_butterfly_dev(work, X, k + j - 1, p);
    for (sfixn i = k - 2; i >= 0; --i) {
        // L_2^{2^{k-i}} @ I_{2^{i+j}} : u = k - i, v = i + j
        ext_stride_transpose2_dev(X, work, k - i, i + j);
        // D_{2, 2^{k-i-1}} @ I_{2^{i+j}} : u = k - i - 1, v = i + j
        ext_stride_twiddle2_dev(X, W, k, k - i - 1, i + j, p);
        ext_butterfly_dev(work, X, k + j - 1, p);
    }
    // each pass leaves the result in the scratch buffer; copy back to X
    hipMemcpy(X, work, nbytes, hipMemcpyDeviceToDevice);
    hipFree(work);
    if (DEBUG) checkCudaError("error found in ext_stockham_dev");
}
/* Without precomputed powers of the root */
void ext_stockham_dev(sfixn *X, sfixn k, sfixn j, sfixn w, sfixn p)
{
    // Overload without precomputed powers: build [1, w, ..., w^{n/2-1}]
    // on the device and delegate to the table-based version.
    sfixn *roots;
    hipMalloc((void**)&roots, sizeof(sfixn) << (k - 1));
    get_powers_binary(k - 1, roots, w, p);
    ext_stockham_dev(X, k, j, roots, p);
    hipFree(roots);
    if (DEBUG) checkCudaError("error found in ext_stockham_dev");
}
///////////////////////////////////////////////////////////////////////////////
// 2-d FFT, the row-column algorithm is
//
// DFT_{m, n} (X) = (DFT_m @ I_n) (I_m @ DFT_n) (X)
//
///////////////////////////////////////////////////////////////////////////////
/**
* @X, input / output device array of length m * n
* @em, m = 2^em (rows)
* @en, n = 2^en (columns)
* @wn, n-th primitive root of unity
* @Wn, [1, wn, wn^2, ..., wn^{n/2-1}]
* @wm, m-th primitive root of unity
* @Wm, [1, wm, wm^2, ..., wm^{m/2-1}]
* @p, fourier prime number
*
* Compute X = DFT_{m, n}(X) = (DFT_m @ I_n) (I_m @ DFT_n)X
*
*/
/**
 * Compute X = DFT_{m, n}(X) = (DFT_m @ I_n) (I_m @ DFT_n) X in place,
 * with m = 2^em rows and n = 2^en columns, Wm = [1, wm, ..., wm^{m/2-1}],
 * Wn = [1, wn, ..., wn^{n/2-1}], p a Fourier prime.
 */
void bivariate_stockham_dev(sfixn *X, sfixn em, const sfixn *Wm, sfixn en,
const sfixn *Wn, sfixn p)
{
    sfixn *Y;
    hipMalloc((void**)&Y, sizeof(sfixn) << (em + en));
    // Step 1: I_m @ DFT_n (inlined list_stockham_dev(X, 1 << em, en, Wn, p));
    // after this step the effective data are in Y.
    sfixn m = ((sfixn)1 << em);
    list_butterfly_dev(Y, X, m, en, p);
    for (sfixn i = en - 2; i >= 0; --i) {
        list_stride_transpose2_dev(X, Y, m, en, i);
        list_stride_twiddle2_dev(X, Wn, m, en, i, p);
        list_butterfly_dev(Y, X, m, en, p);
    }
    // Step 2: DFT_m @ I_n (inlined ext_stockham_dev with k = em, j = en).
    // FIX: the stage count and the FFT-size exponent passed to the twiddle
    // step must be em (the row FFT whose table is Wm), not en. Using en was
    // only correct for square grids (em == en) and indexed Wm, which holds
    // just 2^{em-1} powers, out of bounds whenever en > em.
    ext_butterfly_dev(X, Y, em + en - 1, p);
    for (sfixn i = em - 2; i >= 0; --i) {
        // u = em - i, v = en + i
        ext_stride_transpose2_dev(Y, X, em - i, en + i);
        // u = em - i - 1, v = en + i
        ext_stride_twiddle2_dev(Y, Wm, em, em - i - 1, en + i, p);
        ext_butterfly_dev(X, Y, em + en - 1, p);
    }
    // every pass of step 2 ends with the result in X; Y is scratch only
    hipFree(Y);
    if (DEBUG) checkCudaError("bivariate_stockham_dev");
}
/**
* @X, input / output host array of length n * m
* @en, n = 2^en
* @em, m = 2^em
* @wn, n-th primitive root of unity
* @wm, m-th primitive root of unity
* @p, fourier prime number
*
* Compute X = DFT_{m, n}(X) = (DFT_m @ I_n) (I_m @ DFT_n) X
*
* It computes if X is in the rdr-representation
*
* F(1, 1) F(wn, 1) ... F(wn^(n-1), 1)
* F(1, wm) F(wn, wm) ... F(wn^(n-1), wm)
* ...
* F(1, wm^(m-1)) F(wn, wm^(m-1)) ... F(wn^(n-1), wm^(m-1))
*
*/
void bivariate_stockham_host(sfixn *X, sfixn em, sfixn wm,
sfixn en, sfixn wn, sfixn p)
{
    // Host wrapper for the 2-d row-column FFT: upload, build both twiddle
    // tables, transform on the device, and download the result.
    const size_t nbytes = sizeof(sfixn) << (em + en);
    sfixn *d_X;
    hipMalloc((void**)&d_X, nbytes);
    hipMemcpy(d_X, X, nbytes, hipMemcpyHostToDevice);
    sfixn *Wm, *Wn;
    hipMalloc((void**)&Wm, sizeof(sfixn) << (em - 1));
    hipMalloc((void**)&Wn, sizeof(sfixn) << (en - 1));
    get_powers_binary(em - 1, Wm, wm, p);
    get_powers_binary(en - 1, Wn, wn, p);
    bivariate_stockham_dev(d_X, em, Wm, en, Wn, p);
    hipMemcpy(X, d_X, nbytes, hipMemcpyDeviceToHost);
    hipFree(d_X);
    hipFree(Wm);
    hipFree(Wn);
    if (DEBUG) checkCudaError("bivariate_stockham_host");
}
/**
* Assume that the inverse roots have been precomputed.
*/
void inverse_bivariate_stockham_dev(sfixn *X, sfixn em, const sfixn *invWm,
sfixn en, const sfixn *invWn, sfixn p)
{
    // Forward transform with inverse-root tables gives the un-scaled
    // inverse 2-d DFT ...
    bivariate_stockham_dev(X, em, invWm, en, invWn, p);
    // ... which is then normalized by 1/(m*n) mod p.
    const sfixn m = sfixn(1) << em;
    const sfixn n = sfixn(1) << en;
    const sfixn scale = mul_mod(inv_mod(m, p), inv_mod(n, p), p);
    scale_vector_dev(scale, m * n, X, p);
}
/**
* Since (DFT_m^(-1) @ DFT_n^(-1)) (DFT_m @ DFT_n) = I_m @ I_n, we have
*
* (DFT_m @ DFT_n)^(-1) = DFT_m^(-1) @ DFT_n^(-1)
*/
/**
 * Host-side inverse 2-d FFT. Since
 * (DFT_m^(-1) @ DFT_n^(-1)) (DFT_m @ DFT_n) = I_m @ I_n, we have
 * (DFT_m @ DFT_n)^(-1) = DFT_m^(-1) @ DFT_n^(-1).
 *
 * @X, input / output host array of length m * n = 2^{em + en}
 * @em, @wm, m = 2^em and its m-th primitive root of unity
 * @en, @wn, n = 2^en and its n-th primitive root of unity
 * @p, fourier prime number
 */
void inverse_bivariate_stockham_host(sfixn *X, sfixn em, sfixn wm,
sfixn en, sfixn wn, sfixn p)
{
    // inverse roots of unity for both dimensions
    sfixn wminv = inv_mod(wm, p);
    sfixn wninv = inv_mod(wn, p);
    // stage the data on the device
    sfixn *X_d;
    hipMalloc((void**)&X_d, sizeof(sfixn)<<(em + en));
    hipMemcpy(X_d, X, sizeof(sfixn)<<(em + en), hipMemcpyHostToDevice);
    // precompute the inverse twiddle tables
    sfixn *Wm, *Wn;
    hipMalloc((void**)&Wm, sizeof(sfixn) << (em - 1));
    hipMalloc((void**)&Wn, sizeof(sfixn) << (en - 1));
    get_powers_binary(em - 1, Wm, wminv, p);
    get_powers_binary(en - 1, Wn, wninv, p);
    // reuse the device-side routine instead of duplicating the
    // transform + 1/(m*n) scaling logic here (behavior identical)
    inverse_bivariate_stockham_dev(X_d, em, Wm, en, Wn, p);
    hipMemcpy(X, X_d, sizeof(sfixn)<<(em + en), hipMemcpyDeviceToHost);
    hipFree(X_d);
    hipFree(Wm);
    hipFree(Wn);
    if (DEBUG) checkCudaError("inverse_bivariate_stockham_host");
}
/**
* Multiply two bivariate polynomials of size POT, in place version
*
* @m : FFT size in y (rows)
* @n : FFT size in x (columns)
* @em : m = 2^em
* @en : n = 2^en
* @F : coefficient vector of F, padded to size n, input & output
* @G : coefficient vector of G, padded to size n, input
* @p : prime number
*
* F <-- DFT_{m, n}^{-1}(DFT_{m, n}(F) * DFT_{m, n}(G))
*
**/
void bi_stockham_poly_mul_dev(sfixn m, sfixn em, sfixn n, sfixn en,
sfixn *F, sfixn *G, sfixn p)
{
    // Primitive roots for both dimensions and their inverses.
    sfixn wm = primitive_root(em, p);
    sfixn wn = primitive_root(en, p);
    sfixn wminv = inv_mod(wm, p);
    sfixn wninv = inv_mod(wn, p);
    // Normalization factor 1/(m*n) mod p for the inverse transform.
    sfixn scale = mul_mod(inv_mod(m, p), inv_mod(n, p), p);
    // Twiddle tables, reused for the forward and inverse passes.
    sfixn *Wm, *Wn;
    hipMalloc((void**)&Wm, sizeof(sfixn) << (em - 1));
    hipMalloc((void**)&Wn, sizeof(sfixn) << (en - 1));
    // Forward 2-d transforms of both operands.
    get_powers_binary(em - 1, Wm, wm, p);
    get_powers_binary(en - 1, Wn, wn, p);
    bivariate_stockham_dev(F, em, Wm, en, Wn, p);
    bivariate_stockham_dev(G, em, Wm, en, Wn, p);
    // Pointwise product in the frequency domain, result in F.
    pointwise_mul_dev(n * m, em + en, F, G, p);
    // Inverse transform: refill the tables with inverse-root powers,
    // transform, then scale by 1/(m*n).
    get_powers_binary(em - 1, Wm, wminv, p);
    get_powers_binary(en - 1, Wn, wninv, p);
    bivariate_stockham_dev(F, em, Wm, en, Wn, p);
    scale_vector_dev(scale, m * n, F, p);
    hipFree(Wm);
    hipFree(Wn);
    if (DEBUG) checkCudaError("bi_stockham_poly_mul_dev");
}
/**
* Multiply two balanced bivariate polynomials
*/
rdr_poly*
bi_stockham_poly_mul_host(const rdr_poly &F, const rdr_poly &G, sfixn p)
{
    // Product degrees in x and y, and the power-of-two FFT grid covering them.
    sfixn lx = F.ns[0] + G.ns[0] - 1;
    sfixn ly = F.ns[1] + G.ns[1] - 1;
    sfixn ex = ceiling_log2(lx);
    sfixn ey = ceiling_log2(ly);
    sfixn nx = (sfixn)1 << ex;
    sfixn ny = (sfixn)1 << ey;
    sfixn padded_sz = ((sfixn)1 << (ex + ey));
    // Upload F and zero-pad it into the FFT grid, then drop the raw copy.
    sfixn *F_d;
    hipMalloc((void**)&F_d, sizeof(sfixn)*F.sz);
    hipMemcpy(F_d, F.coeffs, sizeof(sfixn)*F.sz, hipMemcpyHostToDevice);
    sfixn *F2_d;
    hipMalloc((void**)&F2_d, sizeof(sfixn)*padded_sz);
    expand_to_fft2_dev(ex, ey, F2_d, F.ns[0], F.ns[1], F_d);
    hipFree(F_d);
    // Same staging for G.
    sfixn *G_d;
    hipMalloc((void**)&G_d, sizeof(sfixn)*G.sz);
    hipMemcpy(G_d, G.coeffs, sizeof(sfixn)*G.sz, hipMemcpyHostToDevice);
    sfixn *G2_d;
    hipMalloc((void**)&G2_d, sizeof(sfixn)*padded_sz);
    expand_to_fft2_dev(ex, ey, G2_d, G.ns[0], G.ns[1], G_d);
    hipFree(G_d);
    // F2_d <- F * G via the 2-d FFT, then compact the lx-by-ly result
    // from F2_d into G2_d.
    bi_stockham_poly_mul_dev(ny, ey, nx, ex, F2_d, G2_d, p);
    extract_from_fft2_dev(lx, ly, G2_d, ex, F2_d);
    if (DEBUG) checkCudaError("bi_stockham_poly_mul_host");
    // NOTE(review): the final 'false' flag presumably tells rdr_poly that
    // G2_d is device-resident data — confirm against the rdr_poly ctor.
    rdr_poly *H = new rdr_poly(lx, ly, G2_d, false);
    hipFree(F2_d);
    hipFree(G2_d);
    return H;
}
////////////////////////////////////////////////////////////////////////////////
// Most general formulas to realize a list of bivariate FFTs
//
// I_q @ DFT_{m, n} = (I_q @ (DFT_m @ I_n)) (I_q @ (I_m @ DFT_n))
// = (I_q @ ( Prod_{i=0}^{u - 1}
// (DFT_2 @ I_{m / 2})
// (D_{2, 2^{u - i - 1}} @ I_{2^i})
// (L_2^{2^{u - i} @ I_{2^i}}) )
// @ I_n)
// (I_q @ I_m @ (Prod_{i = 0}^{k - 1}
// (DFT_2 @ I_{n / 2})
// (D_{2, 2^{k - i - 1}} @ I_{2^i})
// (L_2^{2^{k - i}} @ I_{2^i})))
//
// where m = 2^u and n = 2^k are powers of two, but q usually not.
//
// What are to be implemented,
//
// (1) I_q @ DFT_2 @ I_{m / 2} @ I_n
// (2) I_q @ D_{2, 2^{u - i - 1}} @ I_{2^i} @ I_n
// (3) I_q @ L_2^{2^{u - i}} @ I_{2^i} @ I_n
//
// with m = 2^u;
//
// (4) I_q @ I_m @ DFT_2 @ I_{n / 2} use (3)
// (5) I_q @ I_m @ D_{2, 2^{k - i - 1}} @ I_{2^i} use (2)
// (6) I_q @ I_m @ L_2^{2^{k - i}} @ I_{2^i} use (1)
//
// with n = 2^k.
//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Implementation of I_q @ L_2^m @ I_s
////////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, input array of length q * m * s = q * 2^{u + v}
* @Y, output array of length q * m * s = q * 2^{u + v}
* @u, m = 2^u
* @v, s = 2^v
*
* Compute the general stride transposition
*
* Y = (I_q @ L_2^m @ I_s) X
*
* Multiple thread blocks (>= 1) move a stride of s elements
*
* Requirements: q >= 1, u >= 2 and v >= E_THD
*
 * If u = 1, then do nothing and the transposition is trivial.
*/
__global__ void q_ext_stride_transpose2a_ker(sfixn *Y, const sfixn *X,
sfixn u, sfixn v)
{
// staging tile: one coalesced load and one coalesced store per thread
__shared__ sfixn block[N_THD];
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// read in data from X
// the input offset for this block is bid * N_THD
const sfixn *din = X + (bid << E_THD);
block[threadIdx.x] = din[threadIdx.x];
__syncthreads();
// The number of thread blocks for one subvector is snb = m * s / N_THD
// squo = bid / snb
// sbid = bid % snb
sfixn snb = ((sfixn)1 << ( u + v - E_THD));
sfixn squo = bid >> (u + v - E_THD);
sfixn sbid = bid & (snb - 1);
// delta = s / N_THD;
// the number of thread block needed for each stride
sfixn edelta = v - E_THD;
// iq = quo(sbid, delta) and ir = rem(sbid, delta)
// iq tells which stride the block is working on
// ir tells which portion of the stride the block is working on
sfixn iq = sbid >> edelta;
sfixn ir = sbid & ((1 << edelta) - 1);
// iqq = quo(iq, 2) and iqr = rem(iq, 2)
sfixn iqq = (iq >> 1);
sfixn iqr = (iq & 1);
// write data out to Y
//
// if iqr = 0 (even), write to
// Y + squo * m * s + iqq * s + ir * N_THD
// if iqr = 1 (odd), write to
// Y + squo * m * s + (n / 2) * s + iqq * s + ir * N_THD
//
// that is, squo * m * s + iqr * (n / 2) * s + iqq * s + ir * N_THD
sfixn *dout = Y + (squo << (u + v))
+ (iqr << (u + v - 1))
+ (iqq << v)
+ (ir << E_THD);
dout[threadIdx.x] = block[threadIdx.x];
__syncthreads();
}
/**
* @X, input array of length q * m * s = q * 2^{u + v}
* @Y, output array of length q * m * s = q * 2^{u + v}
* @u, m = 2^u
* @v, s = 2^v
*
* Compute the general stride transposition
*
* Y = (I_q @ L_2^m @ I_s) X
*
* A thread block moves multiple strides
*
* Requirements: q >= 1, u >= 2, 0 <= v < E_THD, m * s >= N_THD
*
*/
__global__ void q_ext_stride_transpose2b_ker(sfixn *Y, const sfixn *X,
sfixn u, sfixn v)
{
// staging tile: each block permutes N_THD elements locally
__shared__ sfixn block[N_THD];
// block index in the kernel
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// read in data from X
// the input offset for this block is bid * N_THD
const sfixn *din = X + (bid << E_THD);
block[threadIdx.x] = din[threadIdx.x];
__syncthreads();
// The number of thread blocks for one subvector is snb = m * s / N_THD
// squo = bid / snb
// sbid = bid % snb
sfixn snb = ((sfixn)1 << (u + v - E_THD));
sfixn squo = bid >> (u + v - E_THD);
sfixn sbid = bid & (snb - 1);
// offset0 = squo * m * s + sbid * N_THD / 2
// offset1 = squo * m * s + sbid * N_THD / 2 + (m / 2) * s
// base = Y + offset0
sfixn *base = Y + (squo << (u + v)) + (sbid << (E_THD - 1));
sfixn tid = threadIdx.x;
// iq = quo(tid, s) and ir = rem(tid, s);
sfixn iq = (tid >> v);
sfixn ir = tid & ((1 << v) - 1);
// the following code is to the in-block shuffle
// f(i) = (rem(2iq, N_THD/s) + quo(2iq, N_THD/s)) * s + ir
// (quo term computed first, rem term added next; the sum is the same)
sfixn fi = (iq << 1) >> (E_THD - v);
fi += ((iq << 1) & ((1 << (E_THD - v)) - 1));
fi <<= v;
fi += ir;
// replace the following code by the branch-free code
// if (tid < N_THD/2)
// dout[tid] = block[fi];
// else
// dout[tid - N_THD / 2 + (1 << (u + v - 1))] = block[fi];
// (tid >> (E_THD - 1)) selects between offset0 and offset1 without branching
sfixn *dout = base + (tid >> (E_THD - 1))
* ((1 << (u + v - 1)) - (1 << (E_THD - 1)));
dout[tid] = block[fi];
}
/**
* @X, input array of length q * m * s = q * 2^{u + v}
* @Y, output array of length q * m * s = q * 2^{u + v}
* @u, m = 2^u
* @v, s = 2^v
*
* Compute the general stride transposition
*
* Y = (I_q @ L_2^m @ I_s) X
*/
void q_ext_stride_transpose2_dev(sfixn *Y, const sfixn *X, sfixn q,
sfixn u, sfixn v)
{
    // Launch Y = (I_q @ L_2^m @ I_s) X with m = 2^u, s = 2^v.
    if (DEBUG) assert((u >= 2) && (v >= 0) && (u + v >= E_THD) && (q >= 1));
    // q subvectors, each needing m * s / N_THD blocks
    sfixn nblocks = (q << (u + v - E_THD));
    dim3 grid(nblocks, 1, 1);
    if (nblocks > (1 << 15)) {
        grid.x = (1 << 15);
        grid.y = (nblocks >> 15);
    }
    // variant a for large strides (s >= N_THD), variant b for small ones
    if (v >= E_THD) {
        hipLaunchKernelGGL(( q_ext_stride_transpose2a_ker), dim3(grid), dim3(N_THD), 0, 0, Y, X, u, v);
    } else {
        hipLaunchKernelGGL(( q_ext_stride_transpose2b_ker), dim3(grid), dim3(N_THD), 0, 0, Y, X, u, v);
    }
    hipDeviceSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// I_q @ D_{2, m} @ I_s
////////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, input/output array of length q * 2 * m * s = q * 2^{u + v + 1}
* @n, FFT size n = 2^e
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @u, m = 2^u
* @v, s = 2^v
*
* Implements I_q @ D_{2, m} @ I_{s} with FFT size n
*
* Multiple thread blocks handle a stride, (s = 2^v is big)
*
* Requirements: (1) v >= E_THD, (2) 2 <= m < n
*/
__global__ void q_ext_stride_twiddle2a_ker(sfixn *X, const sfixn * const W,
sfixn e, sfixn u, sfixn v, sfixn p, double pinv)
{
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// The number of thread blocks for one subvector is snb = m * s / N_THD
// squo = bid / snb
// sbid = bid % snb
sfixn snb = ((sfixn)1 << (u + v - E_THD));
sfixn squo = bid >> (u + v - E_THD);
sfixn sbid = bid & (snb - 1);
// all threads in a thread block are using the same power!
// the base power for the grid is exp = (n / 2) / m
// The power for the block is w^(exp * e), with e = quo(sbid, s / N_THD)
sfixn w = W[(sbid >> (v - E_THD)) << (e - 1 - u)];
// starting position for the block, the first m * s elements unchanged
// (each subvector is 2 * m * s = 2^{u+v+1} long; only its upper half scales)
sfixn *base = X + (squo << (u + v + 1))
+ ((sfixn)1 << (u + v))
+ (sbid << E_THD);
// in-place modular scaling; pinv = 1/p precomputed for mul_mod
base[threadIdx.x] = mul_mod(w, base[threadIdx.x], p, pinv);
}
/**
* @X, input/output array of length q * 2 * m * s = 2^{u + v + 1}
* @n, FFT size n = 2^e
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @u, m = 2^u
* @v, s = 2^v
*
* Implements: I_q @ D_{2, m} @ I_{s} with FFT size n
*
* A thread block handles multiple strides, (s = 2^v is small)
*
* Requirements: v < E_THD, 2 <= m < n, q >= 1, m * s >= N_THD
*/
__global__ void q_ext_stride_twiddle2b_ker(sfixn *X, const sfixn * const W,
sfixn e, sfixn u, sfixn v, sfixn p, double pinv)
{
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// The number of thread blocks for one subvector is snb = m * s / N_THD
// squo = bid / snb
// sbid = bid % snb
sfixn snb = ((sfixn)1 << (u + v - E_THD));
sfixn squo = bid >> (u + v - E_THD);
sfixn sbid = bid & (snb - 1);
// the first m * s elements will be unchanged.
// (each subvector is 2 * m * s = 2^{u+v+1} long; only its upper half scales)
sfixn *base = X + (squo << (u + v + 1))
+ ((sfixn)1 << (u + v))
+ (sbid << E_THD);
// threads in a thread block use different powers.
// the base power for the grid is egrid = (n / 2) / m.
// the base power for the block is eblock = sbid * (N_THD / s) * egrid.
sfixn eblock = sbid << (E_THD - v + e - 1 - u);
sfixn tid = threadIdx.x;
// the power for the thread eblock + s * quo(tid, s)
// ((tid >> v) << v) clears the low v bits, i.e. s * quo(tid, s)
sfixn iq = eblock + ((tid >> v) << v);
base[tid] = mul_mod(W[iq], base[tid], p, pinv);
}
/**
* @X, input/output array of length q * 2 * m * s = q * 2^{u + v + 1}
* @n, FFT size n = 2^e
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @u, m = 2^u
* @v, s = 2^v
*
* Implements: I_q @ D_{2, m} @ I_{s} with FFT size n
*
* Requirements: 2 <= m < n, s >= 1, 2 * m * s >= n and m * s >= N_THD
*/
void q_ext_stride_twiddle2_dev(sfixn *X, const sfixn * const W, sfixn e,
sfixn q, sfixn u, sfixn v, sfixn p)
{
    // Launch I_q @ D_{2, m} @ I_s (FFT size n = 2^e), m = 2^u, s = 2^v.
    if (DEBUG) assert((v >= 0) && (u > 0) && (u < e));
    if (DEBUG) assert((u + v >= E_THD) && (1 + u + v >= e));
    // per subvector only the upper m * s elements are scaled,
    // so q * m * s / N_THD blocks in total
    sfixn nblocks = (q << (u + v - E_THD));
    dim3 grid(nblocks, 1, 1);
    if (nblocks > (1 << 15)) {
        grid.x = (1 << 15);
        grid.y = (nblocks >> 15);
    }
    // precomputed 1/p for the double-based modular multiplication
    double invp = 1 / (double)p;
    // variant a: one power per block (big stride); variant b: per-thread powers
    if (v >= E_THD) {
        hipLaunchKernelGGL(( q_ext_stride_twiddle2a_ker), dim3(grid), dim3(N_THD), 0, 0, X, W, e, u, v, p, invp);
    } else {
        hipLaunchKernelGGL(( q_ext_stride_twiddle2b_ker), dim3(grid), dim3(N_THD), 0, 0, X, W, e, u, v, p, invp);
    }
    hipDeviceSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// I_q @ DFT_2 @ I_s
////////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, device array of length q * 2 * s = q * 2^{v + 1)
* @Y, device array of length q * 2 * s = q * 2^(v + 1) (output)
* @v, s = 2^v
* @p, prime number
*
* Compute: Y = (I_q @ DFT2 @ I_s)X
*
* Requires: s >= N_THD
*
*/
__global__
void q_ext_butterfly_ker(sfixn *Y, const sfixn * const X, sfixn v, sfixn p)
{
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// The number of thread blocks for one subvector is
// snb = s / N_THD
// squo = bid / snb
// sbid = bid % snb
sfixn s = ((sfixn)1 << v);
sfixn snb = (s >> E_THD);
sfixn squo = bid >> (v - E_THD);
sfixn sbid = bid & (snb - 1);
// each subvector is 2 * s elements long, hence the (1 + v) shift
sfixn *B = Y + (squo << (1 + v)) + (sbid << E_THD);
const sfixn *A = X + (squo << (1 + v)) + (sbid << E_THD);
// butterfly across stride s: y0 = x0 + xs, ys = x0 - xs (mod p)
B[threadIdx.x] = add_mod(A[threadIdx.x], A[threadIdx.x + s], p);
B[threadIdx.x + s] = sub_mod(A[threadIdx.x], A[threadIdx.x + s], p);
}
/**
* @X, device array of length q * 2 * s = q * 2^{v + 1)
* @Y, device array of length q * 2 * s = q * 2^(v + 1) (output)
* @v, s = 2^v
* @p, prime number
*
* Implements: I_q @ DFT2 @ I_{s}
*
* Requires: s >= N_THD
*/
void
q_ext_butterfly_dev(sfixn *Y, const sfixn * const X, sfixn q, sfixn v, sfixn p)
{
    // Launch I_q @ DFT2 @ I_s with s = 2^v; requires s >= N_THD.
    if (DEBUG) assert(v >= E_THD);
    // q subvectors, each needing s / N_THD blocks
    sfixn nblocks = (q << (v - E_THD));
    dim3 grid(nblocks, 1, 1);
    if (nblocks > (1 << 15)) {
        grid.x = (1 << 15);
        grid.y = (nblocks >> 15);
    }
    hipLaunchKernelGGL(( q_ext_butterfly_ker), dim3(grid), dim3(N_THD), 0, 0, Y, X, v, p);
    hipDeviceSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
//
// List of 2d FFTs, main program
//
// I_q @ DFT_{m, n} = (I_q @ (DFT_m @ I_n)) (I_q @ (I_m @ DFT_n))
//
////////////////////////////////////////////////////////////////////////////////
/**
* @X, device array of length q * m * n
* @q, positive integer, the number of 2d FFTs
* @em, m = 2^em
* @en, n = 2^en
* @wm, m-th primitive root of unity
* @wn, n-th primitive root of unity
* @Wm, [1, wm, ..., wm^{m/2-1}]
* @Wn, [1, wn, ..., wn^{n/2-1}]
* @p, prime number
*
* Compute X = I_q @ DFT_{m, n}(X, wm, wn)
*
* Requirements: q >= 1, m >= , n >= 2 * N_THD
*
*/
void list_bivariate_stockham_dev(sfixn *X, sfixn q, sfixn em,
const sfixn *Wm, sfixn en, const sfixn *Wn, sfixn p)
{
// work space Y;
sfixn *Y;
hipMalloc((void**)&Y, (q << (em + en)) * sizeof(sfixn) );
// Step 1:
//
// I_q @ I_m @ DFT_n = I_{qm} @ prod_{i = 0}^{en - 1}
// (DFT_2 @ I_{n/2}) (1.3)
// (D_{2, 2^{en - i - 1}} @ I_{2^i}) (1.2)
// (L_2^{2^{en - i}} @ I_{2^i}) (1.1)
// i = en - 1, both (1.1) and (1.2) are trivial
// (1.3) becomes I_{qm} @ DFT_2 @ I_{n/2}
q_ext_butterfly_dev(Y, X, (q << em), (en - 1), p);
for (sfixn i = en - 2; i >= 0; --i) {
// (1.1)
q_ext_stride_transpose2_dev(X, Y, (q << em), (en - i), i);
// (1.2)
q_ext_stride_twiddle2_dev(X, Wn, en, (q << em), (en - i - 1), i, p);
// (1.3)
q_ext_butterfly_dev(Y, X, (q << em), (en - 1), p);
}
hipMemcpy(X, Y, sizeof(sfixn)* (q << (em + en)), hipMemcpyDeviceToDevice);
// Step 2:
// I_q @ (DFT_m @ I_n) = I_q @ prod_{i = 0}^{em - 1}
// (DFT_2 @ I_{m/2} @ I_n) (2.3)
// (D_{2, 2^{em - i - 1}} @ I_{2^i} @ I_n) (2.2)
// (L_2^{2^{em - i}} @ I_{2^i} @ I_n) (2.1)
// now effective data are in Y.
// i = em - 1, both (2.1) and (2.2) are trivial
// (2.3) becomes I_{q} @ DFT_2 @ I_{m/2} @ I_{n}
q_ext_butterfly_dev(X, Y, q, em - 1 + en, p);
for (sfixn i = em - 2; i >= 0; --i) {
// (2.1)
q_ext_stride_transpose2_dev(Y, X, q, (em - i), (i + en));
// (2.2)
q_ext_stride_twiddle2_dev(Y, Wm, em, q, (em - i - 1), (i + en), p);
// (2.3)
q_ext_butterfly_dev(X, Y, q, (em - 1 + en), p);
}
hipFree(Y);
if (DEBUG) checkCudaError("After list_bivariate_stockham_dev");
}
void list_bivariate_stockham_dev(sfixn *X, sfixn q, sfixn em, sfixn wm,
sfixn en, sfixn wn, sfixn p)
{
// initialize the primitive roots
sfixn *Wn, *Wm;
hipMalloc((void**)&Wn, sizeof(sfixn) << (en - 1));
hipMalloc((void**)&Wm, sizeof(sfixn) << (em - 1));
get_powers_binary(en - 1, Wn, wn, p);
get_powers_binary(em - 1, Wm, wm, p);
list_bivariate_stockham_dev(X, q, em, Wm, en, Wn, p);
hipFree(Wn);
hipFree(Wm);
if (DEBUG) checkCudaError("After list_bivariate_stockham_dev");
}
////////////////////////////////////////////////////////////////////////////////
// Some testing functions
////////////////////////////////////////////////////////////////////////////////
void test_list_stockham()
{
//sfixn p = 469762049;
sfixn p = 257;
sfixn m = 10;
sfixn k = 4;
sfixn n = ((sfixn)1 << k);
sfixn *X = new sfixn[n*m];
sfixn w = primitive_root(k, p);
sfixn invw = inv_mod(w, p);
for (sfixn u = 0; u < m; ++u) {
for (sfixn v = 0; v < n; ++v) { X[u * n + v] = v; }
}
printf("Input: \n");
printf("w = %d\n", w);
for (sfixn i = 0; i < m; ++i) print_vector(n, X + i * n);
///////////////////////////////////////
list_stockham_host(X, m, k, w, p);
///////////////////////////////////////
printf("Output: \n");
for (sfixn i = 0; i < m; ++i) print_vector(n, X + i * n);
delete [] X;
checkCudaError("Error found");
}
////////////////////////////////////////////////////////////////////////////////
void test_list_bivariate_stockham_dev() {
sfixn q = 2;
sfixn em = 3;
sfixn en = 3;
sfixn m = (1 << em);
sfixn n = (1 << en);
sfixn *X = new sfixn[q*m*n]();
for (sfixn i = 0; i < q * m * n; ++i) X[i] = i % n;
for (sfixn i = 0; i < q; ++i) {
printf("matrix %d:\n", i);
print_matrix(m, n, X + i * m * n);
}
sfixn p = 257;
sfixn wn = primitive_root(en, p);
sfixn wm = primitive_root(em, p);
printf("n = %d, wn = %d\n", n, wn);
printf("m = %d, wm = %d\n", m, wm);
sfixn *X_d;
hipMalloc((void**)&X_d, sizeof(sfixn)*m*n*q);
hipMemcpy(X_d, X, sizeof(sfixn)*m*n*q, hipMemcpyHostToDevice);
list_bivariate_stockham_dev(X_d, q, em, wm, en, wn, p);
hipMemcpy(X, X_d, sizeof(sfixn)*m*n*q, hipMemcpyDeviceToHost);
for (sfixn i = 0; i < q; ++i) {
printf("matrix %d:\n", i);
print_matrix(m, n, X + i * m * n);
}
delete [] X;
hipFree(X_d);
checkCudaError("Error found");
}
| 7448cef6902c980d05b85aef8ca61ccf2bc1a35a.cu | #include <cassert>
#include <iostream>
#include "../include/list_stockham.h"
#include "../include/defines.h"
#include "../include/inlines.h"
#include "../include/printing.h"
#include "../include/cudautils.h"
#include "../include/fft_aux.h"
#include "../include/rdr_poly.h"
///////////////////////////////////////////////////////////////////////////////
// Reversion history:
//
// File created at Mon Jul 12 EDT 2010, WP
//
// -- list of 1d FFT implemented
// -- 2d FFT implemented
// -- list of 2d FFT implemented
//
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// When the fft size is small, say 256, it is not a good idea to run a single
// fft using GPUs. We implement the formula I_m @ DFT_n with the condition
// m * n large enough. Currently, we assume the n-th primitive root unity w
// is used for all DFTs.
//
// We use Stockham's FFT inside and let m = 2^j, n = 2^k.
//
// I_m @ DFT_n = Prod_{i = 0}^{k - 1}
// (I_m @ DFT2 @ I_{2^{k - 1}})
// (I_m @ D_{2, 2^{k - i - 1}} @ I_{2^i})
// (I_m @ L_2^{2^{k - i}} @ I_{2^i})
//
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// List of stride permutations: (I_m @ L_2^{2^{k - i}} @ I_{2^i})
///////////////////////////////////////////////////////////////////////////////
// each thread block consists of N_THD number of threads
// each thread block use N_THD X sizeof(sfixn) bytes shared memory
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, input array of length m * n
* @Y, output array of length m * n
* @m,
* @k, n = 2^k
* @i, index from 0 to k - 1
*
* Compute
*
* Y = (I_m @ L_2^{2^{k-i}} @ I_{2^i}) X
*
* with the case that s = 2^i >= N_THD, multiple thread blocks move one stride.
*
* The total number of thread blocks required is m * n / N_THD.
*
* Each group of n / N_THD thread blocks handle a subvector of size n.
*
* Requirements:
*
* (1) m >= 1 (2) k > i >= E_THD
*/
__global__
void list_stride_transpose2a_ker(sfixn *Y, const sfixn * const X,
sfixn k, sfixn i)
{
__shared__ sfixn block[N_THD];
// block index in the kernel
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// determine which subvector to work
// squo = bid / (n / N_THD)
// sbid = bid % (n / N_THD)
sfixn squo = (bid >> (k - E_THD));
sfixn sbid = (bid & ((1 << (k - E_THD)) - 1));
// now sbid is the block index inside each group
// delta = s / N_THD;
sfixn edelta = i - E_THD;
// iq = quo(sbid, delta) and ir = rem(sbid, delta)
sfixn iq = (sbid >> edelta);
sfixn ir = (sbid & ((1 << edelta) - 1));
// iqq = quo(iq, 2) and iqr = rem(iq, 2)
sfixn iqq = (iq >> 1);
sfixn iqr = (iq & 1);
// read in data from X
// the input offset for this block is squo * n + iq * s + ir * N_THD
const sfixn *din = X + (squo << k) + (iq << i) + (ir << E_THD);
// each thread read in one element
block[threadIdx.x] = din[threadIdx.x];
__syncthreads();
// write data out to Y
// the output offset for this block is
// squo * n + rem(iq, 2) * n / 2 + quo(iq, 2) * s + ir * N_THD
sfixn *dout = Y + (squo << k) + (iqr << (k - 1))
+ (iqq << i) + (ir << E_THD);
dout[threadIdx.x] = block[threadIdx.x];
}
/**
* @X, input array of length m * n
* @Y, output array of length m * n
* @m,
* @k, n = 2^k
* @i, index from 0 to k - 1
*
* Compute
*
* Y = (I_m @ L_2^{2^{k-i}} @ I_{2^i}) X
*
* with the case that s = 2^i < N_THD, one thread block moves at least
* two strides. The total number of thread blocks required is m * n / N_THD.
* Each group of n / N_THD thread blocks handle a subvector of size n.
*
* Requirements:
*
* (1) m >= 1, (2) 0 <= i < E_THD, (3) k >= E_THD
*/
__global__
void list_stride_transpose2b_ker(sfixn *Y, const sfixn * const X,
sfixn k, sfixn i)
{
__shared__ sfixn block[N_THD];
// block index in the kernel
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// read in data from X
// the input offset for this block is bid * N_THD
const sfixn *din = X + (bid << E_THD);
block[threadIdx.x] = din[threadIdx.x];
__syncthreads();
// determine which subvector to work
// squo = bid / (n / N_THD)
// sbid = bid / (n / N_THD)
sfixn squo = (bid >> (k - E_THD));
sfixn sbid = (bid & ((1 << (k - E_THD)) - 1));
// now sbid is the block index inside each group
// the following code is to the in-block shuffle,
// hard to explain and check the note
// offset0 = squo * n + sbid * N_THD / 2
// offset1 = squo * n + sbid * N_THD / 2 + n / 2
// base = Y + offset0
sfixn *base = Y + (squo << k) + (sbid << (E_THD - 1));
sfixn tid = threadIdx.x;
// iq = quo(tid, s) and ir = rem(tid, s);
sfixn iq = (tid >> i);
sfixn ir = tid & ((1 << i) - 1);
// f(i) = (rem(2iq, N_THD/s) + quo(2iq, N_THD/s)) * s + ir
sfixn fi = (iq << 1) >> (E_THD - i);
fi += ((iq << 1) & ((1 << (E_THD - i)) - 1));
fi <<= i;
fi += ir;
// replace the following code by the branch-free code
// if (tid < N_THD/2)
// dout[tid] = block[fi];
// else
// dout[tid - N_THD / 2 + (1 << (k-1))] = block[fi];
sfixn *dout = base + (tid >> (E_THD - 1))
* ((1 << (k - 1)) - (1 << (E_THD - 1)));
dout[tid] = block[fi];
}
/**
* @X, device array of length m * n (input)
* @Y, device array of length m * n (output)
* @m,
* @k, n = 2^k
* @i, index from 0 to k - 2
*
* Compute
*
* Y = (I_m @ L_2^{2^{k-i}} @ I_{2^i}) X
*
* Requirememts:
*
* (1) m >= 1, (2) 0 <= i <= k - 2, (3) k >= N_THD
*
* TESTED
*
*/
void list_stride_transpose2_dev(sfixn *Y, const sfixn * const X, sfixn m,
sfixn k, sfixn i)
{
if (DEBUG) assert((m >= 1) && (i >= 0) && (k >= E_THD) && (i < k - 1));
sfixn nb = (m << (k - E_THD));
dim3 nBlk(nb, 1, 1);
// the maximal possible dimension is 2^15 = 32768 < 65535
// this requires nb <= 2^30, OK for now.
if (nb > (1 << 15)) { nBlk.x = (1 << 15); nBlk.y = (nb >> 15); }
if (i >= E_THD) {
// printf("Calling transpose2a_ker k = %d, i = %d\n", k, i);
list_stride_transpose2a_ker<<<nBlk, N_THD>>>(Y, X, k, i);
} else {
// printf("Calling transpose2b_ker k = %d, i = %d\n", k, i);
list_stride_transpose2b_ker<<<nBlk, N_THD>>>(Y, X, k, i);
}
cudaThreadSynchronize();
}
#undef E_THD
#undef N_THD
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// (I_m @ D_{2, 2^{k-i-1}} @ I_{2^i}) //
///////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* D_{2, 2^{k - i - 1}} is a matrix of size 2^{k-i} X 2^{k-i}
*
* For example, let j = 0, k = 4, i = 1 and w^8 = -1
*
* [ 1 ] 0
* [ 1 ] 1
* [ 1 ] 2
* [ 1 ] 3
* D_{2, 4} = [ 1 ] 4
* [ w^2 ] 5
* [ w^4 ] 6
* [ w^6 ] 7
*
* [ 1 ] 0
* [ 1 ] 1
* [ 1 ] 2
* [ 1 ] 3
* [ 1 ] 4
* [ 1 ] 5
* [ 1 ] 6
* D_{2, 4} @ I_2 = [ 1 ] 7
* [ 1 ] 8
* [ 1 ] 9
* [ w^2 ] 10
* [ w^2 ] 11
* [ w^4 ] 12
* [ w^4 ] 13
* [ w^6 ] 14
* [ w^6 ] 15
*
* If N_THD = 4, then 2 blocks are needed. Block 0 handles [1, 1, w^2, w^2]
* and block 1 handle [w^4, w^4, w^6, w^6].
*
* For each group, only half of the elements needs to be modified,
* and the index range is from n / 2 to n - 1.
*
* Hence the number of thread blocks for each group is n / (2 * N_THD).
* The total number of thread blocks is m * n / (2 * N_THD).
*
**/
/**
* @X, input/output array of length m * n
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @m,
* @k, n = 2^k
* @i, index from 0 to k - 1
*
* Multiple thread blocks (>1) handle a stride, (s = 2^i is big)
*
* Requirements: m >= 1, k > i > E_THD
*
*/
__global__ void
list_stride_twiddle2a_ker(sfixn *X, const sfixn * const W, sfixn k, sfixn i,
sfixn p, double pinv)
{
// block index
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// determine which subvector to work
// squo = bid / (n / (2 * N_THD))
// sbid = bid % (n / (2 * N_THD))
// sbid is the block index inside each group
sfixn squo = (bid >> (k - 1 - E_THD));
sfixn sbid = (bid & ((1 << (k - 1 - E_THD)) - 1));
// all threads in a thread block are using the same power!
// This power is w^(s*e), with e = quo(sbid, s/N_THD)
sfixn w = W[(sbid >> (i - E_THD)) << i];
// starting position for the block, the first n / 2 elements unchanged
sfixn *base = X + (squo << k) + ((sfixn)1 << (k - 1)) + (sbid << E_THD);
base[threadIdx.x] = mul_mod(w, base[threadIdx.x], p, pinv);
}
/**
* @X, input/output array of length m * n
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @m,
* @k, n = 2^k
* @i, index from 0 to k - 1
*
* A thread block handle multiple strides (s = 2^i is small)
*
* Requirements:
*
* (1) m >= 1 (2) k > E_THD >= i >= 0
*
*/
__global__ void
list_stride_twiddle2b_ker(sfixn *X, const sfixn * const W, sfixn k, sfixn i,
sfixn p, double pinv)
{
// block index
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// determine which subvector to work
// squo = bid / (n / (2 * N_THD))
// sbid = bid % (n / (2 * N_THD))
// sbid is the block index inside each group
sfixn squo = (bid >> (k - 1 - E_THD));
sfixn sbid = (bid & ((1 << (k - 1 - E_THD)) - 1));
// starting position for this block
// the first n / 2 elements will be unchanged.
sfixn *base = X + (squo << k) + ((sfixn)1 << (k - 1)) + (sbid << E_THD);
// the starting root for the thread block is w^(e*s)
// with e = sbid * (N_THD / s). Thus e*s = sbid * N_THD.
sfixn tid = threadIdx.x;
// the power for the thread e * s + s * quo(tid, s)
sfixn iq = (sbid << E_THD) + ((tid >> i) << i);
base[tid] = mul_mod(W[iq], base[tid], p, pinv);
}
/**
* @X, input/output array of length m * n = 2^{k+j}
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @m,
* @k, n = 2^k
* @i, index from 0 to k - 1
*
* Requirements: 0 <= i <= k - 2, n > N_THD, m >= 1
*
* TESTED
*
*/
void list_stride_twiddle2_dev(sfixn *X, const sfixn * const W, sfixn m,
sfixn k, sfixn i, sfixn p)
{
if (DEBUG) assert((i >= 0) && (i < k - 1));
if (DEBUG) assert((m >= 1) && (k > E_THD));
// number of blocks is m * (n / 2) / N_THD
sfixn nb = (m << ( k - 1 - E_THD));
dim3 nBlk(nb, 1, 1);
if (nb > (1 << 15)) { nBlk.x = (1 << 15); nBlk.y = (nb >> 15); }
double pinv = 1 / (double)p;
if (i > E_THD) {
// printf("Calling twiddle2a_ker m = %d, k = %d, i = %d\n", m, k, i);
// printf("nBlk = %d, nThd = %d\n", nBlk.x, N_THD);
list_stride_twiddle2a_ker<<<nBlk, N_THD>>>(X, W, k, i, p, pinv);
} else {
// printf("Calling twiddle2b_ker m = %d, k = %d, i = %d\n", m, k, i);
// printf("nBlk = %d, nThd = %d\n", nBlk.x, N_THD);
list_stride_twiddle2b_ker<<<nBlk, N_THD>>>(X, W, k, i, p, pinv);
}
cudaThreadSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// List of butterflies (I_m @ DFT2 @ I_{n/2}) //
////////////////////////////////////////////////////////////////////////////////
//
// a butterfly operation is defined as
//
// x0 y0
// \/
// /\
// xs ys
//
// with y0 = x0 + xs and ys = x0 - xs.
// In total, 2 + 2 elements are involved for each butterfly
////////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, device array of length m * n (input)
* @Y, device array of length m * n (output)
* @m,
* @k, n = 2^k
* @p, prime number
*
* Implements I_m @ DFT2 @ I_{n/2}
*
* Requires m >= 1, k > E_THD
*
*/
__global__ void
list_butterfly_ker(sfixn *Y, const sfixn * const X, sfixn k, sfixn p)
{
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// determine which subvector to work
// squo = bid / ((n / 2) / N_THD)
// sbid = bid % ((n / 2) / N_THD)
// sbid is the block index inside each group
sfixn squo = (bid >> (k - 1 - E_THD));
sfixn sbid = (bid & ((1 << (k - 1 - E_THD)) - 1));
sfixn *B = Y + (squo << k) + (sbid << E_THD);
const sfixn *A = X + (squo << k) + (sbid << E_THD);
sfixn tid = threadIdx.x;
sfixn halfn = ((sfixn )1 << (k - 1));
B[tid] = add_mod(A[tid], A[tid + halfn], p);
B[tid + halfn] = sub_mod(A[tid], A[tid + halfn], p);
}
/**
* @X, device array of length m * n (input)
* @Y, device array of length m * n (output)
* @m,
* @k, n = 2^k
* @p, prime number
*
* I_m @ DFT2 @ I_{n/2}
*
* Requirements: m >= 1, k > E_THD
*
* TESTED
*
*/
void list_butterfly_dev(sfixn *Y, const sfixn *X, sfixn m, sfixn k, sfixn p)
{
if (DEBUG) assert(m >= 1 && k > E_THD);
sfixn nb = (m << (k - E_THD - 1));
dim3 nBlk(nb, 1, 1);
if (nb > (1 << 15)) { nBlk.x = (1 << 15); nBlk.y = (nb >> 15); }
list_butterfly_ker<<<nBlk, N_THD>>>(Y, X, k, p);
cudaThreadSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
// List of FFTs: I_m @ DFT_n, Main Program //
////////////////////////////////////////////////////////////////////////////////
/**
* @X, input / output device array of length m * n
* @m,
* @k, n = 2^k
* @w, n-th primitive root of unity
* @W, [1, w, w^2, ..., w^{n/2-1}]
* @p, fourier prime number
*
* X will be filled by I_m @ DFT_n(X, w)
*
* :::Warning::: m could be a non-power of 2
*
*/
void list_stockham_dev(sfixn *X, sfixn m, sfixn k, const sfixn *W, sfixn p)
{
// TODO: check the size of k
if (DEBUG) assert((m >= 1));
sfixn *Y;
cudaMalloc((void**)&Y, sizeof(sfixn) * (m << k));
// sequence of applications
// i = k - 1 is trival for the other operations
list_butterfly_dev(Y, X, m, k, p);
for (sfixn i = k - 2; i >= 0; --i) {
list_stride_transpose2_dev(X, Y, m, k, i);
list_stride_twiddle2_dev(X, W, m, k, i, p);
list_butterfly_dev(Y, X, m, k, p);
}
cudaMemcpy(X, Y, sizeof(sfixn) * (m << k), cudaMemcpyDeviceToDevice);
cudaFree(Y);
if (DEBUG) checkCudaError("error found in list_stockham_dev");
}
void list_stockham_dev(sfixn *X, sfixn m, sfixn k, sfixn w, sfixn p)
{
// initialize the primitive roots
sfixn *W;
cudaMalloc((void**)&W, sizeof(sfixn) << (k - 1));
get_powers_binary(k - 1, W, w, p);
list_stockham_dev(X, m, k, W, p);
cudaFree(W);
if (DEBUG) checkCudaError("error found in list_stockham_dev");
}
/**
* @X, input / output host array of length m * n
* @m,
* @k, n = 2^k
* @w, n-th primitive root of unity
* @p, fourier prime number
*
* X will be filled by I_m @ DFT_n(X, w)
*/
void list_stockham_host(sfixn *X, sfixn m, sfixn k, sfixn w, sfixn p) {
sfixn *X_d;
cudaMalloc((void**)&X_d, sizeof(sfixn) * (m << k));
cudaMemcpy(X_d, X, sizeof(sfixn) * (m << k), cudaMemcpyHostToDevice);
///////////////////////////////////////
list_stockham_dev(X_d, m, k, w, p);
///////////////////////////////////////
cudaMemcpy(X, X_d, sizeof(sfixn) * (m << k), cudaMemcpyDeviceToHost);
cudaFree(X_d);
}
////////////////////////////////////////////////////////////////////////////////
// The goal is to implement DFT_n @ I_m. We use the Stockham FFT, that is
//
// DFT_n @ I_m = Prod_{i = 0}^{k - 1}
// DFT2 @ I_{2^{k - 1}} @ I_{2^j}
// D_{2, 2^{k - i - 1}} @ I_{2^i} @ I_{2^j}
// L_2^{2^{k - i}} @ I_{2^i} @ I_{2^j}
//
// = DFT2 @ I_{2^{k - 1 + j}} (1)
// D_{2, 2^{k - i - 1}} @ I_{2^{i+^j}} (2)
// L_2^{2^{k - i}} @ I_{2^{i+j}} (3)
//
// Note that (1) has been impmenented by stockham.cu, however both (2) and (3)
// invalidate its assumptions. We now relax them.
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Implementation of L_2^{n} @ I_s, extended version, that is, it works
// for any input such that n >= 4 and s >= 1 and n * s >= N_THD.
////////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, input array of length n * s = 2^{u + v}
* @Y, output array of length n * s = 2^{u + v}
* @u, n = 2^u
* @v, s = 2^v
*
* Compute the general stride transposition
*
* Y = (L_2^n @ I_s) X
*
* Multiple thread blocks (>= 1) move a stride of s elements
*
* Requirements: u >= 2 and v >= E_THD
*
* If u = 1, then do nothing and the transiposition is trivial.
*
* TESTED
*/
__global__
void ext_stride_transpose2a_ker(sfixn *Y, const sfixn *X, sfixn u, sfixn v)
{
__shared__ sfixn block[N_THD];
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// delta = s / N_THD;
// the number of thread block needed for each stride
sfixn edelta = v - E_THD;
// iq = quo(bid, delta) and ir = rem(bid, delta)
// iq tells which stride the block is working on
// ir tells which portion of the stride the block is working on
sfixn iq = bid >> edelta;
sfixn ir = bid & ((1 << edelta) - 1);
// iqq = quo(iq, 2) and iqr = rem(iq, 2)
sfixn iqq = (iq >> 1);
sfixn iqr = (iq & 1);
// read in data from X
// the input offset for this block is iq * s + ir * N_THD
const sfixn *din = X + (iq << v) + (ir << E_THD);
block[threadIdx.x] = din[threadIdx.x];
__syncthreads();
// write data out to Y
//
// if iqr = 0 (even), write to Y + iqq * s + ir * N_THD
// if iqr = 1 (odd), write to Y + (n / 2) * s + iqq * s + ir * N_THD
// that is, iqr * (n / 2) * s + iqq * s + ir * N_THD
sfixn *dout = Y + (iqr << (u + v - 1)) + (iqq << v) + (ir << E_THD);
dout[threadIdx.x] = block[threadIdx.x];
__syncthreads();
}
/**
* @X, input array of length n * s = 2^{u + v}
* @Y, output array of length n * s = 2^{u + v}
* @u, n = 2^u
* @v, s = 2^v
*
* Compute the general stride transposition
*
* Y = (L_2^n @ I_s) X
*
* A thread block moves multiple strides
*
* Requirements: u >= 2 and v < E_THD
*
* If u = 1, then do nothing and the transiposition is trivial.
*
* TESTED
*/
__global__
void ext_stride_transpose2b_ker(sfixn *Y, const sfixn *X, sfixn u, sfixn v) {
__shared__ sfixn block[N_THD];
// block index in the kernel
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// read in data from X
// the input offset for this block is bid * N_THD
const sfixn *din = X + (bid << E_THD);
block[threadIdx.x] = din[threadIdx.x];
__syncthreads();
// offset0 = bid * N_THD / 2
// offset1 = bid * N_THD / 2 + (n / 2) * s
// base = Y + offset0
sfixn *base = Y + (bid << (E_THD - 1));
sfixn tid = threadIdx.x;
// iq = quo(tid, s) and ir = rem(tid, s);
sfixn iq = (tid >> v);
sfixn ir = tid & ((1 << v) - 1);
// the following code is to the in-block shuffle
// f(i) = (rem(2iq, N_THD/s) + quo(2iq, N_THD/s)) * s + ir
sfixn fi = (iq << 1) >> (E_THD - v);
fi += ((iq << 1) & ((1 << (E_THD - v)) - 1));
fi <<= v;
fi += ir;
// replace the following code by the branch-free code
// if (tid < N_THD/2)
// dout[tid] = block[fi];
// else
// dout[tid - N_THD / 2 + (1 << (u + v - 1))] = block[fi];
sfixn *dout = base + (tid >> (E_THD - 1))
* ((1 << (u + v - 1)) - (1 << (E_THD - 1)));
dout[tid] = block[fi];
}
void ext_stride_transpose2_dev(sfixn *Y, const sfixn *X, sfixn u, sfixn v)
{
if (DEBUG) assert((u >= 2) && (v >= 0) && (u + v >= E_THD));
sfixn nb = ((sfixn)1 << (u + v - E_THD));
dim3 nBlk(nb, 1, 1);
if (nb > (1 << 15)) { nBlk.x = (1 << 15); nBlk.y = (nb >> 15); }
if (v >= E_THD) {
// printf("Calling transpose2a_ker with u = %d, v = %d\n", u, v);
ext_stride_transpose2a_ker<<<nBlk, N_THD>>>(Y, X, u, v);
} else {
// printf("Calling transpose2b_ker with u = %d, v = %d\n", u, v);
ext_stride_transpose2b_ker<<<nBlk, N_THD>>>(Y, X, u, v);
}
cudaThreadSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Implementation of D_{2, m} @ I_{s}, extended version, that is, it works
// for any input such that m >= 2 and s >= 1 and m * s >= N_THD. Note that
// the FFT size n is still in use and we also require m <= n / 2.
//
// For example, let n = 16 be the FFT size. We have
//
// [ 1 ] 0
// [ 1 ] 1
// D_{2, 2} = [ 1 ] 2
// [ w^4 ] 3
//
// [ 1 ] 0
// [ 1 ] 1
// [ 1 ] 2
// [ 1 ] 3
// D_{2, 2} @ I_4 = [ 1 ] 4
// [ 1 ] 5
// [ 1 ] 6
// [ 1 ] 7
// [ 1 ] 8
// [ 1 ] 9
// [ 1 ] 10
// [ 1 ] 11
// [ w^4 ] 12
// [ w^4 ] 13
// [ w^4 ] 14
// [ w^4 ] 15
//
// [ 1 ] 0
// [ 1 ] 1
// [ 1 ] 2
// [ 1 ] 3
// [ 1 ] 4
// [ 1 ] 5
// [ 1 ] 6
// D_{2, 4} @ I_2 = [ 1 ] 7
// [ 1 ] 8
// [ 1 ] 9
// [ w^2 ] 10
// [ w^2 ] 11
// [ w^4 ] 12
// [ w^4 ] 13
// [ w^6 ] 14
// [ w^6 ] 15
//
// Warning:
//
// The purpuse to handle the case 2 * m * s >= n. If this does not hold,
// be aware of the meaning of the computation result.
//
// For example, let n = 16, s = 1 and m = 4, (2 * m * s = 8 < 16)
// Its matrix representation is
//
// [ 1 ]
// [ 1 ]
// [ 1 ]
// [ 1 ]
// [ 1 ]
// [ w ]
// [ w^2 ]
// [ w^3 ]
//
// where w is a 16-primitive root of unity.
//
// The usual D_{2, 4} @ I_1 is represented by
//
// [ 1 ]
// [ 1 ]
// [ 1 ]
// [ 1 ]
// [ 1 ]
// [ x ]
// [ x^2 ]
// [ x^3 ]
//
// with x being a 8-th primitive root of unity.
//
// For the above reason, we require the condition 2 * m * s >= n, for safety.
//
////////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, input/output array of length 2 * m * s = 2^{u + v + 1}
* @n, FFT size n = 2^e
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @u, m = 2^u
* @v, s = 2^v
*
* Implements D_{2, m} @ I_{s} with FFT size n
*
* Multiple thread blocks handle a stride, (s = 2^v is big)
*
* Requirements: (1) v >= E_THD, (2) 2 <= m < n
*
* TESTED
*/
__global__ void
ext_stride_twiddle2a_ker(sfixn *X, const sfixn * const W, sfixn e, sfixn u,
sfixn v, sfixn p, double pinv)
{
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// all threads in a thread block are using the same power!
// the base power for the grid is exp = (n / 2) / m
// The power for the block is w^(exp * e), with e = quo(bid, s / N_THD)
sfixn w = W[(bid >> (v - E_THD)) << (e - 1 - u)];
// starting position for the block, the first m * s elements unchanged
sfixn *base = X + ((sfixn)1 << (u + v)) + (bid << E_THD);
base[threadIdx.x] = mul_mod(w, base[threadIdx.x], p, pinv);
}
/**
* @X, input/output array of length 2 * m * s = 2^{u + v + 1}
* @n, FFT size n = 2^e
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @u, m = 2^u
* @v, s = 2^v
*
* Implements: D_{2, m} @ I_{s} with FFT size n
*
* A thread block handles multiple strides, (s = 2^v is small)
*
* Requirements: (1) v < E_THD, (2) 2 <= m < n
*
* TESTED
*/
__global__ void
ext_stride_twiddle2b_ker(sfixn *X, const sfixn * const W, sfixn e, sfixn u,
sfixn v, sfixn p, double pinv)
{
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
// the first m * s elements will be unchanged.
sfixn *base = X + ((sfixn)1 << (u + v)) + (bid << E_THD);
// threads in a thread block use different powers.
// the base power for the grid is egrid = (n / 2) / m.
// the base power for the block is eblock = bid * (N_THD / s) * egrid.
sfixn eblock = bid << (E_THD - v + e - 1 - u);
sfixn tid = threadIdx.x;
// the power for the thread eblock + s * quo(tid, s)
sfixn iq = eblock + ((tid >> v) << v);
base[tid] = mul_mod(W[iq], base[tid], p, pinv);
}
/**
* @X, input/output array of length 2 * m * s = 2^{u + v + 1}
* @n, FFT size n = 2^e
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @u, m = 2^u
* @v, s = 2^v
*
* Implements: D_{2, m} @ I_{s} with FFT size n
*
* Requirements: 2 <= m < n, s >= 1, 2 * m * s >= n and m * s >= N_THD
*
* TESTED
*/
void ext_stride_twiddle2_dev(sfixn *X, const sfixn * const W, sfixn e, sfixn u,
sfixn v, sfixn p)
{
if (DEBUG) assert((v >= 0) && (u > 0) && (u < e));
if (DEBUG) assert((u + v >= E_THD) && (1 + u + v >= e));
// number of blocks is m * s / N_THD
sfixn nb = (1 << (u + v - E_THD));
dim3 nBlk(nb, 1, 1);
if (nb > (1 << 15)) { nBlk.x = (1 << 15); nBlk.y = (nb >> 15); }
double pinv = 1 / (double)p;
if (v >= E_THD) {
// printf("Calling twiddle2a_ker e = %d, u = %d, v = %d\n", e, u, v);
ext_stride_twiddle2a_ker<<<nBlk, N_THD>>>(X, W, e, u, v, p, pinv);
} else {
// printf("Calling twiddle2b_ker e = %d, u = %d, v = %d\n", e, u, v);
ext_stride_twiddle2b_ker<<<nBlk, N_THD>>>(X, W, e, u, v, p, pinv);
}
cudaThreadSynchronize();
}
/**
* @X, device array of length 2 * s = 2^{v + 1)
* @Y, device array of length 2 * s = 2^(v + 1) (output)
* @v, s = 2^v
* @p, prime number
*
* Implements: DFT2 @ I_{s}
*
* Requires: s >= N_THD
*
*/
__global__
void ext_butterfly_ker(sfixn *Y, const sfixn * const X, sfixn v, sfixn p)
{
sfixn bid = (blockIdx.y << 15) + blockIdx.x;
sfixn *B = Y + (bid << E_THD);
const sfixn *A = X + (bid << E_THD);
sfixn s = ((sfixn)1 << v);
B[threadIdx.x] = add_mod(A[threadIdx.x], A[threadIdx.x + s], p);
B[threadIdx.x + s] = sub_mod(A[threadIdx.x], A[threadIdx.x + s], p);
}
/**
* @X, device array of length 2 * s = 2^{v + 1)
* @Y, device array of length 2 * s = 2^(v + 1) (output)
* @v, s = 2^v
* @p, prime number
*
* Implements: DFT2 @ I_{s}
*
* Requires: s >= N_THD
*
* TESTED
*/
void ext_butterfly_dev(sfixn *Y, const sfixn * const X, sfixn v, sfixn p)
{
if (DEBUG) assert(v >= E_THD);
sfixn nb = ((sfixn)1 << (v - E_THD));
dim3 nBlk(nb, 1, 1);
if (nb > (1 << 15)) { nBlk.x = (1 << 15); nBlk.y = (nb >> 15); }
ext_butterfly_ker<<<nBlk, N_THD>>>(Y, X, v, p);
cudaThreadSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
// The main program of
//
// DFT_n @ I_m = Prod_{i = 0}^{k - 1}
// DFT2 @ I_{2^{k - 1 + j}} (1)
// D_{2, 2^{k - i - 1}} @ I_{2^{i+j}} (2)
// L_2^{2^{k - i}} @ I_{2^{i+j}} (3)
////////////////////////////////////////////////////////////////////////////////
/**
* @X, input / output device array of length n * m
* @k, n = 2^k
* @j, m = 2^j
* @w, n-th primitive root of unity
* @W, [1, w, w^2, ..., w^{n/2-1}]
* @p, fourier prime number
*
* X will be filled by DFT_n @ I_m(X, w)
*
*/
void ext_stockham_dev(sfixn *X, sfixn k, sfixn j, const sfixn *W, sfixn p) {
sfixn *Y;
cudaMalloc((void**)&Y, sizeof(sfixn) << (k + j));
// sequence of applications
// i = k - 1 is trival for the other operations
ext_butterfly_dev(Y, X, k + j - 1, p);
for (sfixn i = k - 2; i >= 0; --i) {
// u = k - i, v = i + j
ext_stride_transpose2_dev(X, Y, k - i, i + j);
// u = k - i - 1, v = i + j
ext_stride_twiddle2_dev(X, W, k, k - i - 1, i + j, p);
ext_butterfly_dev(Y, X, k + j - 1, p);
}
cudaMemcpy(X, Y, sizeof(sfixn) << (k + j), cudaMemcpyDeviceToDevice);
cudaFree(Y);
if (DEBUG) checkCudaError("error found in ext_stockham_dev");
}
/* Without precomputed powers of the root */
void ext_stockham_dev(sfixn *X, sfixn k, sfixn j, sfixn w, sfixn p)
{
// initialize the primitive roots
sfixn *W;
cudaMalloc((void**)&W, sizeof(sfixn) << (k - 1));
get_powers_binary(k - 1, W, w, p);
ext_stockham_dev(X, k, j, W, p);
cudaFree(W);
if (DEBUG) checkCudaError("error found in ext_stockham_dev");
}
///////////////////////////////////////////////////////////////////////////////
// 2-d FFT, the row-column algorithm is
//
// DFT_{m, n} (X) = (DFT_m @ I_n) (I_m @ DFT_n) (X)
//
///////////////////////////////////////////////////////////////////////////////
/**
* @X, input / output device array of length m * n
* @em, m = 2^em (rows)
* @en, n = 2^en (columns)
* @wn, n-th primitive root of unity
* @Wn, [1, wn, wn^2, ..., wn^{n/2-1}]
* @wm, m-th primitive root of unity
* @Wm, [1, wm, wm^2, ..., wm^{m/2-1}]
* @p, fourier prime number
*
* Compute X = DFT_{m, n}(X) = (DFT_m @ I_n) (I_m @ DFT_n)X
*
*/
void bivariate_stockham_dev(sfixn *X, sfixn em, const sfixn *Wm, sfixn en,
    const sfixn *Wn, sfixn p)
{
    // ping-pong work buffer of the same size as X
    sfixn *Y;
    cudaMalloc((void**)&Y, sizeof(sfixn) << (em + en));

    // Stage 1: I_m @ DFT_n -- a list of m independent row FFTs of size n.
    // (inlined list_stockham_dev(X, 1 << em, en, Wn, p))
    sfixn m = ((sfixn)1 << em);
    list_butterfly_dev(Y, X, m, en, p);
    for (sfixn i = en - 2; i >= 0; --i) {
        list_stride_transpose2_dev(X, Y, m, en, i);
        list_stride_twiddle2_dev(X, Wn, m, en, i, p);
        list_butterfly_dev(Y, X, m, en, p);
    }

    // Stage 2: DFT_m @ I_n -- the column FFT; effective data is now in Y.
    // (inlined ext_stockham_dev with k = em, j = en and roots Wm)
    // BUGFIX: this stage must be driven by em, not en; the two coincide
    // only for balanced (em == en) problems, and using en also indexes
    // the size-2^{em-1} table Wm out of bounds when en > em.
    ext_butterfly_dev(X, Y, em + en - 1, p);
    for (sfixn i = em - 2; i >= 0; --i) {
        // u = em - i, v = en + i
        ext_stride_transpose2_dev(Y, X, em - i, en + i);
        // u = em - i - 1, v = en + i
        ext_stride_twiddle2_dev(Y, Wm, em, em - i - 1, en + i, p);
        ext_butterfly_dev(X, Y, em + en - 1, p);
    }
    // every stage-2 cycle ends with a butterfly into X, so the final
    // result is already in the caller's buffer
    cudaFree(Y);
    if (DEBUG) checkCudaError("bivariate_stockham_dev");
}
/**
 * Host driver for the 2-d FFT: X <- DFT_{m, n}(X) with m = 2^em, n = 2^en.
 *
 * @X,  input / output host array of length m * n (rdr-representation)
 * @em, @wm, row count exponent and m-th primitive root of unity
 * @en, @wn, column count exponent and n-th primitive root of unity
 * @p,  fourier prime number
 *
 * On return, entry (i, j) of X holds F(wn^j, wm^i):
 *
 *  F(1, 1)        F(wn, 1)        ... F(wn^(n-1), 1)
 *  F(1, wm)       F(wn, wm)       ... F(wn^(n-1), wm)
 *  ...
 *  F(1, wm^(m-1)) F(wn, wm^(m-1)) ... F(wn^(n-1), wm^(m-1))
 */
void bivariate_stockham_host(sfixn *X, sfixn em, sfixn wm,
    sfixn en, sfixn wn, sfixn p)
{
    // stage the coefficient array on the device
    sfixn *X_d;
    cudaMalloc((void**)&X_d, sizeof(sfixn) << (em + en));
    cudaMemcpy(X_d, X, sizeof(sfixn) << (em + en), cudaMemcpyHostToDevice);

    // half-size power tables for both primitive roots
    sfixn *Wm, *Wn;
    cudaMalloc((void**)&Wm, sizeof(sfixn) << (em - 1));
    cudaMalloc((void**)&Wn, sizeof(sfixn) << (en - 1));
    get_powers_binary(em - 1, Wm, wm, p);
    get_powers_binary(en - 1, Wn, wn, p);

    bivariate_stockham_dev(X_d, em, Wm, en, Wn, p);

    // fetch the transform back and release all device storage
    cudaMemcpy(X, X_d, sizeof(sfixn) << (em + en), cudaMemcpyDeviceToHost);
    cudaFree(Wn);
    cudaFree(Wm);
    cudaFree(X_d);
    if (DEBUG) checkCudaError("bivariate_stockham_host");
}
/**
 * Device-side inverse 2-d FFT. The inverse-root power tables invWm / invWn
 * are assumed precomputed; the forward transform evaluated at the inverse
 * roots, scaled by 1/(m*n) mod p, yields the inverse transform.
 */
void inverse_bivariate_stockham_dev(sfixn *X, sfixn em, const sfixn *invWm,
    sfixn en, const sfixn *invWn, sfixn p)
{
    sfixn m = sfixn(1) << em;
    sfixn n = sfixn(1) << en;
    // single scaling constant 1/(m*n) mod p, applied in one pass at the end
    sfixn scale = mul_mod(inv_mod(m, p), inv_mod(n, p), p);
    bivariate_stockham_dev(X, em, invWm, en, invWn, p);
    scale_vector_dev(scale, m * n, X, p);
}
/**
 * Host-side inverse 2-d FFT: X <- DFT_{m, n}^{-1}(X) with m = 2^em, n = 2^en.
 *
 * Since (DFT_m^(-1) @ DFT_n^(-1)) (DFT_m @ DFT_n) = I_m @ I_n, we have
 * (DFT_m @ DFT_n)^(-1) = DFT_m^(-1) @ DFT_n^(-1): run the forward
 * transform at the inverse roots and scale by 1/(m*n) mod p.
 */
void inverse_bivariate_stockham_host(sfixn *X, sfixn em, sfixn wm,
    sfixn en, sfixn wn, sfixn p)
{
    sfixn m = sfixn(1) << em;
    sfixn n = sfixn(1) << en;
    // normalization constant 1/(m*n) mod p
    sfixn scale = mul_mod(inv_mod(m, p), inv_mod(n, p), p);

    // stage the data on the device
    sfixn *X_d;
    cudaMalloc((void**)&X_d, sizeof(sfixn) << (em + en));
    cudaMemcpy(X_d, X, sizeof(sfixn) << (em + en), cudaMemcpyHostToDevice);

    // power tables of the *inverse* roots drive the forward algorithm
    sfixn *Wm, *Wn;
    cudaMalloc((void**)&Wm, sizeof(sfixn) << (em - 1));
    cudaMalloc((void**)&Wn, sizeof(sfixn) << (en - 1));
    get_powers_binary(em - 1, Wm, inv_mod(wm, p), p);
    get_powers_binary(en - 1, Wn, inv_mod(wn, p), p);

    bivariate_stockham_dev(X_d, em, Wm, en, Wn, p);
    scale_vector_dev(scale, m * n, X_d, p);

    cudaMemcpy(X, X_d, sizeof(sfixn) << (em + en), cudaMemcpyDeviceToHost);
    cudaFree(Wn);
    cudaFree(Wm);
    cudaFree(X_d);
    if (DEBUG) checkCudaError("inverse_bivariate_stockham_host");
}
/**
 * In-place FFT product of two bivariate polynomials over Z/pZ, both already
 * padded to the full power-of-two grid.
 *
 * @m, @em : FFT size in y (rows), m = 2^em
 * @n, @en : FFT size in x (columns), n = 2^en
 * @F      : device coefficient vector of F; receives the product
 * @G      : device coefficient vector of G; clobbered by its transform
 * @p      : fourier prime number
 *
 * F <-- DFT_{m, n}^{-1}(DFT_{m, n}(F) * DFT_{m, n}(G))
 */
void bi_stockham_poly_mul_dev(sfixn m, sfixn em, sfixn n, sfixn en,
    sfixn *F, sfixn *G, sfixn p)
{
    // forward roots, and the 1/(m*n) constant for the inverse pass
    sfixn wm = primitive_root(em, p);
    sfixn wn = primitive_root(en, p);
    sfixn scale = mul_mod(inv_mod(m, p), inv_mod(n, p), p);

    // device power tables, reused for the forward and inverse passes
    sfixn *Wm, *Wn;
    cudaMalloc((void**)&Wm, sizeof(sfixn) << (em - 1));
    cudaMalloc((void**)&Wn, sizeof(sfixn) << (en - 1));

    // F <- DFT(F), G <- DFT(G), then F <- F .* G pointwise
    get_powers_binary(em - 1, Wm, wm, p);
    get_powers_binary(en - 1, Wn, wn, p);
    bivariate_stockham_dev(F, em, Wm, en, Wn, p);
    bivariate_stockham_dev(G, em, Wm, en, Wn, p);
    pointwise_mul_dev(n * m, em + en, F, G, p);

    // refill the tables with inverse-root powers and invert the transform
    get_powers_binary(em - 1, Wm, inv_mod(wm, p), p);
    get_powers_binary(en - 1, Wn, inv_mod(wn, p), p);
    bivariate_stockham_dev(F, em, Wm, en, Wn, p);
    scale_vector_dev(scale, m * n, F, p);

    cudaFree(Wm);
    cudaFree(Wn);
    if (DEBUG) checkCudaError("bi_stockham_poly_mul_dev");
}
/**
 * Multiply two balanced bivariate polynomials (rdr-representation).
 *
 * Returns a freshly allocated rdr_poly of partial sizes
 * (F.ns[0] + G.ns[0] - 1, F.ns[1] + G.ns[1] - 1); the caller owns it.
 */
rdr_poly*
bi_stockham_poly_mul_host(const rdr_poly &F, const rdr_poly &G, sfixn p)
{
    // product partial sizes, and the power-of-two FFT grid covering them
    sfixn lx = F.ns[0] + G.ns[0] - 1;
    sfixn ly = F.ns[1] + G.ns[1] - 1;
    sfixn ex = ceiling_log2(lx);
    sfixn ey = ceiling_log2(ly);
    sfixn nx = (sfixn)1 << ex;
    sfixn ny = (sfixn)1 << ey;
    sfixn szh = ((sfixn)1 << (ex + ey));
    // upload F, expand it into the zero-padded FFT grid, drop the original
    sfixn *F_d;
    cudaMalloc((void**)&F_d, sizeof(sfixn)*F.sz);
    cudaMemcpy(F_d, F.coeffs, sizeof(sfixn)*F.sz, cudaMemcpyHostToDevice);
    sfixn *F2_d;
    cudaMalloc((void**)&F2_d, sizeof(sfixn)*szh);
    expand_to_fft2_dev(ex, ey, F2_d, F.ns[0], F.ns[1], F_d);
    cudaFree(F_d);
    // same staging for G
    sfixn *G_d;
    cudaMalloc((void**)&G_d, sizeof(sfixn)*G.sz);
    cudaMemcpy(G_d, G.coeffs, sizeof(sfixn)*G.sz, cudaMemcpyHostToDevice);
    sfixn *G2_d;
    cudaMalloc((void**)&G2_d, sizeof(sfixn)*szh);
    expand_to_fft2_dev(ex, ey, G2_d, G.ns[0], G.ns[1], G_d);
    cudaFree(G_d);
    // F2_d <- product of the two padded polynomials (G2_d is clobbered)
    bi_stockham_poly_mul_dev(ny, ey, nx, ex, F2_d, G2_d, p);
    // use G2_d to store the compacted result
    // construct the result directly from G2_d
    extract_from_fft2_dev(lx, ly, G2_d, ex, F2_d);
    if (DEBUG) checkCudaError("bi_stockham_poly_mul_host");
    rdr_poly *H = new rdr_poly(lx, ly, G2_d, false);
    cudaFree(F2_d);
    cudaFree(G2_d);
    return H;
}
////////////////////////////////////////////////////////////////////////////////
// Most general formulas to realize a list of bivariate FFTs
//
// I_q @ DFT_{m, n} = (I_q @ (DFT_m @ I_n)) (I_q @ (I_m @ DFT_n))
// = (I_q @ ( Prod_{i=0}^{u - 1}
// (DFT_2 @ I_{m / 2})
// (D_{2, 2^{u - i - 1}} @ I_{2^i})
// (L_2^{2^{u - i} @ I_{2^i}}) )
// @ I_n)
// (I_q @ I_m @ (Prod_{i = 0}^{k - 1}
// (DFT_2 @ I_{n / 2})
// (D_{2, 2^{k - i - 1}} @ I_{2^i})
// (L_2^{2^{k - i}} @ I_{2^i})))
//
// where m = 2^u and n = 2^k are powers of two, but q usually not.
//
// What are to be implemented,
//
// (1) I_q @ DFT_2 @ I_{m / 2} @ I_n
// (2) I_q @ D_{2, 2^{u - i - 1}} @ I_{2^i} @ I_n
// (3) I_q @ L_2^{2^{u - i}} @ I_{2^i} @ I_n
//
// with m = 2^u;
//
// (4) I_q @ I_m @ DFT_2 @ I_{n / 2} use (3)
// (5) I_q @ I_m @ D_{2, 2^{k - i - 1}} @ I_{2^i} use (2)
// (6) I_q @ I_m @ L_2^{2^{k - i}} @ I_{2^i} use (1)
//
// with n = 2^k.
//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Implementation of I_q @ L_2^m @ I_s
////////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, input array of length q * m * s = q * 2^{u + v}
* @Y, output array of length q * m * s = q * 2^{u + v}
* @u, m = 2^u
* @v, s = 2^v
*
* Compute the general stride transposition
*
* Y = (I_q @ L_2^m @ I_s) X
*
* Multiple thread blocks (>= 1) move a stride of s elements
*
* Requirements: q >= 1, u >= 2 and v >= E_THD
*
* If u = 1, then do nothing and the transiposition is trivial.
*/
__global__ void q_ext_stride_transpose2a_ker(sfixn *Y, const sfixn *X,
    sfixn u, sfixn v)
{
    __shared__ sfixn block[N_THD];
    // flatten the (x, y) grid: grids wider than 2^15 spill into gridDim.y
    sfixn bid = (blockIdx.y << 15) + blockIdx.x;
    // read in data from X
    // the input offset for this block is bid * N_THD
    const sfixn *din = X + (bid << E_THD);
    block[threadIdx.x] = din[threadIdx.x];
    __syncthreads();
    // The number of thread blocks for one subvector is snb = m * s / N_THD
    // squo = bid / snb
    // sbid = bid % snb
    sfixn snb = ((sfixn)1 << ( u + v - E_THD));
    sfixn squo = bid >> (u + v - E_THD);
    sfixn sbid = bid & (snb - 1);
    // delta = s / N_THD;
    // the number of thread block needed for each stride
    sfixn edelta = v - E_THD;
    // iq = quo(sbid, delta) and ir = rem(sbid, delta)
    // iq tells which stride the block is working on
    // ir tells which portion of the stride the block is working on
    sfixn iq = sbid >> edelta;
    sfixn ir = sbid & ((1 << edelta) - 1);
    // iqq = quo(iq, 2) and iqr = rem(iq, 2)
    sfixn iqq = (iq >> 1);
    sfixn iqr = (iq & 1);
    // write data out to Y
    //
    // if iqr = 0 (even), write to
    //     Y + squo * m * s + iqq * s + ir * N_THD
    // if iqr = 1 (odd), write to
    //     Y + squo * m * s + (n / 2) * s + iqq * s + ir * N_THD
    //
    // that is, squo * m * s + iqr * (n / 2) * s + iqq * s + ir * N_THD
    sfixn *dout = Y + (squo << (u + v))
        + (iqr << (u + v - 1))
        + (iqq << v)
        + (ir << E_THD);
    dout[threadIdx.x] = block[threadIdx.x];
    // NOTE(review): this trailing barrier before exit looks unnecessary
    // (each thread writes its own disjoint slot) -- confirm before removing
    __syncthreads();
}
/**
* @X, input array of length q * m * s = q * 2^{u + v}
* @Y, output array of length q * m * s = q * 2^{u + v}
* @u, m = 2^u
* @v, s = 2^v
*
* Compute the general stride transposition
*
* Y = (I_q @ L_2^m @ I_s) X
*
* A thread block moves multiple strides
*
* Requirements: q >= 1, u >= 2, 0 <= v < E_THD, m * s >= N_THD
*
*/
__global__ void q_ext_stride_transpose2b_ker(sfixn *Y, const sfixn *X,
    sfixn u, sfixn v)
{
    __shared__ sfixn block[N_THD];
    // block index in the kernel (grids wider than 2^15 spill into gridDim.y)
    sfixn bid = (blockIdx.y << 15) + blockIdx.x;
    // read in data from X
    // the input offset for this block is bid * N_THD
    const sfixn *din = X + (bid << E_THD);
    block[threadIdx.x] = din[threadIdx.x];
    __syncthreads();
    // The number of thread blocks for one subvector is snb = m * s / N_THD
    // squo = bid / snb
    // sbid = bid % snb
    sfixn snb = ((sfixn)1 << (u + v - E_THD));
    sfixn squo = bid >> (u + v - E_THD);
    sfixn sbid = bid & (snb - 1);
    // offset0 = squo * m * s + sbid * N_THD / 2
    // offset1 = squo * m * s + sbid * N_THD / 2 + (m / 2) * s
    // base = Y + offset0
    sfixn *base = Y + (squo << (u + v)) + (sbid << (E_THD - 1));
    sfixn tid = threadIdx.x;
    // iq = quo(tid, s) and ir = rem(tid, s);
    sfixn iq = (tid >> v);
    sfixn ir = tid & ((1 << v) - 1);
    // the following code is to the in-block shuffle
    // f(i) = (rem(2iq, N_THD/s) + quo(2iq, N_THD/s)) * s + ir
    sfixn fi = (iq << 1) >> (E_THD - v);
    fi += ((iq << 1) & ((1 << (E_THD - v)) - 1));
    fi <<= v;
    fi += ir;
    // replace the following code by the branch-free code
    // if (tid < N_THD/2)
    //     dout[tid] = block[fi];
    // else
    //     dout[tid - N_THD / 2 + (1 << (u + v - 1))] = block[fi];
    // (the multiplier is 0 for the first half-warp of slots, 1 otherwise)
    sfixn *dout = base + (tid >> (E_THD - 1))
        * ((1 << (u + v - 1)) - (1 << (E_THD - 1)));
    dout[tid] = block[fi];
}
/**
 * Y = (I_q @ L_2^m @ I_s) X for device arrays of length q * 2^{u + v},
 * where m = 2^u and s = 2^v.
 *
 * Dispatches on the stride size: for s >= N_THD several thread blocks
 * cooperate on each stride (kernel a); for smaller strides one block
 * shuffles multiple strides through shared memory (kernel b).
 */
void q_ext_stride_transpose2_dev(sfixn *Y, const sfixn *X, sfixn q,
    sfixn u, sfixn v)
{
    if (DEBUG) assert((u >= 2) && (v >= 0) && (u + v >= E_THD) && (q >= 1));
    // one block per N_THD elements: q * m * s / N_THD blocks in total
    sfixn nblocks = (q << (u + v - E_THD));
    dim3 grid(nblocks, 1, 1);
    // fold grids wider than 2^15 into the y dimension
    if (nblocks > (1 << 15)) {
        grid.x = (1 << 15);
        grid.y = (nblocks >> 15);
    }
    if (v >= E_THD)
        q_ext_stride_transpose2a_ker<<<grid, N_THD>>>(Y, X, u, v);
    else
        q_ext_stride_transpose2b_ker<<<grid, N_THD>>>(Y, X, u, v);
    cudaThreadSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// I_q @ D_{2, m} @ I_s
////////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, input/output array of length q * 2 * m * s = q * 2^{u + v + 1}
* @n, FFT size n = 2^e
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @u, m = 2^u
* @v, s = 2^v
*
* Implements I_q @ D_{2, m} @ I_{s} with FFT size n
*
* Multiple thread blocks handle a stride, (s = 2^v is big)
*
* Requirements: (1) v >= E_THD, (2) 2 <= m < n
*/
__global__ void q_ext_stride_twiddle2a_ker(sfixn *X, const sfixn * const W,
    sfixn e, sfixn u, sfixn v, sfixn p, double pinv)
{
    // flatten the (x, y) grid: grids wider than 2^15 spill into gridDim.y
    sfixn bid = (blockIdx.y << 15) + blockIdx.x;
    // The number of thread blocks for one subvector is snb = m * s / N_THD
    // squo = bid / snb
    // sbid = bid % snb
    sfixn snb = ((sfixn)1 << (u + v - E_THD));
    sfixn squo = bid >> (u + v - E_THD);
    sfixn sbid = bid & (snb - 1);
    // all threads in a thread block are using the same power!
    // the base power for the grid is exp = (n / 2) / m
    // The power for the block is w^(exp * e), with e = quo(sbid, s / N_THD)
    sfixn w = W[(sbid >> (v - E_THD)) << (e - 1 - u)];
    // starting position for the block, the first m * s elements unchanged
    sfixn *base = X + (squo << (u + v + 1))
        + ((sfixn)1 << (u + v))
        + (sbid << E_THD);
    // scale this block's slice of the second half by the twiddle factor
    base[threadIdx.x] = mul_mod(w, base[threadIdx.x], p, pinv);
}
/**
* @X, input/output array of length q * 2 * m * s = 2^{u + v + 1}
* @n, FFT size n = 2^e
* @w, n-th primitive root of unity
* @W, powers of primitive root of unity [1, w, w^2, ..., w^{n/2-1}]
* @u, m = 2^u
* @v, s = 2^v
*
* Implements: I_q @ D_{2, m} @ I_{s} with FFT size n
*
* A thread block handles multiple strides, (s = 2^v is small)
*
* Requirements: v < E_THD, 2 <= m < n, q >= 1, m * s >= N_THD
*/
__global__ void q_ext_stride_twiddle2b_ker(sfixn *X, const sfixn * const W,
    sfixn e, sfixn u, sfixn v, sfixn p, double pinv)
{
    // flatten the (x, y) grid: grids wider than 2^15 spill into gridDim.y
    sfixn bid = (blockIdx.y << 15) + blockIdx.x;
    // The number of thread blocks for one subvector is snb = m * s / N_THD
    // squo = bid / snb
    // sbid = bid % snb
    sfixn snb = ((sfixn)1 << (u + v - E_THD));
    sfixn squo = bid >> (u + v - E_THD);
    sfixn sbid = bid & (snb - 1);
    // the first m * s elements will be unchanged.
    sfixn *base = X + (squo << (u + v + 1))
        + ((sfixn)1 << (u + v))
        + (sbid << E_THD);
    // threads in a thread block use different powers.
    // the base power for the grid is egrid = (n / 2) / m.
    // the base power for the block is eblock = sbid * (N_THD / s) * egrid.
    sfixn eblock = sbid << (E_THD - v + e - 1 - u);
    sfixn tid = threadIdx.x;
    // the power for the thread eblock + s * quo(tid, s)
    // (all s threads of the same stride share one power)
    sfixn iq = eblock + ((tid >> v) << v);
    base[tid] = mul_mod(W[iq], base[tid], p, pinv);
}
/**
 * In-place twiddle pass: X <- (I_q @ D_{2, m} @ I_s) X with FFT size
 * n = 2^e, where m = 2^u and s = 2^v; X has length q * 2^{u + v + 1}.
 *
 * @W holds [1, w, w^2, ..., w^{n/2-1}] on the device.
 *
 * Dispatches on the stride size: for s >= N_THD each stride spans several
 * blocks (kernel a, one power per block); otherwise one block covers
 * several strides (kernel b, one power per stride within the block).
 *
 * Requirements: 2 <= m < n, s >= 1, 2 * m * s >= n and m * s >= N_THD
 */
void q_ext_stride_twiddle2_dev(sfixn *X, const sfixn * const W, sfixn e,
    sfixn q, sfixn u, sfixn v, sfixn p)
{
    if (DEBUG) assert((v >= 0) && (u > 0) && (u < e));
    if (DEBUG) assert((u + v >= E_THD) && (1 + u + v >= e));
    // q * m * s / N_THD thread blocks, folded into a 2-d grid if too wide
    sfixn nblocks = (q << (u + v - E_THD));
    dim3 grid(nblocks, 1, 1);
    if (nblocks > (1 << 15)) {
        grid.x = (1 << 15);
        grid.y = (nblocks >> 15);
    }
    // precomputed reciprocal for the fast modular multiplication
    double pinv = 1 / (double)p;
    if (v >= E_THD)
        q_ext_stride_twiddle2a_ker<<<grid, N_THD>>>(X, W, e, u, v, p, pinv);
    else
        q_ext_stride_twiddle2b_ker<<<grid, N_THD>>>(X, W, e, u, v, p, pinv);
    cudaThreadSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// I_q @ DFT_2 @ I_s
////////////////////////////////////////////////////////////////////////////////
#if DEBUG > 0
#define E_THD (2)
#else
#define E_THD (7)
#endif
#define N_THD (1 << E_THD)
/**
* @X, device array of length q * 2 * s = q * 2^{v + 1)
* @Y, device array of length q * 2 * s = q * 2^(v + 1) (output)
* @v, s = 2^v
* @p, prime number
*
* Compute: Y = (I_q @ DFT2 @ I_s)X
*
* Requires: s >= N_THD
*
*/
__global__
void q_ext_butterfly_ker(sfixn *Y, const sfixn * const X, sfixn v, sfixn p)
{
    // flatten the (x, y) grid: grids wider than 2^15 spill into gridDim.y
    sfixn bid = (blockIdx.y << 15) + blockIdx.x;
    // The number of thread blocks for one subvector is
    // snb = s / N_THD
    // squo = bid / snb
    // sbid = bid % snb
    sfixn s = ((sfixn)1 << v);
    sfixn snb = (s >> E_THD);
    sfixn squo = bid >> (v - E_THD);
    sfixn sbid = bid & (snb - 1);
    // each subvector holds 2 * s elements; pair elements s apart
    sfixn *B = Y + (squo << (1 + v)) + (sbid << E_THD);
    const sfixn *A = X + (squo << (1 + v)) + (sbid << E_THD);
    // radix-2 butterfly: (a, b) -> (a + b, a - b) mod p
    B[threadIdx.x] = add_mod(A[threadIdx.x], A[threadIdx.x + s], p);
    B[threadIdx.x + s] = sub_mod(A[threadIdx.x], A[threadIdx.x + s], p);
}
/**
 * Y = (I_q @ DFT2 @ I_s) X for device arrays of length q * 2^{v + 1},
 * where s = 2^v. Each of the q subvectors gets a radix-2 butterfly with
 * stride s applied to it.
 *
 * Requires: s >= N_THD
 */
void
q_ext_butterfly_dev(sfixn *Y, const sfixn * const X, sfixn q, sfixn v, sfixn p)
{
    if (DEBUG) assert(v >= E_THD);
    // each block covers N_THD butterfly pairs: q * s / N_THD blocks total
    sfixn nblocks = (q << (v - E_THD));
    dim3 grid(nblocks, 1, 1);
    // fold grids wider than 2^15 into the y dimension
    if (nblocks > (1 << 15)) {
        grid.x = (1 << 15);
        grid.y = (nblocks >> 15);
    }
    q_ext_butterfly_ker<<<grid, N_THD>>>(Y, X, v, p);
    cudaThreadSynchronize();
}
#undef E_THD
#undef N_THD
////////////////////////////////////////////////////////////////////////////////
//
// List of 2d FFTs, main program
//
// I_q @ DFT_{m, n} = (I_q @ (DFT_m @ I_n)) (I_q @ (I_m @ DFT_n))
//
////////////////////////////////////////////////////////////////////////////////
/**
* @X, device array of length q * m * n
* @q, positive integer, the number of 2d FFTs
* @em, m = 2^em
* @en, n = 2^en
* @wm, m-th primitive root of unity
* @wn, n-th primitive root of unity
* @Wm, [1, wm, ..., wm^{m/2-1}]
* @Wn, [1, wn, ..., wn^{n/2-1}]
* @p, prime number
*
* Compute X = I_q @ DFT_{m, n}(X, wm, wn)
*
* Requirements: q >= 1, m >= , n >= 2 * N_THD
*
*/
void list_bivariate_stockham_dev(sfixn *X, sfixn q, sfixn em,
    const sfixn *Wm, sfixn en, const sfixn *Wn, sfixn p)
{
    // work space Y;
    sfixn *Y;
    cudaMalloc((void**)&Y, (q << (em + en)) * sizeof(sfixn) );
    // Step 1:
    //
    // I_q @ I_m @ DFT_n = I_{qm} @ prod_{i = 0}^{en - 1}
    //                      (DFT_2 @ I_{n/2})                 (1.3)
    //                      (D_{2, 2^{en - i - 1}} @ I_{2^i}) (1.2)
    //                      (L_2^{2^{en - i}} @ I_{2^i})      (1.1)
    // i = en - 1, both (1.1) and (1.2) are trivial
    // (1.3) becomes I_{qm} @ DFT_2 @ I_{n/2}
    q_ext_butterfly_dev(Y, X, (q << em), (en - 1), p);
    for (sfixn i = en - 2; i >= 0; --i) {
        // (1.1)
        q_ext_stride_transpose2_dev(X, Y, (q << em), (en - i), i);
        // (1.2)
        q_ext_stride_twiddle2_dev(X, Wn, en, (q << em), (en - i - 1), i, p);
        // (1.3)
        q_ext_butterfly_dev(Y, X, (q << em), (en - 1), p);
    }
    // NOTE(review): this copy looks redundant -- step 2's first butterfly
    // reads Y and fully rewrites X; confirm before removing.
    cudaMemcpy(X, Y, sizeof(sfixn)* (q << (em + en)), cudaMemcpyDeviceToDevice);
    // Step 2:
    // I_q @ (DFT_m @ I_n) = I_q @ prod_{i = 0}^{em - 1}
    //                      (DFT_2 @ I_{m/2} @ I_n)                 (2.3)
    //                      (D_{2, 2^{em - i - 1}} @ I_{2^i} @ I_n) (2.2)
    //                      (L_2^{2^{em - i}} @ I_{2^i} @ I_n)      (2.1)
    // now effective data are in Y.
    // i = em - 1, both (2.1) and (2.2) are trivial
    // (2.3) becomes I_{q} @ DFT_2 @ I_{m/2} @ I_{n}
    q_ext_butterfly_dev(X, Y, q, em - 1 + en, p);
    for (sfixn i = em - 2; i >= 0; --i) {
        // (2.1)
        q_ext_stride_transpose2_dev(Y, X, q, (em - i), (i + en));
        // (2.2)
        q_ext_stride_twiddle2_dev(Y, Wm, em, q, (em - i - 1), (i + en), p);
        // (2.3)
        q_ext_butterfly_dev(X, Y, q, (em - 1 + en), p);
    }
    // every step-2 cycle ends in X, so the result is in the caller's buffer
    cudaFree(Y);
    if (DEBUG) checkCudaError("After list_bivariate_stockham_dev");
}
/* Convenience overload: build the half-size power tables for both
 * primitive roots, then defer to the precomputed-root version. */
void list_bivariate_stockham_dev(sfixn *X, sfixn q, sfixn em, sfixn wm,
    sfixn en, sfixn wn, sfixn p)
{
    sfixn *roots_m, *roots_n;
    cudaMalloc((void**)&roots_m, sizeof(sfixn) << (em - 1));
    cudaMalloc((void**)&roots_n, sizeof(sfixn) << (en - 1));
    get_powers_binary(em - 1, roots_m, wm, p);
    get_powers_binary(en - 1, roots_n, wn, p);
    list_bivariate_stockham_dev(X, q, em, roots_m, en, roots_n, p);
    cudaFree(roots_n);
    cudaFree(roots_m);
    if (DEBUG) checkCudaError("After list_bivariate_stockham_dev");
}
////////////////////////////////////////////////////////////////////////////////
// Some testing functions
////////////////////////////////////////////////////////////////////////////////
// Smoke test: run a list of m size-n FFTs over Z/257Z on identical ramp
// rows and print the input and output for visual inspection.
void test_list_stockham()
{
    //sfixn p = 469762049;
    sfixn p = 257;
    sfixn m = 10;                   // number of vectors in the list
    sfixn k = 4;                    // FFT size n = 2^k
    sfixn n = ((sfixn)1 << k);
    sfixn *X = new sfixn[n * m];
    sfixn w = primitive_root(k, p);
    sfixn invw = inv_mod(w, p);     // unused here; candidate for removal
    // every row gets the same ramp 0, 1, ..., n - 1
    for (sfixn u = 0; u < m; ++u)
        for (sfixn v = 0; v < n; ++v)
            X[u * n + v] = v;
    printf("Input: \n");
    printf("w = %d\n", w);
    for (sfixn i = 0; i < m; ++i) print_vector(n, X + i * n);
    list_stockham_host(X, m, k, w, p);
    printf("Output: \n");
    for (sfixn i = 0; i < m; ++i) print_vector(n, X + i * n);
    delete [] X;
    checkCudaError("Error found");
}
////////////////////////////////////////////////////////////////////////////////
// Smoke test for the batched 2-d FFT: q identical 2^em x 2^en matrices
// whose rows are the ramp 0..n-1, printed before and after the transform.
void test_list_bivariate_stockham_dev() {
    sfixn q = 2;
    sfixn em = 3;
    sfixn en = 3;
    sfixn m = (1 << em);
    sfixn n = (1 << en);
    sfixn total = q * m * n;
    sfixn *X = new sfixn[q * m * n]();
    for (sfixn i = 0; i < total; ++i) X[i] = i % n;
    for (sfixn i = 0; i < q; ++i) {
        printf("matrix %d:\n", i);
        print_matrix(m, n, X + i * m * n);
    }
    sfixn p = 257;
    sfixn wn = primitive_root(en, p);
    sfixn wm = primitive_root(em, p);
    printf("n = %d, wn = %d\n", n, wn);
    printf("m = %d, wm = %d\n", m, wm);
    // round-trip through the device
    sfixn *X_d;
    cudaMalloc((void**)&X_d, sizeof(sfixn) * total);
    cudaMemcpy(X_d, X, sizeof(sfixn) * total, cudaMemcpyHostToDevice);
    list_bivariate_stockham_dev(X_d, q, em, wm, en, wn, p);
    cudaMemcpy(X, X_d, sizeof(sfixn) * total, cudaMemcpyDeviceToHost);
    for (sfixn i = 0; i < q; ++i) {
        printf("matrix %d:\n", i);
        print_matrix(m, n, X + i * m * n);
    }
    delete [] X;
    cudaFree(X_d);
    checkCudaError("Error found");
}
|
scan.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* -----------------------------------------------------------------------------
*
* Module : Scan
* Copyright : (c) 2009 Trevor L. McDonell
* License : BSD
*
* ---------------------------------------------------------------------------*/
#include "scan.h"
#include "algorithms.h"
#include "utils.h"
#include "operator.h"
#include "cudpp/cudpp_globals.h"
#include "cudpp/scan_kernel.cu"
#include "cudpp/vector_kernel.cu"
// Per-scan bookkeeping: one device buffer of block totals per recursion level.
template <typename T>
struct scan_plan
{
    T **block_sums;     // block_sums[l] holds the per-block sums at level l
    size_t num_levels;  // number of levels that required a buffer
};
// Number of thread blocks needed to scan N elements; each block covers
// SCAN_ELTS_PER_THREAD * CTA_SIZE elements, and at least one block is used.
static inline unsigned int
calc_num_blocks(unsigned int N)
{
    return max(1u, (unsigned int)ceil((double)N / (SCAN_ELTS_PER_THREAD * CTA_SIZE)));
}
/*
* This is the CPU-side workhorse of the scan operation, invoking the kernel on
* each of the reduction blocks.
*/
template <class op, typename T, bool backward, bool exclusive>
static void
scan_recursive
(
    const T *in,
    T *out,
    scan_plan<T> *plan,
    const unsigned int N,
    const unsigned int level
)
{
    // each block scans SCAN_ELTS_PER_THREAD * CTA_SIZE elements
    size_t num_blocks = calc_num_blocks(N);
    bool is_full = N == num_blocks * SCAN_ELTS_PER_THREAD * CTA_SIZE;
    dim3 grid(num_blocks, 1, 1);
    dim3 block(CTA_SIZE, 1, 1);
    size_t smem = sizeof(T) * CTA_SIZE * 2;
    // kernel-specialization flags: multiple blocks / exact-multiple size
#define MULTIBLOCK 0x01
#define FULLBLOCK 0x04
    int traits = 0;
    if (num_blocks > 1) traits |= MULTIBLOCK;
    if (is_full) traits |= FULLBLOCK;
    /*
     * Set up execution parameters, and execute the scan
     */
    switch (traits)
    {
    case 0:
        hipLaunchKernelGGL(( scan4
            < T, ScanTraits<T, op, backward, exclusive, false, false, false> >)
            , dim3(grid), dim3(block), smem, 0, out, in, NULL, N, 1, 1);
        break;
    case MULTIBLOCK:
        hipLaunchKernelGGL(( scan4
            < T, ScanTraits<T, op, backward, exclusive, false, true, false> >)
            , dim3(grid), dim3(block), smem, 0, out, in, plan->block_sums[level], N, 1, 1);
        break;
    case FULLBLOCK:
        hipLaunchKernelGGL(( scan4
            < T, ScanTraits<T, op, backward, exclusive, false, false, true> >)
            , dim3(grid), dim3(block), smem, 0, out, in, NULL, N, 1, 1);
        break;
    case MULTIBLOCK | FULLBLOCK:
        hipLaunchKernelGGL(( scan4
            < T, ScanTraits<T, op, backward, exclusive, false, true, true> >)
            , dim3(grid), dim3(block), smem, 0, out, in, plan->block_sums[level], N, 1, 1);
        break;
    default:
        assert(!"Non-exhaustive patterns in match");
    }
    /*
     * After scanning the sub-blocks, we now need to combine those results by
     * taking the last value from each sub-block, and adding that to each of the
     * successive blocks (i.e. scan across the sub-computations)
     */
    if (num_blocks > 1)
    {
        T *sums = plan->block_sums[level];
        // recursively scan the per-block totals (always exclusively), then
        // add sums[b] uniformly across block b of the output
        scan_recursive
            <op, T, backward, true>
            (sums, sums, plan, num_blocks, level+1);
        hipLaunchKernelGGL(( vectorAddUniform4
            <T, op, SCAN_ELTS_PER_THREAD>)
            , dim3(grid),dim3(block), 0, 0,
            out, sums, N, 4, 4, 0, 0);
    }
#undef MULTIBLOCK
#undef FULLBLOCK
}
/*
* Allocate temporary memory used by the scan.
*/
template <typename T>
static void
scan_init(const unsigned int N, scan_plan<T> *plan)
{
    size_t level = 0;
    size_t elements = N;
    size_t num_blocks;
    /*
     * Determine how many intermediate block-level summations will be required
     */
    // a level is only counted when it splits into more than one block;
    // the final single-block level needs no buffer
    for (elements = N; elements > 1; elements = num_blocks)
    {
        num_blocks = calc_num_blocks(elements);
        if (num_blocks > 1)
            ++level;
    }
    plan->block_sums = (T**) malloc(level * sizeof(T*));
    plan->num_levels = level;
    /*
     * Now, allocate the necessary storage at each level
     */
    // mirrors the counting loop above: exactly indices 0..num_levels-1
    // receive a device buffer of one element per block
    for (elements = N, level = 0; elements > 1; elements = num_blocks, level++)
    {
        num_blocks = calc_num_blocks(elements);
        if (num_blocks > 1)
            hipMalloc((void**) &plan->block_sums[level], num_blocks * sizeof(T));
    }
}
/*
 * Release every per-level block-sum buffer, then the pointer table itself.
 */
template <typename T>
static void
scan_finalise(scan_plan<T> *p)
{
    size_t l = p->num_levels;
    while (l-- > 0)
        hipFree(p->block_sums[l]);
    free(p->block_sums);
}
/*
* Apply a binary operator to an array similar to `fold', but return a
* successive list of values reduced from the left. The reduction will take
* place in parallel, so the operator must be associative.
*/
template <class op, typename T, bool backward, bool exclusive>
void
scan
(
    const T *d_in,
    T *d_out,
    const unsigned int length
)
{
    // allocate per-level temporaries, run the recursive scan, clean up
    scan_plan<T> plan;
    scan_init<T>(length, &plan);
    scan_recursive<op, T, backward, exclusive>(d_in, d_out, &plan, length, 0);
    scan_finalise<T>(&plan);
}
// -----------------------------------------------------------------------------
// Instances
// -----------------------------------------------------------------------------
// Left-to-right exclusive prefix sum of N unsigned ints (device arrays).
void prescanl_plusui(const unsigned int *d_in, unsigned int *d_out, const unsigned int N)
{
    scan< Plus<unsigned int>, unsigned int, false, true >(d_in, d_out, N);
}
// Right-to-left (backward) exclusive prefix sum of N unsigned ints.
void prescanr_plusui(const unsigned int *d_in, unsigned int *d_out, const unsigned int N)
{
    scan< Plus<unsigned int>, unsigned int, true, true >(d_in, d_out, N);
}
| scan.cu | /* -----------------------------------------------------------------------------
*
* Module : Scan
* Copyright : (c) 2009 Trevor L. McDonell
* License : BSD
*
* ---------------------------------------------------------------------------*/
#include "scan.h"
#include "algorithms.h"
#include "utils.h"
#include "operator.h"
#include "cudpp/cudpp_globals.h"
#include "cudpp/scan_kernel.cu"
#include "cudpp/vector_kernel.cu"
// Per-scan bookkeeping: one device buffer of block totals per recursion level.
template <typename T>
struct scan_plan
{
    T **block_sums;     // block_sums[l] holds the per-block sums at level l
    size_t num_levels;  // number of levels that required a buffer
};
// Number of thread blocks needed to scan N elements; each block covers
// SCAN_ELTS_PER_THREAD * CTA_SIZE elements, and at least one block is used.
static inline unsigned int
calc_num_blocks(unsigned int N)
{
    return max(1u, (unsigned int)ceil((double)N / (SCAN_ELTS_PER_THREAD * CTA_SIZE)));
}
/*
* This is the CPU-side workhorse of the scan operation, invoking the kernel on
* each of the reduction blocks.
*/
template <class op, typename T, bool backward, bool exclusive>
static void
scan_recursive
(
    const T *in,
    T *out,
    scan_plan<T> *plan,
    const unsigned int N,
    const unsigned int level
)
{
    // each block scans SCAN_ELTS_PER_THREAD * CTA_SIZE elements
    size_t num_blocks = calc_num_blocks(N);
    bool is_full = N == num_blocks * SCAN_ELTS_PER_THREAD * CTA_SIZE;
    dim3 grid(num_blocks, 1, 1);
    dim3 block(CTA_SIZE, 1, 1);
    size_t smem = sizeof(T) * CTA_SIZE * 2;
    // kernel-specialization flags: multiple blocks / exact-multiple size
#define MULTIBLOCK 0x01
#define FULLBLOCK 0x04
    int traits = 0;
    if (num_blocks > 1) traits |= MULTIBLOCK;
    if (is_full) traits |= FULLBLOCK;
    /*
     * Set up execution parameters, and execute the scan
     */
    switch (traits)
    {
    case 0:
        scan4
            < T, ScanTraits<T, op, backward, exclusive, false, false, false> >
            <<<grid, block, smem>>>(out, in, NULL, N, 1, 1);
        break;
    case MULTIBLOCK:
        scan4
            < T, ScanTraits<T, op, backward, exclusive, false, true, false> >
            <<<grid, block, smem>>>(out, in, plan->block_sums[level], N, 1, 1);
        break;
    case FULLBLOCK:
        scan4
            < T, ScanTraits<T, op, backward, exclusive, false, false, true> >
            <<<grid, block, smem>>>(out, in, NULL, N, 1, 1);
        break;
    case MULTIBLOCK | FULLBLOCK:
        scan4
            < T, ScanTraits<T, op, backward, exclusive, false, true, true> >
            <<<grid, block, smem>>>(out, in, plan->block_sums[level], N, 1, 1);
        break;
    default:
        assert(!"Non-exhaustive patterns in match");
    }
    /*
     * After scanning the sub-blocks, we now need to combine those results by
     * taking the last value from each sub-block, and adding that to each of the
     * successive blocks (i.e. scan across the sub-computations)
     */
    if (num_blocks > 1)
    {
        T *sums = plan->block_sums[level];
        // recursively scan the per-block totals (always exclusively), then
        // add sums[b] uniformly across block b of the output
        scan_recursive
            <op, T, backward, true>
            (sums, sums, plan, num_blocks, level+1);
        vectorAddUniform4
            <T, op, SCAN_ELTS_PER_THREAD>
            <<<grid,block>>>
            (out, sums, N, 4, 4, 0, 0);
    }
#undef MULTIBLOCK
#undef FULLBLOCK
}
/*
* Allocate temporary memory used by the scan.
*/
template <typename T>
static void
scan_init(const unsigned int N, scan_plan<T> *plan)
{
    size_t level = 0;
    size_t elements = N;
    size_t num_blocks;
    /*
     * Determine how many intermediate block-level summations will be required
     */
    // a level is only counted when it splits into more than one block;
    // the final single-block level needs no buffer
    for (elements = N; elements > 1; elements = num_blocks)
    {
        num_blocks = calc_num_blocks(elements);
        if (num_blocks > 1)
            ++level;
    }
    plan->block_sums = (T**) malloc(level * sizeof(T*));
    plan->num_levels = level;
    /*
     * Now, allocate the necessary storage at each level
     */
    // mirrors the counting loop above: exactly indices 0..num_levels-1
    // receive a device buffer of one element per block
    for (elements = N, level = 0; elements > 1; elements = num_blocks, level++)
    {
        num_blocks = calc_num_blocks(elements);
        if (num_blocks > 1)
            cudaMalloc((void**) &plan->block_sums[level], num_blocks * sizeof(T));
    }
}
/*
 * Release every per-level block-sum buffer, then the pointer table itself.
 */
template <typename T>
static void
scan_finalise(scan_plan<T> *p)
{
    size_t l = p->num_levels;
    while (l-- > 0)
        cudaFree(p->block_sums[l]);
    free(p->block_sums);
}
/*
* Apply a binary operator to an array similar to `fold', but return a
* successive list of values reduced from the left. The reduction will take
* place in parallel, so the operator must be associative.
*/
template <class op, typename T, bool backward, bool exclusive>
void
scan
(
    const T *d_in,
    T *d_out,
    const unsigned int length
)
{
    // allocate per-level temporaries, run the recursive scan, clean up
    scan_plan<T> plan;
    scan_init<T>(length, &plan);
    scan_recursive<op, T, backward, exclusive>(d_in, d_out, &plan, length, 0);
    scan_finalise<T>(&plan);
}
// -----------------------------------------------------------------------------
// Instances
// -----------------------------------------------------------------------------
// Left-to-right exclusive prefix sum of N unsigned ints (device arrays).
void prescanl_plusui(const unsigned int *d_in, unsigned int *d_out, const unsigned int N)
{
    scan< Plus<unsigned int>, unsigned int, false, true >(d_in, d_out, N);
}
// Right-to-left (backward) exclusive prefix sum of N unsigned ints.
void prescanr_plusui(const unsigned int *d_in, unsigned int *d_out, const unsigned int N)
{
    scan< Plus<unsigned int>, unsigned int, true, true >(d_in, d_out, N);
}
|
161705dd76cb95c24469c35e9856d4b879b631c2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
using StreamCompaction::Common::kernMapToBoolean;
using StreamCompaction::Common::kernScatter;
        // Module-wide GPU timer: the function-local static yields one shared
        // PerformanceTimer instance for all calls.
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }
__global__ void kernUpSweep(int d, int n, int* idata) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
if (index % (1 << (d + 1)) == 0) {
idata[index + (1 << (d + 1)) - 1] += idata[index + (1 << d) - 1];
}
}
__global__ void kernDownSweep(int d, int n, int* idata) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
if (index % (1 << (d + 1)) == 0) {
int t = idata[index + (1 << d) - 1];
idata[index + (1 << d) - 1] = idata[index + (1 << (d + 1)) - 1];
idata[index + (1 << (d + 1)) - 1] += t;
}
}
        // Zeroes the last element of idata (launched <<<1, 1>>>): the root of
        // the down-sweep must start from the identity value.
        __global__ void kernSetZero(int n, int* idata) {
            idata[n - 1] = 0;
        }
/**
 * Performs an exclusive prefix-sum (work-efficient Blelloch scan) on idata,
 * storing the result into odata.
 *
 * @param n      number of elements in idata/odata
 * @param odata  output buffer
 * @param idata  input buffer
 * @param timing whether to record this call on the GPU timer
 *
 * NOTE(review): compact() below passes *device* pointers for idata/odata,
 * so the HostToDevice/DeviceToHost copy kinds here rely on UVA tolerating
 * the mismatch — confirm, or switch those calls to default/D2D copies.
 */
void scan(int n, int *odata, const int *idata, bool timing) {
    // Pad the working size up to the next power of two: the tree sweeps
    // assume a full binary tree.
    int paddedSize = 1 << ilog2ceil(n);
    int* deviceIn;
    hipMalloc((void**)&deviceIn, paddedSize * sizeof(int));
    dim3 fullBlocksPerGrid((paddedSize + BLOCK_SIZE - 1) / BLOCK_SIZE);
    hipMemcpy(deviceIn, idata, sizeof(int) * n, hipMemcpyHostToDevice);
    // hipMalloc does not zero memory: explicitly clear the padding tail
    // [n, paddedSize) so the up-sweep never folds garbage (and cannot
    // overflow) into its partial sums.  The original only asserted this
    // in a comment.
    if (paddedSize > n) {
        hipMemset(deviceIn + n, 0, (paddedSize - n) * sizeof(int));
    }
    if (timing) {
        timer().startGpuTimer();
    }
    // Up-sweep (reduce) phase.
    for (int d = 0; d <= ilog2ceil(paddedSize) - 1; d++) {
        kernUpSweep << < fullBlocksPerGrid, BLOCK_SIZE >> > (d, paddedSize, deviceIn);
        checkCUDAError("kernUpSweep failed");
    }
    // Zero the root, then down-sweep to produce the exclusive scan.
    kernSetZero << < 1, 1 >> > (paddedSize, deviceIn);
    for (int d = ilog2ceil(paddedSize) - 1; d >= 0; d--) {
        kernDownSweep << < fullBlocksPerGrid, BLOCK_SIZE >> > (d, paddedSize, deviceIn);
        checkCUDAError("kernDownSweep failed");
    }
    if (timing) {
        timer().endGpuTimer();
    }
    hipMemcpy(odata, deviceIn, sizeof(int) * n, hipMemcpyDeviceToHost);
    hipFree(deviceIn);
}
/**
 * Performs stream compaction on idata, storing the result into odata.
 * All zeroes are discarded.
 *
 * @param n The number of elements in idata.
 * @param odata The array into which to store elements.
 * @param idata The array of elements to compact.
 * @returns The number of elements remaining after compaction.
 */
int compact(int n, int *odata, const int *idata) {
    dim3 fullBlocksPerGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
    // count[0] = last boolean, count[1] = last exclusive-scan value.
    // (The original heap-allocated this with new[] and leaked it.)
    int count[2];
    int* deviceIn;
    hipMalloc((void**)&deviceIn, n * sizeof(int));
    int* deviceBool;
    hipMalloc((void**)&deviceBool, n * sizeof(int));
    int* deviceBoolPSum;
    hipMalloc((void**)&deviceBoolPSum, n * sizeof(int));
    int* deviceOut;
    hipMalloc((void**)&deviceOut, n * sizeof(int));
    hipMemcpy(deviceIn, idata, sizeof(int) * n, hipMemcpyHostToDevice);
    timer().startGpuTimer();
    kernMapToBoolean << <fullBlocksPerGrid, BLOCK_SIZE >> > (n, deviceBool, deviceIn);
    checkCUDAError("kernMapToBoolean failed!");
    // NOTE(review): scan() copies with host<->device kinds but receives
    // device pointers here — works under UVA; confirm for non-UVA targets.
    scan(n, deviceBoolPSum, deviceBool, false);
    kernScatter << <fullBlocksPerGrid, BLOCK_SIZE >> > (n, deviceOut, deviceIn, deviceBool, deviceBoolPSum);
    checkCUDAError("kernScatter failed!");
    timer().endGpuTimer();
    hipMemcpy(count, deviceBool + n - 1, sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(count + 1, deviceBoolPSum + n - 1, sizeof(int), hipMemcpyDeviceToHost);
    // size equals last of boolean array plus last of its exclusive prefix sum
    int compactedSize = count[0] + count[1];
    hipMemcpy(odata, deviceOut, sizeof(int) * compactedSize, hipMemcpyDeviceToHost);
    checkCUDAError("hipMemcpy back failed!");
    hipFree(deviceIn);
    hipFree(deviceBool);
    hipFree(deviceBoolPSum);
    hipFree(deviceOut);
    return compactedSize;
}
}
}
| 161705dd76cb95c24469c35e9856d4b879b631c2.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
using StreamCompaction::Common::kernMapToBoolean;
using StreamCompaction::Common::kernScatter;
// Returns the singleton performance timer shared by this translation unit.
PerformanceTimer& timer()
{
    static PerformanceTimer timer;
    return timer;
}
// Up-sweep (reduce) phase of the work-efficient scan: at depth d, every
// index that is a multiple of 2^(d+1) adds the sum of its left
// half-interval into the right end of its interval, in place.
__global__ void kernUpSweep(int d, int n, int* idata) {
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    // Guard the tail: the grid may be larger than the (padded) array.
    if (index >= n) {
        return;
    }
    if (index % (1 << (d + 1)) == 0) {
        idata[index + (1 << (d + 1)) - 1] += idata[index + (1 << d) - 1];
    }
}
// Down-sweep phase: traverses the implicit tree back down, swapping the
// left child's partial sum with the parent and accumulating, so idata ends
// up holding exclusive prefix sums (the root must be zeroed beforehand).
__global__ void kernDownSweep(int d, int n, int* idata) {
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    // Guard the tail: the grid may be larger than the (padded) array.
    if (index >= n) {
        return;
    }
    if (index % (1 << (d + 1)) == 0) {
        int t = idata[index + (1 << d) - 1];
        idata[index + (1 << d) - 1] = idata[index + (1 << (d + 1)) - 1];
        idata[index + (1 << (d + 1)) - 1] += t;
    }
}
// Zeroes the last element (the tree root) before the down-sweep, turning
// the reduction result into an exclusive scan.  Launched with one thread.
__global__ void kernSetZero(int n, int* idata) {
    idata[n - 1] = 0;
}
/**
 * Performs an exclusive prefix-sum (work-efficient Blelloch scan) on idata,
 * storing the result into odata.
 *
 * @param n      number of elements in idata/odata
 * @param odata  output buffer
 * @param idata  input buffer
 * @param timing whether to record this call on the GPU timer
 *
 * NOTE(review): compact() below passes *device* pointers for idata/odata,
 * so the HostToDevice/DeviceToHost copy kinds here rely on UVA tolerating
 * the mismatch — confirm, or switch those calls to default/D2D copies.
 */
void scan(int n, int *odata, const int *idata, bool timing) {
    // Pad the working size up to the next power of two: the tree sweeps
    // assume a full binary tree.
    int paddedSize = 1 << ilog2ceil(n);
    int* deviceIn;
    cudaMalloc((void**)&deviceIn, paddedSize * sizeof(int));
    dim3 fullBlocksPerGrid((paddedSize + BLOCK_SIZE - 1) / BLOCK_SIZE);
    cudaMemcpy(deviceIn, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
    // cudaMalloc does not zero memory: explicitly clear the padding tail
    // [n, paddedSize) so the up-sweep never folds garbage (and cannot
    // overflow) into its partial sums.  The original only asserted this
    // in a comment.
    if (paddedSize > n) {
        cudaMemset(deviceIn + n, 0, (paddedSize - n) * sizeof(int));
    }
    if (timing) {
        timer().startGpuTimer();
    }
    // Up-sweep (reduce) phase.
    for (int d = 0; d <= ilog2ceil(paddedSize) - 1; d++) {
        kernUpSweep << < fullBlocksPerGrid, BLOCK_SIZE >> > (d, paddedSize, deviceIn);
        checkCUDAError("kernUpSweep failed");
    }
    // Zero the root, then down-sweep to produce the exclusive scan.
    kernSetZero << < 1, 1 >> > (paddedSize, deviceIn);
    for (int d = ilog2ceil(paddedSize) - 1; d >= 0; d--) {
        kernDownSweep << < fullBlocksPerGrid, BLOCK_SIZE >> > (d, paddedSize, deviceIn);
        checkCUDAError("kernDownSweep failed");
    }
    if (timing) {
        timer().endGpuTimer();
    }
    cudaMemcpy(odata, deviceIn, sizeof(int) * n, cudaMemcpyDeviceToHost);
    cudaFree(deviceIn);
}
/**
 * Performs stream compaction on idata, storing the result into odata.
 * All zeroes are discarded.
 *
 * @param n The number of elements in idata.
 * @param odata The array into which to store elements.
 * @param idata The array of elements to compact.
 * @returns The number of elements remaining after compaction.
 */
int compact(int n, int *odata, const int *idata) {
    dim3 fullBlocksPerGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
    // count[0] = last boolean, count[1] = last exclusive-scan value.
    // (The original heap-allocated this with new[] and leaked it.)
    int count[2];
    int* deviceIn;
    cudaMalloc((void**)&deviceIn, n * sizeof(int));
    int* deviceBool;
    cudaMalloc((void**)&deviceBool, n * sizeof(int));
    int* deviceBoolPSum;
    cudaMalloc((void**)&deviceBoolPSum, n * sizeof(int));
    int* deviceOut;
    cudaMalloc((void**)&deviceOut, n * sizeof(int));
    cudaMemcpy(deviceIn, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
    timer().startGpuTimer();
    kernMapToBoolean << <fullBlocksPerGrid, BLOCK_SIZE >> > (n, deviceBool, deviceIn);
    checkCUDAError("kernMapToBoolean failed!");
    // NOTE(review): scan() copies with host<->device kinds but receives
    // device pointers here — works under UVA; confirm for non-UVA targets.
    scan(n, deviceBoolPSum, deviceBool, false);
    kernScatter << <fullBlocksPerGrid, BLOCK_SIZE >> > (n, deviceOut, deviceIn, deviceBool, deviceBoolPSum);
    checkCUDAError("kernScatter failed!");
    timer().endGpuTimer();
    cudaMemcpy(count, deviceBool + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(count + 1, deviceBoolPSum + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
    // size equals last of boolean array plus last of its exclusive prefix sum
    int compactedSize = count[0] + count[1];
    cudaMemcpy(odata, deviceOut, sizeof(int) * compactedSize, cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy back failed!");
    cudaFree(deviceIn);
    cudaFree(deviceBool);
    cudaFree(deviceBoolPSum);
    cudaFree(deviceOut);
    return compactedSize;
}
}
}
|
7aa9bf0290b2f17778faa8ea4487f7b127c7bf4c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* SHA-1 benchmark program. Calculates execution time of SHA-1 on CPU and GPU.
* Also includes function sha1_gpu_global() which prepares SHA-1 to be executed
* on GPU.
*
* 2008, Tadas Vilkeliskis <vilkeliskis.t@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include <cstdlib>
#define MAX_THREADS_PER_BLOCK 128
typedef struct {
unsigned long state[5];
} sha1_gpu_context;
extern __global__ void sha1_kernel_global (unsigned char *data, sha1_gpu_context *ctx, int total_threads, unsigned long *extended);
/*
 * Prepares and runs the SHA-1 kernel on the GPU.
 * input  - message to hash
 * size   - message size in bytes
 * output - caller buffer (at least 20 bytes) receiving the 160-bit digest
 * proc   - maximum threads per block
 */
void sha1_gpu_global (unsigned char *input, unsigned long size, unsigned char *output, int proc)
{
    int total_threads;            /* Total number of threads in the grid */
    int blocks_per_grid;          /* Number of blocks in the grid */
    int threads_per_block;        /* Number of threads in a block */
    int pad, size_be;             /* Number of zeros to pad, message size in big-enadian. */
    int total_datablocks;         /* Total number of 64-byte blocks message is split into */
    int i, k;                     /* Temporary variables */
    unsigned char *d_message;     /* Input message on the device */
    unsigned long *d_extended;    /* Extended blocks on the device */
    sha1_gpu_context ctx, *d_ctx; /* Intermediate hash states */
    /* Initialization vector for SHA-1 */
    ctx.state[0] = 0x67452301;
    ctx.state[1] = 0xEFCDAB89;
    ctx.state[2] = 0x98BADCFE;
    ctx.state[3] = 0x10325476;
    ctx.state[4] = 0xC3D2E1F0;
    pad = padding_256 (size);
    threads_per_block = proc;
    /* One block only: the kernel relies on intra-block synchronization. */
    blocks_per_grid = 1;
    /* How many 64-byte blocks in the padded message */
    total_datablocks = (size + pad + 8) / 64;
    if (total_datablocks > threads_per_block)
        total_threads = threads_per_block;
    else
        total_threads = total_datablocks;
    size_be = LETOBE32 (size * 8);
    /* Allocate enough memory on the device */
    hipMalloc ((void**)&d_extended, proc * 80 * sizeof(unsigned long));
    hipMalloc ((void**)&d_message, size + pad + 8);
    hipMalloc ((void**)&d_ctx, sizeof (sha1_gpu_context));
    /*
     * Copy the data from host to device and perform padding:
     * 0x80 marker, zero fill, then the message bit-length (big-endian).
     */
    hipMemcpy (d_ctx, &ctx, sizeof (sha1_gpu_context), hipMemcpyHostToDevice);
    hipMemcpy (d_message, input, size, hipMemcpyHostToDevice);
    hipMemset (d_message + size, 0x80, 1);
    hipMemset (d_message + size + 1, 0, pad + 7);
    hipMemcpy (d_message + size + pad + 4, &size_be, 4, hipMemcpyHostToDevice);
    /*
     * Run the algorithm
     */
    i = 0;
    k = total_datablocks / total_threads;
    printf("%d %d\n", total_datablocks, total_threads);
    if (k - 1 > 0) {
        /*
         * Kernel is executed multiple times and only one block in the grid is
         * used, since thread synchronization is allowed only within a block.
         */
        for (i = 0; i < k; i++) {
            hipLaunchKernelGGL(( sha1_kernel_global) , dim3(blocks_per_grid), dim3(proc), 0, 0, d_message + threads_per_block * i * 64, d_ctx, threads_per_block, d_extended);
            /*
             * No host-side synchronization here: threads are synchronized
             * inside the kernel.
             */
        }
    }
    /* NOTE(review): if total_datablocks is an exact multiple of total_threads
     * this final launch runs with threads_per_block == 0 — confirm the kernel
     * tolerates that. */
    threads_per_block = total_datablocks - (i * total_threads);
    hipLaunchKernelGGL(( sha1_kernel_global) , dim3(blocks_per_grid), dim3(proc), 0, 0, d_message + total_threads * i * 64, d_ctx, threads_per_block, d_extended);
    hipMemcpy (&ctx, d_ctx, sizeof(sha1_gpu_context), hipMemcpyDeviceToHost);
    /* ctx.state is unsigned long: the original "%d" specifiers were undefined
     * behaviour on LP64 platforms; use %lu. */
    printf("%lu %lu %lu %lu %lu\n", ctx.state[0],
            ctx.state[1],
            ctx.state[2],
            ctx.state[3],
            ctx.state[4]
            );
    /* Put the hash value in the users' buffer */
    PUT_UINT32_BE( ctx.state[0], output, 0 );
    PUT_UINT32_BE( ctx.state[1], output, 4 );
    PUT_UINT32_BE( ctx.state[2], output, 8 );
    PUT_UINT32_BE( ctx.state[3], output, 12 );
    PUT_UINT32_BE( ctx.state[4], output, 16 );
    hipFree (d_message);
    hipFree (d_ctx);
    hipFree (d_extended);
}
// int main(int argc, char *argv[])
// {
// unsigned char hash[20];
// unsigned char *data = NULL;
// int i;
// int max_threads_per_block = MAX_THREADS_PER_BLOCK;
// unsigned int nonce = 0;
// bool done = false;
// //data = (unsigned char *) malloc (100);
// if(argc <= 2)
// {
// printf("Give 2 arguments");
// exit(1);
// }
// int maxIterations = atoi(argv[1]);
// int difficulty = atoi(argv[2]);
// for(int j=0;j<maxIterations;j++)
// {
// memset (hash, 0, 20);
// char t[20];
// itoa(j, t, 10);
// char data[100] = "cgCevIRCeDhqIJExqnjwidTkKHeGOPXYgviPiwZhOImJbvKZUGEkjkrkHQPoSlFl";
// for(int k=0;k<strlen(t);k++)
// {
// data[k + 64] = t[k];
// }
// sha1_gpu_global ((unsigned char *)data, strlen(data), hash, max_threads_per_block);
// char hashHex[20];
// sprintf(hashHex, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
// (unsigned int)hash[0],
// (unsigned int)hash[1],
// (unsigned int)hash[2],
// (unsigned int)hash[3],
// (unsigned int)hash[4],
// (unsigned int)hash[5],
// (unsigned int)hash[6],
// (unsigned int)hash[7],
// (unsigned int)hash[8],
// (unsigned int)hash[9],
// (unsigned int)hash[10],
// (unsigned int)hash[11],
// (unsigned int)hash[12],
// (unsigned int)hash[13],
// (unsigned int)hash[14],
// (unsigned int)hash[15],
// (unsigned int)hash[16],
// (unsigned int)hash[17],
// (unsigned int)hash[18],
// (unsigned int)hash[19]
// );
// done = true;
// for(i=0;i<difficulty;i++)
// done = done && hashHex[i] == '0';
// if(done){
// printf("Solution: %d ", j);
// printf("%s", hashHex);
// break;
// }
// }
// // for (i = 1000; i < 100000000; i = i * 10) {
// // data = (unsigned char *) malloc (i);
// // if (data == NULL) {
// // printf ("ERROR: Insufficient memory on host\n");
// // return -1;
// // }
// // sha1_cpu (data, i, hash);
// // memset (hash, 0, 20);
// // sha1_gpu_global (data, i, hash, max_threads_per_block);
// // free (data);
// // }
// return 0;
// }
/* Hashes a fixed test string on the GPU and prints the digest in hex. */
int main(void)
{
    char data[100] = "hello2";
    unsigned char hash[20];
    sha1_gpu_global ((unsigned char *)data, strlen(data), hash, 1);
    /*
     * A SHA-1 digest is 20 bytes = 40 hex characters plus the NUL.  The
     * original buffer was char[20] and the format printed only 18 of the
     * 20 bytes, so sprintf overflowed the buffer and truncated the digest.
     * Use a correctly sized buffer, snprintf, and all 20 bytes.
     */
    char hashHex[41];
    snprintf(hashHex, sizeof(hashHex),
            "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
            (unsigned int)hash[0],
            (unsigned int)hash[1],
            (unsigned int)hash[2],
            (unsigned int)hash[3],
            (unsigned int)hash[4],
            (unsigned int)hash[5],
            (unsigned int)hash[6],
            (unsigned int)hash[7],
            (unsigned int)hash[8],
            (unsigned int)hash[9],
            (unsigned int)hash[10],
            (unsigned int)hash[11],
            (unsigned int)hash[12],
            (unsigned int)hash[13],
            (unsigned int)hash[14],
            (unsigned int)hash[15],
            (unsigned int)hash[16],
            (unsigned int)hash[17],
            (unsigned int)hash[18],
            (unsigned int)hash[19]
            );
    printf("%s", hashHex);
    return 0;
} | 7aa9bf0290b2f17778faa8ea4487f7b127c7bf4c.cu | /*
* SHA-1 benchmark program. Calculates execution time of SHA-1 on CPU and GPU.
* Also includes function sha1_gpu_global() which prepares SHA-1 to be executed
* on GPU.
*
* 2008, Tadas Vilkeliskis <vilkeliskis.t@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include "common.h"
#include <cstdlib>
#define MAX_THREADS_PER_BLOCK 128
typedef struct {
unsigned long state[5];
} sha1_gpu_context;
extern __global__ void sha1_kernel_global (unsigned char *data, sha1_gpu_context *ctx, int total_threads, unsigned long *extended);
/*
 * Prepares and runs the SHA-1 kernel on the GPU.
 * input  - message to hash
 * size   - message size in bytes
 * output - caller buffer (at least 20 bytes) receiving the 160-bit digest
 * proc   - maximum threads per block
 */
void sha1_gpu_global (unsigned char *input, unsigned long size, unsigned char *output, int proc)
{
    int total_threads;            /* Total number of threads in the grid */
    int blocks_per_grid;          /* Number of blocks in the grid */
    int threads_per_block;        /* Number of threads in a block */
    int pad, size_be;             /* Number of zeros to pad, message size in big-enadian. */
    int total_datablocks;         /* Total number of 64-byte blocks message is split into */
    int i, k;                     /* Temporary variables */
    unsigned char *d_message;     /* Input message on the device */
    unsigned long *d_extended;    /* Extended blocks on the device */
    sha1_gpu_context ctx, *d_ctx; /* Intermediate hash states */
    /* Initialization vector for SHA-1 */
    ctx.state[0] = 0x67452301;
    ctx.state[1] = 0xEFCDAB89;
    ctx.state[2] = 0x98BADCFE;
    ctx.state[3] = 0x10325476;
    ctx.state[4] = 0xC3D2E1F0;
    pad = padding_256 (size);
    threads_per_block = proc;
    /* One block only: the kernel relies on intra-block synchronization. */
    blocks_per_grid = 1;
    /* How many 64-byte blocks in the padded message */
    total_datablocks = (size + pad + 8) / 64;
    if (total_datablocks > threads_per_block)
        total_threads = threads_per_block;
    else
        total_threads = total_datablocks;
    size_be = LETOBE32 (size * 8);
    /* Allocate enough memory on the device */
    cudaMalloc ((void**)&d_extended, proc * 80 * sizeof(unsigned long));
    cudaMalloc ((void**)&d_message, size + pad + 8);
    cudaMalloc ((void**)&d_ctx, sizeof (sha1_gpu_context));
    /*
     * Copy the data from host to device and perform padding:
     * 0x80 marker, zero fill, then the message bit-length (big-endian).
     */
    cudaMemcpy (d_ctx, &ctx, sizeof (sha1_gpu_context), cudaMemcpyHostToDevice);
    cudaMemcpy (d_message, input, size, cudaMemcpyHostToDevice);
    cudaMemset (d_message + size, 0x80, 1);
    cudaMemset (d_message + size + 1, 0, pad + 7);
    cudaMemcpy (d_message + size + pad + 4, &size_be, 4, cudaMemcpyHostToDevice);
    /*
     * Run the algorithm
     */
    i = 0;
    k = total_datablocks / total_threads;
    printf("%d %d\n", total_datablocks, total_threads);
    if (k - 1 > 0) {
        /*
         * Kernel is executed multiple times and only one block in the grid is
         * used, since thread synchronization is allowed only within a block.
         */
        for (i = 0; i < k; i++) {
            sha1_kernel_global <<<blocks_per_grid, proc>>>(d_message + threads_per_block * i * 64, d_ctx, threads_per_block, d_extended);
            /*
             * No host-side synchronization here: threads are synchronized
             * inside the kernel.
             */
        }
    }
    /* NOTE(review): if total_datablocks is an exact multiple of total_threads
     * this final launch runs with threads_per_block == 0 — confirm the kernel
     * tolerates that. */
    threads_per_block = total_datablocks - (i * total_threads);
    sha1_kernel_global <<<blocks_per_grid, proc>>>(d_message + total_threads * i * 64, d_ctx, threads_per_block, d_extended);
    cudaMemcpy (&ctx, d_ctx, sizeof(sha1_gpu_context), cudaMemcpyDeviceToHost);
    /* ctx.state is unsigned long: the original "%d" specifiers were undefined
     * behaviour on LP64 platforms; use %lu. */
    printf("%lu %lu %lu %lu %lu\n", ctx.state[0],
            ctx.state[1],
            ctx.state[2],
            ctx.state[3],
            ctx.state[4]
            );
    /* Put the hash value in the users' buffer */
    PUT_UINT32_BE( ctx.state[0], output, 0 );
    PUT_UINT32_BE( ctx.state[1], output, 4 );
    PUT_UINT32_BE( ctx.state[2], output, 8 );
    PUT_UINT32_BE( ctx.state[3], output, 12 );
    PUT_UINT32_BE( ctx.state[4], output, 16 );
    cudaFree (d_message);
    cudaFree (d_ctx);
    cudaFree (d_extended);
}
// int main(int argc, char *argv[])
// {
// unsigned char hash[20];
// unsigned char *data = NULL;
// int i;
// int max_threads_per_block = MAX_THREADS_PER_BLOCK;
// unsigned int nonce = 0;
// bool done = false;
// //data = (unsigned char *) malloc (100);
// if(argc <= 2)
// {
// printf("Give 2 arguments");
// exit(1);
// }
// int maxIterations = atoi(argv[1]);
// int difficulty = atoi(argv[2]);
// for(int j=0;j<maxIterations;j++)
// {
// memset (hash, 0, 20);
// char t[20];
// itoa(j, t, 10);
// char data[100] = "cgCevIRCeDhqIJExqnjwidTkKHeGOPXYgviPiwZhOImJbvKZUGEkjkrkHQPoSlFl";
// for(int k=0;k<strlen(t);k++)
// {
// data[k + 64] = t[k];
// }
// sha1_gpu_global ((unsigned char *)data, strlen(data), hash, max_threads_per_block);
// char hashHex[20];
// sprintf(hashHex, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
// (unsigned int)hash[0],
// (unsigned int)hash[1],
// (unsigned int)hash[2],
// (unsigned int)hash[3],
// (unsigned int)hash[4],
// (unsigned int)hash[5],
// (unsigned int)hash[6],
// (unsigned int)hash[7],
// (unsigned int)hash[8],
// (unsigned int)hash[9],
// (unsigned int)hash[10],
// (unsigned int)hash[11],
// (unsigned int)hash[12],
// (unsigned int)hash[13],
// (unsigned int)hash[14],
// (unsigned int)hash[15],
// (unsigned int)hash[16],
// (unsigned int)hash[17],
// (unsigned int)hash[18],
// (unsigned int)hash[19]
// );
// done = true;
// for(i=0;i<difficulty;i++)
// done = done && hashHex[i] == '0';
// if(done){
// printf("Solution: %d ", j);
// printf("%s", hashHex);
// break;
// }
// }
// // for (i = 1000; i < 100000000; i = i * 10) {
// // data = (unsigned char *) malloc (i);
// // if (data == NULL) {
// // printf ("ERROR: Insufficient memory on host\n");
// // return -1;
// // }
// // sha1_cpu (data, i, hash);
// // memset (hash, 0, 20);
// // sha1_gpu_global (data, i, hash, max_threads_per_block);
// // free (data);
// // }
// return 0;
// }
/* Hashes a fixed test string on the GPU and prints the digest in hex. */
int main(void)
{
    char data[100] = "hello2";
    unsigned char hash[20];
    sha1_gpu_global ((unsigned char *)data, strlen(data), hash, 1);
    /*
     * A SHA-1 digest is 20 bytes = 40 hex characters plus the NUL.  The
     * original buffer was char[20] and the format printed only 18 of the
     * 20 bytes, so sprintf overflowed the buffer and truncated the digest.
     * Use a correctly sized buffer, snprintf, and all 20 bytes.
     */
    char hashHex[41];
    snprintf(hashHex, sizeof(hashHex),
            "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
            (unsigned int)hash[0],
            (unsigned int)hash[1],
            (unsigned int)hash[2],
            (unsigned int)hash[3],
            (unsigned int)hash[4],
            (unsigned int)hash[5],
            (unsigned int)hash[6],
            (unsigned int)hash[7],
            (unsigned int)hash[8],
            (unsigned int)hash[9],
            (unsigned int)hash[10],
            (unsigned int)hash[11],
            (unsigned int)hash[12],
            (unsigned int)hash[13],
            (unsigned int)hash[14],
            (unsigned int)hash[15],
            (unsigned int)hash[16],
            (unsigned int)hash[17],
            (unsigned int)hash[18],
            (unsigned int)hash[19]
            );
    printf("%s", hashHex);
    return 0;
} |
a8d3ff06bd909af567e370b9e058f477a32ad927.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_triangular/Xpoti_batch.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ali Charara
* @date 2018-11-14
**/
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "rocblas.h"
#include <typeinfo>
#include "kblas.h"
#include "kblas_struct.h"
#include "kblas_operators.h"
#include "kblas_defs.h"
#include "kblas_common.h"
#include "workspace_queries.ch"
//==============================================================================================
#include "Xblas_core.ch"
#include "Xhelper_funcs.ch"
#include "Xpoti_batch_drivers.cuh"
//==============================================================================================
//Non-Strided form
// workspace needed: device pointers
// A: host pointer to device buffer
// Non-strided (array-of-pointers) entry point with explicit row/column
// offsets into each matrix.  Returns KBLAS_InsufficientWorkspace when the
// handle's pre-allocated workspace cannot satisfy the query; otherwise
// forwards to the templated core driver with a zero stride.
int Xpoti_batch_offset(kblasHandle_t handle,
                       char uplo,
                       const int n,
                       TYPE** A, int A_row_off, int A_col_off, int lda,
                       int batchCount,
                       int *info_array)
{
    KBlasWorkspaceState ws_needed;
    poti_batch_wsquery_core<false>( n, batchCount, (kblasWorkspaceState_t)&ws_needed);
    if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
        return KBLAS_InsufficientWorkspace;
    }
    return Xpoti_batch_core<TYPE, TYPE**, false>(
            handle,
            uplo, n,
            (TYPE**)A, A_row_off, A_col_off, lda, (long)0,
            batchCount,
            info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
// C++ convenience overload (array-of-pointers): forwards to
// Xpoti_batch_offset with zero row/column offsets.
int kblas_poti_batch(kblasHandle_t handle,
                     char uplo,
                     const int n,
                     TYPE** A, int lda,
                     int batchCount,
                     int *info_array)
{
    return Xpoti_batch_offset( handle,
                               uplo, n,
                               A, 0, 0, lda,
                               batchCount,
                               info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
// C-linkage export (array-of-pointers) for non-C++ callers; behaves
// exactly like kblas_poti_batch above.
extern "C"
int kblasXpoti_batch(kblasHandle_t handle,
                     char uplo,
                     const int n,
                     TYPE** A, int lda,
                     int batchCount,
                     int *info_array)
{
    return Xpoti_batch_offset( handle,
                               uplo, n,
                               A, 0, 0, lda,
                               batchCount,
                               info_array);
}
//==============================================================================================
//Strided form
// template<>
// workspace needed: device pointers
// A: host pointer to device buffer
// Strided variant: all matrices live in one buffer, strideA elements apart.
// Checks the strided workspace query, then forwards to the core driver.
int Xpoti_batch_offset(kblasHandle_t handle,
                       char uplo,
                       const int n,
                       TYPE* A, int A_row_off, int A_col_off, int lda, long strideA,
                       int batchCount,
                       int *info_array)
{
    KBlasWorkspaceState ws_needed;
    poti_batch_wsquery_core<true>( n, batchCount, (kblasWorkspaceState_t)&ws_needed);
    if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
        return KBLAS_InsufficientWorkspace;
    }
    return Xpoti_batch_core<TYPE, TYPE*, true>(
            handle,
            uplo, n,
            (TYPE*)A, A_row_off, A_col_off, lda, strideA,
            batchCount,
            info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
// C++ convenience overload (strided): forwards to the strided
// Xpoti_batch_offset with zero row/column offsets.
int kblas_poti_batch(kblasHandle_t handle,
                     char uplo,
                     const int n,
                     TYPE* A, int lda, long strideA,
                     int batchCount,
                     int *info_array)
{
    return Xpoti_batch_offset( handle,
                               uplo, n,
                               A, 0, 0, lda, strideA,
                               batchCount,
                               info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
// C-linkage export (strided) for non-C++ callers; behaves exactly like the
// strided kblas_poti_batch above.
extern "C"
int kblasXpoti_batch_strided(kblasHandle_t handle,
                             char uplo,
                             const int n,
                             TYPE* A, int lda, long strideA,
                             int batchCount,
                             int *info_array)
{
    return Xpoti_batch_offset( handle,
                               uplo, n,
                               A, 0, 0, lda, strideA,
                               batchCount,
                               info_array);
}
| a8d3ff06bd909af567e370b9e058f477a32ad927.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_triangular/Xpoti_batch.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ali Charara
* @date 2018-11-14
**/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cublas_v2.h"
#include <typeinfo>
#include "kblas.h"
#include "kblas_struct.h"
#include "kblas_operators.h"
#include "kblas_defs.h"
#include "kblas_common.h"
#include "workspace_queries.ch"
//==============================================================================================
#include "Xblas_core.ch"
#include "Xhelper_funcs.ch"
#include "Xpoti_batch_drivers.cuh"
//==============================================================================================
//Non-Strided form
// workspace needed: device pointers
// A: host pointer to device buffer
// Non-strided (array-of-pointers) entry point with explicit row/column
// offsets into each matrix.  Returns KBLAS_InsufficientWorkspace when the
// handle's pre-allocated workspace cannot satisfy the query; otherwise
// forwards to the templated core driver with a zero stride.
int Xpoti_batch_offset(kblasHandle_t handle,
                       char uplo,
                       const int n,
                       TYPE** A, int A_row_off, int A_col_off, int lda,
                       int batchCount,
                       int *info_array)
{
    KBlasWorkspaceState ws_needed;
    poti_batch_wsquery_core<false>( n, batchCount, (kblasWorkspaceState_t)&ws_needed);
    if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
        return KBLAS_InsufficientWorkspace;
    }
    return Xpoti_batch_core<TYPE, TYPE**, false>(
            handle,
            uplo, n,
            (TYPE**)A, A_row_off, A_col_off, lda, (long)0,
            batchCount,
            info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
// C++ convenience overload (array-of-pointers): forwards to
// Xpoti_batch_offset with zero row/column offsets.
int kblas_poti_batch(kblasHandle_t handle,
                     char uplo,
                     const int n,
                     TYPE** A, int lda,
                     int batchCount,
                     int *info_array)
{
    return Xpoti_batch_offset( handle,
                               uplo, n,
                               A, 0, 0, lda,
                               batchCount,
                               info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
// C-linkage export (array-of-pointers) for non-C++ callers; behaves
// exactly like kblas_poti_batch above.
extern "C"
int kblasXpoti_batch(kblasHandle_t handle,
                     char uplo,
                     const int n,
                     TYPE** A, int lda,
                     int batchCount,
                     int *info_array)
{
    return Xpoti_batch_offset( handle,
                               uplo, n,
                               A, 0, 0, lda,
                               batchCount,
                               info_array);
}
//==============================================================================================
//Strided form
// template<>
// workspace needed: device pointers
// A: host pointer to device buffer
// Strided variant: all matrices live in one buffer, strideA elements apart.
// Checks the strided workspace query, then forwards to the core driver.
int Xpoti_batch_offset(kblasHandle_t handle,
                       char uplo,
                       const int n,
                       TYPE* A, int A_row_off, int A_col_off, int lda, long strideA,
                       int batchCount,
                       int *info_array)
{
    KBlasWorkspaceState ws_needed;
    poti_batch_wsquery_core<true>( n, batchCount, (kblasWorkspaceState_t)&ws_needed);
    if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
        return KBLAS_InsufficientWorkspace;
    }
    return Xpoti_batch_core<TYPE, TYPE*, true>(
            handle,
            uplo, n,
            (TYPE*)A, A_row_off, A_col_off, lda, strideA,
            batchCount,
            info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
// C++ convenience overload (strided): forwards to the strided
// Xpoti_batch_offset with zero row/column offsets.
int kblas_poti_batch(kblasHandle_t handle,
                     char uplo,
                     const int n,
                     TYPE* A, int lda, long strideA,
                     int batchCount,
                     int *info_array)
{
    return Xpoti_batch_offset( handle,
                               uplo, n,
                               A, 0, 0, lda, strideA,
                               batchCount,
                               info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
// C-linkage export (strided) for non-C++ callers; behaves exactly like the
// strided kblas_poti_batch above.
extern "C"
int kblasXpoti_batch_strided(kblasHandle_t handle,
                             char uplo,
                             const int n,
                             TYPE* A, int lda, long strideA,
                             int batchCount,
                             int *info_array)
{
    return Xpoti_batch_offset( handle,
                               uplo, n,
                               A, 0, 0, lda, strideA,
                               batchCount,
                               info_array);
}
|
2e50f377e2a2132614fd3600d58043a6e47dab39.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include "../cuPrintf.cu"
#define NANOSECONDS_PER_SECOND 1E9;
void initializeArray(int*, int);
void stampaArray(int*, int);
void equalArray(int*, int*, int);
void prodottoArrayCompPerCompCPU(int *, int *, int *, int);
__global__ void prodottoArrayCompPerCompGPU(int*, int*, int*, int);
// Benchmark driver: computes the element-wise product of two int arrays on
// both CPU and GPU, checks the results agree, and prints the timings.
// Usage: prog <NumElements> <NumThreadsPerBlock> <debugFlag>
int main(int argc, char *argv[]) {
    // number of blocks and number of threads per block
    dim3 gridDim, blockDim;
    int i, N; // total number of array elements
    // arrays stored on the host
    int *A_host, *B_host, *C_host;
    // arrays stored on the device
    int *A_device, *B_device, *C_device;
    int *copy; // array into which C_device's results are copied back
    int size; // size in bytes of each array
    int num;
    int host_sum, device_sum;
    int flag, errorFlag;
    hipEvent_t startGPU, stopGPU; // start and stop timestamps
    float elapsedGPU, elapsedCPU;
    struct timespec startCPU, stopCPU;
    // const long NANOSECONDS_PER_SECOND = 1E9;
    const int MS_IN_S = 1000;
    // fall back to defaults when too few arguments are supplied
    if (argc < 4) {
        printf("Numero di parametri insufficiente!\n");
        printf("Uso corretto: %s <NumElementi> <NumThreadPerBlocco> <debugFlag>\n", argv[0]);
        printf("Uso dei valori di default\n");
        N = 128;
        num = 32;
        flag = 0;
    }
    else {
        N = atoi(argv[1]);
        num = atoi(argv[2]);
        flag = atoi(argv[3]);
    }
    blockDim.x = num; // it should be 32 this time
    // exact computation of the number of blocks (ceiling division)
    gridDim.x = N / blockDim.x + ((N % blockDim.x) == 0? 0: 1); // load balancing, third point
    // size in bytes of each array
    size = sizeof(int) * N;
    // print info about the kernel execution
    if (flag) {
        printf("***\t PRODOTTO COMPONENTE PER COMPONENTE DI DUE ARRAY \t***\n");
        printf("Numero di elementi = %d\n", N);
        printf("Numero di thread per blocco = %d\n", blockDim.x);
        printf("Numero di blocchi = %d\n", gridDim.x);
    }
    // host-side allocations
    A_host = (int *) malloc(size);
    B_host = (int *) malloc(size);
    C_host = (int *) malloc(size);
    copy = (int *) malloc(size);
    // device-side allocations
    hipMalloc((void **) &A_device, size);
    hipMalloc((void **) &B_device, size);
    hipMalloc((void **) &C_device, size);
    // initialize host data
    initializeArray(A_host, N);
    initializeArray(B_host, N);
    // copy inputs from host to device
    hipMemcpy(A_device, A_host, size, hipMemcpyHostToDevice);
    hipMemcpy(B_device, B_host, size, hipMemcpyHostToDevice);
    // zero out the result buffers
    memset(C_host, 0, size);
    hipMemset(C_device, 0, size);
    // start GPU timing
    hipEventCreate(&startGPU);
    hipEventCreate(&stopGPU);
    // kernel invocation
    hipEventRecord(startGPU);
    hipLaunchKernelGGL(( prodottoArrayCompPerCompGPU), dim3(gridDim), dim3(blockDim), 0, 0, A_device, B_device, C_device, N);
    hipEventRecord(stopGPU);
    // compute the time taken by the device to execute the kernel
    hipEventSynchronize(stopGPU);
    hipEventElapsedTime(&elapsedGPU, startGPU, stopGPU);
    hipEventDestroy(startGPU);
    hipEventDestroy(stopGPU);
    // copy results from device to host
    hipMemcpy(copy, C_device, size, hipMemcpyDeviceToHost);
    // run (and time) the serial element-wise product
    clock_gettime(CLOCK_REALTIME, &startCPU);
    prodottoArrayCompPerCompCPU(A_host, B_host, C_host, N);
    clock_gettime(CLOCK_REALTIME, &stopCPU);
    elapsedCPU = (stopCPU.tv_sec - startCPU.tv_sec) + (stopCPU.tv_nsec - startCPU.tv_nsec) / NANOSECONDS_PER_SECOND;
    // print the arrays and results (debug mode, small sizes only)
    if (flag && N < 20) {
        printf("array A\n");
        stampaArray(A_host, N);
        printf("array B\n");
        stampaArray(B_host, N);
        printf("Risultati host\n");
        stampaArray(C_host, N);
        printf("Risultati device\n");
        stampaArray(copy,N);
    }
    // correctness test
    if (flag) {
        equalArray(copy, C_host, N);
    }
    // sum the elements of the two result arrays
    host_sum = device_sum = 0;
    for (i = 0; i < N; i++) {
        host_sum += C_host[i];
        device_sum += copy[i];
    }
    // compare the results
    errorFlag = 0;
    if (flag) {
        printf("La somma sul device (%d) ", device_sum);
        if (host_sum != device_sum) {
            printf("non ");
            errorFlag = 1;
        }
        printf("coincide con la somma sull'host (%d)!\n", host_sum);
    }
    // NOTE(review): errorFlag is only ever set inside the `if (flag)` branch
    // above, so this branch is unreachable — the mismatch check was likely
    // meant to run unconditionally.  Confirm intent.
    else if (errorFlag) {
        printf("Le somme non coincidono!");
    }
    printf("Tempo CPU: %.3f ms\n", elapsedCPU * MS_IN_S);
    printf("Tempo GPU: %.3f ms\n", elapsedGPU); // already in ms
    // host-side deallocation
    free(A_host);
    free(B_host);
    free(C_host);
    free(copy);
    // device-side deallocation
    hipFree(A_device);
    hipFree(B_device);
    hipFree(C_device);
    exit(EXIT_SUCCESS);
}
// Fills the array with the identity sequence 0, 1, ..., n-1.
void initializeArray(int *array, int n) {
    for (int k = 0; k < n; ++k) {
        array[k] = k;
    }
}
// Prints the array elements separated by spaces, ending with a newline.
void stampaArray(int* array, int n) {
    for (int k = 0; k < n; ++k) {
        printf("%d ", array[k]);
    }
    printf("\n");
}
// Compares the two arrays element by element and reports whether they match.
// The original loop had no bound check, so it read past the end of both
// arrays whenever all n elements were equal; the i < n guard fixes that.
void equalArray(int* a, int*b, int n) {
    int i = 0;
    while (i < n && a[i] == b[i])
        i++;
    if (i < n)
        printf("I risultati dell'host e del device sono diversi\n");
    else
        printf("I risultati dell'host e del device coincidono\n");
}
// Serial (CPU) reference: element-wise product c[k] = a[k] * b[k].
void prodottoArrayCompPerCompCPU(int *a, int *b, int *c, int n) {
    for (int k = 0; k < n; ++k)
        c[k] = a[k] * b[k];
}
// Parallel (GPU) version: one thread per element, with a tail guard for
// grids larger than n.
__global__ void prodottoArrayCompPerCompGPU(int *a, int *b, int *c, int n) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < n)
        c[gid] = a[gid] * b[gid];
}
| 2e50f377e2a2132614fd3600d58043a6e47dab39.cu | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#include "../cuPrintf.cu"
#define NANOSECONDS_PER_SECOND 1E9;
void initializeArray(int*, int);
void stampaArray(int*, int);
void equalArray(int*, int*, int);
void prodottoArrayCompPerCompCPU(int *, int *, int *, int);
__global__ void prodottoArrayCompPerCompGPU(int*, int*, int*, int);
// Driver: element-wise product of two integer arrays on CPU and GPU, with a
// correctness check and wall-clock timing for both versions.
//
// Usage: prog <NumElementi> <NumThreadPerBlocco> <debugFlag>
// Falls back to N=128, 32 threads per block, quiet mode when arguments are
// missing. debugFlag enables verbose output.
//
// Fix: errorFlag was zeroed immediately before the comparison, and only set
// inside the verbose branch, so the quiet-mode "Le somme non coincidono!"
// message was unreachable. The mismatch flag is now computed before branching.
int main(int argc, char *argv[]) {
    dim3 gridDim, blockDim;              // launch configuration
    int i, N;                            // N = total number of array elements
    int *A_host, *B_host, *C_host;       // host-side arrays
    int *A_device, *B_device, *C_device; // device-side arrays
    int *copy;                           // host copy of C_device for checking
    int size;                            // size in bytes of each array
    int num;                             // threads per block (from argv)
    int host_sum, device_sum;
    int flag, errorFlag;
    cudaEvent_t startGPU, stopGPU;       // device-side timing
    float elapsedGPU, elapsedCPU;
    struct timespec startCPU, stopCPU;   // host-side timing
    const int MS_IN_S = 1000;
    if (argc < 4) {
        printf("Numero di parametri insufficiente!\n");
        printf("Uso corretto: %s <NumElementi> <NumThreadPerBlocco> <debugFlag>\n", argv[0]);
        printf("Uso dei valori di default\n");
        N = 128;
        num = 32;
        flag = 0;
    }
    else {
        N = atoi(argv[1]);
        num = atoi(argv[2]);
        flag = atoi(argv[3]);
    }
    blockDim.x = num;
    // Ceiling division: every element gets a thread even when N is not a
    // multiple of the block size.
    gridDim.x = N / blockDim.x + ((N % blockDim.x) == 0? 0: 1);
    size = sizeof(int) * N;
    if (flag) {
        printf("***\t PRODOTTO COMPONENTE PER COMPONENTE DI DUE ARRAY \t***\n");
        printf("Numero di elementi = %d\n", N);
        printf("Numero di thread per blocco = %d\n", blockDim.x);
        printf("Numero di blocchi = %d\n", gridDim.x);
    }
    // Host and device allocations.
    A_host = (int *) malloc(size);
    B_host = (int *) malloc(size);
    C_host = (int *) malloc(size);
    copy = (int *) malloc(size);
    cudaMalloc((void **) &A_device, size);
    cudaMalloc((void **) &B_device, size);
    cudaMalloc((void **) &C_device, size);
    // Initialize inputs on the host and push them to the device.
    initializeArray(A_host, N);
    initializeArray(B_host, N);
    cudaMemcpy(A_device, A_host, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_device, B_host, size, cudaMemcpyHostToDevice);
    // Zero both result buffers.
    memset(C_host, 0, size);
    cudaMemset(C_device, 0, size);
    // Time the kernel with CUDA events (device clock, already in ms).
    cudaEventCreate(&startGPU);
    cudaEventCreate(&stopGPU);
    cudaEventRecord(startGPU);
    prodottoArrayCompPerCompGPU<<<gridDim, blockDim>>>(A_device, B_device, C_device, N);
    cudaEventRecord(stopGPU);
    cudaEventSynchronize(stopGPU);
    cudaEventElapsedTime(&elapsedGPU, startGPU, stopGPU);
    cudaEventDestroy(startGPU);
    cudaEventDestroy(stopGPU);
    // Fetch the device result for verification.
    cudaMemcpy(copy, C_device, size, cudaMemcpyDeviceToHost);
    // Serial reference, timed with the host real-time clock (seconds).
    clock_gettime(CLOCK_REALTIME, &startCPU);
    prodottoArrayCompPerCompCPU(A_host, B_host, C_host, N);
    clock_gettime(CLOCK_REALTIME, &stopCPU);
    elapsedCPU = (stopCPU.tv_sec - startCPU.tv_sec) + (stopCPU.tv_nsec - startCPU.tv_nsec) / NANOSECONDS_PER_SECOND;
    // Verbose dump only for small inputs.
    if (flag && N < 20) {
        printf("array A\n");
        stampaArray(A_host, N);
        printf("array B\n");
        stampaArray(B_host, N);
        printf("Risultati host\n");
        stampaArray(C_host, N);
        printf("Risultati device\n");
        stampaArray(copy,N);
    }
    // Element-wise correctness test.
    if (flag) {
        equalArray(copy, C_host, N);
    }
    // Reduce both results to a scalar as a cheap cross-check.
    host_sum = device_sum = 0;
    for (i = 0; i < N; i++) {
        host_sum += C_host[i];
        device_sum += copy[i];
    }
    // Compute the mismatch flag *before* branching so quiet mode can report.
    errorFlag = (host_sum != device_sum);
    if (flag) {
        printf("La somma sul device (%d) ", device_sum);
        if (errorFlag) {
            printf("non ");
        }
        printf("coincide con la somma sull'host (%d)!\n", host_sum);
    }
    else if (errorFlag) {
        printf("Le somme non coincidono!");
    }
    printf("Tempo CPU: %.3f ms\n", elapsedCPU * MS_IN_S);
    printf("Tempo GPU: %.3f ms\n", elapsedGPU); // already in ms
    // Host deallocation.
    free(A_host);
    free(B_host);
    free(C_host);
    free(copy);
    // Device deallocation.
    cudaFree(A_device);
    cudaFree(B_device);
    cudaFree(C_device);
    exit(EXIT_SUCCESS);
}
// Fill `array` with the ascending sequence 0, 1, ..., n-1.
void initializeArray(int *array, int n) {
    for (int idx = 0; idx < n; ++idx) {
        array[idx] = idx;
    }
}
// Print the first n elements of `array` on one line, space separated,
// followed by a newline.
void stampaArray(int* array, int n) {
    for (int idx = 0; idx < n; ++idx) {
        printf("%d ", array[idx]);
    }
    printf("\n");
}
// Compare the first n elements of a and b and print whether the host and
// device results coincide.
// Fix: the original scanned with `while (a[i] == b[i]) i++;` and only tested
// `i < n` afterwards, so whenever all n elements matched it kept reading past
// the end of both arrays (undefined behavior). Bound the scan by n.
void equalArray(int* a, int*b, int n) {
    int i = 0;
    while (i < n && a[i] == b[i])
        i++;
    if (i < n)
        printf("I risultati dell'host e del device sono diversi\n");
    else
        printf("I risultati dell'host e del device coincidono\n");
}
// Seriale
// Serial reference: element-wise product c[i] = a[i] * b[i].
void prodottoArrayCompPerCompCPU(int *a, int *b, int *c, int n) {
    for (int idx = 0; idx < n; ++idx) {
        c[idx] = a[idx] * b[idx];
    }
}
// Parallelo
// Parallel element-wise product: one thread per element. Launch with at
// least n total threads; the guard handles the ragged tail when n is not a
// multiple of blockDim.x.
__global__ void prodottoArrayCompPerCompGPU(int *a, int *b, int *c, int n) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n)
        c[index] = a[index] * b[index];
}
|
6e1fd6c61ef00fe442f5261a57d89b75999af99b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hiprand/hiprand_kernel.h>
#include "../include/dtypes.cuh"
#include "../include/inits.cuh"
#include "../include/mesh.cuh"
#include "../include/solvers.cuh"
#include "minunit.cuh"
// Verify Mesh::translate / rotate / rescale / grow_normally on the torus
// fixture (tests/torus.vtk) by checking the axis-aligned bounding box
// (get_minimum / get_maximum) after each transformation. Each transformation
// is undone before the next group of checks, so every group starts from the
// original geometry. Untransformed box: [-1.5,1.5] x [-1.5,1.5] x [-0.5,0.5].
const char* test_transformations()
{
    Mesh mesh{"tests/torus.vtk"};
    auto minimum = mesh.get_minimum();
    auto maximum = mesh.get_maximum();
    MU_ASSERT("Min wrong in x", isclose(minimum.x, -1.5));
    MU_ASSERT("Min wrong in y", isclose(minimum.y, -1.5));
    MU_ASSERT("Min wrong in z", isclose(minimum.z, -0.5));
    MU_ASSERT("Max wrong in x", isclose(maximum.x, 1.5));
    MU_ASSERT("Max wrong in y", isclose(maximum.y, 1.5));
    MU_ASSERT("Max wrong in z", isclose(maximum.z, 0.5));
    // Translation by +1 along x shifts only the x bounds.
    mesh.translate(float3{1, 0, 0});
    minimum = mesh.get_minimum();
    maximum = mesh.get_maximum();
    MU_ASSERT("Translated min wrong in x", isclose(minimum.x, -1.5 + 1));
    MU_ASSERT("Translated min wrong in y", isclose(minimum.y, -1.5));
    MU_ASSERT("Translated min wrong in z", isclose(minimum.z, -0.5));
    MU_ASSERT("Translated max wrong in x", isclose(maximum.x, 1.5 + 1));
    MU_ASSERT("Translated max wrong in y", isclose(maximum.y, 1.5));
    MU_ASSERT("Translated max wrong in z", isclose(maximum.z, 0.5));
    mesh.translate(float3{-1, 0, 0}); // undo translation
    // A 90 degree rotation about y swaps the x and z extents.
    mesh.rotate(0, M_PI / 2, 0);
    minimum = mesh.get_minimum();
    maximum = mesh.get_maximum();
    MU_ASSERT("Rotated min wrong in x", isclose(minimum.x, -0.5));
    MU_ASSERT("Rotated min wrong in y", isclose(minimum.y, -1.5));
    MU_ASSERT("Rotated min wrong in z", isclose(minimum.z, -1.5));
    MU_ASSERT("Rotated max wrong in x", isclose(maximum.x, 0.5));
    MU_ASSERT("Rotated max wrong in y", isclose(maximum.y, 1.5));
    MU_ASSERT("Rotated max wrong in z", isclose(maximum.z, 1.5));
    mesh.rotate(0, -M_PI / 2, 0); // undo rotation
    // Uniform scaling by 2 doubles every extent.
    mesh.rescale(2);
    minimum = mesh.get_minimum();
    maximum = mesh.get_maximum();
    MU_ASSERT("Scaled min wrong in x", isclose(minimum.x, -1.5 * 2));
    MU_ASSERT("Scaled min wrong in y", isclose(minimum.y, -1.5 * 2));
    MU_ASSERT("Scaled min wrong in z", isclose(minimum.z, -0.5 * 2));
    MU_ASSERT("Scaled max wrong in x", isclose(maximum.x, 1.5 * 2));
    MU_ASSERT("Scaled max wrong in y", isclose(maximum.y, 1.5 * 2));
    MU_ASSERT("Scaled max wrong in z", isclose(maximum.z, 0.5 * 2));
    mesh.rescale(0.5); // undo scaling
    // Growing by 0.1 along vertex normals pads the box by 0.1 on all sides.
    mesh.grow_normally(0.1);
    minimum = mesh.get_minimum();
    maximum = mesh.get_maximum();
    MU_ASSERT("Grown min wrong in x", isclose(minimum.x, -1.5 - 0.1));
    MU_ASSERT("Grown min wrong in y", isclose(minimum.y, -1.5 - 0.1));
    MU_ASSERT("Grown min wrong in z", isclose(minimum.z, -0.5 - 0.1));
    MU_ASSERT("Grown max wrong in x", isclose(maximum.x, 1.5 + 0.1));
    MU_ASSERT("Grown max wrong in y", isclose(maximum.y, 1.5 + 0.1));
    MU_ASSERT("Grown max wrong in z", isclose(maximum.z, 0.5 + 0.1));
    // NULL signals success to the minunit harness.
    return NULL;
}
// Scatter random points through the torus' bounding box and check that
// Mesh::test_exclusion classifies each one consistently with its analytic
// distance from the unit ring (torus surface = distance 0.5). Points within
// 0.01 of the surface are skipped because the faceted mesh only approximates
// the analytic torus.
const char* test_exclusion()
{
    const auto n_points = 1500;
    Solution<float3, Grid_solver> points{n_points};
    random_cuboid(
        0.25, float3{-1.5, -1.5, -0.5}, float3{1.5, 1.5, 0.5}, points);
    Mesh mesh{"tests/torus.vtk"};
    for (auto i = 0; i < n_points; i++) {
        // Distance from the circle of radius 1 in the z = 0 plane.
        auto dist_from_ring = sqrt(
            pow(1 - sqrt(pow(points.h_X[i].x, 2) + pow(points.h_X[i].y, 2)),
                2) +
            pow(points.h_X[i].z, 2));
        if (abs(dist_from_ring - 0.5) < 0.01) continue; // Tolerance for mesh
        auto out = mesh.test_exclusion(points.h_X[i]);
        // Outside the tube radius (>= 0.5) must mean "excluded".
        MU_ASSERT("Exclusion test wrong", (dist_from_ring >= 0.5) == out);
    }
    return NULL;
}
// shape_comparison_mesh_to_points must score 0 when the point cloud is
// exactly the mesh's own vertices, and 0.1 after every vertex is grown
// outward by 0.1 along its normal.
const char* test_shape_comparison()
{
    Mesh mesh{"tests/torus.vtk"};
    mesh.copy_to_device();
    // Build a point cloud duplicating the mesh vertices one-to-one.
    Solution<float3, Grid_solver> points{
        static_cast<int>(mesh.vertices.size())};
    for (auto i = 0; i < mesh.vertices.size(); i++) {
        points.h_X[i].x = mesh.vertices[i].x;
        points.h_X[i].y = mesh.vertices[i].y;
        points.h_X[i].z = mesh.vertices[i].z;
    }
    points.copy_to_device();
    MU_ASSERT("Shape comparison wrong",
        isclose(mesh.shape_comparison_mesh_to_points(points), 0.0));
    mesh.grow_normally(0.1);
    mesh.copy_to_device(); // push the grown geometry before re-scoring
    MU_ASSERT("Grown shape comparison wrong",
        isclose(mesh.shape_comparison_mesh_to_points(points), 0.1));
    return NULL;
}
// Exact component-wise equality for CUDA float3 vectors.
__device__ __host__ bool operator==(const float3& a, const float3& b)
{
    return a.x == b.x && a.y == b.y && a.z == b.z;
}
// Two triangles are equal iff all three vertices, the centroid-like member C,
// and the normal n compare equal (component-wise, via operator== on float3).
__device__ __host__ bool operator==(const Triangle& a, const Triangle& b)
{
    return a.V0 == b.V0 && a.V1 == b.V1 && a.V2 == b.V2 && a.C == b.C &&
           a.n == b.n;
}
// Mesh's copy constructor must deep-copy every geometry container, so that
// mutating the copy leaves the original untouched.
const char* test_copy()
{
    Mesh orig{"tests/torus.vtk"};
    Mesh copy{orig};
    MU_ASSERT("Different vertices in copy", orig.vertices == copy.vertices);
    MU_ASSERT("Different facets in copy", orig.facets == copy.facets);
    MU_ASSERT("Different triangle_to_vertices in copy",
        orig.triangle_to_vertices == copy.triangle_to_vertices);
    MU_ASSERT("Different vertex_to_triangles in copy",
        orig.vertex_to_triangles == copy.vertex_to_triangles);
    // Independence check: clearing the copy must not affect the original.
    copy.vertices.clear();
    MU_ASSERT("Removed vertices still in copy", orig.vertices != copy.vertices);
    return NULL;
}
// minunit registry: run each test in order. MU_RUN_TEST returns the first
// failing test's message; NULL means the whole suite passed.
const char* all_tests()
{
    MU_RUN_TEST(test_transformations);
    MU_RUN_TEST(test_exclusion);
    MU_RUN_TEST(test_shape_comparison);
    MU_RUN_TEST(test_copy);
    return NULL;
}
MU_RUN_SUITE(all_tests);
| 6e1fd6c61ef00fe442f5261a57d89b75999af99b.cu | #include <curand_kernel.h>
#include "../include/dtypes.cuh"
#include "../include/inits.cuh"
#include "../include/mesh.cuh"
#include "../include/solvers.cuh"
#include "minunit.cuh"
// Verify Mesh::translate / rotate / rescale / grow_normally on the torus
// fixture (tests/torus.vtk) by checking the axis-aligned bounding box
// (get_minimum / get_maximum) after each transformation. Each transformation
// is undone before the next group of checks, so every group starts from the
// original geometry. Untransformed box: [-1.5,1.5] x [-1.5,1.5] x [-0.5,0.5].
const char* test_transformations()
{
    Mesh mesh{"tests/torus.vtk"};
    auto minimum = mesh.get_minimum();
    auto maximum = mesh.get_maximum();
    MU_ASSERT("Min wrong in x", isclose(minimum.x, -1.5));
    MU_ASSERT("Min wrong in y", isclose(minimum.y, -1.5));
    MU_ASSERT("Min wrong in z", isclose(minimum.z, -0.5));
    MU_ASSERT("Max wrong in x", isclose(maximum.x, 1.5));
    MU_ASSERT("Max wrong in y", isclose(maximum.y, 1.5));
    MU_ASSERT("Max wrong in z", isclose(maximum.z, 0.5));
    // Translation by +1 along x shifts only the x bounds.
    mesh.translate(float3{1, 0, 0});
    minimum = mesh.get_minimum();
    maximum = mesh.get_maximum();
    MU_ASSERT("Translated min wrong in x", isclose(minimum.x, -1.5 + 1));
    MU_ASSERT("Translated min wrong in y", isclose(minimum.y, -1.5));
    MU_ASSERT("Translated min wrong in z", isclose(minimum.z, -0.5));
    MU_ASSERT("Translated max wrong in x", isclose(maximum.x, 1.5 + 1));
    MU_ASSERT("Translated max wrong in y", isclose(maximum.y, 1.5));
    MU_ASSERT("Translated max wrong in z", isclose(maximum.z, 0.5));
    mesh.translate(float3{-1, 0, 0}); // undo translation
    // A 90 degree rotation about y swaps the x and z extents.
    mesh.rotate(0, M_PI / 2, 0);
    minimum = mesh.get_minimum();
    maximum = mesh.get_maximum();
    MU_ASSERT("Rotated min wrong in x", isclose(minimum.x, -0.5));
    MU_ASSERT("Rotated min wrong in y", isclose(minimum.y, -1.5));
    MU_ASSERT("Rotated min wrong in z", isclose(minimum.z, -1.5));
    MU_ASSERT("Rotated max wrong in x", isclose(maximum.x, 0.5));
    MU_ASSERT("Rotated max wrong in y", isclose(maximum.y, 1.5));
    MU_ASSERT("Rotated max wrong in z", isclose(maximum.z, 1.5));
    mesh.rotate(0, -M_PI / 2, 0); // undo rotation
    // Uniform scaling by 2 doubles every extent.
    mesh.rescale(2);
    minimum = mesh.get_minimum();
    maximum = mesh.get_maximum();
    MU_ASSERT("Scaled min wrong in x", isclose(minimum.x, -1.5 * 2));
    MU_ASSERT("Scaled min wrong in y", isclose(minimum.y, -1.5 * 2));
    MU_ASSERT("Scaled min wrong in z", isclose(minimum.z, -0.5 * 2));
    MU_ASSERT("Scaled max wrong in x", isclose(maximum.x, 1.5 * 2));
    MU_ASSERT("Scaled max wrong in y", isclose(maximum.y, 1.5 * 2));
    MU_ASSERT("Scaled max wrong in z", isclose(maximum.z, 0.5 * 2));
    mesh.rescale(0.5); // undo scaling
    // Growing by 0.1 along vertex normals pads the box by 0.1 on all sides.
    mesh.grow_normally(0.1);
    minimum = mesh.get_minimum();
    maximum = mesh.get_maximum();
    MU_ASSERT("Grown min wrong in x", isclose(minimum.x, -1.5 - 0.1));
    MU_ASSERT("Grown min wrong in y", isclose(minimum.y, -1.5 - 0.1));
    MU_ASSERT("Grown min wrong in z", isclose(minimum.z, -0.5 - 0.1));
    MU_ASSERT("Grown max wrong in x", isclose(maximum.x, 1.5 + 0.1));
    MU_ASSERT("Grown max wrong in y", isclose(maximum.y, 1.5 + 0.1));
    MU_ASSERT("Grown max wrong in z", isclose(maximum.z, 0.5 + 0.1));
    // NULL signals success to the minunit harness.
    return NULL;
}
// Scatter random points through the torus' bounding box and check that
// Mesh::test_exclusion classifies each one consistently with its analytic
// distance from the unit ring (torus surface = distance 0.5). Points within
// 0.01 of the surface are skipped because the faceted mesh only approximates
// the analytic torus.
const char* test_exclusion()
{
    const auto n_points = 1500;
    Solution<float3, Grid_solver> points{n_points};
    random_cuboid(
        0.25, float3{-1.5, -1.5, -0.5}, float3{1.5, 1.5, 0.5}, points);
    Mesh mesh{"tests/torus.vtk"};
    for (auto i = 0; i < n_points; i++) {
        // Distance from the circle of radius 1 in the z = 0 plane.
        auto dist_from_ring = sqrt(
            pow(1 - sqrt(pow(points.h_X[i].x, 2) + pow(points.h_X[i].y, 2)),
                2) +
            pow(points.h_X[i].z, 2));
        if (abs(dist_from_ring - 0.5) < 0.01) continue; // Tolerance for mesh
        auto out = mesh.test_exclusion(points.h_X[i]);
        // Outside the tube radius (>= 0.5) must mean "excluded".
        MU_ASSERT("Exclusion test wrong", (dist_from_ring >= 0.5) == out);
    }
    return NULL;
}
// shape_comparison_mesh_to_points must score 0 when the point cloud is
// exactly the mesh's own vertices, and 0.1 after every vertex is grown
// outward by 0.1 along its normal.
const char* test_shape_comparison()
{
    Mesh mesh{"tests/torus.vtk"};
    mesh.copy_to_device();
    // Build a point cloud duplicating the mesh vertices one-to-one.
    Solution<float3, Grid_solver> points{
        static_cast<int>(mesh.vertices.size())};
    for (auto i = 0; i < mesh.vertices.size(); i++) {
        points.h_X[i].x = mesh.vertices[i].x;
        points.h_X[i].y = mesh.vertices[i].y;
        points.h_X[i].z = mesh.vertices[i].z;
    }
    points.copy_to_device();
    MU_ASSERT("Shape comparison wrong",
        isclose(mesh.shape_comparison_mesh_to_points(points), 0.0));
    mesh.grow_normally(0.1);
    mesh.copy_to_device(); // push the grown geometry before re-scoring
    MU_ASSERT("Grown shape comparison wrong",
        isclose(mesh.shape_comparison_mesh_to_points(points), 0.1));
    return NULL;
}
// Exact component-wise equality for CUDA float3 vectors.
__device__ __host__ bool operator==(const float3& a, const float3& b)
{
    return a.x == b.x && a.y == b.y && a.z == b.z;
}
// Two triangles are equal iff all three vertices, the centroid-like member C,
// and the normal n compare equal (component-wise, via operator== on float3).
__device__ __host__ bool operator==(const Triangle& a, const Triangle& b)
{
    return a.V0 == b.V0 && a.V1 == b.V1 && a.V2 == b.V2 && a.C == b.C &&
           a.n == b.n;
}
// Mesh's copy constructor must deep-copy every geometry container, so that
// mutating the copy leaves the original untouched.
const char* test_copy()
{
    Mesh orig{"tests/torus.vtk"};
    Mesh copy{orig};
    MU_ASSERT("Different vertices in copy", orig.vertices == copy.vertices);
    MU_ASSERT("Different facets in copy", orig.facets == copy.facets);
    MU_ASSERT("Different triangle_to_vertices in copy",
        orig.triangle_to_vertices == copy.triangle_to_vertices);
    MU_ASSERT("Different vertex_to_triangles in copy",
        orig.vertex_to_triangles == copy.vertex_to_triangles);
    // Independence check: clearing the copy must not affect the original.
    copy.vertices.clear();
    MU_ASSERT("Removed vertices still in copy", orig.vertices != copy.vertices);
    return NULL;
}
// minunit registry: run each test in order. MU_RUN_TEST returns the first
// failing test's message; NULL means the whole suite passed.
const char* all_tests()
{
    MU_RUN_TEST(test_transformations);
    MU_RUN_TEST(test_exclusion);
    MU_RUN_TEST(test_shape_comparison);
    MU_RUN_TEST(test_copy);
    return NULL;
}
MU_RUN_SUITE(all_tests);
|
de6f42542f098ca0d8b8719bc47ff3bcb08acd2e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#define N 1024
#define BLOCK_SIZE 32
// Naive transpose of an N x N matrix: each thread reads input[row][col]
// (coalesced read) and writes output[col][row] (strided, uncoalesced write).
// Assumes the launch covers every element exactly once.
__global__ void matrix_transpose_naive(int *input, int *output) {
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    output[col * N + row] = input[row * N + col];
}
// Tiled transpose through shared memory: each block stages a
// BLOCK_SIZE x BLOCK_SIZE tile so that both the global read and the global
// write are coalesced. Requires blockDim == (BLOCK_SIZE, BLOCK_SIZE).
__global__ void matrix_transpose_shared(int *input, int *output) {
    // +1 padding on the inner dimension staggers the banks so the transposed
    // (swapped-index) read below is free of shared-memory bank conflicts.
    __shared__ int sharedMemory [BLOCK_SIZE] [BLOCK_SIZE + 1];
    //global index
    int indexX = threadIdx.x + blockIdx.x * blockDim.x;
    int indexY = threadIdx.y + blockIdx.y * blockDim.y;
    //transposed index: the *block* coordinates are swapped while the thread
    //coordinates keep their roles, so the write below stays coalesced.
    int tindexX = threadIdx.x + blockIdx.y * blockDim.x;
    int tindexY = threadIdx.y + blockIdx.x * blockDim.y;
    int localIndexX = threadIdx.x;
    int localIndexY = threadIdx.y;
    int index = indexY * N + indexX;
    int transposedIndex = tindexY * N + tindexX;
    //reading from global memory in coalesced manner and performing transpose in shared memory
    sharedMemory[localIndexX][localIndexY] = input[index];
    // Barrier: all writes to the tile must land before any thread reads it.
    __syncthreads();
    //writing into global memory in coalesced fashion via transposed data in shared memory
    output[transposedIndex] = sharedMemory[localIndexY][localIndexX];
}
//basically just fills the array with index.
// Initialize data[i] = i for every element of the N x N matrix (row-major).
void fill_array(int *data) {
    for (int i = 0; i < N * N; ++i) {
        data[i] = i;
    }
}
// Print one N x N matrix under the given title, one row per line.
static void print_matrix_rows(const char *title, int *m) {
    printf("%s", title);
    for (int idx = 0; idx < (N * N); idx++) {
        if (idx % N == 0)
            printf("\n");
        printf(" %d ", m[idx]);
    }
}

// Pretty-print the original matrix `a` and the transposed matrix `b`.
void print_output(int *a, int *b) {
    print_matrix_rows("\n Original Matrix::", a);
    print_matrix_rows("\n Transposed Matrix::", b);
}
// Driver: transpose an N x N matrix on the GPU with both the naive and the
// shared-memory-tiled kernel.
//
// Fix: the original copied `b` (the uninitialized host result buffer) to the
// device output d_b before the first kernel, transferring indeterminate bytes
// that the kernels fully overwrite anyway; the pointless copy is removed.
int main(void) {
    int *a, *b;       // host input / host result
    int *d_a, *d_b;   // device input / device output
    int size = N * N *sizeof(int);
    // Host buffers: a holds the input, b receives the transposed results.
    a = (int *)malloc(size); fill_array(a);
    b = (int *)malloc(size);
    // Device buffers.
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    // Only the input needs to go to the device; d_b is write-only for the
    // kernels.
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    // One thread per element; N is a multiple of BLOCK_SIZE.
    dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE,1);
    dim3 gridSize(N/BLOCK_SIZE,N/BLOCK_SIZE,1);
    hipLaunchKernelGGL(( matrix_transpose_naive), dim3(gridSize),dim3(blockSize), 0, 0, d_a,d_b);
    // Blocking copy: also synchronizes with the kernel above.
    hipMemcpy(b, d_b, size, hipMemcpyDeviceToHost);
    //print_output(a,b);
    hipLaunchKernelGGL(( matrix_transpose_shared), dim3(gridSize),dim3(blockSize), 0, 0, d_a,d_b);
    hipMemcpy(b, d_b, size, hipMemcpyDeviceToHost);
    //print_output(a,b);
    free(a); free(b);
    hipFree(d_a); hipFree(d_b);
    return 0;
}
| de6f42542f098ca0d8b8719bc47ff3bcb08acd2e.cu | #include<stdio.h>
#include<stdlib.h>
#define N 1024
#define BLOCK_SIZE 32
// Naive transpose of an N x N matrix: each thread reads input[row][col]
// (coalesced read) and writes output[col][row] (strided, uncoalesced write).
// Assumes the launch covers every element exactly once.
__global__ void matrix_transpose_naive(int *input, int *output) {
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    output[col * N + row] = input[row * N + col];
}
// Tiled transpose through shared memory: each block stages a
// BLOCK_SIZE x BLOCK_SIZE tile so that both the global read and the global
// write are coalesced. Requires blockDim == (BLOCK_SIZE, BLOCK_SIZE).
__global__ void matrix_transpose_shared(int *input, int *output) {
    // +1 padding on the inner dimension staggers the banks so the transposed
    // (swapped-index) read below is free of shared-memory bank conflicts.
    __shared__ int sharedMemory [BLOCK_SIZE] [BLOCK_SIZE + 1];
    //global index
    int indexX = threadIdx.x + blockIdx.x * blockDim.x;
    int indexY = threadIdx.y + blockIdx.y * blockDim.y;
    //transposed index: the *block* coordinates are swapped while the thread
    //coordinates keep their roles, so the write below stays coalesced.
    int tindexX = threadIdx.x + blockIdx.y * blockDim.x;
    int tindexY = threadIdx.y + blockIdx.x * blockDim.y;
    int localIndexX = threadIdx.x;
    int localIndexY = threadIdx.y;
    int index = indexY * N + indexX;
    int transposedIndex = tindexY * N + tindexX;
    //reading from global memory in coalesced manner and performing transpose in shared memory
    sharedMemory[localIndexX][localIndexY] = input[index];
    // Barrier: all writes to the tile must land before any thread reads it.
    __syncthreads();
    //writing into global memory in coalesced fashion via transposed data in shared memory
    output[transposedIndex] = sharedMemory[localIndexY][localIndexX];
}
//basically just fills the array with index.
// Initialize data[i] = i for every element of the N x N matrix (row-major).
void fill_array(int *data) {
    for (int i = 0; i < N * N; ++i) {
        data[i] = i;
    }
}
// Print one N x N matrix under the given title, one row per line.
static void print_matrix_rows(const char *title, int *m) {
    printf("%s", title);
    for (int idx = 0; idx < (N * N); idx++) {
        if (idx % N == 0)
            printf("\n");
        printf(" %d ", m[idx]);
    }
}

// Pretty-print the original matrix `a` and the transposed matrix `b`.
void print_output(int *a, int *b) {
    print_matrix_rows("\n Original Matrix::", a);
    print_matrix_rows("\n Transposed Matrix::", b);
}
// Driver: transpose an N x N matrix on the GPU with both the naive and the
// shared-memory-tiled kernel.
//
// Fix: the original copied `b` (the uninitialized host result buffer) to the
// device output d_b before the first kernel, transferring indeterminate bytes
// that the kernels fully overwrite anyway; the pointless copy is removed.
int main(void) {
    int *a, *b;       // host input / host result
    int *d_a, *d_b;   // device input / device output
    int size = N * N *sizeof(int);
    // Host buffers: a holds the input, b receives the transposed results.
    a = (int *)malloc(size); fill_array(a);
    b = (int *)malloc(size);
    // Device buffers.
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    // Only the input needs to go to the device; d_b is write-only for the
    // kernels.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    // One thread per element; N is a multiple of BLOCK_SIZE.
    dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE,1);
    dim3 gridSize(N/BLOCK_SIZE,N/BLOCK_SIZE,1);
    matrix_transpose_naive<<<gridSize,blockSize>>>(d_a,d_b);
    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost);
    //print_output(a,b);
    matrix_transpose_shared<<<gridSize,blockSize>>>(d_a,d_b);
    cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost);
    //print_output(a,b);
    free(a); free(b);
    cudaFree(d_a); cudaFree(d_b);
    return 0;
}
|
d15df709e160449338456557401dc828e7c0110e.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define MAX_THREADS_PER_BLOCK 256
#define THREADS_PER_BLOCK 256
#define LINE_SIZE 128
#define SETS 4
#define ASSOC 24
#define SIMD_WIDTH 32
// Variables
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
// Adjacency-list graph node: `starting` indexes this node's first edge in
// the edge list, `no_of_edges` counts its edges.
// NOTE(review): not referenced by the visible benchmark code -- presumably
// left over from the BFS example this file was derived from.
struct Node
{
    int starting;     // index of the first outgoing edge in the edge list
    int no_of_edges;  // number of outgoing edges
};
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Abort with file/line context when a HIP runtime call did not return
// hipSuccess; used through the checkCudaErrors() wrapper macro.
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
    if (hipSuccess == err)
        return;
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line,
            (int)err, hipGetErrorString(err));
    exit(-1);
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Report (and abort on) any pending asynchronous runtime error, e.g. from a
// kernel launch; used through the getLastCudaError() wrapper macro.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
    hipError_t err = hipGetLastError();
    if (hipSuccess == err)
        return;
    fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
            file, line, errorMessage, (int)err, hipGetErrorString(err));
    exit(-1);
}
// end of CUDA Helper Functions
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
texture<float,1,hipReadModeElementType> texmem3;
texture<float,1,hipReadModeElementType> texmem4;
texture<float,1,hipReadModeElementType> texmem5;
texture<float,1,hipReadModeElementType> texmem6;
texture<float,1,hipReadModeElementType> texmem7;
texture<float,1,hipReadModeElementType> texmem9;
texture<float,1,hipReadModeElementType> texmem8;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
__constant__ float ConstArray3[THREADS_PER_BLOCK];
__constant__ float ConstArray4[THREADS_PER_BLOCK];
__constant__ float ConstArray5[THREADS_PER_BLOCK];
__constant__ float ConstArray6[THREADS_PER_BLOCK];
__constant__ float ConstArray7[THREADS_PER_BLOCK];
__constant__ float ConstArray8[THREADS_PER_BLOCK];
// Synthetic stress kernel: each iteration reads from nine bound 1D textures
// and mixes the values with __constant__ and block-shared data, scattering
// results into `out`. The arithmetic is meaningless by design -- this exists
// only to draw power for the measurement in main().
//
// NOTE(review): the writes out[tid*k], k = 1..9, overlap between threads
// (e.g. out[6] is written by tid 6 with k=1, tid 3 with k=2, tid 2 with k=3),
// so the final contents of `out` are racy; the largest index written is
// 9*(size-1), so `out` must hold at least 9*size floats -- confirm the
// caller's allocation. The reads of other threads' shared slots (I1[(tid+i)%
// THREADS_PER_BLOCK] etc.) also happen without a __syncthreads() barrier.
__global__ void tex_bm_kernel( float* out, unsigned size, int iterations)
{
    int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    // Per-block scratch: one slot per thread, seeded from the global thread
    // id. (I3, I5, I7 and I8 are written but never read back.)
    __device__ __shared__ float I1[THREADS_PER_BLOCK];
    __device__ __shared__ float I2[THREADS_PER_BLOCK];
    __device__ __shared__ float I3[THREADS_PER_BLOCK];
    __device__ __shared__ float I4[THREADS_PER_BLOCK];
    __device__ __shared__ float I5[THREADS_PER_BLOCK];
    __device__ __shared__ float I6[THREADS_PER_BLOCK];
    __device__ __shared__ float I7[THREADS_PER_BLOCK];
    __device__ __shared__ float I8[THREADS_PER_BLOCK];
    I1[tid%THREADS_PER_BLOCK] = tid;
    I2[tid%THREADS_PER_BLOCK] = tid/2;
    I3[tid%THREADS_PER_BLOCK] = 2*tid;
    I4[tid%THREADS_PER_BLOCK] = tid+2;
    I5[tid%THREADS_PER_BLOCK] = 5*tid;
    I6[tid%THREADS_PER_BLOCK] = tid/2;
    I7[tid%THREADS_PER_BLOCK] = tid*10;
    I8[tid%THREADS_PER_BLOCK] = tid/2;
    if(tid < size){
        for(unsigned i=0; i<iterations; ++i){
            out[tid] = tex1Dfetch(texmem1,tid);
            out[tid*2] = tex1Dfetch(texmem2,tid)+ConstArray1[(tid+i)%THREADS_PER_BLOCK];
            out[tid*3] = tex1Dfetch(texmem3,tid)* I1[(tid+i)%THREADS_PER_BLOCK];
            out[tid*4] = tex1Dfetch(texmem4,tid)*I2[tid%THREADS_PER_BLOCK];
            out[tid*5] =tex1Dfetch(texmem5,tid)/ConstArray2[(tid+i)%THREADS_PER_BLOCK];
            out[tid*6] = tex1Dfetch(texmem6,tid)+I6[(tid+i)%THREADS_PER_BLOCK];
            out[tid*7] = tex1Dfetch(texmem7,tid)+I4[(tid+i)%THREADS_PER_BLOCK];
            out[tid*8] = exp(tex1Dfetch(texmem8,tid));
            out[tid*9] = sqrt(abs(tex1Dfetch(texmem9,tid)));
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
// Fill one THREADS_PER_BLOCK-long buffer with pseudo-random values in [0, 1].
static void fill_random(float *dst) {
    for (int i = 0; i < THREADS_PER_BLOCK; i++)
        dst[i] = (float)rand() / RAND_MAX;
}

// Texture/constant/shared-memory power microbenchmark driver.
// usage: prog <#iterations>
//
// Fixes vs. the original:
//  * rand() / RAND_MAX used *integer* division (result almost always 0) and
//    srand(time(0)) was re-seeded inside every fill loop iteration, so all
//    "random" constant arrays held one repeated value. Seed once, divide in
//    float; the eight identical fill loops are factored into fill_random().
//  * device_out was allocated texmem_size*10 BYTES while tex_bm_kernel
//    writes float elements up to index 9*(texmem_size-1); allocate
//    texmem_size*10*sizeof(float), matching host_out, to stop the kernel's
//    out-of-bounds device writes.
int main(int argc, char** argv)
{
    int iterations;
    if (argc != 2){
        fprintf(stderr,"usage: %s #iterations\n",argv[0]);
        exit(1);
    }
    else{
        iterations = atoi(argv[1]);
    }
    printf("Power Microbenchmark with %d iterations\n",iterations);
    // Host staging buffers for the eight __constant__ arrays.
    srand(time(0));
    float array1[THREADS_PER_BLOCK];
    fill_random(array1);
    float array2[THREADS_PER_BLOCK];
    fill_random(array2);
    float array3[THREADS_PER_BLOCK];
    fill_random(array3);
    float array4[THREADS_PER_BLOCK];
    fill_random(array4);
    float array5[THREADS_PER_BLOCK];
    fill_random(array5);
    float array6[THREADS_PER_BLOCK];
    fill_random(array6);
    float array7[THREADS_PER_BLOCK];
    fill_random(array7);
    float array8[THREADS_PER_BLOCK];
    fill_random(array8);
    hipMemcpyToSymbol(ConstArray1, array1, sizeof(float) * THREADS_PER_BLOCK );
    hipMemcpyToSymbol(ConstArray2, array2, sizeof(float) * THREADS_PER_BLOCK );
    hipMemcpyToSymbol(ConstArray3, array3, sizeof(float) * THREADS_PER_BLOCK );
    hipMemcpyToSymbol(ConstArray4, array4, sizeof(float) * THREADS_PER_BLOCK );
    hipMemcpyToSymbol(ConstArray5, array5, sizeof(float) * THREADS_PER_BLOCK );
    hipMemcpyToSymbol(ConstArray6, array6, sizeof(float) * THREADS_PER_BLOCK );
    hipMemcpyToSymbol(ConstArray7, array7, sizeof(float) * THREADS_PER_BLOCK );
    hipMemcpyToSymbol(ConstArray8, array8, sizeof(float) * THREADS_PER_BLOCK );
    // One host source buffer, replicated into nine device arrays that back
    // the nine bound textures.
    int texmem_size = LINE_SIZE*SETS*ASSOC;
    float *host_texture1 = (float*) malloc(texmem_size*sizeof(float));
    for (int i=0; i< texmem_size; i++) {
        host_texture1[i] = i;
    }
    float *device_texture1;
    float *device_texture2;
    float *device_texture3;
    float *device_texture4;
    float *device_texture5;
    float *device_texture6;
    float *device_texture7;
    float *device_texture8;
    float *device_texture9;
    // The kernel writes out[tid*k] for k = 1..9 with tid < texmem_size, so
    // the output needs texmem_size*10 float elements.
    float *host_out = (float*) malloc(texmem_size*sizeof(float)*10);
    float *device_out;
    hipMalloc((void**) &device_texture1, texmem_size*sizeof(float));
    hipMalloc((void**) &device_texture2, texmem_size*sizeof(float));
    hipMalloc((void**) &device_texture3, texmem_size*sizeof(float));
    hipMalloc((void**) &device_texture4, texmem_size*sizeof(float));
    hipMalloc((void**) &device_texture5, texmem_size*sizeof(float));
    hipMalloc((void**) &device_texture6, texmem_size*sizeof(float));
    hipMalloc((void**) &device_texture7, texmem_size*sizeof(float));
    hipMalloc((void**) &device_texture8, texmem_size*sizeof(float));
    hipMalloc((void**) &device_texture9, texmem_size*sizeof(float));
    // Sized in floats (not bytes) so the kernel's writes stay in bounds.
    hipMalloc((void**) &device_out, texmem_size*sizeof(float)*10);
    hipMemcpy(device_texture1, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(device_texture2, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(device_texture3, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(device_texture4, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(device_texture5, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(device_texture6, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(device_texture7, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(device_texture8, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(device_texture9, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
    hipBindTexture(0, texmem1, device_texture1, texmem_size*sizeof(float));
    hipBindTexture(0, texmem2, device_texture2, texmem_size*sizeof(float));
    hipBindTexture(0, texmem3, device_texture3, texmem_size*sizeof(float));
    hipBindTexture(0, texmem4, device_texture4, texmem_size*sizeof(float));
    hipBindTexture(0, texmem5, device_texture5, texmem_size*sizeof(float));
    hipBindTexture(0, texmem6, device_texture6, texmem_size*sizeof(float));
    hipBindTexture(0, texmem7, device_texture7, texmem_size*sizeof(float));
    hipBindTexture(0, texmem8, device_texture8, texmem_size*sizeof(float));
    hipBindTexture(0, texmem9, device_texture9, texmem_size*sizeof(float));
    // Enough blocks to give every texel its own thread (ceil division).
    unsigned num_blocks = (texmem_size / MAX_THREADS_PER_BLOCK) + 1;
    dim3 grid( num_blocks, 1, 1);
    dim3 threads( MAX_THREADS_PER_BLOCK, 1, 1);
    // Time the kernel with device events (elapsed time reported in ms).
    hipEvent_t start, stop;
    float elapsedTime = 0;
    checkCudaErrors(hipEventCreate(&start));
    checkCudaErrors(hipEventCreate(&stop));
    checkCudaErrors(hipEventRecord(start));
    hipLaunchKernelGGL(( tex_bm_kernel), dim3(grid), dim3(threads), 0 , 0, device_out, texmem_size, iterations);
    checkCudaErrors(hipEventRecord(stop));
    checkCudaErrors(hipEventSynchronize(stop));
    checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
    printf("gpu execution time = %.2f s\n", elapsedTime/1000);
    getLastCudaError("kernel launch failure");
    hipDeviceSynchronize();
    // Only the first texmem_size results are fetched; the values are not
    // verified (this is a power benchmark, not a correctness test).
    hipMemcpy(host_out, device_out, texmem_size*sizeof(float), hipMemcpyDeviceToHost);
    checkCudaErrors(hipEventDestroy(start));
    checkCudaErrors(hipEventDestroy(stop));
    return 0;
}
// Intended to release device allocations, but currently a stub: the buffers
// allocated in main() (the nine texture arrays and device_out) are never
// freed and leak until process exit.
void CleanupResources(void){
    // Free device memory
}
// Allocates an array with random float entries.
// NOTE(review): rand() / RAND_MAX is *integer* division, so every entry is 0
// except in the astronomically rare case rand() == RAND_MAX; a float-cast
// division ((float)rand() / RAND_MAX) was probably intended. This function is
// unused in the visible code.
void RandomInit(int* data, int n){
    for (int i = 0; i < n; ++i)
        data[i] = (int)(rand() / RAND_MAX);
} | d15df709e160449338456557401dc828e7c0110e.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <cuda_runtime.h>
#define MAX_THREADS_PER_BLOCK 256
#define THREADS_PER_BLOCK 256
#define LINE_SIZE 128
#define SETS 4
#define ASSOC 24
#define SIMD_WIDTH 32
// Variables
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
// Adjacency-list graph node: `starting` indexes this node's first edge in
// the edge list, `no_of_edges` counts its edges.
// NOTE(review): not referenced by the visible benchmark code -- presumably
// left over from the BFS example this file was derived from.
struct Node
{
    int starting;     // index of the first outgoing edge in the edge list
    int no_of_edges;  // number of outgoing edges
};
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Abort with file/line context when a CUDA runtime call did not return
// cudaSuccess; used through the checkCudaErrors() wrapper macro.
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
    if (cudaSuccess == err)
        return;
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line,
            (int)err, cudaGetErrorString(err));
    exit(-1);
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Report (and abort on) any pending asynchronous runtime error, e.g. from a
// kernel launch; used through the getLastCudaError() wrapper macro.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess == err)
        return;
    fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
            file, line, errorMessage, (int)err, cudaGetErrorString(err));
    exit(-1);
}
// end of CUDA Helper Functions
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
texture<float,1,cudaReadModeElementType> texmem3;
texture<float,1,cudaReadModeElementType> texmem4;
texture<float,1,cudaReadModeElementType> texmem5;
texture<float,1,cudaReadModeElementType> texmem6;
texture<float,1,cudaReadModeElementType> texmem7;
texture<float,1,cudaReadModeElementType> texmem9;
texture<float,1,cudaReadModeElementType> texmem8;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
__constant__ float ConstArray3[THREADS_PER_BLOCK];
__constant__ float ConstArray4[THREADS_PER_BLOCK];
__constant__ float ConstArray5[THREADS_PER_BLOCK];
__constant__ float ConstArray6[THREADS_PER_BLOCK];
__constant__ float ConstArray7[THREADS_PER_BLOCK];
__constant__ float ConstArray8[THREADS_PER_BLOCK];
// Synthetic stress kernel: each iteration reads from nine bound 1D textures
// and mixes the values with __constant__ and block-shared data, scattering
// results into `out`. The arithmetic is meaningless by design -- this exists
// only to draw power for the measurement in main().
//
// NOTE(review): the writes out[tid*k], k = 1..9, overlap between threads
// (e.g. out[6] is written by tid 6 with k=1, tid 3 with k=2, tid 2 with k=3),
// so the final contents of `out` are racy; the largest index written is
// 9*(size-1), so `out` must hold at least 9*size floats -- confirm the
// caller's allocation. The reads of other threads' shared slots (I1[(tid+i)%
// THREADS_PER_BLOCK] etc.) also happen without a __syncthreads() barrier.
__global__ void tex_bm_kernel( float* out, unsigned size, int iterations)
{
    int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    // Per-block scratch: one slot per thread, seeded from the global thread
    // id. (I3, I5, I7 and I8 are written but never read back.)
    __device__ __shared__ float I1[THREADS_PER_BLOCK];
    __device__ __shared__ float I2[THREADS_PER_BLOCK];
    __device__ __shared__ float I3[THREADS_PER_BLOCK];
    __device__ __shared__ float I4[THREADS_PER_BLOCK];
    __device__ __shared__ float I5[THREADS_PER_BLOCK];
    __device__ __shared__ float I6[THREADS_PER_BLOCK];
    __device__ __shared__ float I7[THREADS_PER_BLOCK];
    __device__ __shared__ float I8[THREADS_PER_BLOCK];
    I1[tid%THREADS_PER_BLOCK] = tid;
    I2[tid%THREADS_PER_BLOCK] = tid/2;
    I3[tid%THREADS_PER_BLOCK] = 2*tid;
    I4[tid%THREADS_PER_BLOCK] = tid+2;
    I5[tid%THREADS_PER_BLOCK] = 5*tid;
    I6[tid%THREADS_PER_BLOCK] = tid/2;
    I7[tid%THREADS_PER_BLOCK] = tid*10;
    I8[tid%THREADS_PER_BLOCK] = tid/2;
    if(tid < size){
        for(unsigned i=0; i<iterations; ++i){
            out[tid] = tex1Dfetch(texmem1,tid);
            out[tid*2] = tex1Dfetch(texmem2,tid)+ConstArray1[(tid+i)%THREADS_PER_BLOCK];
            out[tid*3] = tex1Dfetch(texmem3,tid)* I1[(tid+i)%THREADS_PER_BLOCK];
            out[tid*4] = tex1Dfetch(texmem4,tid)*I2[tid%THREADS_PER_BLOCK];
            out[tid*5] =tex1Dfetch(texmem5,tid)/ConstArray2[(tid+i)%THREADS_PER_BLOCK];
            out[tid*6] = tex1Dfetch(texmem6,tid)+I6[(tid+i)%THREADS_PER_BLOCK];
            out[tid*7] = tex1Dfetch(texmem7,tid)+I4[(tid+i)%THREADS_PER_BLOCK];
            out[tid*8] = exp(tex1Dfetch(texmem8,tid));
            out[tid*9] = sqrt(abs(tex1Dfetch(texmem9,tid)));
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
// Host driver for the texture/constant-memory power microbenchmark: seeds
// eight __constant__ coefficient tables and nine 1-D float textures, launches
// tex_bm_kernel once, and reports the elapsed GPU time.
// Usage: prog <#iterations>

// Fill n floats with uniform pseudo-random values in [0, 1].  The float cast
// matters: rand() / RAND_MAX is *integer* division and is almost always 0.
static void fillRandomArray(float* a, int n)
{
	for (int i = 0; i < n; i++)
		a[i] = rand() / (float)RAND_MAX;
}

int main(int argc, char** argv)
{
	int iterations;
	if (argc != 2){
		fprintf(stderr,"usage: %s #iterations\n",argv[0]);
		exit(1);
	}
	else{
		iterations = atoi(argv[1]);
	}
	printf("Power Microbenchmark with %d iterations\n",iterations);
	// Seed the PRNG exactly once.  Reseeding with srand(time(0)) inside every
	// fill-loop iteration (as the original did) makes rand() return the same
	// value for a whole second, so each array was filled with one constant.
	srand(time(0));
	float array1[THREADS_PER_BLOCK];
	fillRandomArray(array1, THREADS_PER_BLOCK);
	float array2[THREADS_PER_BLOCK];
	fillRandomArray(array2, THREADS_PER_BLOCK);
	float array3[THREADS_PER_BLOCK];
	fillRandomArray(array3, THREADS_PER_BLOCK);
	float array4[THREADS_PER_BLOCK];
	fillRandomArray(array4, THREADS_PER_BLOCK);
	float array5[THREADS_PER_BLOCK];
	fillRandomArray(array5, THREADS_PER_BLOCK);
	float array6[THREADS_PER_BLOCK];
	fillRandomArray(array6, THREADS_PER_BLOCK);
	float array7[THREADS_PER_BLOCK];
	fillRandomArray(array7, THREADS_PER_BLOCK);
	float array8[THREADS_PER_BLOCK];
	fillRandomArray(array8, THREADS_PER_BLOCK);
	// Upload the per-block coefficient tables to constant memory.
	checkCudaErrors(cudaMemcpyToSymbol(ConstArray1, array1, sizeof(float) * THREADS_PER_BLOCK));
	checkCudaErrors(cudaMemcpyToSymbol(ConstArray2, array2, sizeof(float) * THREADS_PER_BLOCK));
	checkCudaErrors(cudaMemcpyToSymbol(ConstArray3, array3, sizeof(float) * THREADS_PER_BLOCK));
	checkCudaErrors(cudaMemcpyToSymbol(ConstArray4, array4, sizeof(float) * THREADS_PER_BLOCK));
	checkCudaErrors(cudaMemcpyToSymbol(ConstArray5, array5, sizeof(float) * THREADS_PER_BLOCK));
	checkCudaErrors(cudaMemcpyToSymbol(ConstArray6, array6, sizeof(float) * THREADS_PER_BLOCK));
	checkCudaErrors(cudaMemcpyToSymbol(ConstArray7, array7, sizeof(float) * THREADS_PER_BLOCK));
	checkCudaErrors(cudaMemcpyToSymbol(ConstArray8, array8, sizeof(float) * THREADS_PER_BLOCK));
	// One host buffer (0, 1, 2, ...) seeds all nine textures.
	int texmem_size = LINE_SIZE*SETS*ASSOC;
	float *host_texture1 = (float*) malloc(texmem_size*sizeof(float));
	for (int i=0; i< texmem_size; i++) {
		host_texture1[i] = i;
	}
	float *device_texture1;
	float *device_texture2;
	float *device_texture3;
	float *device_texture4;
	float *device_texture5;
	float *device_texture6;
	float *device_texture7;
	float *device_texture8;
	float *device_texture9;
	// tex_bm_kernel scatters to out[tid], out[tid*2], ..., out[tid*9], so the
	// output buffer must hold 10*texmem_size *floats*.  (The original device
	// allocation dropped the sizeof(float) factor: texmem_size*10 bytes only,
	// so the kernel wrote far out of bounds.)
	float *host_out = (float*) malloc(texmem_size*sizeof(float)*10);
	float *device_out;
	checkCudaErrors(cudaMalloc((void**) &device_texture1, texmem_size*sizeof(float)));
	checkCudaErrors(cudaMalloc((void**) &device_texture2, texmem_size*sizeof(float)));
	checkCudaErrors(cudaMalloc((void**) &device_texture3, texmem_size*sizeof(float)));
	checkCudaErrors(cudaMalloc((void**) &device_texture4, texmem_size*sizeof(float)));
	checkCudaErrors(cudaMalloc((void**) &device_texture5, texmem_size*sizeof(float)));
	checkCudaErrors(cudaMalloc((void**) &device_texture6, texmem_size*sizeof(float)));
	checkCudaErrors(cudaMalloc((void**) &device_texture7, texmem_size*sizeof(float)));
	checkCudaErrors(cudaMalloc((void**) &device_texture8, texmem_size*sizeof(float)));
	checkCudaErrors(cudaMalloc((void**) &device_texture9, texmem_size*sizeof(float)));
	checkCudaErrors(cudaMalloc((void**) &device_out, texmem_size*10*sizeof(float)));
	checkCudaErrors(cudaMemcpy(device_texture1, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(device_texture2, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(device_texture3, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(device_texture4, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(device_texture5, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(device_texture6, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(device_texture7, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(device_texture8, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(device_texture9, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice));
	cudaBindTexture(0, texmem1, device_texture1, texmem_size*sizeof(float));
	cudaBindTexture(0, texmem2, device_texture2, texmem_size*sizeof(float));
	cudaBindTexture(0, texmem3, device_texture3, texmem_size*sizeof(float));
	cudaBindTexture(0, texmem4, device_texture4, texmem_size*sizeof(float));
	cudaBindTexture(0, texmem5, device_texture5, texmem_size*sizeof(float));
	cudaBindTexture(0, texmem6, device_texture6, texmem_size*sizeof(float));
	cudaBindTexture(0, texmem7, device_texture7, texmem_size*sizeof(float));
	cudaBindTexture(0, texmem8, device_texture8, texmem_size*sizeof(float));
	cudaBindTexture(0, texmem9, device_texture9, texmem_size*sizeof(float));
	// One thread per texture element; the kernel masks off the tail threads.
	unsigned num_blocks = (texmem_size / MAX_THREADS_PER_BLOCK) + 1;
	dim3 grid( num_blocks, 1, 1);
	dim3 threads( MAX_THREADS_PER_BLOCK, 1, 1);
	cudaEvent_t start, stop;
	float elapsedTime = 0;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));
	checkCudaErrors(cudaEventRecord(start));
	tex_bm_kernel<<< grid, threads, 0 >>>(device_out, texmem_size, iterations);
	checkCudaErrors(cudaEventRecord(stop));
	checkCudaErrors(cudaEventSynchronize(stop));
	checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
	printf("gpu execution time = %.2f s\n", elapsedTime/1000);
	getLastCudaError("kernel launch failure");
	checkCudaErrors(cudaDeviceSynchronize());  // cudaThreadSynchronize() is deprecated
	checkCudaErrors(cudaMemcpy(host_out, device_out, texmem_size*sizeof(float), cudaMemcpyDeviceToHost));
	/*
	printf("Output: ");
	float error = false;
	for (int i=0; i< texmem_size; i++){
	printf("%.1f ", host_out[i]);
	if (host_out[i] - i > 0.0001) error = true;
	}
	printf("\n");
	if (error) printf("\nFAILED\n");
	else printf("\nPASSED\n");
	*/
	checkCudaErrors(cudaEventDestroy(start));
	checkCudaErrors(cudaEventDestroy(stop));
	// Release textures and all buffers (the original leaked every allocation).
	cudaUnbindTexture(texmem1);
	cudaUnbindTexture(texmem2);
	cudaUnbindTexture(texmem3);
	cudaUnbindTexture(texmem4);
	cudaUnbindTexture(texmem5);
	cudaUnbindTexture(texmem6);
	cudaUnbindTexture(texmem7);
	cudaUnbindTexture(texmem8);
	cudaUnbindTexture(texmem9);
	cudaFree(device_texture1);
	cudaFree(device_texture2);
	cudaFree(device_texture3);
	cudaFree(device_texture4);
	cudaFree(device_texture5);
	cudaFree(device_texture6);
	cudaFree(device_texture7);
	cudaFree(device_texture8);
	cudaFree(device_texture9);
	cudaFree(device_out);
	free(host_texture1);
	free(host_out);
	return 0;
}
// Placeholder teardown hook declared at the top of the file; intentionally a
// no-op here (nothing is registered for cleanup through this path).
void CleanupResources(void)
{
}
// Fills data[0..n-1] with pseudo-random non-negative integers in [0, RAND_MAX].
// (The previous `(int)(rand() / RAND_MAX)` used *integer* division, which is 0
// for every draw except rand() == RAND_MAX -- the array was effectively zeroed.)
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = rand();
} |
5ba209e870510aa3fea98c075841baf0b1f0ed37.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#define LEN 1<<22
struct InnerArray
{
float x[LEN];
float y[LEN];
};
// functions for inner array outer struct
// Seed the first `size` entries of both member arrays with small pseudo-random
// floats in [0.00, 2.55] (low byte of rand(), scaled down by 100).
void initialInnerArray(InnerArray *ip, int size)
{
	for (int i = 0; i < size; ++i)
	{
		const float rx = (float)(rand() & 0xFF) / 100.0f;  // x drawn first
		const float ry = (float)(rand() & 0xFF) / 100.0f;  // then y
		ip->x[i] = rx;
		ip->y[i] = ry;
	}
}
// CPU reference result: for the first n entries, C->x gets A->x + 10 and
// C->y gets A->y + 20 (matches the GPU kernel testInnerArray).
void testInnerArrayHost(InnerArray *A, InnerArray *C, const int n)
{
	int idx = 0;
	while (idx < n)
	{
		C->x[idx] = A->x[idx] + 10.f;
		C->y[idx] = A->y[idx] + 20.f;
		++idx;
	}
}
// Dump the first n (x, y) pairs of C to stdout, one line per element.
void printfHostResult(InnerArray *C, const int n)
{
	for (int idx = 0; idx < n; ++idx)
		printf("printout idx %d: x %f y %f\n", idx, C->x[idx], C->y[idx]);
}
// Compare host and GPU results element-wise and report the first mismatch.
// The difference is taken as a double and negated manually instead of calling
// abs(): the integer overload of abs() truncates a float difference toward
// zero, so every mismatch smaller than 1.0 was silently accepted.
void checkInnerArray(InnerArray *hostRef, InnerArray *gpuRef, const int N)
{
	double epsilon = 1.0E-8;
	bool match = true;
	for (int i = 0; i < N; i++)
	{
		double dx = (double)hostRef->x[i] - (double)gpuRef->x[i];
		if (dx < 0.0) dx = -dx;
		if (dx > epsilon)
		{
			match = false;
			printf("different on x %dth element: host %f gpu %f\n", i,
					hostRef->x[i], gpuRef->x[i]);
			break;
		}
		double dy = (double)hostRef->y[i] - (double)gpuRef->y[i];
		if (dy < 0.0) dy = -dy;
		if (dy > epsilon)
		{
			match = false;
			printf("different on y %dth element: host %f gpu %f\n", i,
					hostRef->y[i], gpuRef->y[i]);
			break;
		}
	}
	if (!match) printf("Arrays do not match.\n\n");
}
// GPU counterpart of testInnerArrayHost: for each element i < n, writes
// data->x[i] + 10 into result->x[i] and data->y[i] + 20 into result->y[i].
// Expects a 1-D launch with at least n threads; extra threads do nothing.
__global__ void testInnerArray(InnerArray *data, InnerArray * result,
const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
// Stage through registers, then write both members back.
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.f;
tmpy += 20.f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
// Identical to testInnerArray; launched first by main(), presumably to absorb
// one-time device/launch overhead before the timed run -- confirm.
__global__ void warmup2(InnerArray *data, InnerArray * result, const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.f;
tmpy += 20.f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
// test for array of struct
// Benchmark driver: times warmup2 and testInnerArray on a struct-of-inner-
// arrays layout and verifies the GPU result against the CPU reference.
// Optional argv[1] overrides the thread-block size (default 128).
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s test struct of array at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// allocate host memory
int nElem = LEN;
// nBytes is the whole struct: two float arrays of LEN (1<<22) elements each.
size_t nBytes = sizeof(InnerArray);
InnerArray *h_A = (InnerArray *)malloc(nBytes);
InnerArray *hostRef = (InnerArray *)malloc(nBytes);
InnerArray *gpuRef = (InnerArray *)malloc(nBytes);
// initialize host array and compute the CPU reference answer
initialInnerArray(h_A, nElem);
testInnerArrayHost(h_A, hostRef, nElem);
// allocate device memory
InnerArray *d_A, *d_C;
CHECK(hipMalloc((InnerArray**)&d_A, nBytes));
CHECK(hipMalloc((InnerArray**)&d_C, nBytes));
// copy data from host to device
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
// set up offset for summary
int blocksize = 128;
if (argc > 1) blocksize = atoi(argv[1]);
// execution configuration: 1-D grid, ceil(nElem / blocksize) blocks
dim3 block (blocksize, 1);
dim3 grid ((nElem + block.x - 1) / block.x, 1);
// kernel 1: untimed-overhead warmup run, then result check
double iStart = cpuSecond();
hipLaunchKernelGGL(( warmup2), dim3(grid), dim3(block), 0, 0, d_A, d_C, nElem);
CHECK(hipDeviceSynchronize());
double iElaps = cpuSecond() - iStart;
printf("warmup2 <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
checkInnerArray(hostRef, gpuRef, nElem);
CHECK(hipGetLastError());
// kernel 2: the measured run
iStart = cpuSecond();
hipLaunchKernelGGL(( testInnerArray), dim3(grid), dim3(block), 0, 0, d_A, d_C, nElem);
CHECK(hipDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("innerarray <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
checkInnerArray(hostRef, gpuRef, nElem);
CHECK(hipGetLastError());
// release device and host memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_C));
free(h_A);
free(hostRef);
free(gpuRef);
// reset device
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| 5ba209e870510aa3fea98c075841baf0b1f0ed37.cu | #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#define LEN 1<<22
struct InnerArray
{
float x[LEN];
float y[LEN];
};
// functions for inner array outer struct
void initialInnerArray(InnerArray *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip->x[i] = (float)( rand() & 0xFF ) / 100.0f;
ip->y[i] = (float)( rand() & 0xFF ) / 100.0f;
}
return;
}
void testInnerArrayHost(InnerArray *A, InnerArray *C, const int n)
{
for (int idx = 0; idx < n; idx++)
{
C->x[idx] = A->x[idx] + 10.f;
C->y[idx] = A->y[idx] + 20.f;
}
return;
}
void printfHostResult(InnerArray *C, const int n)
{
for (int idx = 0; idx < n; idx++)
{
printf("printout idx %d: x %f y %f\n", idx, C->x[idx], C->y[idx]);
}
return;
}
// Compare host and GPU results element-wise and report the first mismatch.
// The difference is taken as a double and negated manually instead of calling
// abs(): the integer overload of abs() truncates a float difference toward
// zero, so every mismatch smaller than 1.0 was silently accepted.
void checkInnerArray(InnerArray *hostRef, InnerArray *gpuRef, const int N)
{
	double epsilon = 1.0E-8;
	bool match = true;
	for (int i = 0; i < N; i++)
	{
		double dx = (double)hostRef->x[i] - (double)gpuRef->x[i];
		if (dx < 0.0) dx = -dx;
		if (dx > epsilon)
		{
			match = false;
			printf("different on x %dth element: host %f gpu %f\n", i,
					hostRef->x[i], gpuRef->x[i]);
			break;
		}
		double dy = (double)hostRef->y[i] - (double)gpuRef->y[i];
		if (dy < 0.0) dy = -dy;
		if (dy > epsilon)
		{
			match = false;
			printf("different on y %dth element: host %f gpu %f\n", i,
					hostRef->y[i], gpuRef->y[i]);
			break;
		}
	}
	if (!match) printf("Arrays do not match.\n\n");
}
__global__ void testInnerArray(InnerArray *data, InnerArray * result,
const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.f;
tmpy += 20.f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
__global__ void warmup2(InnerArray *data, InnerArray * result, const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.f;
tmpy += 20.f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
// test for array of struct
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s test struct of array at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// allocate host memory
int nElem = LEN;
size_t nBytes = sizeof(InnerArray);
InnerArray *h_A = (InnerArray *)malloc(nBytes);
InnerArray *hostRef = (InnerArray *)malloc(nBytes);
InnerArray *gpuRef = (InnerArray *)malloc(nBytes);
// initialize host array
initialInnerArray(h_A, nElem);
testInnerArrayHost(h_A, hostRef, nElem);
// allocate device memory
InnerArray *d_A, *d_C;
CHECK(cudaMalloc((InnerArray**)&d_A, nBytes));
CHECK(cudaMalloc((InnerArray**)&d_C, nBytes));
// copy data from host to device
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
// set up offset for summary
int blocksize = 128;
if (argc > 1) blocksize = atoi(argv[1]);
// execution configuration
dim3 block (blocksize, 1);
dim3 grid ((nElem + block.x - 1) / block.x, 1);
// kernel 1:
double iStart = cpuSecond();
warmup2<<<grid, block>>>(d_A, d_C, nElem);
CHECK(cudaDeviceSynchronize());
double iElaps = cpuSecond() - iStart;
printf("warmup2 <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
checkInnerArray(hostRef, gpuRef, nElem);
CHECK(cudaGetLastError());
iStart = cpuSecond();
testInnerArray<<<grid, block>>>(d_A, d_C, nElem);
CHECK(cudaDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("innerarray <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
checkInnerArray(hostRef, gpuRef, nElem);
CHECK(cudaGetLastError());
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_C));
free(h_A);
free(hostRef);
free(gpuRef);
// reset device
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
879000e6536b882942de8f9bab7e9bb8a70d3e37.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// Element-wise vector addition on the GPU: c[i] = a[i] + b[i].
// Expects a 1-D launch whose total thread count (gridDim.x * blockDim.x)
// equals the element count; main() below guarantees that, so no bounds guard
// is needed.  (The original per-thread printf -- one line per 1M threads --
// flooded the device printf buffer and was removed.)
__global__ void AddVecGPU(float* c, float* a, float* b)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	c[i] = a[i] + b[i];
}

int main()
{
	int N = 1024 * 1024;
	float* a = new float[N];
	float* b = new float[N];
	float* c = new float[N];
	for (int i = 0; i < N; ++i)
	{
		// rand() in [0, RAND_MAX] scaled to [0.0, 1.0]; b is the negation, so
		// every correct sum is exactly 0.0.
		a[i] = rand() / (float)RAND_MAX;
		b[i] = -a[i];
		c[i] = 0.0;
	}
	// 1. select the GPU (the original ignored this status entirely)
	hipError_t cudaStatus = hipSetDevice(0);
	if (cudaStatus != hipSuccess)
		printf("hipSetDevice failed\n");
	// 2. allocate device buffers
	float* dev_a, * dev_b, * dev_c;
	hipMalloc((void**)&dev_a, sizeof(float) * N);
	hipMalloc((void**)&dev_b, sizeof(float) * N);
	hipMalloc((void**)&dev_c, sizeof(float) * N);
	// 3. copy the inputs host -> device
	hipMemcpy(dev_a, a, sizeof(float) * N, hipMemcpyHostToDevice);
	hipMemcpy(dev_b, b, sizeof(float) * N, hipMemcpyHostToDevice);
	hipMemcpy(dev_c, c, sizeof(float) * N, hipMemcpyHostToDevice);
	// 4. launch the kernel.  A single block is capped at 1024 threads, so the
	// original <<<1, N>>> with N = 1M was an invalid launch configuration and
	// the kernel never ran; split the work across N/256 blocks instead
	// (N is a multiple of 256, so the division is exact and no tail remains).
	const int threadsPerBlock = 256;
	const int numBlocks = N / threadsPerBlock;
	hipLaunchKernelGGL(( AddVecGPU), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, dev_c, dev_a, dev_b);
	// wait until every thread has finished
	hipDeviceSynchronize();
	// 5. copy the result device -> host
	hipMemcpy(c, dev_c, sizeof(float) * N, hipMemcpyDeviceToHost);
	for (int i = 0; i < N; ++i)
	{
		// a[i] + (-a[i]) must be exactly zero
		if (c[i] != 0.0)
			printf("Error\n");
	}
	delete[] a;
	delete[] b;
	delete[] c;
	hipFree(dev_a);
	hipFree(dev_b);
	hipFree(dev_c);
} | 879000e6536b882942de8f9bab7e9bb8a70d3e37.cu | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Element-wise vector addition on the GPU: c[i] = a[i] + b[i].
// Expects a 1-D launch whose total thread count (gridDim.x * blockDim.x)
// equals the element count; main() below guarantees that, so no bounds guard
// is needed.  (The original per-thread printf -- one line per 1M threads --
// flooded the device printf buffer and was removed.)
__global__ void AddVecGPU(float* c, float* a, float* b)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	c[i] = a[i] + b[i];
}

int main()
{
	int N = 1024 * 1024;
	float* a = new float[N];
	float* b = new float[N];
	float* c = new float[N];
	for (int i = 0; i < N; ++i)
	{
		// rand() in [0, RAND_MAX] scaled to [0.0, 1.0]; b is the negation, so
		// every correct sum is exactly 0.0.
		a[i] = rand() / (float)RAND_MAX;
		b[i] = -a[i];
		c[i] = 0.0;
	}
	// 1. select the GPU (the original ignored this status entirely)
	cudaError_t cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess)
		printf("cudaSetDevice failed\n");
	// 2. allocate device buffers
	float* dev_a, * dev_b, * dev_c;
	cudaMalloc((void**)&dev_a, sizeof(float) * N);
	cudaMalloc((void**)&dev_b, sizeof(float) * N);
	cudaMalloc((void**)&dev_c, sizeof(float) * N);
	// 3. copy the inputs host -> device
	cudaMemcpy(dev_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_c, c, sizeof(float) * N, cudaMemcpyHostToDevice);
	// 4. launch the kernel.  A single block is capped at 1024 threads, so the
	// original <<<1, N>>> with N = 1M was an invalid launch configuration and
	// the kernel never ran; split the work across N/256 blocks instead
	// (N is a multiple of 256, so the division is exact and no tail remains).
	const int threadsPerBlock = 256;
	const int numBlocks = N / threadsPerBlock;
	AddVecGPU<<<numBlocks, threadsPerBlock>>>(dev_c, dev_a, dev_b);
	// wait until every thread has finished
	cudaDeviceSynchronize();
	// 5. copy the result device -> host
	cudaMemcpy(c, dev_c, sizeof(float) * N, cudaMemcpyDeviceToHost);
	for (int i = 0; i < N; ++i)
	{
		// a[i] + (-a[i]) must be exactly zero
		if (c[i] != 0.0)
			printf("Error\n");
	}
	delete[] a;
	delete[] b;
	delete[] c;
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);
} |
11043d6e80c9023a0b6c26f822df35fbe6bb4369.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
#ifdef USE_ROCM
// Dropout forward pass: keeps (and rescales by `scale`) in[i] where the random
// mask value exceeds `threshold`, zeroes it otherwise.  The comparison result
// promotes to 0/1, so the select is branchless.  CUDA_KERNEL_LOOP iterates
// `index` over [0, n) regardless of the launch size (Caffe helper macro).
template<typename Dtype>
__global__ void DropoutForward(const int_tp n, const Dtype* in,
const uint_tp* mask,
const uint_tp threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
#endif // USE_ROCM
template<typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int_tp count = bottom[0]->count();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (this->phase_ == TRAIN) {
uint_tp* mask =
static_cast<uint_tp*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, (uint_tpc*) (mask)); // NOLINT
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
if (this->phase_ == TRAIN) {
cl_mem mask = (cl_mem) (rand_vec_.mutable_gpu_data());
greentea_gpu_rng_uniform(this->device_->id(), count, mask, 0);
// set thresholds
viennacl::ocl::kernel &oclk_dropout = program.get_kernel(
CL_KERNEL_SELECT("dropout_forward"));
viennacl::ocl::enqueue(
oclk_dropout(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle(mask, &ctx), fixup_arg_type(uint_thres_),
fixup_arg_type(scale_),
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
} else {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
}
#endif // USE_GREENTEA
}
}
#ifdef USE_ROCM
// Dropout backward pass: propagates in_diff[i] (rescaled by `scale`) only
// through the units that survived the forward pass, i.e. where the stored
// mask value exceeds `threshold`; gradients of dropped units become zero.
template<typename Dtype>
__global__ void DropoutBackward(const int_tp n, const Dtype* in_diff,
const uint_tp* mask,
const uint_tp threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
#endif // USE_ROCM
template<typename Dtype>
void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (this->phase_ == TRAIN) {
const uint_tp* mask = static_cast<const uint_tp*>(rand_vec_
.gpu_data());
const int_tp count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
if (this->phase_ == TRAIN) {
cl_mem mask = (cl_mem) (rand_vec_.gpu_data());
const int_tp count = bottom[0]->count();
viennacl::ocl::kernel &oclk_dropout = program.get_kernel(
CL_KERNEL_SELECT("dropout_backward"));
viennacl::ocl::enqueue(
oclk_dropout(count, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle(mask, &ctx), fixup_arg_type(uint_thres_),
fixup_arg_type(scale_),
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
} else {
greentea_copy<Dtype>(top[0]->count(), (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0, &ctx);
}
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
} // namespace caffe
| 11043d6e80c9023a0b6c26f822df35fbe6bb4369.cu | #include <vector>
#include "caffe/layers/dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
#ifdef USE_CUDA
template<typename Dtype>
__global__ void DropoutForward(const int_tp n, const Dtype* in,
const uint_tp* mask,
const uint_tp threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
#endif // USE_CUDA
template<typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int_tp count = bottom[0]->count();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (this->phase_ == TRAIN) {
uint_tp* mask =
static_cast<uint_tp*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, (uint_tpc*) (mask)); // NOLINT
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
if (this->phase_ == TRAIN) {
cl_mem mask = (cl_mem) (rand_vec_.mutable_gpu_data());
greentea_gpu_rng_uniform(this->device_->id(), count, mask, 0);
// set thresholds
viennacl::ocl::kernel &oclk_dropout = program.get_kernel(
CL_KERNEL_SELECT("dropout_forward"));
viennacl::ocl::enqueue(
oclk_dropout(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle(mask, &ctx), fixup_arg_type(uint_thres_),
fixup_arg_type(scale_),
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
} else {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
}
#endif // USE_GREENTEA
}
}
#ifdef USE_CUDA
template<typename Dtype>
__global__ void DropoutBackward(const int_tp n, const Dtype* in_diff,
const uint_tp* mask,
const uint_tp threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
#endif // USE_CUDA
template<typename Dtype>
void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (this->phase_ == TRAIN) {
const uint_tp* mask = static_cast<const uint_tp*>(rand_vec_
.gpu_data());
const int_tp count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
if (this->phase_ == TRAIN) {
cl_mem mask = (cl_mem) (rand_vec_.gpu_data());
const int_tp count = bottom[0]->count();
viennacl::ocl::kernel &oclk_dropout = program.get_kernel(
CL_KERNEL_SELECT("dropout_backward"));
viennacl::ocl::enqueue(
oclk_dropout(count, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle(mask, &ctx), fixup_arg_type(uint_thres_),
fixup_arg_type(scale_),
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
} else {
greentea_copy<Dtype>(top[0]->count(), (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0, &ctx);
}
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
} // namespace caffe
|
46900607c230ae1ceffbd9d15d4725023418965d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define d_Mu(x, y) d_Mu[(y) * (nx) + (x)]
#define d_ave_Mu(x, y) d_ave_Mu[(y) * (nx) + (x)]
#define d_field(z, x) d_field[(x) * (nz) + (z)]
#define d_bnd(x, indT) d_bnd[(indT) * (len_Bnd_vec) + (x)]
#define d_Den(x, y) d_Den[(y) * (nx) + (x)]
#define d_ave_Byc_a(x, y) d_ave_Byc_a[(y) * (nx) + (x)]
#define d_ave_Byc_b(x, y) d_ave_Byc_b[(y) * (nx) + (x)]
#include "utilities.h"
// Read `size` floats from binary file `fname` into the caller-provided buffer.
// Exits with an error when the file cannot be opened; warns on a short read
// (the original discarded the fread() result, silently accepting truncated
// files and leaving the tail of h_bin uninitialized).
void fileBinLoad(float *h_bin, int size, std::string fname) {
	FILE *fp = fopen(fname.c_str(), "rb");
	if (fp == nullptr) {
		std::cout << "Attempted to read " << fname << std::endl;
		printf("File reading error!\n");
		exit(1);
	}
	size_t sizeRead = fread(h_bin, sizeof(float), size, fp);
	if (sizeRead != (size_t)size) {
		printf("File reading error: expected %d floats, got %zu\n", size,
		       sizeRead);
	}
	fclose(fp);
}
// Write `size` floats from h_bin to binary file `fname`; exits on open failure.
void fileBinWrite(float *h_bin, int size, std::string fname) {
	FILE *fp = fopen(fname.c_str(), "wb");
	if (fp == nullptr) {
		printf("File writing error!\n");
		exit(1);
	}
	fwrite(h_bin, sizeof(float), size, fp);  // exit() above never returns
	fclose(fp);
}
// Write `size` doubles from h_bin to binary file `fname`; exits on open failure.
void fileBinWriteDouble(double *h_bin, int size, std::string fname) {
	FILE *fp = fopen(fname.c_str(), "wb");
	if (fp == nullptr) {
		printf("File writing error!\n");
		exit(1);
	}
	fwrite(h_bin, sizeof(double), size, fp);  // exit() above never returns
	fclose(fp);
}
// Fill the first `size` entries of `ip` with `value` (float overload).
void initialArray(float *ip, int size, float value) {
	for (int i = 0; i < size; ++i)
		ip[i] = value;
}
// Fill the first `size` entries of `ip` with `value` (double overload).
void initialArray(double *ip, int size, double value) {
	for (int i = 0; i < size; ++i)
		ip[i] = value;
}
__global__ void intialArrayGPU(float *ip, int nx, int ny, float value) {
int gidx = threadIdx.x + blockDim.x * blockIdx.x;
int gidy = threadIdx.y + blockDim.y * blockIdx.y;
if (gidx < nx && gidy < ny) {
int offset = gidx + gidy * nx;
ip[offset] = value;
}
}
__global__ void assignArrayGPU(float *ip_in, float *ip_out, int nx, int ny) {
int gidx = threadIdx.x + blockDim.x * blockIdx.x;
int gidy = threadIdx.y + blockDim.y * blockIdx.y;
if (gidx < nx && gidy < ny) {
int offset = gidx + gidy * nx;
ip_out[offset] = ip_in[offset];
}
}
// Print the nx-by-ny array `ip` (row-major) to stdout, one row per line,
// preceded by the label `s` and followed by three blank lines.
void displayArray(std::string s, float *ip, int nx, int ny) {
	std::cout << s << ": " << std::endl;
	for (int row = 0; row < ny; ++row) {
		for (int col = 0; col < nx; ++col)
			printf("%f ", ip[row * nx + col]);
		printf("\n");
	}
	printf("\n\n\n");
}
__global__ void moduliInit(float *d_Cp, float *d_Cs, float *d_Den,
float *d_Lambda, float *d_Mu, int nx, int ny) {
// printf("Hello, world!\n");
int gidx = threadIdx.x + blockDim.x * blockIdx.x;
int gidy = threadIdx.y + blockDim.y * blockIdx.y;
int offset = gidx + gidy * nx;
if (gidx < nx && gidy < ny) {
// printf("offset = %d ", offset);
// printf("gridDim.x = %d ", gridDim.x);
// printf("blockIdx.y = %d ", blockIdx.y);
d_Mu[offset] = powf(d_Cs[offset], 2) * d_Den[offset];
d_Lambda[offset] =
d_Den[offset] * (powf(d_Cp[offset], 2) - 2 * powf(d_Cs[offset], 2));
if (d_Lambda[offset] < 0) {
printf("Lambda is negative!!!");
}
}
}
__global__ void velInit(float *d_Lambda, float *d_Mu, float *d_Den, float *d_Cp,
float *d_Cs, int nx, int ny) {
// printf("Hello, world!\n");
int gidx = threadIdx.x + blockDim.x * blockIdx.x;
int gidy = threadIdx.y + blockDim.y * blockIdx.y;
int offset = gidx + gidy * nx;
if (gidx < nx && gidy < ny) {
// printf("offset = %d ", offset);
// printf("gridDim.x = %d ", gridDim.x);
// printf("blockIdx.y = %d ", blockIdx.y);
d_Cp[offset] =
sqrt((d_Lambda[offset] + 2.0 * d_Mu[offset]) / d_Den[offset]);
d_Cs[offset] = sqrt((d_Mu[offset]) / d_Den[offset]);
}
}
__global__ void aveMuInit(float *d_Mu, float *d_ave_Mu, int nx, int ny) {
int gidx = threadIdx.x + blockDim.x * blockIdx.x;
int gidy = threadIdx.y + blockDim.y * blockIdx.y;
float a, b, c, d;
if (gidx >= 2 && gidx <= nx - 3 && gidy >= 2 && gidy <= ny - 3) {
a = d_Mu(gidx, gidy);
b = d_Mu(gidx + 1, gidy);
c = d_Mu(gidx, gidy + 1);
d = d_Mu(gidx + 1, gidy + 1);
if (a == 0.0 || b == 0.0 || c == 0.0 || d == 0.0) {
d_ave_Mu(gidx, gidy) = 0.0;
} else {
d_ave_Mu(gidx, gidy) = 4.0 / (1.0 / a + 1.0 / b + 1.0 / c + 1.0 / d);
}
}
}
__global__ void aveBycInit(float *d_Den, float *d_ave_Byc_a, float *d_ave_Byc_b,
int nx, int ny) {
int gidx = threadIdx.x + blockDim.x * blockIdx.x;
int gidy = threadIdx.y + blockDim.y * blockIdx.y;
if (gidx >= 2 && gidx <= nx - 3 && gidy >= 2 && gidy <= ny - 3) {
d_ave_Byc_a(gidx, gidy) = 2.0 / (d_Den(gidx + 1, gidy) + d_Den(gidx, gidy));
d_ave_Byc_b(gidx, gidy) = 2.0 / (d_Den(gidx, gidy + 1) + d_Den(gidx, gidy));
} else {
return;
}
}
// Element-wise difference over an nx-by-ny grid: d_out = d_in1 - d_in2,
// except column idx == 0 which is forced to 0 (per the inline note, the
// first time sample is excluded from the misfit).  Expects a 2-D launch
// covering at least nx x ny threads.
__global__ void gpuMinus(float *d_out, float *d_in1, float *d_in2, int nx,
int ny) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
// only compute last N-1 time samples for misfits!!!!!!!! DL 02/25/2019
if (idx < nx && idy < ny && idx > 0) {
d_out[(idy) * (nx) + (idx)] =
d_in1[(idy) * (nx) + (idx)] - d_in2[(idy) * (nx) + (idx)];
} else if (idx == 0 && idy < ny) {
d_out[(idy) * (nx) + (idx)] = 0.0;
} else {
return;
}
}
__global__ void cuda_cal_objective(float *obj, float *err, int ng)
/*< calculate the value of objective function: obj >*/
{
const int Block_Size = 512;
__shared__ float sdata[Block_Size];
int tid = threadIdx.x;
sdata[tid] = 0.0f;
for (int s = 0; s < (ng + Block_Size - 1) / Block_Size; s++) {
int id = s * blockDim.x + threadIdx.x;
float a = (id < ng) ? err[id] : 0.0f;
// sdata[tid] += a*a;
sdata[tid] += powf(a, 2);
}
__syncthreads();
/* do reduction in shared mem */
// for(int s=blockDim.x/2; s>32; s>>=1) {
// if (threadIdx.x < s) sdata[tid] += sdata[tid + s];
// __syncthreads();
// }
// if (tid < 32) {
// if (blockDim.x >= 64) { sdata[tid] += sdata[tid + 32]; }
// if (blockDim.x >= 32) { sdata[tid] += sdata[tid + 16]; }
// if (blockDim.x >= 16) { sdata[tid] += sdata[tid + 8]; }
// if (blockDim.x >= 8) { sdata[tid] += sdata[tid + 4]; }
// if (blockDim.x >= 4) { sdata[tid] += sdata[tid + 2]; }
// if (blockDim.x >= 2) { sdata[tid] += sdata[tid + 1]; }
// }
for (int s = blockDim.x / 2; s >= 1; s /= 2) {
if (threadIdx.x < s) sdata[tid] += sdata[tid + s];
__syncthreads();
}
if (tid == 0) {
*obj = sdata[0];
}
}
// Sum of squares of the first N entries of `array` (CPU reference for the
// GPU reduction cuda_cal_objective above).  Returns 0 for N <= 0.
// The stray debug printf("hhh\n") that polluted stdout was removed.
float cal_objective(float *array, int N) {
	float misfit = 0.0;
	for (int i = 0; i < N; i++) {
		misfit += array[i] * array[i];
	}
	return misfit;
}
// Arithmetic mean of the first N entries of `array`.
float compCpAve(float *array, int N) {
  float sum = 0.0;
  for (int i = 0; i < N; ++i) sum += array[i];
  return sum / float(N);
}
// CFL stability check for the 4th-order staggered-grid scheme: estimates
// the Courant number from the maximum velocity in h_Cp, the time step dt
// and the finer of the two grid spacings, and aborts the program if it
// exceeds 1. Returns silently when the check passes.
// Fix: the original read h_Cp[0] unconditionally, which is undefined
// behavior for size <= 0; now guarded.
void compCourantNumber(float *h_Cp, int size, float dt, float dz, float dx) {
  if (size <= 0) {
    return;  // empty model: nothing to check
  }
  float max = h_Cp[0];
  for (int i = 1; i < size; i++) {
    if (h_Cp[i] > max) {
      max = h_Cp[i];
    }
  }
  float dh_min = (dz < dx) ? dz : dx;
  // Courant_number = max * dt * sqrtf(powf(1.0 / dz, 2) + powf(1.0 / dx, 2));
  // stability factor of the 4th-order FD stencil (coefficients 9/8, 1/24)
  float Courant_number =
      max * dt * sqrtf(2.0) * (1.0 / 24.0 + 9.0 / 8.0) / dh_min;
  if (Courant_number > 1.0) {
    std::cout << "Courant_number = " << Courant_number << std::endl;
    exit(1);
  }
}
// Build 1-D CPML absorbing-boundary coefficient profiles along one axis.
// Outputs (each of length N):
//   K, K_half        -- coordinate-stretching factors at integer / half points
//   a, b, a_half, b_half -- recursive-convolution update coefficients
// Inputs: N = profile length, nPml = PML thickness in grid points, dh = grid
// spacing, f0 = dominant frequency, dt = time step.
// NOTE(review): the CpAve argument is overwritten with 3000.0 below, so the
// caller-supplied value is ignored (intentional per the inline comment).
void cpmlInit(float *K, float *a, float *b, float *K_half, float *a_half,
              float *b_half, int N, int nPml, float dh, float f0, float dt,
              float CpAve) {
  float *damp, *damp_half, *alpha, *alpha_half;
  float d0_h = 0.0;
  float Rcoef = 0.0008;  // target theoretical reflection coefficient
  float depth_in_pml = 0.0;
  float depth_normalized = 0.0;
  float thickness_PML = 0.0;
  // const float PI = 3.141592653589793238462643383279502884197169;
  const float K_MAX_PML = 2.0;
  const float ALPHA_MAX_PML = 2.0 * PI * (f0 / 2.0);
  const float NPOWER = 8.0;  // damping-profile polynomial power
  const float c1 = 0.25, c2 = 0.75, c3 = 0.0;  // profile blend weights
  // const float c1 = 0.0, c2 = 1.0, c3 = 0.0;
  thickness_PML = nPml * dh;  // changed here
  CpAve = 3000.0;  // DL make this model independent
  // peak damping amplitude derived from Rcoef and the PML thickness
  d0_h = -(NPOWER + 1) * CpAve * log(Rcoef) / (2.0 * thickness_PML);
  damp = (float *)malloc(N * sizeof(float));
  damp_half = (float *)malloc(N * sizeof(float));
  alpha = (float *)malloc(N * sizeof(float));
  alpha_half = (float *)malloc(N * sizeof(float));
  initialArray(damp, N, 0.0);
  initialArray(damp_half, N, 0.0);
  initialArray(K, N, 1.0);
  initialArray(K_half, N, 1.0);
  initialArray(alpha, N, 0.0);
  initialArray(alpha_half, N, 0.0);
  initialArray(a, N, 0.0);
  initialArray(a_half, N, 0.0);
  initialArray(b, N, 0.0);
  initialArray(b_half, N, 0.0);
  for (int i = 0; i < N; i++) {
    // left edge
    depth_in_pml = (nPml - i) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K[i] = 1.0 + (K_MAX_PML - 1.0) * pow(depth_normalized, NPOWER);
      alpha[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha[i] < 0.0) {
      std::cout << "CPML alpha < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    // half the grid points
    depth_in_pml = (nPml - i - 0.5) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp_half[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K_half[i] = 1.0 + (K_MAX_PML - 1.0) * pow(depth_normalized, NPOWER);
      alpha_half[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha_half[i] < 0.0) {
      std::cout << "CPML alpha_half < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    // right edge
    depth_in_pml = (nPml - N + i) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K[i] = 1.0 + (K_MAX_PML - 1.0) * pow(depth_normalized, NPOWER);
      alpha[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha[i] < 0.0) {
      std::cout << "CPML alpha < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    depth_in_pml = (nPml - N + i + 0.5) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp_half[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K_half[i] = 1.0 + (K_MAX_PML - 1.0) * powf(depth_normalized, NPOWER);
      alpha_half[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha_half[i] < 0.0) {
      std::cout << "CPML alpha_half < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    // NOTE(review): the two clamps below are unreachable -- a negative
    // alpha/alpha_half already triggered exit(1) above
    if (alpha[i] < 0.0) {
      alpha[i] = 0.0;
    }
    if (alpha_half[i] < 0.0) {
      alpha_half[i] = 0.0;
    }
    // CPML recursive-convolution coefficients (b = decay, a = input weight)
    b[i] = expf(-(damp[i] / K[i] + alpha[i]) * dt);
    b_half[i] = expf(-(damp_half[i] / K_half[i] + alpha_half[i]) * dt);
    if (fabs(damp[i]) > 1.0e-6) {
      a[i] = damp[i] * (b[i] - 1.0) / (K[i] * (damp[i] + K[i] * alpha[i]));
    }
    if (fabs(damp_half[i]) > 1.0e-6) {
      a_half[i] = damp_half[i] * (b_half[i] - 1.0) /
                  (K_half[i] * (damp_half[i] + K_half[i] * alpha_half[i]));
    }
  }
  free(damp);
  free(damp_half);
  free(alpha);
  free(alpha_half);
}
// Dongzhuo Li 05/15/2019
// Dongzhuo Li 05/15/2019
// Save the wavefield values on four boundary strips (nLayerStore layers
// each) into time column indT of d_bnd, for wavefield reconstruction in the
// backward pass. One thread per boundary point; len_Bnd_vec is the total
// strip length (stride between time columns of d_bnd). The nPml/-2 offsets
// select strips just inside the PML; nPad is extra bottom padding.
__global__ void from_bnd(float *d_field, float *d_bnd, int nz, int nx,
                         int nzBnd, int nxBnd, int len_Bnd_vec, int nLayerStore,
                         int indT, int nPml, int nPad, int nSteps) {
  int idxBnd = threadIdx.x + blockDim.x * blockIdx.x;
  int iRow, jCol;
  if (idxBnd >= 0 && idxBnd <= nLayerStore * nzBnd - 1) {
    // left vertical strip
    jCol = idxBnd / nzBnd;
    iRow = idxBnd - jCol * nzBnd;
    d_bnd(idxBnd, indT) = d_field((iRow + nPml - 2), (jCol + nPml - 2));
  } else if (idxBnd >= nLayerStore * nzBnd &&
             idxBnd <= 2 * nLayerStore * nzBnd - 1) {
    // right vertical strip
    jCol = (idxBnd - nLayerStore * nzBnd) / nzBnd;
    iRow = (idxBnd - nLayerStore * nzBnd) - jCol * nzBnd;
    d_bnd(idxBnd, indT) =
        d_field((iRow + nPml - 2), (nx - nPml - jCol - 1 + 2));
  } else if (idxBnd >= 2 * nLayerStore * nzBnd &&
             idxBnd <= nLayerStore * (2 * nzBnd + nxBnd) - 1) {
    // top horizontal strip
    iRow = (idxBnd - 2 * nLayerStore * nzBnd) / nxBnd;
    jCol = (idxBnd - 2 * nLayerStore * nzBnd) - iRow * nxBnd;
    d_bnd(idxBnd, indT) = d_field((iRow + nPml - 2), (jCol + nPml - 2));
  } else if (idxBnd >= nLayerStore * (2 * nzBnd + nxBnd) &&
             idxBnd <= 2 * nLayerStore * (nzBnd + nxBnd) - 1) {
    // bottom horizontal strip (above the nPad padding rows)
    iRow = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) / nxBnd;
    jCol = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) - iRow * nxBnd;
    d_bnd(idxBnd, indT) =
        d_field((nz - nPml - nPad - iRow - 1 + 2), (jCol + nPml - 2));
  } else {
    return;
  }
}
// Dongzhuo Li 05/15/2019
// Dongzhuo Li 05/15/2019
// Inverse of from_bnd: restore the saved boundary strips from time column
// indT of d_bnd back into the wavefield d_field. Index arithmetic must
// mirror from_bnd exactly (same four strips, same offsets).
__global__ void to_bnd(float *d_field, float *d_bnd, int nz, int nx, int nzBnd,
                       int nxBnd, int len_Bnd_vec, int nLayerStore, int indT,
                       int nPml, int nPad, int nSteps) {
  int idxBnd = threadIdx.x + blockDim.x * blockIdx.x;
  int iRow, jCol;
  if (idxBnd >= 0 && idxBnd <= nLayerStore * nzBnd - 1) {
    // left vertical strip
    jCol = idxBnd / nzBnd;
    iRow = idxBnd - jCol * nzBnd;
    d_field((iRow + nPml - 2), (jCol + nPml - 2)) = d_bnd(idxBnd, indT);
  } else if (idxBnd >= nLayerStore * nzBnd &&
             idxBnd <= 2 * nLayerStore * nzBnd - 1) {
    // right vertical strip
    jCol = (idxBnd - nLayerStore * nzBnd) / nzBnd;
    iRow = (idxBnd - nLayerStore * nzBnd) - jCol * nzBnd;
    d_field((iRow + nPml - 2), (nx - nPml - jCol - 1 + 2)) =
        d_bnd(idxBnd, indT);
  } else if (idxBnd >= 2 * nLayerStore * nzBnd &&
             idxBnd <= nLayerStore * (2 * nzBnd + nxBnd) - 1) {
    // top horizontal strip
    iRow = (idxBnd - 2 * nLayerStore * nzBnd) / nxBnd;
    jCol = (idxBnd - 2 * nLayerStore * nzBnd) - iRow * nxBnd;
    d_field((iRow + nPml - 2), (jCol + nPml - 2)) = d_bnd(idxBnd, indT);
  } else if (idxBnd >= nLayerStore * (2 * nzBnd + nxBnd) &&
             idxBnd <= 2 * nLayerStore * (nzBnd + nxBnd) - 1) {
    // bottom horizontal strip (above the nPad padding rows)
    iRow = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) / nxBnd;
    jCol = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) - iRow * nxBnd;
    d_field((nz - nPml - nPad - iRow - 1 + 2), (jCol + nPml - 2)) =
        d_bnd(idxBnd, indT);
  } else {
    return;
  }
}
// // Dongzhuo Li 02/24/2019
// __global__ void from_bnd(float *d_field, float *d_bnd, int nz, int nx, int
// nzBnd, \
// int nxBnd, int len_Bnd_vec, int nLayerStore, int indT, int nPml, int nPad,
// int nSteps) {
// int idxBnd = threadIdx.x + blockDim.x*blockIdx.x;
// int iRow,jCol;
// if(idxBnd>=0 && idxBnd<=nLayerStore*nzBnd-1) {
// jCol = idxBnd/nzBnd;
// iRow = idxBnd - jCol*nzBnd;
// d_bnd(idxBnd,indT) = d_field((iRow),(jCol));
// }
// else if(idxBnd>=nLayerStore*nzBnd && idxBnd<=2*nLayerStore*nzBnd-1){
// jCol = (idxBnd-nLayerStore*nzBnd)/nzBnd;
// iRow = (idxBnd-nLayerStore*nzBnd) - jCol*nzBnd;
// d_bnd(idxBnd,indT) = d_field((iRow),(nx-jCol-1));
// }
// else if(idxBnd>=2*nLayerStore*nzBnd &&
// idxBnd<=nLayerStore*(2*nzBnd+nxBnd)-1) {
// iRow = (idxBnd - 2*nLayerStore*nzBnd)/nxBnd;
// jCol = (idxBnd - 2*nLayerStore*nzBnd) - iRow*nxBnd;
// d_bnd(idxBnd,indT) = d_field((iRow),(jCol));
// }
// else if(idxBnd>=nLayerStore*(2*nzBnd+nxBnd) &&
// idxBnd<=2*nLayerStore*(nzBnd+nxBnd)-1) {
// iRow = (idxBnd - nLayerStore*(2*nzBnd+nxBnd))/nxBnd;
// jCol = (idxBnd - nLayerStore*(2*nzBnd+nxBnd)) - iRow*nxBnd;
// d_bnd(idxBnd,indT) = d_field((nz-nPad-iRow-1),(jCol));
// }
// else {
// return;
// }
// // if(idxBnd>=0 && idxBnd<=2*(nzBnd+nxBnd)-1) {
// // d_bnd(idxBnd, indT) = 1.0;
// // } else {
// // return;
// // }
// }
// // Dongzhuo Li 02/24/2019
// __global__ void to_bnd(float *d_field, float *d_bnd, int nz, int nx, int
// nzBnd, \
// int nxBnd, int len_Bnd_vec, int nLayerStore, int indT, int nPml, int nPad,
// int nSteps) {
// int idxBnd = threadIdx.x + blockDim.x*blockIdx.x;
// int iRow,jCol;
// if(idxBnd>=0 && idxBnd<=nLayerStore*nzBnd-1) {
// jCol = idxBnd/nzBnd;
// iRow = idxBnd - jCol*nzBnd;
// d_field((iRow),(jCol)) = d_bnd(idxBnd,indT);
// }
// else if(idxBnd>=nLayerStore*nzBnd && idxBnd<=2*nLayerStore*nzBnd-1){
// jCol = (idxBnd-nLayerStore*nzBnd)/nzBnd;
// iRow = (idxBnd-nLayerStore*nzBnd) - jCol*nzBnd;
// d_field((iRow),(nx-jCol-1)) = d_bnd(idxBnd,indT);
// }
// else if(idxBnd>=2*nLayerStore*nzBnd &&
// idxBnd<=nLayerStore*(2*nzBnd+nxBnd)-1) {
// iRow = (idxBnd - 2*nLayerStore*nzBnd)/nxBnd;
// jCol = (idxBnd - 2*nLayerStore*nzBnd) - iRow*nxBnd;
// d_field((iRow),(jCol)) = d_bnd(idxBnd,indT);
// }
// else if(idxBnd>=nLayerStore*(2*nzBnd+nxBnd) &&
// idxBnd<=2*nLayerStore*(nzBnd+nxBnd)-1) {
// iRow = (idxBnd - nLayerStore*(2*nzBnd+nxBnd))/nxBnd;
// jCol = (idxBnd - nLayerStore*(2*nzBnd+nxBnd)) - iRow*nxBnd;
// d_field((nz-nPad-iRow-1),(jCol)) = d_bnd(idxBnd,indT);
// }
// else {
// return;
// }
// }
// Fill gauss_amp (nz-by-nx, column-major, z fastest) with a very narrow
// Gaussian bump centered at (nz/2, nx/2); used as a spatial stamp for
// source/receiver injection.
__global__ void src_rec_gauss_amp(float *gauss_amp, int nz, int nx) {
  int gz = blockIdx.x * blockDim.x + threadIdx.x;
  int gx = blockIdx.y * blockDim.y + threadIdx.y;
  if (gz < 0 || gz >= nz || gx < 0 || gx >= nx) return;
  int dz = gz - nz / 2;
  int dx = gx - nx / 2;
  gauss_amp[gz + gx * nz] =
      expf(-1000.0 * (powf(float(dz), 2) + powf(float(dx), 2)));
}
// Inject (isFor == true) or remove (isFor == false, used when reversing the
// forward field) a source wavelet sample into the stress fields over a 9x9
// Gaussian-weighted patch centered at (z_loc, x_loc). rxz scales the sxx
// component relative to szz (crosswell borehole source; can be modified).
// Launch with at least a 9x9 thread grid.
// Fix: the two branches were identical except for the sign, and the scale
// used a double-precision pow() for a compile-time constant.
__global__ void add_source(float *d_szz, float *d_sxx, float amp, int nz,
                           bool isFor, int z_loc, int x_loc, float dt,
                           float *gauss_amp, double rxz) {
  int gidz = blockIdx.x * blockDim.x + threadIdx.x;
  int gidx = blockIdx.y * blockDim.y + threadIdx.y;
  if (gidz < 0 || gidz >= 9 || gidx < 0 || gidx >= 9) return;
  // reference modulus (1500 m/s)^2; exact in single precision
  const float scale = 1500.0f * 1500.0f;
  // forward modeling adds the source; backward reconstruction subtracts it
  float sgn = isFor ? 1.0f : -1.0f;
  int idz = gidz - 9 / 2;
  int idx = gidx - 9 / 2;
  float s = sgn * scale * amp * dt * gauss_amp[gidz + gidx * 9];
  d_szz[(z_loc + idz) + nz * (x_loc + idx)] += s;
  d_sxx[(z_loc + idz) + nz * (x_loc + idx)] += rxz * s;
}
// Sample the stress fields at every receiver location and store one time
// sample per receiver into d_data (trace-major: d_data[iRec*nSteps + it]).
// d_rxz[iRec] weights the sxx contribution relative to szz per receiver.
__global__ void recording(float *d_szz, float *d_sxx, int nz, float *d_data,
                          int iShot, int it, int nSteps, int nrec, int *d_z_rec,
                          int *d_x_rec, double *d_rxz) {
  int iRec = threadIdx.x + blockDim.x * blockIdx.x;
  if (iRec < nrec) {
    int cell = d_z_rec[iRec] + d_x_rec[iRec] * nz;
    d_data[iRec * nSteps + it] = d_szz[cell] + d_rxz[iRec] * d_sxx[cell];
  }
}
// Adjoint-source injection: add the data residual of time sample `it` into
// the adjoint stress fields at each receiver location (rxz-weighted for the
// sxx component, mirroring the recording kernel).
__global__ void res_injection(float *d_szz_adj, float *d_sxx_adj, int nz,
                              float *d_res, int it, float dt, int nSteps,
                              int nrec, int *d_z_rec, int *d_x_rec,
                              double *d_rxz) {
  int iRec = threadIdx.x + blockDim.x * blockIdx.x;
  if (iRec < nrec) {
    int cell = d_z_rec[iRec] + nz * d_x_rec[iRec];
    float r = d_res[iRec * nSteps + it];
    d_szz_adj[cell] += r;
    d_sxx_adj[cell] += d_rxz[iRec] * r;
  }
}
// Source-time-function gradient for time sample `it`: a single thread
// samples the adjoint stress fields at the source location (rxz-weighted
// sxx component) and stores -sample * dt.
__global__ void source_grad(float *d_szz_adj, float *d_sxx_adj, int nz,
                            float *d_StfGrad, int it, float dt, int z_src,
                            int x_src, double rxz) {
  if (threadIdx.x + blockDim.x * blockIdx.x != 0) return;
  int cell = z_src + nz * x_src;
  d_StfGrad[it] = -(d_szz_adj[cell] + rxz * d_sxx_adj[cell]) * dt;
}
// Dongzhuo Li 01/28/2019
__global__ void cuda_bp_filter1d(int nSteps, float dt, int nrec,
hipfftComplex *d_data_F, float f0, float f1,
float f2, float f3) {
int nf = nSteps / 2 + 1;
float df = 1.0 / dt / nSteps;
int idf = blockIdx.x * blockDim.x + threadIdx.x;
int idr = blockIdx.y * blockDim.y + threadIdx.y;
int ip = idr * nf + idf;
float freq = idf * df;
float filter_amp = 1.0;
// printf("fffffff = %f\n", freq);
if (idf >= 0 && idf < nf && idr >= 0 && idr < nrec) {
if (freq >= f0 && freq < f1) {
filter_amp = sin(PI / 2.0 * (freq - f0) / (f1 - f0));
} else if (freq >= f1 && freq < f2) {
filter_amp = 1.0;
} else if (freq >= f2 && freq < f3) {
filter_amp = cos(PI / 2.0 * (freq - f2) / (f3 - f2));
} else {
filter_amp = 0.0;
}
d_data_F[ip].x *= filter_amp * filter_amp;
d_data_F[ip].y *= filter_amp * filter_amp;
}
}
// Multiply each receiver's spectrum by the per-frequency complex
// coefficients d_coef (frequency-domain 1-D filtering). d_data_F holds
// nrec spectra of nf bins each, receiver-major.
__global__ void cuda_filter1d(int nf, int nrec, cuFloatComplex *d_data_F,
                              cuFloatComplex *d_coef) {
  int jf = blockIdx.x * blockDim.x + threadIdx.x;
  int jr = blockIdx.y * blockDim.y + threadIdx.y;
  if (jf < 0 || jf >= nf || jr < 0 || jr >= nrec) return;
  int ip = jr * nf + jf;
  d_data_F[ip] = cuCmulf(d_data_F[ip], d_coef[jf]);
}
// Scale every element of the nz-by-nx array (column-major, z fastest) by
// `factor`. A zero factor would wipe the data, so it is rejected with a
// message instead of applied.
// Fix: the diagnostic said "Dividing by zero!" although the kernel
// multiplies; the guard is against a zero scale factor.
__global__ void cuda_normalize(int nz, int nx, float *data, float factor) {
  int idz = blockIdx.x * blockDim.x + threadIdx.x;
  int idx = blockIdx.y * blockDim.y + threadIdx.y;
  if (factor == 0.0) {
    printf("cuda_normalize: zero scale factor!\n");
    return;
  }
  if (idz >= 0 && idz < nz && idx >= 0 && idx < nx) {
    data[idx * nz + idz] *= factor;
  } else {
    return;
  }
}
// windowing in the time axis
__global__ void cuda_window(int nt, int nrec, float dt, float *d_win_start,
float *d_win_end, float *d_weights,
float src_weight, float ratio, float *data) {
int idt = blockIdx.x * blockDim.x + threadIdx.x;
int idr = blockIdx.y * blockDim.y + threadIdx.y;
int ip = idr * nt + idt;
// stupid bug... (I put the if just befor line 614)
if (idt >= 0 && idt < nt && idr >= 0 && idr < nrec) {
float window_amp = 1.0;
float t = idt * dt;
if (ratio > 0.5) {
printf("Dividing by zero!\n");
return;
}
float t0 = d_win_start[idr];
float t3 = d_win_end[idr];
if (t0 == 0.0 && t3 == 0.0) printf("t0 = %f, t3 = %f\n\n", t0, t3);
float t_max = nt * dt;
if (t0 < 0.0) t0 = 0.0;
if (t0 > t_max) t0 = t_max;
if (t3 < 0.0) t3 = 0.0;
if (t3 > t_max) t3 = t_max;
float offset = (t3 - t0) * ratio;
if (offset <= 0.0) {
printf("Window error 1!!\n");
printf("offset = %f\n", offset);
return;
}
float t1 = t0 + offset;
float t2 = t3 - offset;
if (t >= t0 && t < t1) {
window_amp = sin(PI / 2.0 * (t - t0) / (t1 - t0));
} else if (t >= t1 && t < t2) {
window_amp = 1.0;
} else if (t >= t2 && t < t3) {
window_amp = cos(PI / 2.0 * (t - t2) / (t3 - t2));
} else {
window_amp = 0.0;
}
data[ip] *= window_amp * window_amp * d_weights[idr] * src_weight;
} else {
return;
}
}
// overloaded window function: without specifying windows and weights
__global__ void cuda_window(int nt, int nrec, float dt, float ratio,
float *data) {
int idt = blockIdx.x * blockDim.x + threadIdx.x;
int idr = blockIdx.y * blockDim.y + threadIdx.y;
int ip = idr * nt + idt;
if (idt >= 0 && idt < nt && idr >= 0 && idr < nrec) {
float window_amp = 1.0;
float t = idt * dt;
// if (ratio > 0.5) {
// printf("Dividing by zero!\n");
// return;
// }
float t0 = 0;
float t3 = nt * dt;
float offset = nt * dt * ratio;
if (2.0 * offset >= t3 - t0) {
printf("Window error 2!\n");
return;
}
float t1 = t0 + offset;
float t2 = t3 - offset;
if (t >= t0 && t < t1) {
window_amp = sin(PI / 2.0 * (t - t0) / (t1 - t0));
} else if (t >= t1 && t < t2) {
window_amp = 1.0;
} else if (t >= t2 && t < t3) {
window_amp = cos(PI / 2.0 * (t - t2) / (t3 - t2));
} else {
window_amp = 0.0;
}
data[ip] *= window_amp * window_amp;
}
}
// Array padding
__global__ void cuda_embed_crop(int nz, int nx, float *d_data, int nz_pad,
int nx_pad, float *d_data_pad, bool isEmbed) {
int idz = blockIdx.x * blockDim.x + threadIdx.x;
int idx = blockIdx.y * blockDim.y + threadIdx.y;
int ip = idx * nz + idz;
int ip_pad = idx * nz_pad + idz;
if (idz >= 0 && idz < nz && idx >= 0 && idx < nx) {
if (isEmbed) {
d_data_pad[ip_pad] = d_data[ip];
} else {
d_data[ip] = d_data_pad[ip_pad];
}
} else {
return;
}
}
// Dongzhuo Li 02/02/2019
__global__ void cuda_spectrum_update(int nf, int nrec,
cuFloatComplex *d_data_obs_F,
cuFloatComplex *d_data_cal_F,
cuFloatComplex *d_source_F,
cuFloatComplex *d_coef) {
int idr = 0, idf = 0, ip = 0;
const int Block_Size = 512;
const float lambda = 1e-6;
cuFloatComplex c_obs = make_cuFloatComplex(0.0f, 0.0f);
cuFloatComplex c_cal = make_cuFloatComplex(0.0f, 0.0f);
cuFloatComplex c_nominator = make_cuFloatComplex(0.0f, 0.0f);
cuFloatComplex c_denominator = make_cuFloatComplex(0.0f, 0.0f);
__shared__ cuFloatComplex sh_nominator_F[Block_Size];
__shared__ cuFloatComplex sh_denominator_F[Block_Size];
int tid =
threadIdx.x; // one thread handles s receivers (with 512 as the interval)
int bid = blockIdx.x; // one block handles one frequency
sh_nominator_F[tid] = make_cuFloatComplex(0.0f, 0.0f);
sh_denominator_F[tid] = make_cuFloatComplex(0.0f, 0.0f);
__syncthreads();
for (int s = 0; s < (nrec + Block_Size - 1) / Block_Size; s++) {
idr = s * blockDim.x + tid;
idf = bid;
ip = idr * nf + idf;
if (idr >= 0 && idr < nrec && idf >= 0 && idf < nf) {
c_obs = d_data_obs_F[ip];
c_cal = d_data_cal_F[ip];
sh_nominator_F[tid] =
cuCaddf(sh_nominator_F[tid], cuCmulf(cuConjf(c_cal), c_obs));
sh_denominator_F[tid] =
cuCaddf(sh_denominator_F[tid], cuCmulf(cuConjf(c_cal), c_cal));
}
}
__syncthreads();
// do reduction in shared memory
for (int s = blockDim.x / 2; s >= 1; s /= 2) {
if (tid < s) {
sh_nominator_F[tid] =
cuCaddf(sh_nominator_F[tid], sh_nominator_F[tid + s]);
sh_denominator_F[tid] =
cuCaddf(sh_denominator_F[tid], sh_denominator_F[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
sh_denominator_F[0].x += lambda;
// printf("nomi = %f, deno = %f\n", cuCabsf(sh_nominator_F[0]),
// cuCabsf(sh_denominator_F[0]));
sh_nominator_F[0] = cuCdivf(sh_nominator_F[0], sh_denominator_F[0]);
// printf("coef = %f", sh_nominator_F[0].x);
d_coef[bid] = sh_nominator_F[0];
d_source_F[bid] = cuCmulf(d_source_F[bid], sh_nominator_F[0]);
}
// printf("tid = %d\n", tid);
__syncthreads();
for (int s = 0; s < (nrec + Block_Size - 1) / Block_Size; s++) {
idr = s * blockDim.x + tid;
idf = bid;
ip = idr * nf + idf;
if (idr >= 0 && idr < nrec && idf >= 0 && idf < nf) {
d_data_cal_F[ip] = cuCmulf(d_data_cal_F[ip], sh_nominator_F[0]);
// d_data_cal_F[ip].x *= cuCabsf(sh_nominator_F[0]);
// d_data_cal_F[ip].y *= cuCabsf(sh_nominator_F[0]);
// if (tid == 0) printf("ratio = %f\n", cuCabsf(sh_nominator_F[0]));
}
}
__syncthreads();
}
// Single-block max-magnitude reduction: maxval[0] = max_i |data[i]| over n
// samples. Launch with one block; blockDim.x must equal Block_Size (512)
// and be a power of two for the tree reduction below.
// Fix: use single-precision fabsf -- the double fabs of the original forced
// a double-precision path in a float kernel.
__global__ void cuda_find_absmax(int n, float *data, float *maxval) {
  int tid =
      threadIdx.x;  // one thread handles s samples (with 512 as the interval)
  const int Block_Size = 512;
  __shared__ float sh_data[Block_Size];
  sh_data[tid] = 0.0;
  __syncthreads();
  // strided pass: each thread keeps the largest magnitude it sees
  for (int s = 0; s < (n + Block_Size - 1) / Block_Size; s++) {
    int ip = s * blockDim.x + tid;
    if (ip >= 0 && ip < n) {
      if (fabsf(data[ip]) > fabsf(sh_data[tid])) sh_data[tid] = fabsf(data[ip]);
    }
  }
  __syncthreads();
  // shared-memory tree reduction of the per-thread maxima
  for (int s = blockDim.x / 2; s >= 1; s /= 2) {
    if (tid < s) {
      sh_data[tid] =
          (sh_data[tid] >= sh_data[tid + s]) ? sh_data[tid] : sh_data[tid + s];
    }
    __syncthreads();
  }
  if (tid == 0) maxval[0] = sh_data[0];
  __syncthreads();
}
// Dongzhuo Li's last attempt - 09/26/2019
// find out the norm square of each trace for normalization
// number of blocks = number of traces
// size of a block = 512
// Per-trace dot product for trace normalization:
//   normfact[bid] = sum_t data1[bid*nt + t] * data2[bid*nt + t] + DIVCONST
// (DIVCONST is a small project-defined constant that keeps later divisions
// away from zero). One block per trace (bid); 512 threads do a strided
// accumulation followed by a shared-memory tree reduction.
// NOTE(review): assumes blockDim.x == Block_Size (512) and a power of two.
__global__ void cuda_find_normfact(int nt, int nrec, float *data1, float *data2,
                                   float *normfact) {
  // one thread handles s time samples (with 512 as the interval)
  int tid = threadIdx.x;
  int bid = blockIdx.x;  // one block handles one trace
  const int Block_Size = 512;
  __shared__ float sh_data[Block_Size];
  sh_data[tid] = 0.0;
  __syncthreads();
  // strided accumulation of the per-thread partial dot products
  for (int s = 0; s < (nt + Block_Size - 1) / Block_Size; s++) {
    int ip = s * blockDim.x + tid + bid * nt;
    int time_id = s * blockDim.x + tid;
    if (time_id >= 0 && time_id < nt && bid >= 0 && bid < nrec) {
      sh_data[tid] += data1[ip] * data2[ip];
    }
    __syncthreads();
  }
  // do reduction in shared memory
  for (int s = blockDim.x / 2; s >= 1; s /= 2) {
    if (tid < s) {
      sh_data[tid] += sh_data[tid + s];
    }
    __syncthreads();
  }
  if (tid == 0) {
    normfact[bid] = sh_data[0] + DIVCONST;  // add a very small number
    // printf("norm = %f\n", sh_data[0]);
  }
  __syncthreads();
}
//
// // normalize each trace by its normfact
// __global__ void cuda_normal_traces(int nt, int nrec, float *normfact,
// float *data) {
// int idt = blockIdx.x * blockDim.x + threadIdx.x;
// int idr = blockIdx.y * blockDim.y + threadIdx.y;
// int ip = idr * nt + idt;
// if (idt >= 0 && idt < nt && idr >= 0 && idr < nrec) {
// data[ip] = data[ip] / sqrt(normfact[idr]);
// } else {
// return;
// }
// }
//
// normalized zero-lag cross-correlation misfit function
__global__ void cuda_normal_misfit(int nrec, float *d_cross_normfact,
float *d_obs_normfact, float *d_cal_normfact,
float *misfit, float *d_weights,
float src_weight) {
// one thread handles s receivers (with 512 as the interval)
int tid = threadIdx.x;
const int Block_Size = 512;
__shared__ float sh_data[Block_Size];
sh_data[tid] = 0.0;
__syncthreads();
for (int s = 0; s < (nrec + Block_Size - 1) / Block_Size; s++) {
int ip = s * blockDim.x + tid;
if (ip >= 0 && ip < nrec) {
sh_data[tid] += d_cross_normfact[ip] /
(sqrt(d_obs_normfact[ip]) * sqrt(d_cal_normfact[ip])) *
d_weights[ip] * src_weight;
}
}
__syncthreads();
// do reduction in shared memory
for (int s = blockDim.x / 2; s >= 1; s /= 2) {
if (tid < s) {
sh_data[tid] += sh_data[tid + s];
}
__syncthreads();
}
if (tid == 0) *misfit = -2.0 * sh_data[0]; // since I multiply 0.5 later
__syncthreads();
}
//
// apply the weighting factor to the residual
__global__ void cuda_normal_adjoint_source(
int nt, int nrec, float *d_obs_normfact, float *d_cal_normfact,
float *d_cross_normfact, float *d_data_obs, float *d_data, float *d_res,
float *d_weights, float src_weight) {
int idt = blockIdx.x * blockDim.x + threadIdx.x;
int idr = blockIdx.y * blockDim.y + threadIdx.y;
int ip = idr * nt + idt;
if (idt >= 0 && idt < nt && idr >= 0 && idr < nrec) {
d_res[ip] = (d_data_obs[ip] -
d_cross_normfact[idr] / d_cal_normfact[idr] * d_data[ip]) /
(sqrt(d_obs_normfact[idr]) * sqrt(d_cal_normfact[idr])) *
d_weights[idr] * src_weight;
// d_res[ip] = (d_data_obs[ip] - d_data[ip]);
// if (idt == 0) {
// printf("cross-cal-ratio = %f\n",
// d_cross_normfact[idr] / d_cal_normfact[idr]);
// }
} else {
return;
}
}
// 1D band-pass filtering wrapper code
// Steps: padding, FFT, filtering, IFFT, cropping
void bp_filter1d(int nSteps, float dt, int nrec, float *d_data, float *filter) {
int nSteps_pad = 2 * nSteps;
int nfft = nSteps_pad / 2 + 1;
// float df = 1.0/dt/nSteps_pad;
float *d_data_pad;
float f0 = filter[0];
float f1 = filter[1];
float f2 = filter[2];
float f3 = filter[3];
hipfftHandle plan_f, plan_b;
hipfftComplex *d_data_F;
dim3 threads(TX, TY);
dim3 blocks((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
// float *h_test = new float[nSteps_pad];
// pad data
CHECK(hipMalloc((void **)&d_data_pad, nSteps_pad * nrec * sizeof(float)));
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_data_pad, nSteps_pad, nrec, 0.0);
hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data, nSteps_pad, nrec,
d_data_pad, true);
// CHECK(hipMemcpy(h_test, d_data_pad, nSteps * sizeof(float),
// hipMemcpyDeviceToHost)); displayArray("h_test", h_test, nSteps_pad, 1);
// filtering
CHECK(hipMalloc((void **)&d_data_F, sizeof(hipfftComplex) * nfft * nrec));
hipfftPlan1d(&plan_f, nSteps_pad, HIPFFT_R2C, nrec);
hipfftExecR2C(plan_f, d_data_pad, d_data_F); // forward FFT
hipfftDestroy(plan_f);
hipLaunchKernelGGL(( cuda_bp_filter1d), dim3(blocks), dim3(threads), 0, 0, nSteps_pad, dt, nrec, d_data_F, f0, f1,
f2, f3);
hipfftPlan1d(&plan_b, nSteps_pad, HIPFFT_C2R, nrec);
hipfftExecC2R(plan_b, d_data_F, d_data_pad); // inverse FFT
hipfftDestroy(plan_b);
// CHECK(hipMemcpy(h_test, d_data_pad, nSteps * sizeof(float),
// hipMemcpyDeviceToHost)); displayArray("h_test", h_test, nSteps_pad, 1);
// crop data
hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data, nSteps_pad, nrec,
d_data_pad, false);
// normalization (in the padded fft, the length is nSteps_pad)
hipLaunchKernelGGL(( cuda_normalize), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data,
1 / float(nSteps_pad));
CHECK(hipFree(d_data_F));
CHECK(hipFree(d_data_pad));
}
// source signature and calculated data update
// Steps: padding, FFT, compute spectrum, filtering, IFFT, cropping
// Update the source wavelet and the calculated data by frequency-domain
// least-squares matching against the observed data. Pipeline: zero-pad to
// 2*nSteps, taper (cuda_window), forward R2C FFT of obs/cal gathers and the
// source, per-frequency coefficient computation (cuda_spectrum_update, which
// also rescales the cal spectra and the source spectrum), inverse FFT, crop,
// and 1/nSteps_pad normalization (hipFFT is unnormalized). The coefficients
// are returned in d_coef for the adjoint pass (source_update_adj).
// Returns the obs/cal max-amplitude ratio from amp_ratio_comp.
// NOTE(review): hipfft* statuses are unchecked, and the CHECK on the
// d_source_F allocation below has no trailing semicolon -- it relies on the
// CHECK macro's expansion to remain well-formed.
float source_update(int nSteps, float dt, int nrec, float *d_data_obs,
                    float *d_data_cal, float *d_source,
                    cuFloatComplex *d_coef) {
  int nSteps_pad = 2 * nSteps;
  int nfft = nSteps_pad / 2 + 1;
  float *d_data_obs_pad, *d_data_cal_pad, *d_source_pad;
  hipfftHandle plan_f, plan_b;
  hipfftComplex *d_data_obs_F, *d_data_cal_F, *d_source_F;
  dim3 threads(TX, TY);
  dim3 blocks((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
  dim3 blocks_pad((nSteps_pad + TX - 1) / TX, (nrec + TY - 1) / TY);
  // pad data and window data
  CHECK(
      hipMalloc((void **)&d_data_obs_pad, nSteps_pad * nrec * sizeof(float)));
  CHECK(
      hipMalloc((void **)&d_data_cal_pad, nSteps_pad * nrec * sizeof(float)));
  CHECK(hipMalloc((void **)&d_source_pad, nSteps_pad * sizeof(float)));
  hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_data_obs_pad, nSteps_pad, nrec, 0.0);
  hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_data_cal_pad, nSteps_pad, nrec, 0.0);
  hipLaunchKernelGGL(( intialArrayGPU), dim3((nSteps_pad + 31) / 32), dim3(32), 0, 0, d_source_pad, nSteps_pad, 1,
                                            0.0);
  hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data_obs, nSteps_pad,
                                    nrec, d_data_obs_pad, true);
  hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data_cal, nSteps_pad,
                                    nrec, d_data_cal_pad, true);
  hipLaunchKernelGGL(( cuda_window), dim3(blocks_pad), dim3(threads), 0, 0, nSteps_pad, nrec, dt, 0.01,
                                     d_data_obs_pad);
  hipLaunchKernelGGL(( cuda_window), dim3(blocks_pad), dim3(threads), 0, 0, nSteps_pad, nrec, dt, 0.01,
                                     d_data_cal_pad);
  hipLaunchKernelGGL(( cuda_embed_crop), dim3((nSteps_pad + 31) / 32), dim3(32), 0, 0, 
      nSteps, 1, d_source, nSteps_pad, 1, d_source_pad, true);
  // CHECK(hipMemcpy(h_test, d_data_pad, nSteps * sizeof(float),
  // hipMemcpyDeviceToHost)); displayArray("h_test", h_test, nSteps_pad, 1);
  // // filtering
  CHECK(hipMalloc((void **)&d_data_obs_F, sizeof(hipfftComplex) * nfft * nrec));
  CHECK(hipMalloc((void **)&d_data_cal_F, sizeof(hipfftComplex) * nfft * nrec));
  CHECK(hipMalloc((void **)&d_source_F, sizeof(hipfftComplex) * nfft))
  hipfftPlan1d(&plan_f, nSteps_pad, HIPFFT_R2C, nrec);
  hipfftExecR2C(plan_f, d_data_obs_pad,
               d_data_obs_F);  // forward FFT of observed data
  hipfftExecR2C(plan_f, d_data_cal_pad,
               d_data_cal_F);  // forward FFT of calculated data
  hipfftDestroy(plan_f);
  hipfftPlan1d(&plan_f, nSteps_pad, HIPFFT_R2C, 1);  // source FFT
  hipfftExecR2C(plan_f, d_source_pad, d_source_F);
  hipfftDestroy(plan_f);
  // cuda_bp_filter1d<<<blocks,threads>>>(nSteps_pad, dt, nrec, d_data_F, f0,
  // f1, f2, f3);
  // one block per frequency bin computes and applies the matching coefficient
  hipLaunchKernelGGL(( cuda_spectrum_update), dim3(nfft), dim3(512), 0, 0, nfft, nrec, d_data_obs_F, d_data_cal_F,
                                        d_source_F, d_coef);
  hipfftPlan1d(&plan_b, nSteps_pad, HIPFFT_C2R, nrec);
  hipfftExecC2R(plan_b, d_data_cal_F, d_data_cal_pad);  // inverse FFT
  hipfftDestroy(plan_b);
  hipfftPlan1d(&plan_b, nSteps_pad, HIPFFT_C2R, 1);
  hipfftExecC2R(plan_b, d_source_F, d_source_pad);  // inverse FFT
  hipfftDestroy(plan_b);
  // CHECK(hipMemcpy(h_test, d_data_pad, nSteps * sizeof(float),
  // hipMemcpyDeviceToHost)); displayArray("h_test", h_test, nSteps_pad, 1);
  // crop data
  hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data_cal, nSteps_pad,
                                    nrec, d_data_cal_pad, false);
  hipLaunchKernelGGL(( cuda_embed_crop), dim3((nSteps + 31) / 32), dim3(32), 0, 0, nSteps, 1, d_source, nSteps_pad,
                                         1, d_source_pad, false);
  // normalization (in the padded fft, the length is nSteps_pad)
  // printf("amp = %f\n", amp_ratio);
  hipLaunchKernelGGL(( cuda_normalize), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data_cal,
                                   1.0f / float(nSteps_pad));
  hipLaunchKernelGGL(( cuda_normalize), dim3((nSteps + 31) / 32), dim3(32), 0, 0, nSteps, 1, d_source,
                                        1.0f / float(nSteps_pad));
  float amp_ratio = amp_ratio_comp(nSteps * nrec, d_data_obs, d_data_cal);
  // cuda_normalize<<<blocks,threads>>>(nSteps, nrec, d_data_cal, amp_ratio);
  // cuda_normalize<<<(nSteps+31)/32, 32>>>(nSteps, 1, d_source,
  // amp_ratio/float(nSteps_pad));
  // // update amplitude
  //hipLaunchKernelGGL(( cuda_find_absmax), dim3(1), dim3(512), 0, 0, nSteps*nrec, d_data_obs, d_obs_maxval);
  //hipLaunchKernelGGL(( cuda_find_absmax), dim3(1), dim3(512), 0, 0, nSteps*nrec, d_data_cal, d_cal_maxval);
  // CHECK(hipMemcpy(obs_maxval, d_obs_maxval, sizeof(float),
  // hipMemcpyDeviceToHost)); CHECK(hipMemcpy(cal_maxval, d_cal_maxval,
  // sizeof(float), hipMemcpyDeviceToHost));
  // cuda_normalize<<<blocks,threads>>>(nSteps, nrec,
  // d_data_cal, 1.0/amp_ratio); printf("Shot gather amplitude ratio = %f\n",
  // obs_maxval[0]/cal_maxval[0]);
  CHECK(hipFree(d_data_obs_pad));
  CHECK(hipFree(d_data_cal_pad));
  CHECK(hipFree(d_data_obs_F));
  CHECK(hipFree(d_data_cal_F));
  CHECK(hipFree(d_source_pad));
  CHECK(hipFree(d_source_F));
  return amp_ratio;
}
// source signature and calculated data update
// Steps: padding, FFT, compute spectrum, filtering, IFFT, cropping
// Adjoint counterpart of source_update: apply the per-frequency coefficients
// d_coef (computed in the forward pass) to the adjoint data d_data in the
// frequency domain. Pipeline: zero-pad to 2*nSteps, taper, R2C FFT,
// per-frequency multiply (cuda_filter1d), inverse FFT, crop, then scale by
// amp_ratio / nSteps_pad (FFT normalization folded with the amplitude ratio).
// NOTE(review): hipfft* return statuses are not checked here.
void source_update_adj(int nSteps, float dt, int nrec, float *d_data,
                       float amp_ratio, cuFloatComplex *d_coef) {
  int nSteps_pad = 2 * nSteps;
  int nfft = nSteps_pad / 2 + 1;
  float *d_data_pad;
  hipfftHandle plan_f, plan_b;
  hipfftComplex *d_data_F;
  dim3 threads(TX, TY);
  dim3 blocks((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
  dim3 blocks_pad((nSteps_pad + TX - 1) / TX, (nrec + TY - 1) / TY);
  // cuda_normalize<<<blocks,threads>>>(nSteps, nrec, d_data, amp_ratio);
  // pad data
  CHECK(hipMalloc((void **)&d_data_pad, nSteps_pad * nrec * sizeof(float)));
  hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_data_pad, nSteps_pad, nrec, 0.0);
  hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data, nSteps_pad, nrec,
                                    d_data_pad, true);
  hipLaunchKernelGGL(( cuda_window), dim3(blocks_pad), dim3(threads), 0, 0, nSteps_pad, nrec, dt, 0.01, d_data_pad);
  CHECK(hipMalloc((void **)&d_data_F, sizeof(hipfftComplex) * nfft * nrec));
  hipfftPlan1d(&plan_f, nSteps_pad, HIPFFT_R2C, nrec);
  hipfftExecR2C(plan_f, d_data_pad, d_data_F);
  hipfftDestroy(plan_f);
  // update data
  hipLaunchKernelGGL(( cuda_filter1d), dim3(blocks), dim3(threads), 0, 0, nfft, nrec, d_data_F, d_coef);
  hipfftPlan1d(&plan_b, nSteps_pad, HIPFFT_C2R, nrec);
  hipfftExecC2R(plan_b, d_data_F, d_data_pad);  // inverse FFT
  hipfftDestroy(plan_b);
  // crop data
  hipLaunchKernelGGL(( cuda_embed_crop), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data, nSteps_pad, nrec,
                                    d_data_pad, false);
  // normalization (in the padded fft, the length is nSteps_pad)
  // printf("amp_adj = %f\n", amp_ratio);
  hipLaunchKernelGGL(( cuda_normalize), dim3(blocks), dim3(threads), 0, 0, nSteps, nrec, d_data,
                                   amp_ratio / float(nSteps_pad));
  CHECK(hipFree(d_data_pad));
  CHECK(hipFree(d_data_F));
}
// Return max|obs| / max|cal| over n samples of the two device arrays, or 0
// when the calculated data is identically zero. Used to rescale calculated
// data to the observed amplitude level.
// Fix: the original heap-allocated the two host scalars with malloc() but
// released them with delete[] (undefined behavior: mismatched allocator).
// Plain stack variables remove the allocation entirely.
float amp_ratio_comp(int n, float *d_data_obs, float *d_data_cal) {
  float obs_maxval = 0.0f, cal_maxval = 0.0f;
  float *d_obs_maxval, *d_cal_maxval;
  CHECK(hipMalloc((void **)&d_obs_maxval, sizeof(float)));
  CHECK(hipMalloc((void **)&d_cal_maxval, sizeof(float)));
  hipLaunchKernelGGL(( cuda_find_absmax), dim3(1), dim3(512), 0, 0, n, d_data_obs, d_obs_maxval);
  hipLaunchKernelGGL(( cuda_find_absmax), dim3(1), dim3(512), 0, 0, n, d_data_cal, d_cal_maxval);
  CHECK(hipMemcpy(&obs_maxval, d_obs_maxval, sizeof(float),
                  hipMemcpyDeviceToHost));
  CHECK(hipMemcpy(&cal_maxval, d_cal_maxval, sizeof(float),
                  hipMemcpyDeviceToHost));
  // printf("Shot gather amplitude ratio = %f\n",
  // obs_maxval/cal_maxval);
  float ratio = 0.0;
  if (cal_maxval != 0.0) {
    ratio = obs_maxval / cal_maxval;
  }
  CHECK(hipFree(d_obs_maxval));
  CHECK(hipFree(d_cal_maxval));
  return ratio;
}
#define d_Mu(x, y) d_Mu[(y) * (nx) + (x)]
#define d_ave_Mu(x, y) d_ave_Mu[(y) * (nx) + (x)]
#define d_field(z, x) d_field[(x) * (nz) + (z)]
#define d_bnd(x, indT) d_bnd[(indT) * (len_Bnd_vec) + (x)]
#define d_Den(x, y) d_Den[(y) * (nx) + (x)]
#define d_ave_Byc_a(x, y) d_ave_Byc_a[(y) * (nx) + (x)]
#define d_ave_Byc_b(x, y) d_ave_Byc_b[(y) * (nx) + (x)]
#include "utilities.h"
// Load `size` floats from the binary file `fname` into h_bin.
// Exits on open failure; warns when the file holds fewer values than
// requested (the tail of h_bin is then left untouched).
void fileBinLoad(float *h_bin, int size, std::string fname) {
  FILE *fp = fopen(fname.c_str(), "rb");
  if (fp == nullptr) {
    std::cout << "Attempted to read " << fname << std::endl;
    printf("File reading error!\n");
    exit(1);
  } else {
    // BUG FIX: the read count was stored but never checked, so a truncated
    // file silently left part of h_bin uninitialized.
    size_t sizeRead = fread(h_bin, sizeof(float), size, fp);
    if (sizeRead != (size_t)size) {
      printf("Warning: read %zu of %d values from %s\n", sizeRead, size,
             fname.c_str());
    }
  }
  fclose(fp);
}
// Dump `size` floats from h_bin to the binary file `fname`.
// Exits when the file cannot be opened for writing.
void fileBinWrite(float *h_bin, int size, std::string fname) {
  FILE *fp = fopen(fname.c_str(), "wb");
  if (fp == nullptr) {
    printf("File writing error!\n");
    exit(1);
  }
  fwrite(h_bin, sizeof(float), size, fp);
  fclose(fp);
}
// Dump `size` doubles from h_bin to the binary file `fname`.
// Exits when the file cannot be opened for writing.
void fileBinWriteDouble(double *h_bin, int size, std::string fname) {
  FILE *fp = fopen(fname.c_str(), "wb");
  if (fp == nullptr) {
    printf("File writing error!\n");
    exit(1);
  }
  fwrite(h_bin, sizeof(double), size, fp);
  fclose(fp);
}
// Fill the first `size` entries of a host float array with `value`.
void initialArray(float *ip, int size, float value) {
  for (int idx = 0; idx < size; ++idx) {
    ip[idx] = value;
  }
}
// Fill the first `size` entries of a host double array with `value`.
void initialArray(double *ip, int size, double value) {
  for (int idx = 0; idx < size; ++idx) {
    ip[idx] = value;
  }
}
// Kernel: set every element of the nx-by-ny array ip to `value`.
// Expects a 2D launch covering at least (nx, ny) threads.
// (Name typo "intial" kept: it is the public interface used by callers.)
__global__ void intialArrayGPU(float *ip, int nx, int ny, float value) {
  int col = threadIdx.x + blockDim.x * blockIdx.x;
  int row = threadIdx.y + blockDim.y * blockIdx.y;
  if (col >= nx || row >= ny) return;  // guard the grid tail
  ip[row * nx + col] = value;
}
// Kernel: element-wise copy ip_in -> ip_out over an nx-by-ny array.
// Expects a 2D launch covering at least (nx, ny) threads.
__global__ void assignArrayGPU(float *ip_in, float *ip_out, int nx, int ny) {
  int col = threadIdx.x + blockDim.x * blockIdx.x;
  int row = threadIdx.y + blockDim.y * blockIdx.y;
  if (col >= nx || row >= ny) return;  // guard the grid tail
  int idx = col + row * nx;
  ip_out[idx] = ip_in[idx];
}
// Debug helper: pretty-print an nx-by-ny host array (one row per line)
// under the label `s`.
void displayArray(std::string s, float *ip, int nx, int ny) {
  std::cout << s << ": " << std::endl;
  for (int row = 0; row < ny; row++) {
    for (int col = 0; col < nx; col++) {
      printf("%f ", ip[row * nx + col]);
    }
    printf("\n");
  }
  printf("\n\n\n");
}
// Kernel: derive elastic moduli from velocities and density:
//   Mu     = Cs^2 * Den
//   Lambda = Den * (Cp^2 - 2 Cs^2)
// Warns (device printf) if a negative Lambda is produced.
// Expects a 2D launch covering at least (nx, ny) threads.
__global__ void moduliInit(float *d_Cp, float *d_Cs, float *d_Den,
                           float *d_Lambda, float *d_Mu, int nx, int ny) {
  int col = threadIdx.x + blockDim.x * blockIdx.x;
  int row = threadIdx.y + blockDim.y * blockIdx.y;
  if (col >= nx || row >= ny) return;
  int idx = col + row * nx;
  float cs2 = powf(d_Cs[idx], 2);
  d_Mu[idx] = cs2 * d_Den[idx];
  d_Lambda[idx] = d_Den[idx] * (powf(d_Cp[idx], 2) - 2 * cs2);
  if (d_Lambda[idx] < 0) {
    printf("Lambda is negative!!!");
  }
}
// Kernel: recover velocities from moduli and density:
//   Cp = sqrt((Lambda + 2 Mu) / Den),  Cs = sqrt(Mu / Den)
// Inverse of moduliInit. Expects a 2D launch covering (nx, ny).
__global__ void velInit(float *d_Lambda, float *d_Mu, float *d_Den, float *d_Cp,
                        float *d_Cs, int nx, int ny) {
  int col = threadIdx.x + blockDim.x * blockIdx.x;
  int row = threadIdx.y + blockDim.y * blockIdx.y;
  if (col >= nx || row >= ny) return;
  int idx = col + row * nx;
  d_Cp[idx] = sqrt((d_Lambda[idx] + 2.0 * d_Mu[idx]) / d_Den[idx]);
  d_Cs[idx] = sqrt((d_Mu[idx]) / d_Den[idx]);
}
// Kernel: staggered-grid effective shear modulus at the half-grid position:
// harmonic mean of the four surrounding Mu cells (indexed via the d_Mu /
// d_ave_Mu macros defined at the top of this file). If any neighbor is zero
// (e.g. a fluid/vacuum cell) the average is forced to zero to avoid a
// division by zero. Skips a 2-cell rim at the array edges.
__global__ void aveMuInit(float *d_Mu, float *d_ave_Mu, int nx, int ny) {
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;
  float a, b, c, d;
  if (gidx >= 2 && gidx <= nx - 3 && gidy >= 2 && gidy <= ny - 3) {
    a = d_Mu(gidx, gidy);
    b = d_Mu(gidx + 1, gidy);
    c = d_Mu(gidx, gidy + 1);
    d = d_Mu(gidx + 1, gidy + 1);
    if (a == 0.0 || b == 0.0 || c == 0.0 || d == 0.0) {
      d_ave_Mu(gidx, gidy) = 0.0;
    } else {
      // harmonic (not arithmetic) averaging of the four neighbors
      d_ave_Mu(gidx, gidy) = 4.0 / (1.0 / a + 1.0 / b + 1.0 / c + 1.0 / d);
    }
  }
}
// Kernel: staggered-grid buoyancy (1/density) averages (via the d_Den /
// d_ave_Byc_* macros): _a averages density with the x+1 neighbor, _b with
// the y+1 neighbor. Skips a 2-cell rim at the array edges.
__global__ void aveBycInit(float *d_Den, float *d_ave_Byc_a, float *d_ave_Byc_b,
                           int nx, int ny) {
  int gidx = threadIdx.x + blockDim.x * blockIdx.x;
  int gidy = threadIdx.y + blockDim.y * blockIdx.y;
  if (gidx >= 2 && gidx <= nx - 3 && gidy >= 2 && gidy <= ny - 3) {
    d_ave_Byc_a(gidx, gidy) = 2.0 / (d_Den(gidx + 1, gidy) + d_Den(gidx, gidy));
    d_ave_Byc_b(gidx, gidy) = 2.0 / (d_Den(gidx, gidy + 1) + d_Den(gidx, gidy));
  } else {
    return;
  }
}
// Kernel: element-wise residual d_out = d_in1 - d_in2 over an nx-by-ny
// gather, except column 0 (the first time sample), which is forced to zero
// ("only compute last N-1 time samples for misfits" -- DL 02/25/2019).
// 2D launch: x = time sample, y = trace.
__global__ void gpuMinus(float *d_out, float *d_in1, float *d_in2, int nx,
                         int ny) {
  int col = threadIdx.x + blockDim.x * blockIdx.x;
  int row = threadIdx.y + blockDim.y * blockIdx.y;
  if (col >= nx || row >= ny) return;
  int ip = row * nx + col;
  d_out[ip] = (col == 0) ? 0.0f : d_in1[ip] - d_in2[ip];
}
// Kernel: objective function value *obj = sum_i err[i]^2 over ng samples.
// Single-block launch: each thread accumulates a strided partial sum in
// shared memory, then a tree reduction combines the partials.
// NOTE(review): assumes blockDim.x <= Block_Size (512) and a power-of-two
// blockDim.x so the reduction loop covers every entry -- confirm at call
// sites.
__global__ void cuda_cal_objective(float *obj, float *err, int ng)
/*< calculate the value of objective function: obj >*/
{
  const int Block_Size = 512;
  __shared__ float sdata[Block_Size];
  int tid = threadIdx.x;
  sdata[tid] = 0.0f;
  // thread tid sums err[tid], err[tid + blockDim.x], ... (bounds-guarded)
  for (int s = 0; s < (ng + Block_Size - 1) / Block_Size; s++) {
    int id = s * blockDim.x + threadIdx.x;
    float a = (id < ng) ? err[id] : 0.0f;
    // sdata[tid] += a*a;
    sdata[tid] += powf(a, 2);
  }
  __syncthreads();
  /* do reduction in shared mem */
  // for(int s=blockDim.x/2; s>32; s>>=1) {
  //   if (threadIdx.x < s) sdata[tid] += sdata[tid + s];
  //   __syncthreads();
  // }
  // if (tid < 32) {
  //   if (blockDim.x >= 64) { sdata[tid] += sdata[tid + 32]; }
  //   if (blockDim.x >= 32) { sdata[tid] += sdata[tid + 16]; }
  //   if (blockDim.x >= 16) { sdata[tid] += sdata[tid + 8]; }
  //   if (blockDim.x >= 8) { sdata[tid] += sdata[tid + 4]; }
  //   if (blockDim.x >= 4) { sdata[tid] += sdata[tid + 2]; }
  //   if (blockDim.x >= 2) { sdata[tid] += sdata[tid + 1]; }
  // }
  // pairwise tree reduction; the barrier is outside the `if`, so every
  // thread of the block reaches it each iteration
  for (int s = blockDim.x / 2; s >= 1; s /= 2) {
    if (threadIdx.x < s) sdata[tid] += sdata[tid + s];
    __syncthreads();
  }
  if (tid == 0) {
    *obj = sdata[0];
  }
}
// Host-side objective function: sum of squares of the first N entries
// (CPU counterpart of cuda_cal_objective, useful for checking results).
float cal_objective(float *array, int N) {
  // BUG FIX: removed a leftover debug printf("hhh\n") that polluted stdout
  // on every call.
  float misfit = 0.0;
  for (int i = 0; i < N; i++) {
    misfit += array[i] * array[i];
  }
  return misfit;
}
// Arithmetic mean of the first N entries of a host array.
float compCpAve(float *array, int N) {
  float accum = 0.0;
  for (int i = 0; i < N; i++) {
    accum += array[i];
  }
  return accum / float(N);
}
// Stability check: compute the Courant number for the 4th-order
// staggered-grid stencil from the maximum P velocity, time step and the
// smaller grid spacing; print it and abort when it exceeds 1.
void compCourantNumber(float *h_Cp, int size, float dt, float dz, float dx) {
  float vmax = h_Cp[0];
  for (int i = 1; i < size; i++) {
    if (h_Cp[i] > vmax) vmax = h_Cp[i];
  }
  float dh_min = (dz < dx) ? dz : dx;
  // Courant_number = max * dt * sqrtf(powf(1.0 / dz, 2) + powf(1.0 / dx, 2));
  float Courant_number =
      vmax * dt * sqrtf(2.0) * (1.0 / 24.0 + 9.0 / 8.0) / dh_min;
  if (Courant_number > 1.0) {
    std::cout << "Courant_number = " << Courant_number << std::endl;
    exit(1);
  }
}
// Build 1D CPML absorbing-boundary profiles of length N for one axis:
// coordinate-stretching factor K and the recursive-convolution update
// coefficients a, b, plus the half-grid-staggered variants (K_half, a_half,
// b_half) used by the staggered velocity/stress grids. Profiles differ from
// the identity (K=1, a=b=0 exp(0)) only inside the nPml-point strips at
// both ends; both the left and right edges are handled in the same loop.
// Polynomial damping of degree NPOWER with a small linear term (c1, c2, c3
// weights) and a linearly decaying alpha (frequency-shift) profile.
// NOTE: CpAve is deliberately overwritten with 3000.0 below, so the value
// passed in is ignored ("make this model independent" per original note).
void cpmlInit(float *K, float *a, float *b, float *K_half, float *a_half,
              float *b_half, int N, int nPml, float dh, float f0, float dt,
              float CpAve) {
  float *damp, *damp_half, *alpha, *alpha_half;
  float d0_h = 0.0;
  float Rcoef = 0.0008;  // target theoretical reflection coefficient
  float depth_in_pml = 0.0;
  float depth_normalized = 0.0;
  float thickness_PML = 0.0;
  // const float PI = 3.141592653589793238462643383279502884197169;
  const float K_MAX_PML = 2.0;
  const float ALPHA_MAX_PML = 2.0 * PI * (f0 / 2.0);
  const float NPOWER = 8.0;
  const float c1 = 0.25, c2 = 0.75, c3 = 0.0;
  // const float c1 = 0.0, c2 = 1.0, c3 = 0.0;
  thickness_PML = nPml * dh;  // changed here
  CpAve = 3000.0;             // DL make this model independent
  // d0: damping scale chosen so the round trip through the PML attains Rcoef
  d0_h = -(NPOWER + 1) * CpAve * log(Rcoef) / (2.0 * thickness_PML);
  damp = (float *)malloc(N * sizeof(float));
  damp_half = (float *)malloc(N * sizeof(float));
  alpha = (float *)malloc(N * sizeof(float));
  alpha_half = (float *)malloc(N * sizeof(float));
  initialArray(damp, N, 0.0);
  initialArray(damp_half, N, 0.0);
  initialArray(K, N, 1.0);
  initialArray(K_half, N, 1.0);
  initialArray(alpha, N, 0.0);
  initialArray(alpha_half, N, 0.0);
  initialArray(a, N, 0.0);
  initialArray(a_half, N, 0.0);
  initialArray(b, N, 0.0);
  initialArray(b_half, N, 0.0);
  for (int i = 0; i < N; i++) {
    // left edge
    depth_in_pml = (nPml - i) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K[i] = 1.0 + (K_MAX_PML - 1.0) * pow(depth_normalized, NPOWER);
      alpha[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha[i] < 0.0) {
      std::cout << "CPML alpha < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    // half the grid points
    depth_in_pml = (nPml - i - 0.5) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp_half[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K_half[i] = 1.0 + (K_MAX_PML - 1.0) * pow(depth_normalized, NPOWER);
      alpha_half[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha_half[i] < 0.0) {
      std::cout << "CPML alpha_half < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    // right edge
    depth_in_pml = (nPml - N + i) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K[i] = 1.0 + (K_MAX_PML - 1.0) * pow(depth_normalized, NPOWER);
      alpha[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha[i] < 0.0) {
      std::cout << "CPML alpha < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    depth_in_pml = (nPml - N + i + 0.5) * dh;
    if (depth_in_pml >= 0.0) {
      depth_normalized = depth_in_pml / thickness_PML;
      damp_half[i] =
          d0_h * (c1 * depth_normalized + c2 * pow(depth_normalized, NPOWER) +
                  c3 * pow(depth_normalized, 2 * NPOWER));
      K_half[i] = 1.0 + (K_MAX_PML - 1.0) * powf(depth_normalized, NPOWER);
      alpha_half[i] = ALPHA_MAX_PML * (1.0 - depth_normalized);
    }
    if (alpha_half[i] < 0.0) {
      std::cout << "CPML alpha_half < 0.0 --" << __LINE__ << std::endl;
      exit(1);
    }
    if (alpha[i] < 0.0) {
      alpha[i] = 0.0;
    }
    if (alpha_half[i] < 0.0) {
      alpha_half[i] = 0.0;
    }
    // recursive-convolution coefficients from damp/K/alpha
    b[i] = expf(-(damp[i] / K[i] + alpha[i]) * dt);
    b_half[i] = expf(-(damp_half[i] / K_half[i] + alpha_half[i]) * dt);
    // a is only nonzero where there is actual damping
    if (fabs(damp[i]) > 1.0e-6) {
      a[i] = damp[i] * (b[i] - 1.0) / (K[i] * (damp[i] + K[i] * alpha[i]));
    }
    if (fabs(damp_half[i]) > 1.0e-6) {
      a_half[i] = damp_half[i] * (b_half[i] - 1.0) /
                  (K_half[i] * (damp_half[i] + K_half[i] * alpha_half[i]));
    }
  }
  free(damp);
  free(damp_half);
  free(alpha);
  free(alpha_half);
}
// Dongzhuo Li 05/15/2019
// Kernel: save boundary strips of the wavefield d_field into the boundary
// buffer d_bnd at time index indT (used to reconstruct the forward field
// during backpropagation; inverse of to_bnd). The 1D index idxBnd walks
// four segments in order: left strip (nLayerStore columns of height nzBnd),
// right strip, top strip (nLayerStore rows of width nxBnd), bottom strip.
// The "+ nPml - 2" / "- 1 + 2" offsets position the strips relative to the
// PML-interior region with a 2-cell margin; nPad accounts for bottom
// padding. len_Bnd_vec enters only through the d_bnd indexing macro stride;
// nSteps is unused here.
__global__ void from_bnd(float *d_field, float *d_bnd, int nz, int nx,
                         int nzBnd, int nxBnd, int len_Bnd_vec, int nLayerStore,
                         int indT, int nPml, int nPad, int nSteps) {
  int idxBnd = threadIdx.x + blockDim.x * blockIdx.x;
  int iRow, jCol;
  if (idxBnd >= 0 && idxBnd <= nLayerStore * nzBnd - 1) {
    // left strip
    jCol = idxBnd / nzBnd;
    iRow = idxBnd - jCol * nzBnd;
    d_bnd(idxBnd, indT) = d_field((iRow + nPml - 2), (jCol + nPml - 2));
  } else if (idxBnd >= nLayerStore * nzBnd &&
             idxBnd <= 2 * nLayerStore * nzBnd - 1) {
    // right strip
    jCol = (idxBnd - nLayerStore * nzBnd) / nzBnd;
    iRow = (idxBnd - nLayerStore * nzBnd) - jCol * nzBnd;
    d_bnd(idxBnd, indT) =
        d_field((iRow + nPml - 2), (nx - nPml - jCol - 1 + 2));
  } else if (idxBnd >= 2 * nLayerStore * nzBnd &&
             idxBnd <= nLayerStore * (2 * nzBnd + nxBnd) - 1) {
    // top strip
    iRow = (idxBnd - 2 * nLayerStore * nzBnd) / nxBnd;
    jCol = (idxBnd - 2 * nLayerStore * nzBnd) - iRow * nxBnd;
    d_bnd(idxBnd, indT) = d_field((iRow + nPml - 2), (jCol + nPml - 2));
  } else if (idxBnd >= nLayerStore * (2 * nzBnd + nxBnd) &&
             idxBnd <= 2 * nLayerStore * (nzBnd + nxBnd) - 1) {
    // bottom strip (above the nPad padding rows)
    iRow = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) / nxBnd;
    jCol = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) - iRow * nxBnd;
    d_bnd(idxBnd, indT) =
        d_field((nz - nPml - nPad - iRow - 1 + 2), (jCol + nPml - 2));
  } else {
    return;
  }
}
// Dongzhuo Li 05/15/2019
// Kernel: restore the boundary strips stored by from_bnd back into the
// wavefield d_field at time index indT (exact mirror of from_bnd; same
// four-segment index layout: left, right, top, bottom strips).
__global__ void to_bnd(float *d_field, float *d_bnd, int nz, int nx, int nzBnd,
                       int nxBnd, int len_Bnd_vec, int nLayerStore, int indT,
                       int nPml, int nPad, int nSteps) {
  int idxBnd = threadIdx.x + blockDim.x * blockIdx.x;
  int iRow, jCol;
  if (idxBnd >= 0 && idxBnd <= nLayerStore * nzBnd - 1) {
    // left strip
    jCol = idxBnd / nzBnd;
    iRow = idxBnd - jCol * nzBnd;
    d_field((iRow + nPml - 2), (jCol + nPml - 2)) = d_bnd(idxBnd, indT);
  } else if (idxBnd >= nLayerStore * nzBnd &&
             idxBnd <= 2 * nLayerStore * nzBnd - 1) {
    // right strip
    jCol = (idxBnd - nLayerStore * nzBnd) / nzBnd;
    iRow = (idxBnd - nLayerStore * nzBnd) - jCol * nzBnd;
    d_field((iRow + nPml - 2), (nx - nPml - jCol - 1 + 2)) =
        d_bnd(idxBnd, indT);
  } else if (idxBnd >= 2 * nLayerStore * nzBnd &&
             idxBnd <= nLayerStore * (2 * nzBnd + nxBnd) - 1) {
    // top strip
    iRow = (idxBnd - 2 * nLayerStore * nzBnd) / nxBnd;
    jCol = (idxBnd - 2 * nLayerStore * nzBnd) - iRow * nxBnd;
    d_field((iRow + nPml - 2), (jCol + nPml - 2)) = d_bnd(idxBnd, indT);
  } else if (idxBnd >= nLayerStore * (2 * nzBnd + nxBnd) &&
             idxBnd <= 2 * nLayerStore * (nzBnd + nxBnd) - 1) {
    // bottom strip (above the nPad padding rows)
    iRow = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) / nxBnd;
    jCol = (idxBnd - nLayerStore * (2 * nzBnd + nxBnd)) - iRow * nxBnd;
    d_field((nz - nPml - nPad - iRow - 1 + 2), (jCol + nPml - 2)) =
        d_bnd(idxBnd, indT);
  } else {
    return;
  }
}
// // Dongzhuo Li 02/24/2019
// __global__ void from_bnd(float *d_field, float *d_bnd, int nz, int nx, int
// nzBnd, \
// int nxBnd, int len_Bnd_vec, int nLayerStore, int indT, int nPml, int nPad,
// int nSteps) {
// int idxBnd = threadIdx.x + blockDim.x*blockIdx.x;
// int iRow,jCol;
// if(idxBnd>=0 && idxBnd<=nLayerStore*nzBnd-1) {
// jCol = idxBnd/nzBnd;
// iRow = idxBnd - jCol*nzBnd;
// d_bnd(idxBnd,indT) = d_field((iRow),(jCol));
// }
// else if(idxBnd>=nLayerStore*nzBnd && idxBnd<=2*nLayerStore*nzBnd-1){
// jCol = (idxBnd-nLayerStore*nzBnd)/nzBnd;
// iRow = (idxBnd-nLayerStore*nzBnd) - jCol*nzBnd;
// d_bnd(idxBnd,indT) = d_field((iRow),(nx-jCol-1));
// }
// else if(idxBnd>=2*nLayerStore*nzBnd &&
// idxBnd<=nLayerStore*(2*nzBnd+nxBnd)-1) {
// iRow = (idxBnd - 2*nLayerStore*nzBnd)/nxBnd;
// jCol = (idxBnd - 2*nLayerStore*nzBnd) - iRow*nxBnd;
// d_bnd(idxBnd,indT) = d_field((iRow),(jCol));
// }
// else if(idxBnd>=nLayerStore*(2*nzBnd+nxBnd) &&
// idxBnd<=2*nLayerStore*(nzBnd+nxBnd)-1) {
// iRow = (idxBnd - nLayerStore*(2*nzBnd+nxBnd))/nxBnd;
// jCol = (idxBnd - nLayerStore*(2*nzBnd+nxBnd)) - iRow*nxBnd;
// d_bnd(idxBnd,indT) = d_field((nz-nPad-iRow-1),(jCol));
// }
// else {
// return;
// }
// // if(idxBnd>=0 && idxBnd<=2*(nzBnd+nxBnd)-1) {
// // d_bnd(idxBnd, indT) = 1.0;
// // } else {
// // return;
// // }
// }
// // Dongzhuo Li 02/24/2019
// __global__ void to_bnd(float *d_field, float *d_bnd, int nz, int nx, int
// nzBnd, \
// int nxBnd, int len_Bnd_vec, int nLayerStore, int indT, int nPml, int nPad,
// int nSteps) {
// int idxBnd = threadIdx.x + blockDim.x*blockIdx.x;
// int iRow,jCol;
// if(idxBnd>=0 && idxBnd<=nLayerStore*nzBnd-1) {
// jCol = idxBnd/nzBnd;
// iRow = idxBnd - jCol*nzBnd;
// d_field((iRow),(jCol)) = d_bnd(idxBnd,indT);
// }
// else if(idxBnd>=nLayerStore*nzBnd && idxBnd<=2*nLayerStore*nzBnd-1){
// jCol = (idxBnd-nLayerStore*nzBnd)/nzBnd;
// iRow = (idxBnd-nLayerStore*nzBnd) - jCol*nzBnd;
// d_field((iRow),(nx-jCol-1)) = d_bnd(idxBnd,indT);
// }
// else if(idxBnd>=2*nLayerStore*nzBnd &&
// idxBnd<=nLayerStore*(2*nzBnd+nxBnd)-1) {
// iRow = (idxBnd - 2*nLayerStore*nzBnd)/nxBnd;
// jCol = (idxBnd - 2*nLayerStore*nzBnd) - iRow*nxBnd;
// d_field((iRow),(jCol)) = d_bnd(idxBnd,indT);
// }
// else if(idxBnd>=nLayerStore*(2*nzBnd+nxBnd) &&
// idxBnd<=2*nLayerStore*(nzBnd+nxBnd)-1) {
// iRow = (idxBnd - nLayerStore*(2*nzBnd+nxBnd))/nxBnd;
// jCol = (idxBnd - nLayerStore*(2*nzBnd+nxBnd)) - iRow*nxBnd;
// d_field((nz-nPad-iRow-1),(jCol)) = d_bnd(idxBnd,indT);
// }
// else {
// return;
// }
// }
// Kernel: fill an nz-by-nx (z fastest) array with a sharp 2D Gaussian
// centered at the grid midpoint; used as a spatial taper for source
// injection / receiver sampling.
__global__ void src_rec_gauss_amp(float *gauss_amp, int nz, int nx) {
  int gidz = blockIdx.x * blockDim.x + threadIdx.x;
  int gidx = blockIdx.y * blockDim.y + threadIdx.y;
  if (gidz < 0 || gidz >= nz || gidx < 0 || gidx >= nx) return;
  int dz = gidz - nz / 2;
  int dx = gidx - nx / 2;
  gauss_amp[gidz + gidx * nz] =
      expf(-1000.0 * (powf(float(dz), 2) + powf(float(dx), 2)));
}
// Kernel: inject (isFor == true) or remove (time-reversed backward pass)
// the source wavelet into the stress fields over a 9x9 Gaussian-tapered
// patch centered at (z_loc, x_loc). rxz scales the sxx component relative
// to szz (crosswell borehole source; assumes cp/cs = sqrt(3.0) per the
// original note). Expects a 2D launch covering at least 9x9 threads.
__global__ void add_source(float *d_szz, float *d_sxx, float amp, int nz,
                           bool isFor, int z_loc, int x_loc, float dt,
                           float *gauss_amp, double rxz) {
  int gidz = blockIdx.x * blockDim.x + threadIdx.x;
  int gidx = blockIdx.y * blockDim.y + threadIdx.y;
  if (gidz < 0 || gidz >= 9 || gidx < 0 || gidx >= 9) return;
  // float constant instead of pow(1500.0, 2): avoids a double-precision
  // library call per thread; 2250000 is exactly representable in float.
  const float scale = 1500.0f * 1500.0f;
  // forward modeling adds the source; backpropagation subtracts it again.
  // The two original branches were duplicates differing only in sign.
  const float sign = isFor ? 1.0f : -1.0f;
  int idz = gidz - 9 / 2;
  int idx = gidx - 9 / 2;
  float contrib = sign * scale * amp * dt * gauss_amp[gidz + gidx * 9];
  d_szz[(z_loc + idz) + nz * (x_loc + idx)] += contrib;
  d_sxx[(z_loc + idz) + nz * (x_loc + idx)] += rxz * contrib;
}
// Kernel: sample szz (plus the rxz-weighted sxx) at each receiver location
// into the data gather at time step `it`. One thread per receiver; iShot is
// unused here (kept for the caller's signature).
__global__ void recording(float *d_szz, float *d_sxx, int nz, float *d_data,
                          int iShot, int it, int nSteps, int nrec, int *d_z_rec,
                          int *d_x_rec, double *d_rxz) {
  int r = threadIdx.x + blockDim.x * blockIdx.x;
  if (r >= nrec) return;
  int cell = d_z_rec[r] + d_x_rec[r] * nz;
  d_data[r * nSteps + it] = d_szz[cell] + d_rxz[r] * d_sxx[cell];
}
// Kernel: inject the data residual into the adjoint stress fields at the
// receiver positions (adjoint of `recording`). One thread per receiver;
// dt is unused here (kept for the caller's signature).
__global__ void res_injection(float *d_szz_adj, float *d_sxx_adj, int nz,
                              float *d_res, int it, float dt, int nSteps,
                              int nrec, int *d_z_rec, int *d_x_rec,
                              double *d_rxz) {
  int r = threadIdx.x + blockDim.x * blockIdx.x;
  if (r >= nrec) return;
  int cell = d_z_rec[r] + nz * d_x_rec[r];
  float res = d_res[r * nSteps + it];
  d_szz_adj[cell] += res;
  d_sxx_adj[cell] += d_rxz[r] * res;
}
// Kernel: gradient of the misfit w.r.t. the source time function at step
// `it`: the (negated, dt-scaled) adjoint wavefield sampled at the source
// location. Only the first global thread writes; launch with a tiny grid.
__global__ void source_grad(float *d_szz_adj, float *d_sxx_adj, int nz,
                            float *d_StfGrad, int it, float dt, int z_src,
                            int x_src, double rxz) {
  if (threadIdx.x + blockDim.x * blockIdx.x != 0) return;
  int cell = z_src + nz * x_src;
  d_StfGrad[it] = -(d_szz_adj[cell] + rxz * d_sxx_adj[cell]) * dt;
}
// Dongzhuo Li 01/28/2019
// Kernel: apply a cosine-tapered band-pass to the one-sided spectra of
// nrec traces. Corner frequencies f0 < f1 < f2 < f3: ramp up on [f0, f1),
// flat pass band [f1, f2), ramp down on [f2, f3), zero elsewhere. The
// amplitude is applied SQUARED to both real and imaginary parts (zero
// phase). 2D launch: x = frequency bin (nf = nSteps/2 + 1 one-sided bins
// for a length-nSteps transform), y = trace.
__global__ void cuda_bp_filter1d(int nSteps, float dt, int nrec,
                                 cufftComplex *d_data_F, float f0, float f1,
                                 float f2, float f3) {
  int nf = nSteps / 2 + 1;
  float df = 1.0 / dt / nSteps;  // frequency-bin spacing
  int idf = blockIdx.x * blockDim.x + threadIdx.x;
  int idr = blockIdx.y * blockDim.y + threadIdx.y;
  int ip = idr * nf + idf;
  float freq = idf * df;
  float filter_amp = 1.0;
  // printf("fffffff = %f\n", freq);
  if (idf >= 0 && idf < nf && idr >= 0 && idr < nrec) {
    if (freq >= f0 && freq < f1) {
      filter_amp = sin(PI / 2.0 * (freq - f0) / (f1 - f0));
    } else if (freq >= f1 && freq < f2) {
      filter_amp = 1.0;
    } else if (freq >= f2 && freq < f3) {
      filter_amp = cos(PI / 2.0 * (freq - f2) / (f3 - f2));
    } else {
      filter_amp = 0.0;
    }
    d_data_F[ip].x *= filter_amp * filter_amp;
    d_data_F[ip].y *= filter_amp * filter_amp;
  }
}
// Kernel: multiply each trace's spectrum by the shared per-frequency
// complex coefficient d_coef[f]. 2D launch: x = frequency bin, y = trace.
__global__ void cuda_filter1d(int nf, int nrec, cuFloatComplex *d_data_F,
                              cuFloatComplex *d_coef) {
  int f = blockIdx.x * blockDim.x + threadIdx.x;
  int r = blockIdx.y * blockDim.y + threadIdx.y;
  if (f >= nf || r >= nrec) return;
  int ip = r * nf + f;
  d_data_F[ip] = cuCmulf(d_data_F[ip], d_coef[f]);
}
// Kernel: scale every element of an nz-by-nx array (z fastest) by `factor`.
// 2D launch: x = z index, y = x index. A zero factor is treated as a
// caller error: warned and skipped, since it would wipe the data.
__global__ void cuda_normalize(int nz, int nx, float *data, float factor) {
  int idz = blockIdx.x * blockDim.x + threadIdx.x;
  int idx = blockIdx.y * blockDim.y + threadIdx.y;
  if (factor == 0.0) {
    // BUG FIX: the old message said "Dividing by zero!" although this
    // kernel multiplies; also print from a single thread instead of
    // flooding stdout from every thread in the grid.
    if (idz == 0 && idx == 0) printf("cuda_normalize: zero scale factor!\n");
    return;
  }
  if (idz >= 0 && idz < nz && idx >= 0 && idx < nx) {
    data[idx * nz + idz] *= factor;
  } else {
    return;
  }
}
// windowing in the time axis
// Kernel: apply a per-trace squared-cosine time window to the gather.
// d_win_start/d_win_end hold each trace's window [t0, t3] in seconds;
// `ratio` sets the taper length (offset = (t3 - t0) * ratio at each end).
// The windowed sample is additionally scaled by the per-receiver weight
// and the per-source weight. 2D launch: x = time sample, y = trace.
__global__ void cuda_window(int nt, int nrec, float dt, float *d_win_start,
                            float *d_win_end, float *d_weights,
                            float src_weight, float ratio, float *data) {
  int idt = blockIdx.x * blockDim.x + threadIdx.x;
  int idr = blockIdx.y * blockDim.y + threadIdx.y;
  int ip = idr * nt + idt;
  // stupid bug... (I put the if just befor line 614)
  if (idt >= 0 && idt < nt && idr >= 0 && idr < nrec) {
    float window_amp = 1.0;
    float t = idt * dt;
    // tapers may not overlap: each must be < half the window
    if (ratio > 0.5) {
      printf("Dividing by zero!\n");
      return;
    }
    float t0 = d_win_start[idr];
    float t3 = d_win_end[idr];
    if (t0 == 0.0 && t3 == 0.0) printf("t0 = %f, t3 = %f\n\n", t0, t3);
    // clamp the window into the valid time range [0, nt*dt]
    float t_max = nt * dt;
    if (t0 < 0.0) t0 = 0.0;
    if (t0 > t_max) t0 = t_max;
    if (t3 < 0.0) t3 = 0.0;
    if (t3 > t_max) t3 = t_max;
    float offset = (t3 - t0) * ratio;
    if (offset <= 0.0) {
      printf("Window error 1!!\n");
      printf("offset = %f\n", offset);
      return;
    }
    float t1 = t0 + offset;
    float t2 = t3 - offset;
    // cosine ramp up on [t0,t1), flat on [t1,t2), ramp down on [t2,t3)
    if (t >= t0 && t < t1) {
      window_amp = sin(PI / 2.0 * (t - t0) / (t1 - t0));
    } else if (t >= t1 && t < t2) {
      window_amp = 1.0;
    } else if (t >= t2 && t < t3) {
      window_amp = cos(PI / 2.0 * (t - t2) / (t3 - t2));
    } else {
      window_amp = 0.0;
    }
    // squared taper (zero-slope edges) plus receiver/source weighting
    data[ip] *= window_amp * window_amp * d_weights[idr] * src_weight;
  } else {
    return;
  }
}
// overloaded window function: without specifying windows and weights
// Kernel: same squared-cosine taper but over the full trace [0, nt*dt],
// with taper length ratio * nt * dt at each end and no per-trace
// weighting. 2D launch: x = time sample, y = trace.
__global__ void cuda_window(int nt, int nrec, float dt, float ratio,
                            float *data) {
  int idt = blockIdx.x * blockDim.x + threadIdx.x;
  int idr = blockIdx.y * blockDim.y + threadIdx.y;
  int ip = idr * nt + idt;
  if (idt >= 0 && idt < nt && idr >= 0 && idr < nrec) {
    float window_amp = 1.0;
    float t = idt * dt;
    // if (ratio > 0.5) {
    //   printf("Dividing by zero!\n");
    //   return;
    // }
    float t0 = 0;
    float t3 = nt * dt;
    float offset = nt * dt * ratio;
    // the two tapers must not overlap
    if (2.0 * offset >= t3 - t0) {
      printf("Window error 2!\n");
      return;
    }
    float t1 = t0 + offset;
    float t2 = t3 - offset;
    // cosine ramp up on [t0,t1), flat on [t1,t2), ramp down on [t2,t3)
    if (t >= t0 && t < t1) {
      window_amp = sin(PI / 2.0 * (t - t0) / (t1 - t0));
    } else if (t >= t1 && t < t2) {
      window_amp = 1.0;
    } else if (t >= t2 && t < t3) {
      window_amp = cos(PI / 2.0 * (t - t2) / (t3 - t2));
    } else {
      window_amp = 0.0;
    }
    data[ip] *= window_amp * window_amp;
  }
}
// Array padding
// Kernel: copy between an nz-by-nx array and the top-left corner of an
// nz_pad-by-nx_pad padded array (both z fastest). isEmbed == true copies
// small -> padded; false crops padded -> small. 2D launch covering (nz, nx).
__global__ void cuda_embed_crop(int nz, int nx, float *d_data, int nz_pad,
                                int nx_pad, float *d_data_pad, bool isEmbed) {
  int idz = blockIdx.x * blockDim.x + threadIdx.x;
  int idx = blockIdx.y * blockDim.y + threadIdx.y;
  if (idz >= nz || idx >= nx) return;
  int ip = idx * nz + idz;
  int ip_pad = idx * nz_pad + idz;
  if (isEmbed) {
    d_data_pad[ip_pad] = d_data[ip];
  } else {
    d_data[ip] = d_data_pad[ip_pad];
  }
}
// Dongzhuo Li 02/02/2019
// Kernel: per-frequency Wiener-style source update. One block per
// frequency bin (bid); threads stride over receivers accumulating
//   nominator   = sum_r conj(cal_r) * obs_r
//   denominator = sum_r |cal_r|^2
// in shared memory. After a tree reduction, thread 0 forms the damped
// ratio coef = nominator / (denominator + lambda), stores it in d_coef,
// multiplies the source spectrum by it, and finally all threads scale the
// calculated-data spectra by the same coef.
// NOTE(review): assumes blockDim.x == Block_Size (512) and power-of-two
// blockDim.x for the reduction -- confirm at launch sites.
__global__ void cuda_spectrum_update(int nf, int nrec,
                                     cuFloatComplex *d_data_obs_F,
                                     cuFloatComplex *d_data_cal_F,
                                     cuFloatComplex *d_source_F,
                                     cuFloatComplex *d_coef) {
  int idr = 0, idf = 0, ip = 0;
  const int Block_Size = 512;
  const float lambda = 1e-6;  // damping to stabilize the division
  cuFloatComplex c_obs = make_cuFloatComplex(0.0f, 0.0f);
  cuFloatComplex c_cal = make_cuFloatComplex(0.0f, 0.0f);
  cuFloatComplex c_nominator = make_cuFloatComplex(0.0f, 0.0f);
  cuFloatComplex c_denominator = make_cuFloatComplex(0.0f, 0.0f);
  __shared__ cuFloatComplex sh_nominator_F[Block_Size];
  __shared__ cuFloatComplex sh_denominator_F[Block_Size];
  int tid =
      threadIdx.x;  // one thread handles s receivers (with 512 as the interval)
  int bid = blockIdx.x;  // one block handles one frequency
  sh_nominator_F[tid] = make_cuFloatComplex(0.0f, 0.0f);
  sh_denominator_F[tid] = make_cuFloatComplex(0.0f, 0.0f);
  __syncthreads();
  // strided accumulation over receivers
  for (int s = 0; s < (nrec + Block_Size - 1) / Block_Size; s++) {
    idr = s * blockDim.x + tid;
    idf = bid;
    ip = idr * nf + idf;
    if (idr >= 0 && idr < nrec && idf >= 0 && idf < nf) {
      c_obs = d_data_obs_F[ip];
      c_cal = d_data_cal_F[ip];
      sh_nominator_F[tid] =
          cuCaddf(sh_nominator_F[tid], cuCmulf(cuConjf(c_cal), c_obs));
      sh_denominator_F[tid] =
          cuCaddf(sh_denominator_F[tid], cuCmulf(cuConjf(c_cal), c_cal));
    }
  }
  __syncthreads();
  // do reduction in shared memory
  for (int s = blockDim.x / 2; s >= 1; s /= 2) {
    if (tid < s) {
      sh_nominator_F[tid] =
          cuCaddf(sh_nominator_F[tid], sh_nominator_F[tid + s]);
      sh_denominator_F[tid] =
          cuCaddf(sh_denominator_F[tid], sh_denominator_F[tid + s]);
    }
    __syncthreads();
  }
  if (tid == 0) {
    // damped complex ratio; also applied to the source spectrum
    sh_denominator_F[0].x += lambda;
    // printf("nomi = %f, deno = %f\n", cuCabsf(sh_nominator_F[0]),
    // cuCabsf(sh_denominator_F[0]));
    sh_nominator_F[0] = cuCdivf(sh_nominator_F[0], sh_denominator_F[0]);
    // printf("coef = %f", sh_nominator_F[0].x);
    d_coef[bid] = sh_nominator_F[0];
    d_source_F[bid] = cuCmulf(d_source_F[bid], sh_nominator_F[0]);
  }
  // printf("tid = %d\n", tid);
  // barrier so every thread sees thread 0's final coef in sh_nominator_F[0]
  __syncthreads();
  for (int s = 0; s < (nrec + Block_Size - 1) / Block_Size; s++) {
    idr = s * blockDim.x + tid;
    idf = bid;
    ip = idr * nf + idf;
    if (idr >= 0 && idr < nrec && idf >= 0 && idf < nf) {
      d_data_cal_F[ip] = cuCmulf(d_data_cal_F[ip], sh_nominator_F[0]);
      // d_data_cal_F[ip].x *= cuCabsf(sh_nominator_F[0]);
      // d_data_cal_F[ip].y *= cuCabsf(sh_nominator_F[0]);
      // if (tid == 0) printf("ratio = %f\n", cuCabsf(sh_nominator_F[0]));
    }
  }
  __syncthreads();
}
// Kernel: maximum of |data[i]| over n elements, written to maxval[0].
// Designed for a SINGLE-block launch (callers use <<<1, 512>>>): each
// thread strides over the array collecting its running absolute maximum,
// then a shared-memory tree reduction takes the overall max.
// NOTE(review): assumes power-of-two blockDim.x <= 512 for the reduction.
__global__ void cuda_find_absmax(int n, float *data, float *maxval) {
  int tid =
      threadIdx.x;  // one thread handles s receivers (with 512 as the interval)
  const int Block_Size = 512;
  __shared__ float sh_data[Block_Size];
  sh_data[tid] = 0.0;
  __syncthreads();
  // strided pass: keep the largest |value| seen by this thread
  for (int s = 0; s < (n + Block_Size - 1) / Block_Size; s++) {
    int ip = s * blockDim.x + tid;
    if (ip >= 0 && ip < n) {
      if (fabs(data[ip]) > fabs(sh_data[tid])) sh_data[tid] = fabs(data[ip]);
    }
  }
  __syncthreads();
  // do reduction in shared memory
  for (int s = blockDim.x / 2; s >= 1; s /= 2) {
    if (tid < s) {
      sh_data[tid] =
          (sh_data[tid] >= sh_data[tid + s]) ? sh_data[tid] : sh_data[tid + s];
    }
    __syncthreads();
  }
  if (tid == 0) maxval[0] = sh_data[0];
  __syncthreads();
}
// Dongzhuo Li's last attempt - 09/26/2019
// find out the norm square of each trace for normalization
// number of blocks = number of traces
// size of a block = 512
// Kernel: normfact[trace] = <data1_trace, data2_trace> + DIVCONST, i.e. the
// inner product of the two length-nt traces (norm squared when data1 ==
// data2), with a small constant added to keep later divisions safe.
__global__ void cuda_find_normfact(int nt, int nrec, float *data1, float *data2,
                                   float *normfact) {
  // one thread handles s time samples (with 512 as the interval)
  int tid = threadIdx.x;
  int bid = blockIdx.x;  // one block handles one trace
  const int Block_Size = 512;
  __shared__ float sh_data[Block_Size];
  sh_data[tid] = 0.0;
  __syncthreads();
  // strided accumulation of the per-thread partial inner product
  for (int s = 0; s < (nt + Block_Size - 1) / Block_Size; s++) {
    int ip = s * blockDim.x + tid + bid * nt;
    int time_id = s * blockDim.x + tid;
    if (time_id >= 0 && time_id < nt && bid >= 0 && bid < nrec) {
      sh_data[tid] += data1[ip] * data2[ip];
    }
    __syncthreads();
  }
  // do reduction in shared memory
  for (int s = blockDim.x / 2; s >= 1; s /= 2) {
    if (tid < s) {
      sh_data[tid] += sh_data[tid + s];
    }
    __syncthreads();
  }
  if (tid == 0) {
    normfact[bid] = sh_data[0] + DIVCONST;  // add a very small number
    // printf("norm = %f\n", sh_data[0]);
  }
  __syncthreads();
}
//
// // normalize each trace by its normfact
// __global__ void cuda_normal_traces(int nt, int nrec, float *normfact,
// float *data) {
// int idt = blockIdx.x * blockDim.x + threadIdx.x;
// int idr = blockIdx.y * blockDim.y + threadIdx.y;
// int ip = idr * nt + idt;
// if (idt >= 0 && idt < nt && idr >= 0 && idr < nrec) {
// data[ip] = data[ip] / sqrt(normfact[idr]);
// } else {
// return;
// }
// }
//
// normalized zero-lag cross-correlation misfit function
// Kernel: misfit = -2 * sum_r w_r * src_w * <obs_r, cal_r> /
//                       (||obs_r|| * ||cal_r||)
// where the inner products / norms come in pre-reduced via the normfact
// arrays (see cuda_find_normfact). Single-block launch; threads stride
// over receivers, then a shared-memory tree reduction sums the terms. The
// factor -2 compensates a 0.5 applied by the caller (per original note).
__global__ void cuda_normal_misfit(int nrec, float *d_cross_normfact,
                                   float *d_obs_normfact, float *d_cal_normfact,
                                   float *misfit, float *d_weights,
                                   float src_weight) {
  // one thread handles s receivers (with 512 as the interval)
  int tid = threadIdx.x;
  const int Block_Size = 512;
  __shared__ float sh_data[Block_Size];
  sh_data[tid] = 0.0;
  __syncthreads();
  // strided accumulation of weighted normalized correlations
  for (int s = 0; s < (nrec + Block_Size - 1) / Block_Size; s++) {
    int ip = s * blockDim.x + tid;
    if (ip >= 0 && ip < nrec) {
      sh_data[tid] += d_cross_normfact[ip] /
                      (sqrt(d_obs_normfact[ip]) * sqrt(d_cal_normfact[ip])) *
                      d_weights[ip] * src_weight;
    }
  }
  __syncthreads();
  // do reduction in shared memory
  for (int s = blockDim.x / 2; s >= 1; s /= 2) {
    if (tid < s) {
      sh_data[tid] += sh_data[tid + s];
    }
    __syncthreads();
  }
  if (tid == 0) *misfit = -2.0 * sh_data[0];  // since I multiply 0.5 later
  __syncthreads();
}
//
// apply the weighting factor to the residual
__global__ void cuda_normal_adjoint_source(
int nt, int nrec, float *d_obs_normfact, float *d_cal_normfact,
float *d_cross_normfact, float *d_data_obs, float *d_data, float *d_res,
float *d_weights, float src_weight) {
int idt = blockIdx.x * blockDim.x + threadIdx.x;
int idr = blockIdx.y * blockDim.y + threadIdx.y;
int ip = idr * nt + idt;
if (idt >= 0 && idt < nt && idr >= 0 && idr < nrec) {
d_res[ip] = (d_data_obs[ip] -
d_cross_normfact[idr] / d_cal_normfact[idr] * d_data[ip]) /
(sqrt(d_obs_normfact[idr]) * sqrt(d_cal_normfact[idr])) *
d_weights[idr] * src_weight;
// d_res[ip] = (d_data_obs[ip] - d_data[ip]);
// if (idt == 0) {
// printf("cross-cal-ratio = %f\n",
// d_cross_normfact[idr] / d_cal_normfact[idr]);
// }
} else {
return;
}
}
// 1D band-pass filtering wrapper code
// Steps: padding, FFT, filtering, IFFT, cropping
// Filters the nrec device traces of d_data (nSteps samples each) in place;
// filter = {f0, f1, f2, f3} are the cosine-taper corner frequencies.
void bp_filter1d(int nSteps, float dt, int nrec, float *d_data, float *filter) {
  int nSteps_pad = 2 * nSteps;
  int nfft = nSteps_pad / 2 + 1;
  // float df = 1.0/dt/nSteps_pad;
  float *d_data_pad;
  float f0 = filter[0];
  float f1 = filter[1];
  float f2 = filter[2];
  float f3 = filter[3];
  cufftHandle plan_f, plan_b;
  cufftComplex *d_data_F;
  dim3 threads(TX, TY);
  dim3 blocks((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
  // BUG FIX: kernels touching the padded buffers / full spectrum need a
  // grid sized by nSteps_pad. The old code reused `blocks` (sized by
  // nSteps), leaving the padding region uninitialized before the FFT and
  // skipping the top frequency bins in the filter.
  dim3 blocks_pad((nSteps_pad + TX - 1) / TX, (nrec + TY - 1) / TY);
  // pad data: zero the whole padded buffer, then embed the traces
  CHECK(cudaMalloc((void **)&d_data_pad, nSteps_pad * nrec * sizeof(float)));
  intialArrayGPU<<<blocks_pad, threads>>>(d_data_pad, nSteps_pad, nrec, 0.0);
  cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data, nSteps_pad, nrec,
                                       d_data_pad, true);
  // filtering: batched forward FFT (one transform per trace)
  CHECK(cudaMalloc((void **)&d_data_F, sizeof(cufftComplex) * nfft * nrec));
  cufftPlan1d(&plan_f, nSteps_pad, CUFFT_R2C, nrec);
  cufftExecR2C(plan_f, d_data_pad, d_data_F);  // forward FFT
  cufftDestroy(plan_f);
  // zero-phase band-pass over all nfft one-sided frequency bins
  cuda_bp_filter1d<<<blocks_pad, threads>>>(nSteps_pad, dt, nrec, d_data_F, f0,
                                            f1, f2, f3);
  cufftPlan1d(&plan_b, nSteps_pad, CUFFT_C2R, nrec);
  cufftExecC2R(plan_b, d_data_F, d_data_pad);  // inverse FFT
  cufftDestroy(plan_b);
  // crop data back to the original trace length
  cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data, nSteps_pad, nrec,
                                       d_data_pad, false);
  // normalization (cuFFT is unnormalized; the R2C/C2R round trip scales by
  // the padded length nSteps_pad)
  cuda_normalize<<<blocks, threads>>>(nSteps, nrec, d_data,
                                      1 / float(nSteps_pad));
  CHECK(cudaFree(d_data_F));
  CHECK(cudaFree(d_data_pad));
}
// source signature and calculated data update
// Steps: padding, FFT, compute spectrum, filtering, IFFT, cropping
float source_update(int nSteps, float dt, int nrec, float *d_data_obs,
float *d_data_cal, float *d_source,
cuFloatComplex *d_coef) {
int nSteps_pad = 2 * nSteps;
int nfft = nSteps_pad / 2 + 1;
float *d_data_obs_pad, *d_data_cal_pad, *d_source_pad;
cufftHandle plan_f, plan_b;
cufftComplex *d_data_obs_F, *d_data_cal_F, *d_source_F;
dim3 threads(TX, TY);
dim3 blocks((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
dim3 blocks_pad((nSteps_pad + TX - 1) / TX, (nrec + TY - 1) / TY);
// pad data and window data
CHECK(
cudaMalloc((void **)&d_data_obs_pad, nSteps_pad * nrec * sizeof(float)));
CHECK(
cudaMalloc((void **)&d_data_cal_pad, nSteps_pad * nrec * sizeof(float)));
CHECK(cudaMalloc((void **)&d_source_pad, nSteps_pad * sizeof(float)));
intialArrayGPU<<<blocks, threads>>>(d_data_obs_pad, nSteps_pad, nrec, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_data_cal_pad, nSteps_pad, nrec, 0.0);
intialArrayGPU<<<(nSteps_pad + 31) / 32, 32>>>(d_source_pad, nSteps_pad, 1,
0.0);
cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data_obs, nSteps_pad,
nrec, d_data_obs_pad, true);
cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data_cal, nSteps_pad,
nrec, d_data_cal_pad, true);
cuda_window<<<blocks_pad, threads>>>(nSteps_pad, nrec, dt, 0.01,
d_data_obs_pad);
cuda_window<<<blocks_pad, threads>>>(nSteps_pad, nrec, dt, 0.01,
d_data_cal_pad);
cuda_embed_crop<<<(nSteps_pad + 31) / 32, 32>>>(
nSteps, 1, d_source, nSteps_pad, 1, d_source_pad, true);
// CHECK(cudaMemcpy(h_test, d_data_pad, nSteps * sizeof(float),
// cudaMemcpyDeviceToHost)); displayArray("h_test", h_test, nSteps_pad, 1);
// // filtering
CHECK(cudaMalloc((void **)&d_data_obs_F, sizeof(cufftComplex) * nfft * nrec));
CHECK(cudaMalloc((void **)&d_data_cal_F, sizeof(cufftComplex) * nfft * nrec));
CHECK(cudaMalloc((void **)&d_source_F, sizeof(cufftComplex) * nfft))
cufftPlan1d(&plan_f, nSteps_pad, CUFFT_R2C, nrec);
cufftExecR2C(plan_f, d_data_obs_pad,
d_data_obs_F); // forward FFT of observed data
cufftExecR2C(plan_f, d_data_cal_pad,
d_data_cal_F); // forward FFT of calculated data
cufftDestroy(plan_f);
cufftPlan1d(&plan_f, nSteps_pad, CUFFT_R2C, 1); // source FFT
cufftExecR2C(plan_f, d_source_pad, d_source_F);
cufftDestroy(plan_f);
// cuda_bp_filter1d<<<blocks,threads>>>(nSteps_pad, dt, nrec, d_data_F, f0,
// f1, f2, f3);
cuda_spectrum_update<<<nfft, 512>>>(nfft, nrec, d_data_obs_F, d_data_cal_F,
d_source_F, d_coef);
cufftPlan1d(&plan_b, nSteps_pad, CUFFT_C2R, nrec);
cufftExecC2R(plan_b, d_data_cal_F, d_data_cal_pad); // inverse FFT
cufftDestroy(plan_b);
cufftPlan1d(&plan_b, nSteps_pad, CUFFT_C2R, 1);
cufftExecC2R(plan_b, d_source_F, d_source_pad); // inverse FFT
cufftDestroy(plan_b);
// CHECK(cudaMemcpy(h_test, d_data_pad, nSteps * sizeof(float),
// cudaMemcpyDeviceToHost)); displayArray("h_test", h_test, nSteps_pad, 1);
// crop data
cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data_cal, nSteps_pad,
nrec, d_data_cal_pad, false);
cuda_embed_crop<<<(nSteps + 31) / 32, 32>>>(nSteps, 1, d_source, nSteps_pad,
1, d_source_pad, false);
// normalization (in the padded fft, the length is nSteps_pad)
// printf("amp = %f\n", amp_ratio);
cuda_normalize<<<blocks, threads>>>(nSteps, nrec, d_data_cal,
1.0f / float(nSteps_pad));
cuda_normalize<<<(nSteps + 31) / 32, 32>>>(nSteps, 1, d_source,
1.0f / float(nSteps_pad));
float amp_ratio = amp_ratio_comp(nSteps * nrec, d_data_obs, d_data_cal);
// cuda_normalize<<<blocks,threads>>>(nSteps, nrec, d_data_cal, amp_ratio);
// cuda_normalize<<<(nSteps+31)/32, 32>>>(nSteps, 1, d_source,
// amp_ratio/float(nSteps_pad));
// // update amplitude
// cuda_find_absmax<<<1, 512>>>(nSteps*nrec, d_data_obs, d_obs_maxval);
// cuda_find_absmax<<<1, 512>>>(nSteps*nrec, d_data_cal, d_cal_maxval);
// CHECK(cudaMemcpy(obs_maxval, d_obs_maxval, sizeof(float),
// cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(cal_maxval, d_cal_maxval,
// sizeof(float), cudaMemcpyDeviceToHost));
// cuda_normalize<<<blocks,threads>>>(nSteps, nrec,
// d_data_cal, 1.0/amp_ratio); printf("Shot gather amplitude ratio = %f\n",
// obs_maxval[0]/cal_maxval[0]);
CHECK(cudaFree(d_data_obs_pad));
CHECK(cudaFree(d_data_cal_pad));
CHECK(cudaFree(d_data_obs_F));
CHECK(cudaFree(d_data_cal_F));
CHECK(cudaFree(d_source_pad));
CHECK(cudaFree(d_source_F));
return amp_ratio;
}
// source signature and calculated data update
// Steps: padding, FFT, compute spectrum, filtering, IFFT, cropping
void source_update_adj(int nSteps, float dt, int nrec, float *d_data,
float amp_ratio, cuFloatComplex *d_coef) {
int nSteps_pad = 2 * nSteps;
int nfft = nSteps_pad / 2 + 1;
float *d_data_pad;
cufftHandle plan_f, plan_b;
cufftComplex *d_data_F;
dim3 threads(TX, TY);
dim3 blocks((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
dim3 blocks_pad((nSteps_pad + TX - 1) / TX, (nrec + TY - 1) / TY);
// cuda_normalize<<<blocks,threads>>>(nSteps, nrec, d_data, amp_ratio);
// pad data
CHECK(cudaMalloc((void **)&d_data_pad, nSteps_pad * nrec * sizeof(float)));
intialArrayGPU<<<blocks, threads>>>(d_data_pad, nSteps_pad, nrec, 0.0);
cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data, nSteps_pad, nrec,
d_data_pad, true);
cuda_window<<<blocks_pad, threads>>>(nSteps_pad, nrec, dt, 0.01, d_data_pad);
CHECK(cudaMalloc((void **)&d_data_F, sizeof(cufftComplex) * nfft * nrec));
cufftPlan1d(&plan_f, nSteps_pad, CUFFT_R2C, nrec);
cufftExecR2C(plan_f, d_data_pad, d_data_F);
cufftDestroy(plan_f);
// update data
cuda_filter1d<<<blocks, threads>>>(nfft, nrec, d_data_F, d_coef);
cufftPlan1d(&plan_b, nSteps_pad, CUFFT_C2R, nrec);
cufftExecC2R(plan_b, d_data_F, d_data_pad); // inverse FFT
cufftDestroy(plan_b);
// crop data
cuda_embed_crop<<<blocks, threads>>>(nSteps, nrec, d_data, nSteps_pad, nrec,
d_data_pad, false);
// normalization (in the padded fft, the length is nSteps_pad)
// printf("amp_adj = %f\n", amp_ratio);
cuda_normalize<<<blocks, threads>>>(nSteps, nrec, d_data,
amp_ratio / float(nSteps_pad));
CHECK(cudaFree(d_data_pad));
CHECK(cudaFree(d_data_F));
}
float amp_ratio_comp(int n, float *d_data_obs, float *d_data_cal) {
float *obs_maxval = nullptr, *cal_maxval = nullptr;
float *d_obs_maxval, *d_cal_maxval;
obs_maxval = (float *)malloc(sizeof(float));
cal_maxval = (float *)malloc(sizeof(float));
CHECK(cudaMalloc((void **)&d_obs_maxval, sizeof(float)));
CHECK(cudaMalloc((void **)&d_cal_maxval, sizeof(float)));
cuda_find_absmax<<<1, 512>>>(n, d_data_obs, d_obs_maxval);
cuda_find_absmax<<<1, 512>>>(n, d_data_cal, d_cal_maxval);
CHECK(cudaMemcpy(obs_maxval, d_obs_maxval, sizeof(float),
cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(cal_maxval, d_cal_maxval, sizeof(float),
cudaMemcpyDeviceToHost));
// printf("Shot gather amplitude ratio = %f\n",
// obs_maxval[0]/cal_maxval[0]);
float ratio = 0.0;
if (cal_maxval[0] != 0.0) {
ratio = obs_maxval[0] / cal_maxval[0];
}
CHECK(cudaFree(d_obs_maxval));
CHECK(cudaFree(d_cal_maxval));
delete[] obs_maxval;
delete[] cal_maxval;
return ratio;
} |
7cd218409579d5530ddb01c24058d90287924b98.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef MATRIX_H
#define MATRIX_H
#include <iostream>
#include <string>
#include <stdio.h>
#include <random>
// Copia los elementos de to a from
__global__ void copyFromTo(float *from, float *to, int size);
// Copia los elementos de las filas idx de to a from
__global__ void copyFromToIdx(float *from, float *to, int *idx, int h, int w, int idx0);
class Matrix{
public:
int height, width, size;
float *h_elem, *d_elem;
float weight;
std::string dist;
bool allocated;
// public:
Matrix();
Matrix(int height, int width, std::string dist = "uniform", float w = 1);
~Matrix();
void copyDeviceToHost();
void copyHostToDevice();
void print();
void printDimensions();
float* getHostData();
float* getDeviceData();
int getHeight();
int getWidth();
void initialize(int height, int width, std::string dist = "zeros", float w = 1);
void copyDeviceDataFromAnother(Matrix &from);
void copyDeviceDataFromBatch(Matrix &from, int *idx, int idx0);
};
Matrix::Matrix(){
allocated = false;
}
Matrix::Matrix(int height, int width, std::string dist, float w)
: height(height), width(width), size(width * height){
weight = w;
dist = dist;
h_elem = new float[size];
std::random_device rd;
std::mt19937 mt(rd());
if(dist == "normal"){
// std::default_random_engine generator;
std::normal_distribution<float> distribution(0.0,weight);
for(int i=0; i < size; ++i){
h_elem[i] = distribution(mt);
}
}else if(dist == "uniform"){
// std::default_random_engine generator;
std::uniform_real_distribution<float> distribution(-weight,1.0);
for(int i=0; i < size; ++i){
h_elem[i] = distribution(mt);
}
}else if(dist == "ones"){
for(int i=0; i < size; ++i){
h_elem[i] = 1.0f;
}
}else if(dist == "zeros"){
for(int i=0; i < size; ++i){
h_elem[i] = 0.0f; }
}else{
throw std::invalid_argument("Invalid Weight initialization");
}
// Allocacion en device
hipMalloc(&d_elem, size * sizeof(float));
hipMemcpy( d_elem, h_elem, size * sizeof(float), hipMemcpyHostToDevice);
allocated = true;
}
Matrix::~Matrix(){
if(allocated){
delete [] h_elem;
hipFree(d_elem);
}
}
void Matrix::copyDeviceToHost(){
hipMemcpy(h_elem, d_elem, size * sizeof(float), hipMemcpyDeviceToHost);
}
void Matrix::copyHostToDevice(){
hipMemcpy(d_elem, h_elem, size * sizeof(float), hipMemcpyHostToDevice );
}
void Matrix::print(){
for(int i=0; i < height; ++i){
for(int j=0; j < width; ++j)
std::cout << h_elem[i*width + j] << "\t";
std::cout << std::endl;
}
}
void Matrix::printDimensions(){
std::cout << "(" << height << "," << width << ")";
}
float* Matrix::getHostData(){
return h_elem;
}
float* Matrix::getDeviceData(){
return d_elem;
}
int Matrix::getHeight(){return height;}
int Matrix::getWidth(){return width;}
void Matrix::initialize(int height_, int width_, std::string dist, float w){
if (allocated){
delete [] h_elem;
hipFree(d_elem);
allocated = false;
}
height = height_;
width = width_;
size = width * height;
weight = w;
dist = dist;
h_elem = new float[size];
std::random_device rd;
std::mt19937 mt(rd());
if(dist == "normal"){
// std::default_random_engine generator;
std::normal_distribution<float> distribution(0.0,weight);
for(int i=0; i < size; ++i){
h_elem[i] = distribution(mt);
}
}else if(dist == "uniform"){
// std::default_random_engine generator;
std::uniform_real_distribution<float> distribution(-weight,1.0);
for(int i=0; i < size; ++i){
h_elem[i] = distribution(mt);
}
}else if(dist == "ones"){
for(int i=0; i < size; ++i){
h_elem[i] = 1.0f;
}
}else if(dist == "zeros"){
for(int i=0; i < size; ++i){
h_elem[i] = 0.0f; }
}else{
throw std::invalid_argument("Invalid Weight initialization");
}
// Allocacion en device
hipMalloc(&d_elem, size * sizeof(float));
hipMemcpy( d_elem, h_elem, size * sizeof(float), hipMemcpyHostToDevice);
allocated = true;
}
void Matrix::copyDeviceDataFromAnother(Matrix &from){
// Asumo dimensiones correctas
int dev;
hipGetDevice(&dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
// dim3 nThreads(256);
dim3 nThreads(deviceProp.maxThreadsDim[0]);
dim3 nBlocks((from.size + nThreads.x - 1) / nThreads.x);
if(nBlocks.x > deviceProp.maxGridSize[0]){
nBlocks.x = deviceProp.maxGridSize[0];
}
hipLaunchKernelGGL(( copyFromTo), dim3(nBlocks), dim3(nThreads) , 0, 0, from.getDeviceData(), d_elem, from.size);
hipDeviceSynchronize();
// Aca Host y Device son distintos
}
void Matrix::copyDeviceDataFromBatch(Matrix &from, int *idx, int idx0){
// Asumo dimensiones correctas
int dev;
hipGetDevice(&dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
// dim3 nThreads(256);
// dim3 nBlocks((N + nThreads.x - 1) / nThreads.x);
// dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
// dim3 nThreads(256);
// dim3 nThreads(deviceProp.maxThreadsDim[0]);
// dim3 nBlocks((from.size + nThreads.x - 1) / nThreads.x);
// if(nBlocks.x > deviceProp.maxGridSize[0]){
// nBlocks.x = deviceProp.maxGridSize[0];
// }
// dim3 dimBlock(deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1]);
// dim3 dimGrid((width + dimBlock.x -1)/dimBlock.x, (height + dimBlock.y -1)/dimBlock.y);
// if(dimGrid.x > deviceProp.maxGridSize[0]){
// dimGrid.x = deviceProp.maxGridSize[0];
// }
// if(dimGrid.y > deviceProp.maxGridSize[1]){
// dimGrid.y = deviceProp.maxGridSize[1];
// }
dim3 dimBlock(256,256);
dim3 dimGrid(256,256);
hipLaunchKernelGGL(( copyFromToIdx), dim3(dimGrid), dim3(dimBlock) , 0, 0, from.getDeviceData(), d_elem, idx, height, width, idx0);
hipDeviceSynchronize();
}
/* ----------------------------
Kernels
---------------------------- */
__global__ void copyFromTo(float *from, float *to, int size){
int i = blockIdx.x * blockDim.x + threadIdx.x;
while(i < size){
to[i] = from[i];
i += blockDim.x * gridDim.x;
}
}
// from es mas grande
// las filas de to son iguales a el tamao
// idx deberia tener h elementos
__global__ void copyFromToIdx(float *from, float *to, int *idx, int h, int w, int idx0){
// lo tengo que hacer como columna
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
for(int j=0; j<3; ++j){
printf("%d", idx[j]);
}
#if __CUDA_ARCH__ >= 200
printf("Hi Cuda World");
#endif
if(row < h && col < w){
to[row * w + col] = from[idx[idx0+row] * w + col];
}
}
#endif
| 7cd218409579d5530ddb01c24058d90287924b98.cu | #ifndef MATRIX_H
#define MATRIX_H
#include <iostream>
#include <string>
#include <stdio.h>
#include <random>
// Copia los elementos de to a from
__global__ void copyFromTo(float *from, float *to, int size);
// Copia los elementos de las filas idx de to a from
__global__ void copyFromToIdx(float *from, float *to, int *idx, int h, int w, int idx0);
class Matrix{
public:
int height, width, size;
float *h_elem, *d_elem;
float weight;
std::string dist;
bool allocated;
// public:
Matrix();
Matrix(int height, int width, std::string dist = "uniform", float w = 1);
~Matrix();
void copyDeviceToHost();
void copyHostToDevice();
void print();
void printDimensions();
float* getHostData();
float* getDeviceData();
int getHeight();
int getWidth();
void initialize(int height, int width, std::string dist = "zeros", float w = 1);
void copyDeviceDataFromAnother(Matrix &from);
void copyDeviceDataFromBatch(Matrix &from, int *idx, int idx0);
};
Matrix::Matrix(){
allocated = false;
}
Matrix::Matrix(int height, int width, std::string dist, float w)
: height(height), width(width), size(width * height){
weight = w;
dist = dist;
h_elem = new float[size];
std::random_device rd;
std::mt19937 mt(rd());
if(dist == "normal"){
// std::default_random_engine generator;
std::normal_distribution<float> distribution(0.0,weight);
for(int i=0; i < size; ++i){
h_elem[i] = distribution(mt);
}
}else if(dist == "uniform"){
// std::default_random_engine generator;
std::uniform_real_distribution<float> distribution(-weight,1.0);
for(int i=0; i < size; ++i){
h_elem[i] = distribution(mt);
}
}else if(dist == "ones"){
for(int i=0; i < size; ++i){
h_elem[i] = 1.0f;
}
}else if(dist == "zeros"){
for(int i=0; i < size; ++i){
h_elem[i] = 0.0f; }
}else{
throw std::invalid_argument("Invalid Weight initialization");
}
// Allocacion en device
cudaMalloc(&d_elem, size * sizeof(float));
cudaMemcpy( d_elem, h_elem, size * sizeof(float), cudaMemcpyHostToDevice);
allocated = true;
}
Matrix::~Matrix(){
if(allocated){
delete [] h_elem;
cudaFree(d_elem);
}
}
void Matrix::copyDeviceToHost(){
cudaMemcpy(h_elem, d_elem, size * sizeof(float), cudaMemcpyDeviceToHost);
}
void Matrix::copyHostToDevice(){
cudaMemcpy(d_elem, h_elem, size * sizeof(float), cudaMemcpyHostToDevice );
}
void Matrix::print(){
for(int i=0; i < height; ++i){
for(int j=0; j < width; ++j)
std::cout << h_elem[i*width + j] << "\t";
std::cout << std::endl;
}
}
void Matrix::printDimensions(){
std::cout << "(" << height << "," << width << ")";
}
float* Matrix::getHostData(){
return h_elem;
}
float* Matrix::getDeviceData(){
return d_elem;
}
int Matrix::getHeight(){return height;}
int Matrix::getWidth(){return width;}
void Matrix::initialize(int height_, int width_, std::string dist, float w){
if (allocated){
delete [] h_elem;
cudaFree(d_elem);
allocated = false;
}
height = height_;
width = width_;
size = width * height;
weight = w;
dist = dist;
h_elem = new float[size];
std::random_device rd;
std::mt19937 mt(rd());
if(dist == "normal"){
// std::default_random_engine generator;
std::normal_distribution<float> distribution(0.0,weight);
for(int i=0; i < size; ++i){
h_elem[i] = distribution(mt);
}
}else if(dist == "uniform"){
// std::default_random_engine generator;
std::uniform_real_distribution<float> distribution(-weight,1.0);
for(int i=0; i < size; ++i){
h_elem[i] = distribution(mt);
}
}else if(dist == "ones"){
for(int i=0; i < size; ++i){
h_elem[i] = 1.0f;
}
}else if(dist == "zeros"){
for(int i=0; i < size; ++i){
h_elem[i] = 0.0f; }
}else{
throw std::invalid_argument("Invalid Weight initialization");
}
// Allocacion en device
cudaMalloc(&d_elem, size * sizeof(float));
cudaMemcpy( d_elem, h_elem, size * sizeof(float), cudaMemcpyHostToDevice);
allocated = true;
}
void Matrix::copyDeviceDataFromAnother(Matrix &from){
// Asumo dimensiones correctas
int dev;
cudaGetDevice(&dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
// dim3 nThreads(256);
dim3 nThreads(deviceProp.maxThreadsDim[0]);
dim3 nBlocks((from.size + nThreads.x - 1) / nThreads.x);
if(nBlocks.x > deviceProp.maxGridSize[0]){
nBlocks.x = deviceProp.maxGridSize[0];
}
copyFromTo<<< nBlocks, nThreads >>>(from.getDeviceData(), d_elem, from.size);
cudaDeviceSynchronize();
// Aca Host y Device son distintos
}
void Matrix::copyDeviceDataFromBatch(Matrix &from, int *idx, int idx0){
// Asumo dimensiones correctas
int dev;
cudaGetDevice(&dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
// dim3 nThreads(256);
// dim3 nBlocks((N + nThreads.x - 1) / nThreads.x);
// dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
// dim3 nThreads(256);
// dim3 nThreads(deviceProp.maxThreadsDim[0]);
// dim3 nBlocks((from.size + nThreads.x - 1) / nThreads.x);
// if(nBlocks.x > deviceProp.maxGridSize[0]){
// nBlocks.x = deviceProp.maxGridSize[0];
// }
// dim3 dimBlock(deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1]);
// dim3 dimGrid((width + dimBlock.x -1)/dimBlock.x, (height + dimBlock.y -1)/dimBlock.y);
// if(dimGrid.x > deviceProp.maxGridSize[0]){
// dimGrid.x = deviceProp.maxGridSize[0];
// }
// if(dimGrid.y > deviceProp.maxGridSize[1]){
// dimGrid.y = deviceProp.maxGridSize[1];
// }
dim3 dimBlock(256,256);
dim3 dimGrid(256,256);
copyFromToIdx<<< dimGrid, dimBlock >>>(from.getDeviceData(), d_elem, idx, height, width, idx0);
cudaDeviceSynchronize();
}
/* ----------------------------
Kernels
---------------------------- */
__global__ void copyFromTo(float *from, float *to, int size){
int i = blockIdx.x * blockDim.x + threadIdx.x;
while(i < size){
to[i] = from[i];
i += blockDim.x * gridDim.x;
}
}
// from es mas grande
// las filas de to son iguales a el tamaño
// idx deberia tener h elementos
__global__ void copyFromToIdx(float *from, float *to, int *idx, int h, int w, int idx0){
// lo tengo que hacer como columna
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
for(int j=0; j<3; ++j){
printf("%d", idx[j]);
}
#if __CUDA_ARCH__ >= 200
printf("Hi Cuda World");
#endif
if(row < h && col < w){
to[row * w + col] = from[idx[idx0+row] * w + col];
}
}
#endif
|
41470093205980362bbe25e6c4122cb41683af38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zparilu_kernels.cu, normal z -> s, Thu Oct 8 23:05:48 2020
*/
#include "magmasparse_internal.h"
#define PRECISION_s
__global__ void
magma_sparilu_csr_kernel(
const magma_int_t num_rows,
const magma_int_t nnz,
const magma_index_t *rowidxA,
const magma_index_t *colidxA,
const float * __restrict__ A,
const magma_index_t *rowptrL,
const magma_index_t *colidxL,
float *valL,
const magma_index_t *rowptrU,
const magma_index_t *colidxU,
float *valU)
{
int i, j;
int k = blockDim.x * blockIdx.x + threadIdx.x;
float zero = MAGMA_S_MAKE(0.0, 0.0);
float s, sp;
int il, iu, jl, ju;
if (k < nnz) {
i = rowidxA[k];
j = colidxA[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg(A+k);
#else
s = A[k];
#endif
il = rowptrL[i];
iu = rowptrU[j];
while (il < rowptrL[i+1] && iu < rowptrU[j+1]) {
sp = zero;
jl = colidxL[il];
ju = colidxU[iu];
sp = (jl == ju) ? valL[il] * valU[iu] : sp;
s = (jl == ju) ? s-sp : s;
il = (jl <= ju) ? il+1 : il;
iu = (jl >= ju) ? iu+1 : iu;
}
s += sp; // undo the last operation (it must be the last)
if (i > j) // modify l entry
valL[il-1] = s / valU[rowptrU[j+1]-1];
else // modify u entry
valU[iu-1] = s;
}
}
/**
Purpose
-------
This routine iteratively computes an incomplete LU factorization.
For reference, see:
E. Chow and A. Patel: "Fine-grained Parallel Incomplete LU Factorization",
SIAM Journal on Scientific Computing, 37, C169-C193 (2015).
This routine was used in the ISC 2015 paper:
E. Chow et al.: "Asynchronous Iterative Algorithm for Computing Incomplete
Factorizations on GPUs",
ISC High Performance 2015, LNCS 9137, pp. 1-16, 2015.
The input format of the system matrix is COO, the lower triangular factor L
is stored in CSR, the upper triangular factor U is transposed, then also
stored in CSR (equivalent to CSC format for the non-transposed U).
Every component of L and U is handled by one thread.
Arguments
---------
@param[in]
A magma_s_matrix
input matrix A determing initial guess & processing order
@param[in,out]
L magma_s_matrix
input/output matrix L containing the lower triangular factor
@param[in,out]
U magma_s_matrix
input/output matrix U containing the upper triangular factor
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sparilu_csr(
magma_s_matrix A,
magma_s_matrix L,
magma_s_matrix U,
magma_queue_t queue)
{
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv(A.nnz, blocksize1);
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid(dimgrid1, dimgrid2, dimgrid3);
dim3 block(blocksize1, blocksize2, 1);
hipLaunchKernelGGL(( magma_sparilu_csr_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
A.num_rows, A.nnz, A.rowidx, A.col, A.val,
L.row, L.col, L.val,
U.row, U.col, U.val);
return MAGMA_SUCCESS;
}
| 41470093205980362bbe25e6c4122cb41683af38.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zparilu_kernels.cu, normal z -> s, Thu Oct 8 23:05:48 2020
*/
#include "magmasparse_internal.h"
#define PRECISION_s
__global__ void
magma_sparilu_csr_kernel(
const magma_int_t num_rows,
const magma_int_t nnz,
const magma_index_t *rowidxA,
const magma_index_t *colidxA,
const float * __restrict__ A,
const magma_index_t *rowptrL,
const magma_index_t *colidxL,
float *valL,
const magma_index_t *rowptrU,
const magma_index_t *colidxU,
float *valU)
{
int i, j;
int k = blockDim.x * blockIdx.x + threadIdx.x;
float zero = MAGMA_S_MAKE(0.0, 0.0);
float s, sp;
int il, iu, jl, ju;
if (k < nnz) {
i = rowidxA[k];
j = colidxA[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg(A+k);
#else
s = A[k];
#endif
il = rowptrL[i];
iu = rowptrU[j];
while (il < rowptrL[i+1] && iu < rowptrU[j+1]) {
sp = zero;
jl = colidxL[il];
ju = colidxU[iu];
sp = (jl == ju) ? valL[il] * valU[iu] : sp;
s = (jl == ju) ? s-sp : s;
il = (jl <= ju) ? il+1 : il;
iu = (jl >= ju) ? iu+1 : iu;
}
s += sp; // undo the last operation (it must be the last)
if (i > j) // modify l entry
valL[il-1] = s / valU[rowptrU[j+1]-1];
else // modify u entry
valU[iu-1] = s;
}
}
/**
Purpose
-------
This routine iteratively computes an incomplete LU factorization.
For reference, see:
E. Chow and A. Patel: "Fine-grained Parallel Incomplete LU Factorization",
SIAM Journal on Scientific Computing, 37, C169-C193 (2015).
This routine was used in the ISC 2015 paper:
E. Chow et al.: "Asynchronous Iterative Algorithm for Computing Incomplete
Factorizations on GPUs",
ISC High Performance 2015, LNCS 9137, pp. 1-16, 2015.
The input format of the system matrix is COO, the lower triangular factor L
is stored in CSR, the upper triangular factor U is transposed, then also
stored in CSR (equivalent to CSC format for the non-transposed U).
Every component of L and U is handled by one thread.
Arguments
---------
@param[in]
A magma_s_matrix
input matrix A determing initial guess & processing order
@param[in,out]
L magma_s_matrix
input/output matrix L containing the lower triangular factor
@param[in,out]
U magma_s_matrix
input/output matrix U containing the upper triangular factor
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sparilu_csr(
magma_s_matrix A,
magma_s_matrix L,
magma_s_matrix U,
magma_queue_t queue)
{
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv(A.nnz, blocksize1);
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid(dimgrid1, dimgrid2, dimgrid3);
dim3 block(blocksize1, blocksize2, 1);
magma_sparilu_csr_kernel<<< grid, block, 0, queue->cuda_stream() >>>
(A.num_rows, A.nnz, A.rowidx, A.col, A.val,
L.row, L.col, L.val,
U.row, U.col, U.val);
return MAGMA_SUCCESS;
}
|
57f52fee86fca753ef991f84cad33a63f028c417.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "compare.h"
#include "gputimer.h"
// Subpart b:
// Compute capability 2.0+ GPUs have support for 3 per-warp instructions.
// Namely, these instructions are:
//
// int __popc(int x) Population Count: Returns the number of bits that are set
// to 1 in the 32-bit integer x.
//
// int __clz(int x) Count Leading Zeros: Returns the number of consecutive zero
// bits beginning at the most significant bit of the 32-bit integer x.
//
// int __ballot(int p) Returns a 32-bit integer in which bit k is set if and only
// if the predicate p provided by the thread in lane k of the warp is non-zero.
__device__ unsigned int warp_reduce(unsigned int p, volatile unsigned int * s) {
// Assumes values in 'p' are either 1 or 0
// Should not use 's'
// Sums p across warp, returning the result.
// You can do this without using the character '+' in your code at all
//
// TODO: Implement this function using some combination of
// __popc, __clz, or __ballot function
int t = threadIdx.x;
s[0] = 0;
s[0] += __ballot(p);
return __popc(s[0]);
}
__global__ void reduce(unsigned int * d_out_warp,
const unsigned int * d_in)
{
extern __shared__ unsigned int s[];
int t = threadIdx.x;
int p = d_in[t];
unsigned int wr = warp_reduce(p, s);
if (t == 0)
{
*d_out_warp = wr;
}
}
int main(int argc, char **argv)
{
const int ARRAY_SIZE = 32;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(unsigned int);
// generate the input array on the host
unsigned int h_in[ARRAY_SIZE];
unsigned int sum = 0;
for(int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [0, 1]
h_in[i] = (float)random()/(float)RAND_MAX > 0.5f ? 1 : 0;
sum += h_in[i];
}
// declare GPU memory pointers
unsigned int * d_in, * d_out_warp;
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out_warp, sizeof(unsigned int));
// transfer the input array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
GpuTimer timer;
timer.Start();
// launch the kernel
hipLaunchKernelGGL(( reduce), dim3(1), dim3(ARRAY_SIZE), ARRAY_SIZE * sizeof(unsigned int), 0,
d_out_warp, d_in);
timer.Stop();
printf("Your code executed in %g ms\n", timer.Elapsed());
unsigned int h_out_warp;
// copy back the sum from GPU
hipMemcpy(&h_out_warp, d_out_warp, sizeof(unsigned int),
hipMemcpyDeviceToHost);
compare(h_out_warp, sum);
// free GPU memory allocation
hipFree(d_in);
hipFree(d_out_warp);
} | 57f52fee86fca753ef991f84cad33a63f028c417.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "compare.h"
#include "gputimer.h"
// Subpart b:
// Compute capability 2.0+ GPUs have support for 3 per-warp instructions.
// Namely, these instructions are:
//
// int __popc(int x) Population Count: Returns the number of bits that are set
// to 1 in the 32-bit integer x.
//
// int __clz(int x) Count Leading Zeros: Returns the number of consecutive zero
// bits beginning at the most significant bit of the 32-bit integer x.
//
// int __ballot(int p) Returns a 32-bit integer in which bit k is set if and only
// if the predicate p provided by the thread in lane k of the warp is non-zero.
__device__ unsigned int warp_reduce(unsigned int p, volatile unsigned int * s) {
// Assumes values in 'p' are either 1 or 0
// Should not use 's'
// Sums p across warp, returning the result.
// You can do this without using the character '+' in your code at all
//
// TODO: Implement this function using some combination of
// __popc, __clz, or __ballot function
int t = threadIdx.x;
s[0] = 0;
s[0] += __ballot(p);
return __popc(s[0]);
}
__global__ void reduce(unsigned int * d_out_warp,
const unsigned int * d_in)
{
extern __shared__ unsigned int s[];
int t = threadIdx.x;
int p = d_in[t];
unsigned int wr = warp_reduce(p, s);
if (t == 0)
{
*d_out_warp = wr;
}
}
int main(int argc, char **argv)
{
const int ARRAY_SIZE = 32;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(unsigned int);
// generate the input array on the host
unsigned int h_in[ARRAY_SIZE];
unsigned int sum = 0;
for(int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [0, 1]
h_in[i] = (float)random()/(float)RAND_MAX > 0.5f ? 1 : 0;
sum += h_in[i];
}
// declare GPU memory pointers
unsigned int * d_in, * d_out_warp;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out_warp, sizeof(unsigned int));
// transfer the input array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
GpuTimer timer;
timer.Start();
// launch the kernel
reduce<<<1, ARRAY_SIZE, ARRAY_SIZE * sizeof(unsigned int)>>>
(d_out_warp, d_in);
timer.Stop();
printf("Your code executed in %g ms\n", timer.Elapsed());
unsigned int h_out_warp;
// copy back the sum from GPU
cudaMemcpy(&h_out_warp, d_out_warp, sizeof(unsigned int),
cudaMemcpyDeviceToHost);
compare(h_out_warp, sum);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out_warp);
} |
ccd9633908efd8fe204c94d7d0fc5ef28d18ab59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math/transpose.h"
#include "dragon/utils/math/utils.h"
namespace dragon {
namespace math {
namespace {
constexpr int kTileDim = 32;
constexpr int kBlockRows = 8;
template <typename T, int D>
__global__ void _Transpose(
const int N,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(yi, N) {
int xi = 0, tmp = yi;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], tmp, &tmp, &r);
xi += r * X_strides.data[d];
}
y[yi] = x[xi];
}
}
template <typename T>
__global__ void _BatchTranspose2D(
const int H,
const int W,
const int dh,
const int dw,
const T* X,
T* Y) {
__shared__ T block[kTileDim][kTileDim + 1];
const int k = blockIdx.x % (dh * dw);
const int r = k / dw;
const int c = k % dw;
const int offset = blockIdx.x / (dh * dw) * H * W;
int x = c * kTileDim + threadIdx.x;
int y = r * kTileDim + threadIdx.y;
if (x < W) {
for (int i = 0; threadIdx.y + i < kTileDim && y + i < H; i += kBlockRows) {
block[threadIdx.y + i][threadIdx.x] = X[offset + (y + i) * W + x];
}
}
__syncthreads();
x = r * kTileDim + threadIdx.x;
y = c * kTileDim + threadIdx.y;
if (x < H) {
for (int i = 0; threadIdx.y + i < kTileDim && y + i < W; i += kBlockRows) {
Y[offset + (y + i) * H + x] = block[threadIdx.x][threadIdx.y + i];
}
}
}
template <typename T, int D>
void _TransposeImpl(
const int64_t* x_strides,
const int64_t* y_dims,
const T* x,
T* y,
CUDAContext* ctx) {
const auto N =
std::accumulate(y_dims, y_dims + D, 1, std::multiplies<int64_t>());
SimpleArray<int, D> X_strides, Y_dims;
for (int i = 0; i < D; ++i) {
X_strides.data[i] = x_strides[i];
Y_dims.data[i] = y_dims[i];
}
hipLaunchKernelGGL(( _Transpose), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N, X_strides, Y_dims, x, y);
}
} // namespace
#define DEFINE_TRANSPOSE_FUNC(T) \
template <> \
DRAGON_API void Transpose<T, CUDAContext>( \
const int num_dims, \
const int64_t* dims, \
const int64_t* axes, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
vec64_t new_dims, new_axes; \
utils::CollapseTransposeAxes(num_dims, dims, axes, new_dims, new_axes); \
const int num_axes = new_dims.size(); \
if (num_axes == 3 && new_axes == vec64_t({0, 2, 1})) { \
const auto N = new_dims[0], H = new_dims[1], W = new_dims[2]; \
const auto dh = utils::DivUp<int64_t>(H, kTileDim); \
const auto dw = utils::DivUp<int64_t>(W, kTileDim); \
hipLaunchKernelGGL(( _BatchTranspose2D), \
N * dh * dw, \
dim3(kTileDim, kBlockRows), \
0, \
ctx->cuda_stream(), H, W, dh, dw, x, y); \
return; \
} \
CUDA_TENSOR_DIMS_CHECK(num_axes); \
vec64_t X_strides(num_axes), Y_dims(num_axes); \
utils::ComputeTransposeStrides( \
num_axes, new_dims.data(), new_axes.data(), X_strides.data()); \
for (int i = 0; i < num_axes; ++i) { \
Y_dims[i] = new_dims[new_axes[i]]; \
} \
DISPATCH_FUNC_BY_VALUE_WITH_TYPE_1( \
_TransposeImpl, \
T, \
num_axes, \
X_strides.data(), \
Y_dims.data(), \
x, \
y, \
ctx); \
}
DEFINE_TRANSPOSE_FUNC(bool);
DEFINE_TRANSPOSE_FUNC(uint8_t);
DEFINE_TRANSPOSE_FUNC(int8_t);
DEFINE_TRANSPOSE_FUNC(int);
DEFINE_TRANSPOSE_FUNC(int64_t);
DEFINE_TRANSPOSE_FUNC(float16);
DEFINE_TRANSPOSE_FUNC(float);
DEFINE_TRANSPOSE_FUNC(double);
#undef DEFINE_TRANSPOSE_FUNC
} // namespace math
} // namespace dragon
#endif // USE_ROCM
| ccd9633908efd8fe204c94d7d0fc5ef28d18ab59.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math/transpose.h"
#include "dragon/utils/math/utils.h"
namespace dragon {
namespace math {
namespace {
constexpr int kTileDim = 32;
constexpr int kBlockRows = 8;
template <typename T, int D>
__global__ void _Transpose(
const int N,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(yi, N) {
int xi = 0, tmp = yi;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], tmp, &tmp, &r);
xi += r * X_strides.data[d];
}
y[yi] = x[xi];
}
}
template <typename T>
__global__ void _BatchTranspose2D(
const int H,
const int W,
const int dh,
const int dw,
const T* X,
T* Y) {
__shared__ T block[kTileDim][kTileDim + 1];
const int k = blockIdx.x % (dh * dw);
const int r = k / dw;
const int c = k % dw;
const int offset = blockIdx.x / (dh * dw) * H * W;
int x = c * kTileDim + threadIdx.x;
int y = r * kTileDim + threadIdx.y;
if (x < W) {
for (int i = 0; threadIdx.y + i < kTileDim && y + i < H; i += kBlockRows) {
block[threadIdx.y + i][threadIdx.x] = X[offset + (y + i) * W + x];
}
}
__syncthreads();
x = r * kTileDim + threadIdx.x;
y = c * kTileDim + threadIdx.y;
if (x < H) {
for (int i = 0; threadIdx.y + i < kTileDim && y + i < W; i += kBlockRows) {
Y[offset + (y + i) * H + x] = block[threadIdx.x][threadIdx.y + i];
}
}
}
template <typename T, int D>
void _TransposeImpl(
const int64_t* x_strides,
const int64_t* y_dims,
const T* x,
T* y,
CUDAContext* ctx) {
const auto N =
std::accumulate(y_dims, y_dims + D, 1, std::multiplies<int64_t>());
SimpleArray<int, D> X_strides, Y_dims;
for (int i = 0; i < D; ++i) {
X_strides.data[i] = x_strides[i];
Y_dims.data[i] = y_dims[i];
}
_Transpose<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N, X_strides, Y_dims, x, y);
}
} // namespace
#define DEFINE_TRANSPOSE_FUNC(T) \
template <> \
DRAGON_API void Transpose<T, CUDAContext>( \
const int num_dims, \
const int64_t* dims, \
const int64_t* axes, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
vec64_t new_dims, new_axes; \
utils::CollapseTransposeAxes(num_dims, dims, axes, new_dims, new_axes); \
const int num_axes = new_dims.size(); \
if (num_axes == 3 && new_axes == vec64_t({0, 2, 1})) { \
const auto N = new_dims[0], H = new_dims[1], W = new_dims[2]; \
const auto dh = utils::DivUp<int64_t>(H, kTileDim); \
const auto dw = utils::DivUp<int64_t>(W, kTileDim); \
_BatchTranspose2D<<< \
N * dh * dw, \
dim3(kTileDim, kBlockRows), \
0, \
ctx->cuda_stream()>>>(H, W, dh, dw, x, y); \
return; \
} \
CUDA_TENSOR_DIMS_CHECK(num_axes); \
vec64_t X_strides(num_axes), Y_dims(num_axes); \
utils::ComputeTransposeStrides( \
num_axes, new_dims.data(), new_axes.data(), X_strides.data()); \
for (int i = 0; i < num_axes; ++i) { \
Y_dims[i] = new_dims[new_axes[i]]; \
} \
DISPATCH_FUNC_BY_VALUE_WITH_TYPE_1( \
_TransposeImpl, \
T, \
num_axes, \
X_strides.data(), \
Y_dims.data(), \
x, \
y, \
ctx); \
}
DEFINE_TRANSPOSE_FUNC(bool);
DEFINE_TRANSPOSE_FUNC(uint8_t);
DEFINE_TRANSPOSE_FUNC(int8_t);
DEFINE_TRANSPOSE_FUNC(int);
DEFINE_TRANSPOSE_FUNC(int64_t);
DEFINE_TRANSPOSE_FUNC(float16);
DEFINE_TRANSPOSE_FUNC(float);
DEFINE_TRANSPOSE_FUNC(double);
#undef DEFINE_TRANSPOSE_FUNC
} // namespace math
} // namespace dragon
#endif // USE_CUDA
|
52085886da2d87b00b4f53313eb7f19f6ae158c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_4_back;
int xdim0_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_4_back;
int ydim0_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_4_back;
int xdim1_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_4_back;
int ydim1_update_halo_kernel5_minus_4_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_minus_4_back * (y) + \
xdim0_update_halo_kernel5_minus_4_back * \
ydim0_update_halo_kernel5_minus_4_back * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_minus_4_back * (y) + \
xdim1_update_halo_kernel5_minus_4_back * \
ydim1_update_halo_kernel5_minus_4_back * (z))
// user function
__device__
inline void
update_halo_kernel5_minus_4_back(double *vol_flux_z, double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = -vol_flux_z[OPS_ACC0(0, 0, 4)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = -mass_flux_z[OPS_ACC1(0, 0, 4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_minus_4_back(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back *
ydim0_update_halo_kernel5_minus_4_back;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back *
ydim1_update_halo_kernel5_minus_4_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_minus_4_back(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_minus_4_back(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 137))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(137, "update_halo_kernel5_minus_4_back");
OPS_kernels[137].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_minus_4_back_h ||
ydim0 != ydim0_update_halo_kernel5_minus_4_back_h ||
xdim1 != xdim1_update_halo_kernel5_minus_4_back_h ||
ydim1 != ydim1_update_halo_kernel5_minus_4_back_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel5_minus_4_back, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_minus_4_back_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel5_minus_4_back, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_minus_4_back_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel5_minus_4_back, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_minus_4_back_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel5_minus_4_back, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_minus_4_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[137].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel5_minus_4_back), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[137].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[137].mpi_time += t2 - t1;
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| 52085886da2d87b00b4f53313eb7f19f6ae158c6.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_4_back;
int xdim0_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_4_back;
int ydim0_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_4_back;
int xdim1_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_4_back;
int ydim1_update_halo_kernel5_minus_4_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_minus_4_back * (y) + \
xdim0_update_halo_kernel5_minus_4_back * \
ydim0_update_halo_kernel5_minus_4_back * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_minus_4_back * (y) + \
xdim1_update_halo_kernel5_minus_4_back * \
ydim1_update_halo_kernel5_minus_4_back * (z))
// user function
__device__
inline void
update_halo_kernel5_minus_4_back(double *vol_flux_z, double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = -vol_flux_z[OPS_ACC0(0, 0, 4)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = -mass_flux_z[OPS_ACC1(0, 0, 4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_minus_4_back(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back *
ydim0_update_halo_kernel5_minus_4_back;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back *
ydim1_update_halo_kernel5_minus_4_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_minus_4_back(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_minus_4_back(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 137))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(137, "update_halo_kernel5_minus_4_back");
OPS_kernels[137].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_minus_4_back_h ||
ydim0 != ydim0_update_halo_kernel5_minus_4_back_h ||
xdim1 != xdim1_update_halo_kernel5_minus_4_back_h ||
ydim1 != ydim1_update_halo_kernel5_minus_4_back_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel5_minus_4_back, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_minus_4_back_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel5_minus_4_back, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_minus_4_back_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel5_minus_4_back, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_minus_4_back_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel5_minus_4_back, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_minus_4_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[137].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel5_minus_4_back<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[137].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[137].mpi_time += t2 - t1;
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
539848fef1061e9c972906580b99e5378fc648a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N 2048 // size of vectors
#define T 240 // number of threads per block
__global__ void vecAdd(int *A, int *B, int *C)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
C[i] = A[i] + B[i];
printf("%d\n",i);
printf("%d\n", blockIdx.x);
}
}
int main()
{
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
hipMalloc((void**)&dev_a, N * sizeof(int));
hipMalloc((void**)&dev_b, N * sizeof(int));
hipMalloc((void**)&dev_c, N * sizeof(int));
// fill the arrays 'a' and 'b' on the CPU
for (int i = 0; i<N; i++) {
a[i] = i;
b[i] = i + i;
}
// copy the arrays 'a' and 'b' to the GPU
hipMemcpy(dev_a, a, N * sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N * sizeof(int),
hipMemcpyHostToDevice);
int blocks = (N - 1) / T + 1;
vecAdd << <blocks, T >> >(dev_a, dev_b, dev_c);
// copy the array 'c' back from the GPU to the CPU
hipMemcpy(c, dev_c, N * sizeof(int),
hipMemcpyDeviceToHost);
// display the results
long temp1, temp2;
temp1 = temp2 = 0;
for (int i = 0; i<N; i++) {
temp1 += (a[i] + b[i]);
temp2 += c[i];
}
printf("total a+b:%ld \n total c: %ld\n", temp1, temp2);
// free the memory allocated on the GPU
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| 539848fef1061e9c972906580b99e5378fc648a2.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N 2048 // size of vectors
#define T 240 // number of threads per block
__global__ void vecAdd(int *A, int *B, int *C)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
C[i] = A[i] + B[i];
printf("%d\n",i);
printf("%d\n", blockIdx.x);
}
}
int main()
{
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
cudaMalloc((void**)&dev_a, N * sizeof(int));
cudaMalloc((void**)&dev_b, N * sizeof(int));
cudaMalloc((void**)&dev_c, N * sizeof(int));
// fill the arrays 'a' and 'b' on the CPU
for (int i = 0; i<N; i++) {
a[i] = i;
b[i] = i + i;
}
// copy the arrays 'a' and 'b' to the GPU
cudaMemcpy(dev_a, a, N * sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * sizeof(int),
cudaMemcpyHostToDevice);
int blocks = (N - 1) / T + 1;
vecAdd << <blocks, T >> >(dev_a, dev_b, dev_c);
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy(c, dev_c, N * sizeof(int),
cudaMemcpyDeviceToHost);
// display the results
long temp1, temp2;
temp1 = temp2 = 0;
for (int i = 0; i<N; i++) {
temp1 += (a[i] + b[i]);
temp2 += c[i];
}
printf("total a+b:%ld \n total c: %ld\n", temp1, temp2);
// free the memory allocated on the GPU
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
7f7a3d8fa4de32b29dfdbbec599cf935e8ed0f5a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/PQScanMultiPassNoPrecomputed.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/PQCodeDistances.cuh>
#include <faiss/gpu/impl/PQCodeLoad.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/NoTypeTensor.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/utils/HostTensor.cuh>
namespace faiss { namespace gpu {
// This must be kept in sync with PQCodeDistances.cu
bool isSupportedNoPrecomputedSubDimSize(int dims) {
switch (dims) {
case 1:
case 2:
case 3:
case 4:
case 6:
case 8:
case 10:
case 12:
case 16:
case 20:
case 24:
case 28:
case 32:
return true;
default:
// FIXME: larger sizes require too many registers - we need the
// MM implementation working
return false;
}
}
template <typename LookupT, typename LookupVecT>
struct LoadCodeDistances {
static inline __device__ void load(LookupT* smem,
LookupT* codes,
int numCodes) {
constexpr int kWordSize = sizeof(LookupVecT) / sizeof(LookupT);
// We can only use the vector type if the data is guaranteed to be
// aligned. The codes are innermost, so if it is evenly divisible,
// then any slice will be aligned.
if (numCodes % kWordSize == 0) {
// Load the data by float4 for efficiency, and then handle any remainder
// limitVec is the number of whole vec words we can load, in terms
// of whole blocks performing the load
constexpr int kUnroll = 2;
int limitVec = numCodes / (kUnroll * kWordSize * blockDim.x);
limitVec *= kUnroll * blockDim.x;
LookupVecT* smemV = (LookupVecT*) smem;
LookupVecT* codesV = (LookupVecT*) codes;
for (int i = threadIdx.x; i < limitVec; i += kUnroll * blockDim.x) {
LookupVecT vals[kUnroll];
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] =
LoadStore<LookupVecT>::load(&codesV[i + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
LoadStore<LookupVecT>::store(&smemV[i + j * blockDim.x], vals[j]);
}
}
// This is where we start loading the remainder that does not evenly
// fit into kUnroll x blockDim.x
int remainder = limitVec * kWordSize;
for (int i = remainder + threadIdx.x; i < numCodes; i += blockDim.x) {
smem[i] = codes[i];
}
} else {
// Potential unaligned load
constexpr int kUnroll = 4;
int limit = utils::roundDown(numCodes, kUnroll * blockDim.x);
int i = threadIdx.x;
for (; i < limit; i += kUnroll * blockDim.x) {
LookupT vals[kUnroll];
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
vals[j] = codes[i + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < kUnroll; ++j) {
smem[i + j * blockDim.x] = vals[j];
}
}
for (; i < numCodes; i += blockDim.x) {
smem[i] = codes[i];
}
}
}
};
template <int NumSubQuantizers, typename LookupT, typename LookupVecT>
__global__ void
pqScanNoPrecomputedMultiPass(Tensor<float, 2, true> queries,
Tensor<float, 3, true> pqCentroids,
Tensor<int, 2, true> topQueryToCentroid,
Tensor<LookupT, 4, true> codeDistances,
void** listCodes,
int* listLengths,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<float, 1, true> distance) {
const auto codesPerSubQuantizer = pqCentroids.getSize(2);
// Where the pq code -> residual distance is stored
extern __shared__ char smemCodeDistances[];
LookupT* codeDist = (LookupT*) smemCodeDistances;
// Each block handles a single query
auto queryId = blockIdx.y;
auto probeId = blockIdx.x;
// This is where we start writing out data
// We ensure that before the array (at offset -1), there is a 0 value
int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
float* distanceOut = distance[outBase].data();
auto listId = topQueryToCentroid[queryId][probeId];
// Safety guard in case NaNs in input cause no list ID to be generated
if (listId == -1) {
return;
}
unsigned char* codeList = (unsigned char*) listCodes[listId];
int limit = listLengths[listId];
constexpr int kNumCode32 = NumSubQuantizers <= 4 ? 1 :
(NumSubQuantizers / 4);
unsigned int code32[kNumCode32];
unsigned int nextCode32[kNumCode32];
// We double-buffer the code loading, which improves memory utilization
if (threadIdx.x < limit) {
LoadCode32<NumSubQuantizers>::load(code32, codeList, threadIdx.x);
}
LoadCodeDistances<LookupT, LookupVecT>::load(
codeDist,
codeDistances[queryId][probeId].data(),
codeDistances.getSize(2) * codeDistances.getSize(3));
// Prevent WAR dependencies
__syncthreads();
// Each thread handles one code element in the list, with a
// block-wide stride
for (int codeIndex = threadIdx.x;
codeIndex < limit;
codeIndex += blockDim.x) {
// Prefetch next codes
if (codeIndex + blockDim.x < limit) {
LoadCode32<NumSubQuantizers>::load(
nextCode32, codeList, codeIndex + blockDim.x);
}
float dist = 0.0f;
#pragma unroll
for (int word = 0; word < kNumCode32; ++word) {
constexpr int kBytesPerCode32 =
NumSubQuantizers < 4 ? NumSubQuantizers : 4;
if (kBytesPerCode32 == 1) {
auto code = code32[0];
dist = ConvertTo<float>::to(codeDist[code]);
} else {
#pragma unroll
for (int byte = 0; byte < kBytesPerCode32; ++byte) {
auto code = getByte(code32[word], byte * 8, 8);
auto offset =
codesPerSubQuantizer * (word * kBytesPerCode32 + byte);
dist += ConvertTo<float>::to(codeDist[offset + code]);
}
}
}
// Write out intermediate distance result
// We do not maintain indices here, in order to reduce global
// memory traffic. Those are recovered in the final selection step.
distanceOut[codeIndex] = dist;
// Rotate buffers
#pragma unroll
for (int word = 0; word < kNumCode32; ++word) {
code32[word] = nextCode32[word];
}
}
}
void
runMultiPassTile(Tensor<float, 2, true>& queries,
Tensor<float, 2, true>& centroids,
Tensor<float, 3, true>& pqCentroidsInnermostCode,
NoTypeTensor<4, true>& codeDistances,
Tensor<int, 2, true>& topQueryToCentroid,
bool useFloat16Lookup,
int bytesPerCode,
int numSubQuantizers,
int numSubQuantizerCodes,
thrust::device_vector<void*>& listCodes,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
Tensor<char, 1, true>& thrustMem,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<float, 1, true>& allDistances,
Tensor<float, 3, true>& heapDistances,
Tensor<int, 3, true>& heapIndices,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices,
hipStream_t stream) {
// Calculate offset lengths, so we know where to write out
// intermediate results
runCalcListOffsets(topQueryToCentroid, listLengths, prefixSumOffsets,
thrustMem, stream);
// Calculate residual code distances, since this is without
// precomputed codes
runPQCodeDistances(pqCentroidsInnermostCode,
queries,
centroids,
topQueryToCentroid,
codeDistances,
useFloat16Lookup,
stream);
// Convert all codes to a distance, and write out (distance,
// index) values for all intermediate results
{
auto kThreadsPerBlock = 256;
auto grid = dim3(topQueryToCentroid.getSize(1),
topQueryToCentroid.getSize(0));
auto block = dim3(kThreadsPerBlock);
// pq centroid distances
auto smem = useFloat16Lookup ? sizeof(half) : sizeof(float);
smem *= numSubQuantizers * numSubQuantizerCodes;
FAISS_ASSERT(smem <= getMaxSharedMemPerBlockCurrentDevice());
#define RUN_PQ_OPT(NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T) \
do { \
auto codeDistancesT = codeDistances.toTensor<LOOKUP_T>(); \
\
hipLaunchKernelGGL(( pqScanNoPrecomputedMultiPass<NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T>) \
, dim3(grid), dim3(block), smem, stream, \
queries, \
pqCentroidsInnermostCode, \
topQueryToCentroid, \
codeDistancesT, \
listCodes.data().get(), \
listLengths.data().get(), \
prefixSumOffsets, \
allDistances); \
} while (0)
#define RUN_PQ(NUM_SUB_Q) \
do { \
if (useFloat16Lookup) { \
RUN_PQ_OPT(NUM_SUB_Q, half, Half8); \
} else { \
RUN_PQ_OPT(NUM_SUB_Q, float, float4); \
} \
} while (0)
switch (bytesPerCode) {
case 1:
RUN_PQ(1);
break;
case 2:
RUN_PQ(2);
break;
case 3:
RUN_PQ(3);
break;
case 4:
RUN_PQ(4);
break;
case 8:
RUN_PQ(8);
break;
case 12:
RUN_PQ(12);
break;
case 16:
RUN_PQ(16);
break;
case 20:
RUN_PQ(20);
break;
case 24:
RUN_PQ(24);
break;
case 28:
RUN_PQ(28);
break;
case 32:
RUN_PQ(32);
break;
case 40:
RUN_PQ(40);
break;
case 48:
RUN_PQ(48);
break;
case 56:
RUN_PQ(56);
break;
case 64:
RUN_PQ(64);
break;
case 96:
RUN_PQ(96);
break;
default:
FAISS_ASSERT(false);
break;
}
#undef RUN_PQ
#undef RUN_PQ_OPT
}
CUDA_TEST_ERROR();
// k-select the output in chunks, to increase parallelism
runPass1SelectLists(prefixSumOffsets,
allDistances,
topQueryToCentroid.getSize(1),
k,
false, // L2 distance chooses smallest
heapDistances,
heapIndices,
stream);
// k-select final output
auto flatHeapDistances = heapDistances.downcastInner<2>();
auto flatHeapIndices = heapIndices.downcastInner<2>();
runPass2SelectLists(flatHeapDistances,
flatHeapIndices,
listIndices,
indicesOptions,
prefixSumOffsets,
topQueryToCentroid,
k,
false, // L2 distance chooses smallest
outDistances,
outIndices,
stream);
}
// Runs the full no-precomputed-tables IVFPQ scan (HIP build).
//
// The query set is processed in tiles sized so that the per-query temporary
// buffers (prefix-sum offsets, code distance tables, candidate distances and
// two-pass selection heaps) fit within the temporary memory available from
// the device memory manager. Two copies of every temporary buffer are kept
// and tiles alternate between two streams so that consecutive tiles overlap.
//
// queries: (numQueries, dim) query vectors
// topQueryToCentroid: (numQueries, nprobe) coarse list IDs per query
// outDistances / outIndices: (numQueries, k) final results, written in place
//
// NOTE(review): this function intentionally mirrors the CUDA version of
// runPQScanMultiPassNoPrecomputed; keep the two in sync.
void runPQScanMultiPassNoPrecomputed(Tensor<float, 2, true>& queries,
                                     Tensor<float, 2, true>& centroids,
                                     Tensor<float, 3, true>& pqCentroidsInnermostCode,
                                     Tensor<int, 2, true>& topQueryToCentroid,
                                     bool useFloat16Lookup,
                                     int bytesPerCode,
                                     int numSubQuantizers,
                                     int numSubQuantizerCodes,
                                     thrust::device_vector<void*>& listCodes,
                                     thrust::device_vector<void*>& listIndices,
                                     IndicesOptions indicesOptions,
                                     thrust::device_vector<int>& listLengths,
                                     int maxListLength,
                                     int k,
                                     // output
                                     Tensor<float, 2, true>& outDistances,
                                     // output
                                     Tensor<long, 2, true>& outIndices,
                                     GpuResources* res) {
  constexpr int kMinQueryTileSize = 8;
  constexpr int kMaxQueryTileSize = 128;
  constexpr int kThrustMemSize = 16384;

  int nprobe = topQueryToCentroid.getSize(1);
  auto& mem = res->getMemoryManagerCurrentDevice();
  auto stream = res->getDefaultStreamCurrentDevice();

  // Make a reservation for Thrust to do its dirty work (global memory
  // cross-block reduction space); hopefully this is large enough.
  DeviceTensor<char, 1, true> thrustMem1(
    mem, {kThrustMemSize}, stream);
  DeviceTensor<char, 1, true> thrustMem2(
    mem, {kThrustMemSize}, stream);
  DeviceTensor<char, 1, true>* thrustMem[2] =
    {&thrustMem1, &thrustMem2};

  // How much temporary storage is available?
  // If possible, we'd like to fit within the space available.
  size_t sizeAvailable = mem.getSizeAvailable();

  // We run two passes of heap selection
  // This is the size of the first-level heap passes
  constexpr int kNProbeSplit = 8;
  // Avoid HIP's global-namespace ::min host overload (deprecated in newer
  // ROCm releases); compute the clamp explicitly instead.
  int pass2Chunks = nprobe < kNProbeSplit ? nprobe : kNProbeSplit;

  size_t sizeForFirstSelectPass =
    pass2Chunks * k * (sizeof(float) + sizeof(int));

  // How much temporary storage we need per each query
  size_t sizePerQuery =
    2 * // streams
    ((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
     nprobe * maxListLength * sizeof(float) + // allDistances
     // residual distances
     nprobe * numSubQuantizers * numSubQuantizerCodes * sizeof(float) +
     sizeForFirstSelectPass);

  int queryTileSize = (int) (sizeAvailable / sizePerQuery);

  if (queryTileSize < kMinQueryTileSize) {
    queryTileSize = kMinQueryTileSize;
  } else if (queryTileSize > kMaxQueryTileSize) {
    queryTileSize = kMaxQueryTileSize;
  }

  // FIXME: we should adjust queryTileSize to deal with this, since
  // indexing is in int32
  FAISS_ASSERT(queryTileSize * nprobe * maxListLength <
               std::numeric_limits<int>::max());

  // Temporary memory buffers
  // Make sure there is space prior to the start which will be 0, and
  // will handle the boundary condition without branches
  DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
    mem, {queryTileSize * nprobe + 1}, stream);
  DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
    mem, {queryTileSize * nprobe + 1}, stream);

  // Views starting at element 1, so index -1 (the sentinel zero) is valid
  DeviceTensor<int, 2, true> prefixSumOffsets1(
    prefixSumOffsetSpace1[1].data(),
    {queryTileSize, nprobe});
  DeviceTensor<int, 2, true> prefixSumOffsets2(
    prefixSumOffsetSpace2[1].data(),
    {queryTileSize, nprobe});
  DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
    {&prefixSumOffsets1, &prefixSumOffsets2};

  // Make sure the element before prefixSumOffsets is 0, since we
  // depend upon simple, boundary-less indexing to get proper results
  CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace1.data(),
                             0,
                             sizeof(int),
                             stream));
  CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace2.data(),
                             0,
                             sizeof(int),
                             stream));

  int codeDistanceTypeSize = useFloat16Lookup ? sizeof(half) : sizeof(float);

  int totalCodeDistancesSize =
    queryTileSize * nprobe * numSubQuantizers * numSubQuantizerCodes *
    codeDistanceTypeSize;

  DeviceTensor<char, 1, true> codeDistances1Mem(
    mem, {totalCodeDistancesSize}, stream);
  NoTypeTensor<4, true> codeDistances1(
    codeDistances1Mem.data(),
    codeDistanceTypeSize,
    {queryTileSize, nprobe, numSubQuantizers, numSubQuantizerCodes});

  DeviceTensor<char, 1, true> codeDistances2Mem(
    mem, {totalCodeDistancesSize}, stream);
  NoTypeTensor<4, true> codeDistances2(
    codeDistances2Mem.data(),
    codeDistanceTypeSize,
    {queryTileSize, nprobe, numSubQuantizers, numSubQuantizerCodes});

  NoTypeTensor<4, true>* codeDistances[2] =
    {&codeDistances1, &codeDistances2};

  DeviceTensor<float, 1, true> allDistances1(
    mem, {queryTileSize * nprobe * maxListLength}, stream);
  DeviceTensor<float, 1, true> allDistances2(
    mem, {queryTileSize * nprobe * maxListLength}, stream);
  DeviceTensor<float, 1, true>* allDistances[2] =
    {&allDistances1, &allDistances2};

  DeviceTensor<float, 3, true> heapDistances1(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<float, 3, true> heapDistances2(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<float, 3, true>* heapDistances[2] =
    {&heapDistances1, &heapDistances2};

  DeviceTensor<int, 3, true> heapIndices1(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<int, 3, true> heapIndices2(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<int, 3, true>* heapIndices[2] =
    {&heapIndices1, &heapIndices2};

  auto streams = res->getAlternateStreamsCurrentDevice();
  streamWait(streams, {stream});

  int curStream = 0;

  for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
    // Last tile may be smaller than queryTileSize
    int remaining = queries.getSize(0) - query;
    int numQueriesInTile =
      remaining < queryTileSize ? remaining : queryTileSize;

    auto prefixSumOffsetsView =
      prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);
    auto codeDistancesView =
      codeDistances[curStream]->narrowOutermost(0, numQueriesInTile);
    auto coarseIndicesView =
      topQueryToCentroid.narrowOutermost(query, numQueriesInTile);
    auto queryView =
      queries.narrowOutermost(query, numQueriesInTile);
    auto heapDistancesView =
      heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
    auto heapIndicesView =
      heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
    auto outDistanceView =
      outDistances.narrowOutermost(query, numQueriesInTile);
    auto outIndicesView =
      outIndices.narrowOutermost(query, numQueriesInTile);

    runMultiPassTile(queryView,
                     centroids,
                     pqCentroidsInnermostCode,
                     codeDistancesView,
                     coarseIndicesView,
                     useFloat16Lookup,
                     bytesPerCode,
                     numSubQuantizers,
                     numSubQuantizerCodes,
                     listCodes,
                     listIndices,
                     indicesOptions,
                     listLengths,
                     *thrustMem[curStream],
                     prefixSumOffsetsView,
                     *allDistances[curStream],
                     heapDistancesView,
                     heapIndicesView,
                     k,
                     outDistanceView,
                     outIndicesView,
                     streams[curStream]);

    curStream = (curStream + 1) % 2;
  }

  // Rejoin the alternate streams with the default stream before returning
  streamWait({stream}, streams);
}
} } // namespace
| 7f7a3d8fa4de32b29dfdbbec599cf935e8ed0f5a.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/PQScanMultiPassNoPrecomputed.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/PQCodeDistances.cuh>
#include <faiss/gpu/impl/PQCodeLoad.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/NoTypeTensor.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/utils/HostTensor.cuh>
namespace faiss { namespace gpu {
// This must be kept in sync with PQCodeDistances.cu
//
// Returns whether the per-sub-quantizer dimensionality `dims` has a kernel
// specialization available in the no-precomputed-codes path.
bool isSupportedNoPrecomputedSubDimSize(int dims) {
  // FIXME: larger sizes require too many registers - we need the
  // MM implementation working
  static const int kSupportedDims[] =
    {1, 2, 3, 4, 6, 8, 10, 12, 16, 20, 24, 28, 32};

  for (int supported : kSupportedDims) {
    if (dims == supported) {
      return true;
    }
  }

  return false;
}
// Cooperatively stages one (query, probe) code-distance table from global
// memory into shared memory. All threads of the block participate; when
// numCodes is a multiple of the LookupVecT width, loads/stores are widened
// to LookupVecT, otherwise element-wise LookupT copies are used.
// Callers are responsible for issuing __syncthreads() after load() returns
// (see pqScanNoPrecomputedMultiPass).
template <typename LookupT, typename LookupVecT>
struct LoadCodeDistances {
  static inline __device__ void load(LookupT* smem,
                                     LookupT* codes,
                                     int numCodes) {
    // Number of LookupT elements carried per vector word
    constexpr int kWordSize = sizeof(LookupVecT) / sizeof(LookupT);

    // We can only use the vector type if the data is guaranteed to be
    // aligned. The codes are innermost, so if it is evenly divisible,
    // then any slice will be aligned.
    if (numCodes % kWordSize == 0) {
      // Load the data by float4 for efficiency, and then handle any remainder
      // limitVec is the number of whole vec words we can load, in terms
      // of whole blocks performing the load
      constexpr int kUnroll = 2;
      int limitVec = numCodes / (kUnroll * kWordSize * blockDim.x);
      limitVec *= kUnroll * blockDim.x;

      LookupVecT* smemV = (LookupVecT*) smem;
      LookupVecT* codesV = (LookupVecT*) codes;

      for (int i = threadIdx.x; i < limitVec; i += kUnroll * blockDim.x) {
        LookupVecT vals[kUnroll];

#pragma unroll
        for (int j = 0; j < kUnroll; ++j) {
          vals[j] =
            LoadStore<LookupVecT>::load(&codesV[i + j * blockDim.x]);
        }

#pragma unroll
        for (int j = 0; j < kUnroll; ++j) {
          LoadStore<LookupVecT>::store(&smemV[i + j * blockDim.x], vals[j]);
        }
      }

      // This is where we start loading the remainder that does not evenly
      // fit into kUnroll x blockDim.x
      int remainder = limitVec * kWordSize;

      for (int i = remainder + threadIdx.x; i < numCodes; i += blockDim.x) {
        smem[i] = codes[i];
      }
    } else {
      // Potential unaligned load
      constexpr int kUnroll = 4;

      int limit = utils::roundDown(numCodes, kUnroll * blockDim.x);

      int i = threadIdx.x;
      for (; i < limit; i += kUnroll * blockDim.x) {
        LookupT vals[kUnroll];

#pragma unroll
        for (int j = 0; j < kUnroll; ++j) {
          vals[j] = codes[i + j * blockDim.x];
        }

#pragma unroll
        for (int j = 0; j < kUnroll; ++j) {
          smem[i + j * blockDim.x] = vals[j];
        }
      }

      // Tail elements past the unrolled region
      for (; i < numCodes; i += blockDim.x) {
        smem[i] = codes[i];
      }
    }
  }
};
// Scan kernel: one thread block handles a single (query, probe) pair
// (blockIdx.y = query, blockIdx.x = probe). The per-code lookup table for
// that pair is first staged into dynamic shared memory, then each thread
// walks the inverted list with a block-wide stride, decoding each PQ code
// into a distance that is written to the flat `distance` buffer at this
// pair's prefix-sum offset. Indices are not written here; they are
// recovered during the later selection passes.
// Dynamic shared memory requirement:
//   numSubQuantizers * codesPerSubQuantizer * sizeof(LookupT) bytes.
template <int NumSubQuantizers, typename LookupT, typename LookupVecT>
__global__ void
pqScanNoPrecomputedMultiPass(Tensor<float, 2, true> queries,
                             Tensor<float, 3, true> pqCentroids,
                             Tensor<int, 2, true> topQueryToCentroid,
                             Tensor<LookupT, 4, true> codeDistances,
                             void** listCodes,
                             int* listLengths,
                             Tensor<int, 2, true> prefixSumOffsets,
                             Tensor<float, 1, true> distance) {
  const auto codesPerSubQuantizer = pqCentroids.getSize(2);

  // Where the pq code -> residual distance is stored
  extern __shared__ char smemCodeDistances[];
  LookupT* codeDist = (LookupT*) smemCodeDistances;

  // Each block handles a single query
  auto queryId = blockIdx.y;
  auto probeId = blockIdx.x;

  // This is where we start writing out data
  // We ensure that before the array (at offset -1), there is a 0 value
  int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
  float* distanceOut = distance[outBase].data();

  auto listId = topQueryToCentroid[queryId][probeId];
  // Safety guard in case NaNs in input cause no list ID to be generated.
  // listId is uniform across the block (it depends only on blockIdx), so
  // all threads take this early exit together, before any __syncthreads().
  if (listId == -1) {
    return;
  }

  unsigned char* codeList = (unsigned char*) listCodes[listId];
  int limit = listLengths[listId];

  // Number of 32-bit words each packed code occupies
  constexpr int kNumCode32 = NumSubQuantizers <= 4 ? 1 :
    (NumSubQuantizers / 4);
  unsigned int code32[kNumCode32];
  unsigned int nextCode32[kNumCode32];

  // We double-buffer the code loading, which improves memory utilization
  if (threadIdx.x < limit) {
    LoadCode32<NumSubQuantizers>::load(code32, codeList, threadIdx.x);
  }

  LoadCodeDistances<LookupT, LookupVecT>::load(
    codeDist,
    codeDistances[queryId][probeId].data(),
    codeDistances.getSize(2) * codeDistances.getSize(3));

  // Prevent WAR dependencies
  __syncthreads();

  // Each thread handles one code element in the list, with a
  // block-wide stride
  for (int codeIndex = threadIdx.x;
       codeIndex < limit;
       codeIndex += blockDim.x) {
    // Prefetch next codes
    if (codeIndex + blockDim.x < limit) {
      LoadCode32<NumSubQuantizers>::load(
        nextCode32, codeList, codeIndex + blockDim.x);
    }

    float dist = 0.0f;

#pragma unroll
    for (int word = 0; word < kNumCode32; ++word) {
      constexpr int kBytesPerCode32 =
        NumSubQuantizers < 4 ? NumSubQuantizers : 4;

      if (kBytesPerCode32 == 1) {
        // Single sub-quantizer: the code is the whole word
        auto code = code32[0];
        dist = ConvertTo<float>::to(codeDist[code]);

      } else {
#pragma unroll
        for (int byte = 0; byte < kBytesPerCode32; ++byte) {
          // Extract the 8-bit code for this sub-quantizer and accumulate
          // its table entry
          auto code = getByte(code32[word], byte * 8, 8);

          auto offset =
            codesPerSubQuantizer * (word * kBytesPerCode32 + byte);

          dist += ConvertTo<float>::to(codeDist[offset + code]);
        }
      }
    }

    // Write out intermediate distance result
    // We do not maintain indices here, in order to reduce global
    // memory traffic. Those are recovered in the final selection step.
    distanceOut[codeIndex] = dist;

    // Rotate buffers
#pragma unroll
    for (int word = 0; word < kNumCode32; ++word) {
      code32[word] = nextCode32[word];
    }
  }
}
// Processes one tile of queries end-to-end on `stream`:
//   1) prefix-sum of inverted list lengths into `prefixSumOffsets`
//      (write offsets for the scan kernel),
//   2) per-(query, probe) code distance tables (no precomputed tables),
//   3) the PQ scan kernel producing per-candidate distances into
//      `allDistances`,
//   4) a two-pass k-selection into outDistances / outIndices.
// All temporaries are caller-owned so that tiles can be double-buffered.
void
runMultiPassTile(Tensor<float, 2, true>& queries,
                 Tensor<float, 2, true>& centroids,
                 Tensor<float, 3, true>& pqCentroidsInnermostCode,
                 NoTypeTensor<4, true>& codeDistances,
                 Tensor<int, 2, true>& topQueryToCentroid,
                 bool useFloat16Lookup,
                 int bytesPerCode,
                 int numSubQuantizers,
                 int numSubQuantizerCodes,
                 thrust::device_vector<void*>& listCodes,
                 thrust::device_vector<void*>& listIndices,
                 IndicesOptions indicesOptions,
                 thrust::device_vector<int>& listLengths,
                 Tensor<char, 1, true>& thrustMem,
                 Tensor<int, 2, true>& prefixSumOffsets,
                 Tensor<float, 1, true>& allDistances,
                 Tensor<float, 3, true>& heapDistances,
                 Tensor<int, 3, true>& heapIndices,
                 int k,
                 Tensor<float, 2, true>& outDistances,
                 Tensor<long, 2, true>& outIndices,
                 cudaStream_t stream) {
  // Calculate offset lengths, so we know where to write out
  // intermediate results
  runCalcListOffsets(topQueryToCentroid, listLengths, prefixSumOffsets,
                     thrustMem, stream);

  // Calculate residual code distances, since this is without
  // precomputed codes
  runPQCodeDistances(pqCentroidsInnermostCode,
                     queries,
                     centroids,
                     topQueryToCentroid,
                     codeDistances,
                     useFloat16Lookup,
                     stream);

  // Convert all codes to a distance, and write out (distance,
  // index) values for all intermediate results
  {
    auto kThreadsPerBlock = 256;

    // One block per (probe, query) pair
    auto grid = dim3(topQueryToCentroid.getSize(1),
                     topQueryToCentroid.getSize(0));
    auto block = dim3(kThreadsPerBlock);

    // pq centroid distances
    auto smem = useFloat16Lookup ? sizeof(half) : sizeof(float);
    smem *= numSubQuantizers * numSubQuantizerCodes;
    FAISS_ASSERT(smem <= getMaxSharedMemPerBlockCurrentDevice());

#define RUN_PQ_OPT(NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T)                   \
    do {                                                                \
      auto codeDistancesT = codeDistances.toTensor<LOOKUP_T>();         \
                                                                        \
      pqScanNoPrecomputedMultiPass<NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T>   \
        <<<grid, block, smem, stream>>>(                                \
          queries,                                                      \
          pqCentroidsInnermostCode,                                     \
          topQueryToCentroid,                                           \
          codeDistancesT,                                               \
          listCodes.data().get(),                                       \
          listLengths.data().get(),                                     \
          prefixSumOffsets,                                             \
          allDistances);                                                \
    } while (0)

#define RUN_PQ(NUM_SUB_Q)                       \
    do {                                        \
      if (useFloat16Lookup) {                   \
        RUN_PQ_OPT(NUM_SUB_Q, half, Half8);     \
      } else {                                  \
        RUN_PQ_OPT(NUM_SUB_Q, float, float4);   \
      }                                         \
    } while (0)

    // Dispatch on the compile-time specialized code size (bytes per
    // encoded vector == number of sub-quantizers for 8-bit codes)
    switch (bytesPerCode) {
      case 1:
        RUN_PQ(1);
        break;
      case 2:
        RUN_PQ(2);
        break;
      case 3:
        RUN_PQ(3);
        break;
      case 4:
        RUN_PQ(4);
        break;
      case 8:
        RUN_PQ(8);
        break;
      case 12:
        RUN_PQ(12);
        break;
      case 16:
        RUN_PQ(16);
        break;
      case 20:
        RUN_PQ(20);
        break;
      case 24:
        RUN_PQ(24);
        break;
      case 28:
        RUN_PQ(28);
        break;
      case 32:
        RUN_PQ(32);
        break;
      case 40:
        RUN_PQ(40);
        break;
      case 48:
        RUN_PQ(48);
        break;
      case 56:
        RUN_PQ(56);
        break;
      case 64:
        RUN_PQ(64);
        break;
      case 96:
        RUN_PQ(96);
        break;
      default:
        // Unsupported code size; isSupportedNoPrecomputedSubDimSize should
        // have screened this out earlier
        FAISS_ASSERT(false);
        break;
    }

#undef RUN_PQ
#undef RUN_PQ_OPT
  }

  CUDA_TEST_ERROR();

  // k-select the output in chunks, to increase parallelism
  runPass1SelectLists(prefixSumOffsets,
                      allDistances,
                      topQueryToCentroid.getSize(1),
                      k,
                      false, // L2 distance chooses smallest
                      heapDistances,
                      heapIndices,
                      stream);

  // k-select final output
  auto flatHeapDistances = heapDistances.downcastInner<2>();
  auto flatHeapIndices = heapIndices.downcastInner<2>();

  runPass2SelectLists(flatHeapDistances,
                      flatHeapIndices,
                      listIndices,
                      indicesOptions,
                      prefixSumOffsets,
                      topQueryToCentroid,
                      k,
                      false, // L2 distance chooses smallest
                      outDistances,
                      outIndices,
                      stream);
}
// Runs the full no-precomputed-tables IVFPQ scan.
//
// The query set is processed in tiles sized so that the per-query temporary
// buffers (prefix-sum offsets, code distance tables, candidate distances and
// two-pass selection heaps) fit within the temporary memory available from
// the device memory manager. Two copies of every temporary buffer are kept
// and tiles alternate between two streams so consecutive tiles can overlap.
//
// queries: (numQueries, dim) query vectors
// topQueryToCentroid: (numQueries, nprobe) coarse list IDs per query
// outDistances / outIndices: (numQueries, k) final results, written in place
void runPQScanMultiPassNoPrecomputed(Tensor<float, 2, true>& queries,
                                     Tensor<float, 2, true>& centroids,
                                     Tensor<float, 3, true>& pqCentroidsInnermostCode,
                                     Tensor<int, 2, true>& topQueryToCentroid,
                                     bool useFloat16Lookup,
                                     int bytesPerCode,
                                     int numSubQuantizers,
                                     int numSubQuantizerCodes,
                                     thrust::device_vector<void*>& listCodes,
                                     thrust::device_vector<void*>& listIndices,
                                     IndicesOptions indicesOptions,
                                     thrust::device_vector<int>& listLengths,
                                     int maxListLength,
                                     int k,
                                     // output
                                     Tensor<float, 2, true>& outDistances,
                                     // output
                                     Tensor<long, 2, true>& outIndices,
                                     GpuResources* res) {
  constexpr int kMinQueryTileSize = 8;
  constexpr int kMaxQueryTileSize = 128;
  constexpr int kThrustMemSize = 16384;

  int nprobe = topQueryToCentroid.getSize(1);
  auto& mem = res->getMemoryManagerCurrentDevice();
  auto stream = res->getDefaultStreamCurrentDevice();

  // Make a reservation for Thrust to do its dirty work (global memory
  // cross-block reduction space); hopefully this is large enough.
  DeviceTensor<char, 1, true> thrustMem1(
    mem, {kThrustMemSize}, stream);
  DeviceTensor<char, 1, true> thrustMem2(
    mem, {kThrustMemSize}, stream);
  DeviceTensor<char, 1, true>* thrustMem[2] =
    {&thrustMem1, &thrustMem2};

  // How much temporary storage is available?
  // If possible, we'd like to fit within the space available.
  size_t sizeAvailable = mem.getSizeAvailable();

  // We run two passes of heap selection
  // This is the size of the first-level heap passes
  constexpr int kNProbeSplit = 8;
  int pass2Chunks = std::min(nprobe, kNProbeSplit);

  size_t sizeForFirstSelectPass =
    pass2Chunks * k * (sizeof(float) + sizeof(int));

  // How much temporary storage we need per each query
  size_t sizePerQuery =
    2 * // streams
    ((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
     nprobe * maxListLength * sizeof(float) + // allDistances
     // residual distances
     nprobe * numSubQuantizers * numSubQuantizerCodes * sizeof(float) +
     sizeForFirstSelectPass);

  int queryTileSize = (int) (sizeAvailable / sizePerQuery);

  // Clamp the tile size; below the minimum we may exceed the reservation
  // and fall back to on-demand allocation
  if (queryTileSize < kMinQueryTileSize) {
    queryTileSize = kMinQueryTileSize;
  } else if (queryTileSize > kMaxQueryTileSize) {
    queryTileSize = kMaxQueryTileSize;
  }

  // FIXME: we should adjust queryTileSize to deal with this, since
  // indexing is in int32
  FAISS_ASSERT(queryTileSize * nprobe * maxListLength <
               std::numeric_limits<int>::max());

  // Temporary memory buffers
  // Make sure there is space prior to the start which will be 0, and
  // will handle the boundary condition without branches
  DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
    mem, {queryTileSize * nprobe + 1}, stream);
  DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
    mem, {queryTileSize * nprobe + 1}, stream);

  // Views starting at element 1, so index -1 (the sentinel zero) is valid
  DeviceTensor<int, 2, true> prefixSumOffsets1(
    prefixSumOffsetSpace1[1].data(),
    {queryTileSize, nprobe});
  DeviceTensor<int, 2, true> prefixSumOffsets2(
    prefixSumOffsetSpace2[1].data(),
    {queryTileSize, nprobe});
  DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
    {&prefixSumOffsets1, &prefixSumOffsets2};

  // Make sure the element before prefixSumOffsets is 0, since we
  // depend upon simple, boundary-less indexing to get proper results
  CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace1.data(),
                              0,
                              sizeof(int),
                              stream));
  CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace2.data(),
                              0,
                              sizeof(int),
                              stream));

  int codeDistanceTypeSize = useFloat16Lookup ? sizeof(half) : sizeof(float);

  int totalCodeDistancesSize =
    queryTileSize * nprobe * numSubQuantizers * numSubQuantizerCodes *
    codeDistanceTypeSize;

  // Untyped backing store for the lookup tables (half or float at runtime)
  DeviceTensor<char, 1, true> codeDistances1Mem(
    mem, {totalCodeDistancesSize}, stream);
  NoTypeTensor<4, true> codeDistances1(
    codeDistances1Mem.data(),
    codeDistanceTypeSize,
    {queryTileSize, nprobe, numSubQuantizers, numSubQuantizerCodes});

  DeviceTensor<char, 1, true> codeDistances2Mem(
    mem, {totalCodeDistancesSize}, stream);
  NoTypeTensor<4, true> codeDistances2(
    codeDistances2Mem.data(),
    codeDistanceTypeSize,
    {queryTileSize, nprobe, numSubQuantizers, numSubQuantizerCodes});

  NoTypeTensor<4, true>* codeDistances[2] =
    {&codeDistances1, &codeDistances2};

  DeviceTensor<float, 1, true> allDistances1(
    mem, {queryTileSize * nprobe * maxListLength}, stream);
  DeviceTensor<float, 1, true> allDistances2(
    mem, {queryTileSize * nprobe * maxListLength}, stream);
  DeviceTensor<float, 1, true>* allDistances[2] =
    {&allDistances1, &allDistances2};

  DeviceTensor<float, 3, true> heapDistances1(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<float, 3, true> heapDistances2(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<float, 3, true>* heapDistances[2] =
    {&heapDistances1, &heapDistances2};

  DeviceTensor<int, 3, true> heapIndices1(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<int, 3, true> heapIndices2(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<int, 3, true>* heapIndices[2] =
    {&heapIndices1, &heapIndices2};

  auto streams = res->getAlternateStreamsCurrentDevice();
  streamWait(streams, {stream});

  int curStream = 0;

  for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
    // Last tile may be smaller than queryTileSize
    int numQueriesInTile =
      std::min(queryTileSize, queries.getSize(0) - query);

    auto prefixSumOffsetsView =
      prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);
    auto codeDistancesView =
      codeDistances[curStream]->narrowOutermost(0, numQueriesInTile);
    auto coarseIndicesView =
      topQueryToCentroid.narrowOutermost(query, numQueriesInTile);
    auto queryView =
      queries.narrowOutermost(query, numQueriesInTile);
    auto heapDistancesView =
      heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
    auto heapIndicesView =
      heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
    auto outDistanceView =
      outDistances.narrowOutermost(query, numQueriesInTile);
    auto outIndicesView =
      outIndices.narrowOutermost(query, numQueriesInTile);

    runMultiPassTile(queryView,
                     centroids,
                     pqCentroidsInnermostCode,
                     codeDistancesView,
                     coarseIndicesView,
                     useFloat16Lookup,
                     bytesPerCode,
                     numSubQuantizers,
                     numSubQuantizerCodes,
                     listCodes,
                     listIndices,
                     indicesOptions,
                     listLengths,
                     *thrustMem[curStream],
                     prefixSumOffsetsView,
                     *allDistances[curStream],
                     heapDistancesView,
                     heapIndicesView,
                     k,
                     outDistanceView,
                     outIndicesView,
                     streams[curStream]);

    curStream = (curStream + 1) % 2;
  }

  // Rejoin the alternate streams with the default stream before returning
  streamWait({stream}, streams);
}
} } // namespace
|
275655f1efbc00cbee13d4b8fbf105234e05880d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMath.hip"
#else
#include "ATen/hip/HIPContext.h"
// Fills every element of self_ with `value` using a pointwise apply.
void THCTensor_(fill)(THCState* state, THCTensor *self_, scalar_t value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));

  // THC_pointwiseApply1 returns false when it cannot handle the tensor
  // (CUTORCH_DIM_WARNING indicates a dimensionality limit); report that as
  // an argument error on parameter 1.
  if (!THC_pointwiseApply1<scalar_t>(
        state, self_, TensorFillOp<scalar_t>(value))) {
    THArgCheck(false, 1, CUTORCH_DIM_WARNING);
  }

  THCudaCheck(hipGetLastError());
}
// Zeroes all elements of self_. Contiguous tensors take the fast
// hipMemsetAsync path (a byte-wise 0 fill is a valid zero for every scalar
// type); non-contiguous tensors fall back to a pointwise fill of scalar 0.
void THCTensor_(zero)(THCState *state, THCTensor *self_)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
  if (THCTensor_(isContiguous)(state, self_)) {
    THCudaCheck(hipMemsetAsync(THCTensor_(data)(state, self_),
                               0,
                               sizeof(scalar_t) * THCTensor_(nElement)(state, self_),
                               THCState_getCurrentStream(state)));
  } else {
    // Non-contiguous: apply the fill element by element
    if (!THC_pointwiseApply1<scalar_t>(
          state, self_,
          TensorFillOp<scalar_t>(ScalarConvert<int, scalar_t>::to(0)))) {
      THArgCheck(false, 1, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(hipGetLastError());
}
// Resizes r_ to match input's shape, then fills it with zeros.
void THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
  THCTensor_(resizeAs)(state, r_, input);
  THCTensor_(zero)(state, r_);
}
// Resizes r_ to match input's shape, then fills it with ones.
void THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
  THCTensor_(resizeAs)(state, r_, input);
  THCTensor_(fill)(state, r_, ScalarConvert<int, scalar_t>::to(1));
}
// Returns the total number of elements in t.
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
  return THCTensor_(nElement)(state, t);
}
// Concatenates `ta` and `tb` along `dimension` into `result`.
// Thin convenience wrapper over the general N-input catArray.
void THCTensor_(cat)(THCState *state, THCTensor *result,
                     THCTensor *ta, THCTensor *tb, int dimension)
{
  THCTensor* pair[2] = {ta, tb};
  THCTensor_(catArray)(state, result, pair, 2, dimension);
}
// Verifies that `first` and `second` have the same rank and identical sizes
// in every dimension except `dimension`; raises a TH argument error
// otherwise.
void THCTensor_(check_shape_except_dim)(THCState *state,
    THCTensor *first, THCTensor *second, int dimension);
inline void THCTensor_(check_shape_except_dim)(THCState *state,
    THCTensor *first, THCTensor *second, int dimension)
{
  int ndims_a = first->dim();
  int ndims_b = second->dim();
  THArgCheck(ndims_a == ndims_b, 0,
      "Tensors must have same number of dimensions: got %d and %d",
      ndims_a, ndims_b);
  for (int dim = 0; dim < ndims_a; dim++) {
    if (dim != dimension) {
      int64_t size_a = THCTensor_(size)(state, first, dim);
      int64_t size_b = THCTensor_(size)(state, second, dim);
      THArgCheck(size_a == size_b, 0,
          "Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
          dimension, (long long)size_a, (long long)size_b, dim);
    }
  }
}
// Concatenates `numInputs` tensors along `dimension` into `result`.
// Shape checking is done against the first non-skipped input; a fast
// batched-copy kernel path is used when all inputs qualify (see the
// condition list below), otherwise each input is copied serially into a
// narrowed view of the result.
void THCTensor_(catArray)(THCState *state, THCTensor *result,
                          THCTensor **inputs, int numInputs, int dimension)
{
  // previously, size [0] tensors were the only possible empty tensors; thus, it wasn't possible
  // to cat empty tensors unless all the other tensors were 1-dimensional, so we allowed these tensors
  // to be "skipped". We maintain this behavior for backwards compatibility, but only for this specific
  // size (i.e. other empty sizes are not skipped).
  // FIXME: warn if this is the case
  int i, j, cohortMax;
  int64_t offset;
  bool hasSkippedInput = false;
  THCTensor *notSkippedTensor = NULL;  // non-owning reference
  auto should_skip = [](THCTensor *t) { return t->is_empty() && t->dim() == 1; };
  int nDims = 0;

  // Find the last non-skipped input; its rank defines the expected rank
  for (i = 0; i < numInputs; i++)
  {
    if (should_skip(inputs[i])) {
      hasSkippedInput = true;
      continue;
    }
    nDims = inputs[i]->dim();
    notSkippedTensor = inputs[i];
  }

  // If all inputs are empty tensors, return an empty tensor
  if (notSkippedTensor == NULL) {
    return;
  }

  THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
  THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension);

  std::vector<int64_t> size(nDims);

  // Compute size of the result in the cat dimension
  int64_t cat_dim_size = 0;
  for (int i = 0; i < numInputs; i++) {
    THCTensor *tensor = inputs[i];
    if (should_skip(tensor)) {
      continue;
    }
    THCTensor_(check_shape_except_dim)(state, notSkippedTensor, tensor, dimension);
    cat_dim_size += THCTensor_(size)(state, tensor, dimension);
  }

  // Compute the size of the result
  for (int dim = 0; dim < nDims; dim++) {
    int64_t result_dim_size = THCTensor_(size)(state, notSkippedTensor, dim);
    if (dim == dimension) {
      result_dim_size = cat_dim_size;
    }
    size[dim] = result_dim_size;
  }
  THCTensor_(resize)(state, result, size, {});

  // We parallelize the copy if all 7 conditions pass:
  //
  // 1. There is more than one input tensor
  // 2. No empty inputs
  // 3. The result tensor is 32-bit indexable
  // 4. The number of dimensions is <= 4
  // 5. All input tensors are contiguous (output tensor may be non-contig)
  // 6. All input tensors can use 32-bit indexing
  // 7. All input tensors are on the same device
  if (numInputs > 1 &&
      !hasSkippedInput &&
      result->dim() <= CAT_ARRAY_MAX_INPUT_DIMS &&
      THCTensor_canUse32BitIndexMath(state, result) &&
      THCTensor_allContiguous(state, inputs, numInputs) &&
      THCTensor_all32BitIndexable(state, inputs, numInputs) &&
      THCTensor_allSameDevice(state, inputs, numInputs)) {

    // First, let's set up our kernel parameters. We start with a raw pointer to the storage
    // for the output Tensor.
    scalar_t *data = THCTensor_(data)(state, result);

    // Kernel Parameter
    size_t tensorMetadataSize = sizeof(CatArrInputTensor<scalar_t, unsigned int>) * CAT_ARRAY_BATCH_SIZE;
    auto d_inputs = static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(THCudaMalloc(state, tensorMetadataSize));

    OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param;

    // Next, let's initialize the size, stride arrays for the output Tensor.
    for (i = 0; i < nDims; ++i) {
      param.outputSize[i] = THCTensor_(size)(state, result, i);
      param.outputStride[i] = THCTensor_(stride)(state, result, i);
    }

    at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

    // Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
  hipLaunchKernelGGL(( CatArrayBatchedCopy<scalar_t, unsigned int, DIMS>), dim3(catGrid), dim3(applyBlock), 0, stream.stream(), data, d_inputs, param, dimension, param.outputStride[dimension]);

    // Now we loop, copying metadata for up to CAT_ARRAY_BATCH_SIZE inputs
    // to the device per kernel launch
    offset = 0;
    for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) {
      // Re-allocate stackInputs every iteration to avoid read-after-write hazard
      {
        auto stackInputs_owner = THCudaHostAlloc(state, tensorMetadataSize);
        CatArrInputTensor<scalar_t, unsigned int>* stackInputs = static_cast<CatArrInputTensor<scalar_t, unsigned int>*>(stackInputs_owner.get());
        cohortMax = 0;
        for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) {
          int64_t dimSize = THCTensor_(size)(state, inputs[i+j], dimension);

          stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]);
          stackInputs[j].offset = offset;
          stackInputs[j].dimSize = dimSize;
          stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]);
          cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements;

          // update offset
          offset += dimSize;
        }
        THCudaCheck(hipMemcpyAsync(
            d_inputs,
            stackInputs,
            j * sizeof(CatArrInputTensor<scalar_t, unsigned int>),
            hipMemcpyHostToDevice,
            stream.stream()));
        THCudaHostRecord(state, stackInputs);
      }

      // Next, let's consider how we set our kernel launch parameters.
      // We borrow from THCApply, which the kernel's internal indexing
      // is based on.
      dim3 applyBlock = getApplyBlock();

      //Get grid where x dim fills half gpu and y dim is number of tensors.
      //This will have cating two tensors fill the entire grid, but prevent
      //many threads from needlessly load meta data if their sizes is small.
      dim3 catGrid;
      getCatGrid(state, j, catGrid);

      switch (nDims) {
        case 1:
          HANDLE_CASE(1);
          break;
        case 2:
          HANDLE_CASE(2);
          break;
        case 3:
          HANDLE_CASE(3);
          break;
        case 4:
          HANDLE_CASE(4);
          break;
      }
      THCudaCheck(hipGetLastError());
    }
    THCudaFree(state, d_inputs);
#undef HANDLE_CASE
  } else {
    // Serial fallback: copy each input into a narrowed view of the result
    offset = 0;
    for (j = 0; j < numInputs; j++)
    {
      if (should_skip(inputs[j])) continue;
      int64_t dimSize = THCTensor_(size)(state, inputs[j], dimension);
      THCTensor *nt = THCTensor_(newWithTensor)(state, result);
      THCTensor_(narrow)(state, nt, NULL, dimension, offset, dimSize);
      THCTensor_(copy)(state, nt, inputs[j]);
      THCTensor_(free)(state, nt);
      offset += dimSize;
    }
  }
}
// Writes the coordinates of all nonzero elements of `self` into `tensor`,
// which is resized to (num_nonzeros, num_dim). First pass: thrust::copy_if
// scatters the linear (contiguous) indices of nonzero elements into the
// first column of a strided view of `tensor`; second pass: each linear index
// is converted in place into per-dimension coordinates via idx_functor.
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
                          THCTensor *self)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self  ));
  THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));

  using namespace thrust::placeholders;

  THCThrustAllocator thrustAlloc(state);
  self = THCTensor_(newContiguous)(state, self);
  thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));

  int num_dim = THCTensor_(nDimension)(state, self);
  int num_dim_noscalars = std::max<int>(1, num_dim);
  int64_t N = THCTensor_(nElement)(state, self);

  // this is a little awkward for scalars because we run thrust to count the number of zeros
  // (which are necessary to get the correct size), but thrust just has an array API, so
  // we need to basically treat the scalar as a 1-dimensional tensor (array) for
  // the counting part.
  THCudaLongTensor_resize2d(state, tensor, N, num_dim_noscalars);
  tensor = THCudaLongTensor_newContiguous(state, tensor);
  thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));

  thrust::counting_iterator<int64_t> idxfirst(0);
  thrust::counting_iterator<int64_t> idxlast = idxfirst + N;

  typedef thrust::device_ptr<int64_t> Iter;
  // View of column 0: one slot per row, stride num_dim_noscalars
  strided_range<Iter> strided_tensor(tensor_data,
                                     tensor_data+N*num_dim_noscalars, num_dim_noscalars);

#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
  hipStream_t stream = THCState_getCurrentStream(state);
#endif

  // Copy the linear index of every nonzero element into column 0
  strided_range<Iter>::iterator dend = thrust::copy_if(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
    thrust::hip::par(thrustAlloc).on(stream),
#endif
    idxfirst,
    idxlast,
    self_data,
    strided_tensor.begin(),
    NonZeroOp<scalar_t>()
  );

  int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);

  if (num_nonzeros > 0 && num_dim > 0) {
    // Unflatten each linear index into coordinates, innermost dim first
    int64_t div = 1;
    for (int dim = num_dim-1; dim >= 0; dim--) {
      strided_range<Iter> stride_dim(tensor_data+dim,
                                     tensor_data+N*num_dim, num_dim);
      thrust::transform(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
        thrust::hip::par(thrustAlloc).on(stream),
#endif
        strided_tensor.begin(),
        strided_tensor.end(),
        stride_dim.begin(),
        idx_functor(div, THTensor_(size)(self, dim))
      );
      div *= THTensor_(size)(self, dim);
    }
  }

  // Shrink to the actual number of nonzero rows found
  THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);

  // Release the extra references taken by newContiguous above
  THCTensor_(free)(state, self);
  THCudaLongTensor_free(state, tensor);

  THCudaCheck(hipGetLastError());
}
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimensionLegacyNoScalars)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
if (size > 0) {
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(min((int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyFromDiagonal<scalar_t>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
}
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THTensor_strideLegacyNoScalars(src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
if (size > 0) {
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(min((int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyToDiagonal<scalar_t>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
THArgCheck(n > 0, 1, "invalid argument");
if(m <= 0)
m = n;
THCTensor_(resize2d)(state, self_, n, m);
THCTensor_(zero)(state, self_);
int64_t sz = THMin(n, m);
int64_t stride = THCTensor_(stride)(state, self_, 0) +
THCTensor_(stride)(state, self_, 1);
THCTensor *diag = THCTensor_(newWithStorage1d)(state, THTensor_getStoragePtr(self_),
self_->storage_offset(), sz, stride);
THCTensor_(fill)(state, diag, ScalarConvert<int, scalar_t>::to(1));
THCTensor_(free)(state, diag);
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((THTensor_nDimensionLegacyAll(src_) == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#endif
| 275655f1efbc00cbee13d4b8fbf105234e05880d.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMath.cu"
#else
#include "ATen/cuda/CUDAContext.h"
void THCTensor_(fill)(THCState* state, THCTensor *self_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (!THC_pointwiseApply1<scalar_t>(
state, self_, TensorFillOp<scalar_t>(value))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(zero)(THCState *state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (THCTensor_(isContiguous)(state, self_)) {
THCudaCheck(cudaMemsetAsync(THCTensor_(data)(state, self_),
0,
sizeof(scalar_t) * THCTensor_(nElement)(state, self_),
THCState_getCurrentStream(state)));
} else {
if (!THC_pointwiseApply1<scalar_t>(
state, self_,
TensorFillOp<scalar_t>(ScalarConvert<int, scalar_t>::to(0)))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(zero)(state, r_);
}
void THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(fill)(state, r_, ScalarConvert<int, scalar_t>::to(1));
}
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
return THCTensor_(nElement)(state, t);
}
void THCTensor_(cat)(THCState *state, THCTensor *result,
THCTensor *ta, THCTensor *tb, int dimension)
{
THCTensor* inputs[2];
inputs[0] = ta;
inputs[1] = tb;
THCTensor_(catArray)(state, result, inputs, 2, dimension);
}
void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension);
inline void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension)
{
int first_dims = first->dim();
int second_dims = second->dim();
THArgCheck(first_dims == second_dims, 0,
"Tensors must have same number of dimensions: got %d and %d",
first_dims, second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = THCTensor_(size)(state, first, dim);
int64_t second_dim_size = THCTensor_(size)(state, second, dim);
THArgCheck(first_dim_size == second_dim_size, 0,
"Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
}
}
void THCTensor_(catArray)(THCState *state, THCTensor *result,
THCTensor **inputs, int numInputs, int dimension)
{
// previously, size [0] tensors were the only possible empty tensors; thus, it wasn't possible
// to cat empty tensors unless all the other tensors were 1-dimensional, so we allowed these tensors
// to be "skipped". We maintain this behavior for backwards compatibility, but only for this specific
// size (i.e. other empty sizes are not skipped).
// FIXME: warn if this is the case
int i, j, cohortMax;
int64_t offset;
bool hasSkippedInput = false;
THCTensor *notSkippedTensor = NULL; // non-owning reference
auto should_skip = [](THCTensor *t) { return t->is_empty() && t->dim() == 1; };
int nDims = 0;
for (i = 0; i < numInputs; i++)
{
if (should_skip(inputs[i])) {
hasSkippedInput = true;
continue;
}
nDims = inputs[i]->dim();
notSkippedTensor = inputs[i];
}
// If all inputs are empty tensors, return an empty tensor
if (notSkippedTensor == NULL) {
return;
}
THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension);
std::vector<int64_t> size(nDims);
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < numInputs; i++) {
THCTensor *tensor = inputs[i];
if (should_skip(tensor)) {
continue;
}
THCTensor_(check_shape_except_dim)(state, notSkippedTensor, tensor, dimension);
cat_dim_size += THCTensor_(size)(state, tensor, dimension);
}
// Compute the size of the result
for (int dim = 0; dim < nDims; dim++) {
int64_t result_dim_size = THCTensor_(size)(state, notSkippedTensor, dim);
if (dim == dimension) {
result_dim_size = cat_dim_size;
}
size[dim] = result_dim_size;
}
THCTensor_(resize)(state, result, size, {});
// We parallelize the copy if all 6 conditions pass:
//
// 1. There is more than one input tensor
// 2. No empty inputs
// 3. The result tensor is 32-bit indexable
// 4. The number of dimensions is <= 4
// 5. All input tensors are contiguous (output tensor may be non-contig)
// 6. All input tensors can use 32-bit indexing
// 7. All input tensors are on the same device
if (numInputs > 1 &&
!hasSkippedInput &&
result->dim() <= CAT_ARRAY_MAX_INPUT_DIMS &&
THCTensor_canUse32BitIndexMath(state, result) &&
THCTensor_allContiguous(state, inputs, numInputs) &&
THCTensor_all32BitIndexable(state, inputs, numInputs) &&
THCTensor_allSameDevice(state, inputs, numInputs)) {
// First, let's set up our kernel parameters. We start with a raw pointer to the storage
// for the output Tensor.
scalar_t *data = THCTensor_(data)(state, result);
// Kernel Parameter
size_t tensorMetadataSize = sizeof(CatArrInputTensor<scalar_t, unsigned int>) * CAT_ARRAY_BATCH_SIZE;
auto d_inputs = static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(THCudaMalloc(state, tensorMetadataSize));
OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param;
// Next, let's initialize the size, stride arrays for the output Tensor.
for (i = 0; i < nDims; ++i) {
param.outputSize[i] = THCTensor_(size)(state, result, i);
param.outputStride[i] = THCTensor_(stride)(state, result, i);
}
at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream();
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
CatArrayBatchedCopy<scalar_t, unsigned int, DIMS><<<catGrid, applyBlock, 0, stream.stream()>>>(data, d_inputs, param, dimension, param.outputStride[dimension]);
// Now we loop
offset = 0;
for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) {
// Re-allocate stackInputs every iteration to avoid read-after-write hazard
{
auto stackInputs_owner = THCudaHostAlloc(state, tensorMetadataSize);
CatArrInputTensor<scalar_t, unsigned int>* stackInputs = static_cast<CatArrInputTensor<scalar_t, unsigned int>*>(stackInputs_owner.get());
cohortMax = 0;
for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) {
int64_t dimSize = THCTensor_(size)(state, inputs[i+j], dimension);
stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]);
stackInputs[j].offset = offset;
stackInputs[j].dimSize = dimSize;
stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]);
cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements;
// update offset
offset += dimSize;
}
THCudaCheck(cudaMemcpyAsync(
d_inputs,
stackInputs,
j * sizeof(CatArrInputTensor<scalar_t, unsigned int>),
cudaMemcpyHostToDevice,
stream.stream()));
THCudaHostRecord(state, stackInputs);
}
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, which the kernel's internal indexing
// is based on.
dim3 applyBlock = getApplyBlock();
//Get grid where x dim fills half gpu and y dim is number of tensors.
//This will have cating two tensors fill the entire grid, but prevent
//many threads from needlessly load meta data if their sizes is small.
dim3 catGrid;
getCatGrid(state, j, catGrid);
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
THCudaCheck(cudaGetLastError());
}
THCudaFree(state, d_inputs);
#undef HANDLE_CASE
} else {
offset = 0;
for (j = 0; j < numInputs; j++)
{
if (should_skip(inputs[j])) continue;
int64_t dimSize = THCTensor_(size)(state, inputs[j], dimension);
THCTensor *nt = THCTensor_(newWithTensor)(state, result);
THCTensor_(narrow)(state, nt, NULL, dimension, offset, dimSize);
THCTensor_(copy)(state, nt, inputs[j]);
THCTensor_(free)(state, nt);
offset += dimSize;
}
}
}
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self ));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));
using namespace thrust::placeholders;
THCThrustAllocator thrustAlloc(state);
self = THCTensor_(newContiguous)(state, self);
thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));
int num_dim = THCTensor_(nDimension)(state, self);
int num_dim_noscalars = std::max<int>(1, num_dim);
int64_t N = THCTensor_(nElement)(state, self);
// this is a little awkward for scalars because we run thrust to count the number of zeros
// (which are necessary to get the correct size), but thrust just has an array API, so
// we need to basically threat the scalar as a 1-dimensional tensor (array) for
// the counting part.
THCudaLongTensor_resize2d(state, tensor, N, num_dim_noscalars);
tensor = THCudaLongTensor_newContiguous(state, tensor);
thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));
thrust::counting_iterator<int64_t> idxfirst(0);
thrust::counting_iterator<int64_t> idxlast = idxfirst + N;
typedef thrust::device_ptr<int64_t> Iter;
strided_range<Iter> strided_tensor(tensor_data,
tensor_data+N*num_dim_noscalars, num_dim_noscalars);
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
cudaStream_t stream = THCState_getCurrentStream(state);
#endif
strided_range<Iter>::iterator dend = thrust::copy_if(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(stream),
#endif
idxfirst,
idxlast,
self_data,
strided_tensor.begin(),
NonZeroOp<scalar_t>()
);
int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);
if (num_nonzeros > 0 && num_dim > 0) {
int64_t div = 1;
for (int dim = num_dim-1; dim >= 0; dim--) {
strided_range<Iter> stride_dim(tensor_data+dim,
tensor_data+N*num_dim, num_dim);
thrust::transform(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(stream),
#endif
strided_tensor.begin(),
strided_tensor.end(),
stride_dim.begin(),
idx_functor(div, THTensor_(size)(self, dim))
);
div *= THTensor_(size)(self, dim);
}
}
THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);
THCTensor_(free)(state, self);
THCudaLongTensor_free(state, tensor);
THCudaCheck(cudaGetLastError());
}
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimensionLegacyNoScalars)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
if (size > 0) {
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(min((int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyFromDiagonal<scalar_t><<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
}
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THTensor_strideLegacyNoScalars(src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
if (size > 0) {
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(min((int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyToDiagonal<scalar_t><<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
THArgCheck(n > 0, 1, "invalid argument");
if(m <= 0)
m = n;
THCTensor_(resize2d)(state, self_, n, m);
THCTensor_(zero)(state, self_);
int64_t sz = THMin(n, m);
int64_t stride = THCTensor_(stride)(state, self_, 0) +
THCTensor_(stride)(state, self_, 1);
THCTensor *diag = THCTensor_(newWithStorage1d)(state, THTensor_getStoragePtr(self_),
self_->storage_offset(), sz, stride);
THCTensor_(fill)(state, diag, ScalarConvert<int, scalar_t>::to(1));
THCTensor_(free)(state, diag);
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((THTensor_nDimensionLegacyAll(src_) == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#endif
|
312ca19af7c5ff68cb0d2729b98d658408ea20e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/shard_index_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_info.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void ShardIndexInner(const T* in_data, T* out_data,
const int64_t numel, const int index_num,
const int nshards, const int shard_id,
const int ignore_value) {
int shard_size = index_num / nshards;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numel) {
assert(in_data[idx] >= 0 && in_data[idx] < index_num);
if (in_data[idx] / shard_size == shard_id) {
out_data[idx] = in_data[idx] % shard_size;
} else {
out_data[idx] = ignore_value;
}
}
}
using LoDTensor = framework::LoDTensor;
template <typename T>
class ShardIndexCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in = context.Input<LoDTensor>("X");
auto* out = context.Output<LoDTensor>("Out");
int index_num = context.Attr<int>("index_num");
int nshards = context.Attr<int>("nshards");
int shard_id = context.Attr<int>("shard_id");
int ignore_value = context.Attr<int>("ignore_value");
PADDLE_ENFORCE_GT(index_num, 0);
PADDLE_ENFORCE_GT(nshards, 0);
PADDLE_ENFORCE(shard_id >= 0 && shard_id < nshards,
"shard_id(%d) is not in range [0, %d)", shard_id, nshards);
out->Resize(in->dims());
out->set_lod(in->lod());
auto* in_data = in->data<T>();
auto* out_data = out->mutable_data<T>(context.GetPlace());
int64_t numel = in->numel();
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
hipLaunchKernelGGL(( ShardIndexInner), (numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
in_data, out_data, numel, index_num, nshards, shard_id, ignore_value);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(shard_index, ops::ShardIndexCUDAKernel<int>,
ops::ShardIndexCUDAKernel<int64_t>);
| 312ca19af7c5ff68cb0d2729b98d658408ea20e8.cu | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/shard_index_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_info.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void ShardIndexInner(const T* in_data, T* out_data,
const int64_t numel, const int index_num,
const int nshards, const int shard_id,
const int ignore_value) {
int shard_size = index_num / nshards;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numel) {
assert(in_data[idx] >= 0 && in_data[idx] < index_num);
if (in_data[idx] / shard_size == shard_id) {
out_data[idx] = in_data[idx] % shard_size;
} else {
out_data[idx] = ignore_value;
}
}
}
using LoDTensor = framework::LoDTensor;
template <typename T>
class ShardIndexCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in = context.Input<LoDTensor>("X");
auto* out = context.Output<LoDTensor>("Out");
int index_num = context.Attr<int>("index_num");
int nshards = context.Attr<int>("nshards");
int shard_id = context.Attr<int>("shard_id");
int ignore_value = context.Attr<int>("ignore_value");
PADDLE_ENFORCE_GT(index_num, 0);
PADDLE_ENFORCE_GT(nshards, 0);
PADDLE_ENFORCE(shard_id >= 0 && shard_id < nshards,
"shard_id(%d) is not in range [0, %d)", shard_id, nshards);
out->Resize(in->dims());
out->set_lod(in->lod());
auto* in_data = in->data<T>();
auto* out_data = out->mutable_data<T>(context.GetPlace());
int64_t numel = in->numel();
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
ShardIndexInner<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
in_data, out_data, numel, index_num, nshards, shard_id, ignore_value);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(shard_index, ops::ShardIndexCUDAKernel<int>,
ops::ShardIndexCUDAKernel<int64_t>);
|
409f3b5ed1598c0624ebf098c897f36080b6fbad.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char tanh_name[] = "tanh_impl";
#endif
void tanh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto tanh_string = jiterator_stringify(
template <typename T> T tanh_impl(T a) { return std::tanh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
jitted_gpu_kernel<
/*name=*/tanh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, tanh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::tanh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"tanh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tanh(a);
});
});
}
}
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_cuda);
} // namespace at::native
| 409f3b5ed1598c0624ebf098c897f36080b6fbad.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char tanh_name[] = "tanh_impl";
#endif
void tanh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto tanh_string = jiterator_stringify(
template <typename T> T tanh_impl(T a) { return std::tanh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
jitted_gpu_kernel<
/*name=*/tanh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, tanh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::tanh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"tanh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tanh(a);
});
});
}
}
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_cuda);
} // namespace at::native
|
9eabe37817b559044b8f1321587659682ac8ee9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/cos_sim_functor.h"
#include "paddle/fluid/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
__global__ void CosSimDyKernel(const T* x_norm, const T* y_norm, const T* x,
const T* y, const T* z, const T* dz,
const size_t rows, const size_t cols, T* dy) {
int grid_size = blockDim.x * gridDim.x;
T y_norm_data = y_norm[0];
for (int row_id = blockIdx.x * blockDim.x + threadIdx.x; row_id < rows;
row_id += grid_size) {
T xy_norm_prod = x_norm[row_id] * y_norm_data;
T dz_data = dz[row_id];
T z_data = z[row_id];
const T* x_data = x + cols * row_id;
T reciprocal_xy_norm_prod = 1 / xy_norm_prod;
T y_norm_square = y_norm_data * y_norm_data;
T reciprocal_y_norm_square = 1 / y_norm_square;
for (size_t i = 0; i < cols; ++i) {
T dy_data = dz_data * (x_data[i] * reciprocal_xy_norm_prod -
z_data * y[i] * reciprocal_y_norm_square);
platform::CudaAtomicAdd(dy + i, dy_data);
}
}
}
template <typename T>
struct CosSimDyFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx, const T* x_norm,
const T* y_norm, const T* x, const T* y, const T* z,
const T* dz, const size_t rows, const size_t cols,
T* dy) const {
const int block_size = 512;
dim3 threads(block_size, 1);
dim3 grid(1, (rows + block_size - 1) / block_size);
hipLaunchKernelGGL(( CosSimDyKernel<T>), dim3(grid), dim3(threads), 0, ctx.stream(),
x_norm, y_norm, x, y, z, dz, rows, cols, dy);
}
};
template struct CosSimDyFunctor<platform::CUDADeviceContext, float>;
template struct CosSimDyFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| 9eabe37817b559044b8f1321587659682ac8ee9f.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/cos_sim_functor.h"
#include "paddle/fluid/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
__global__ void CosSimDyKernel(const T* x_norm, const T* y_norm, const T* x,
const T* y, const T* z, const T* dz,
const size_t rows, const size_t cols, T* dy) {
int grid_size = blockDim.x * gridDim.x;
T y_norm_data = y_norm[0];
for (int row_id = blockIdx.x * blockDim.x + threadIdx.x; row_id < rows;
row_id += grid_size) {
T xy_norm_prod = x_norm[row_id] * y_norm_data;
T dz_data = dz[row_id];
T z_data = z[row_id];
const T* x_data = x + cols * row_id;
T reciprocal_xy_norm_prod = 1 / xy_norm_prod;
T y_norm_square = y_norm_data * y_norm_data;
T reciprocal_y_norm_square = 1 / y_norm_square;
for (size_t i = 0; i < cols; ++i) {
T dy_data = dz_data * (x_data[i] * reciprocal_xy_norm_prod -
z_data * y[i] * reciprocal_y_norm_square);
platform::CudaAtomicAdd(dy + i, dy_data);
}
}
}
template <typename T>
struct CosSimDyFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx, const T* x_norm,
const T* y_norm, const T* x, const T* y, const T* z,
const T* dz, const size_t rows, const size_t cols,
T* dy) const {
const int block_size = 512;
dim3 threads(block_size, 1);
dim3 grid(1, (rows + block_size - 1) / block_size);
CosSimDyKernel<T><<<grid, threads, 0, ctx.stream()>>>(
x_norm, y_norm, x, y, z, dz, rows, cols, dy);
}
};
template struct CosSimDyFunctor<platform::CUDADeviceContext, float>;
template struct CosSimDyFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
228613a47aecc8864caed85cc9234f7a42c876d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/fast_rcnn_layers.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe | 228613a47aecc8864caed85cc9234f7a42c876d4.cu | // ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/fast_rcnn_layers.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe |
ea25f259f0507de6e5892f4f9d165b338d02bcd1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Cristhian Alberto Gonzales Castillo <cristhian@blazingdb.com>
* Copyright 2018 Alexander Ocsa <alexander@blazingdb.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_ptr.h>
#include <thrust/find.h>
#include <thrust/execution_policy.h>
#include <hipcub/hipcub.hpp>
#include <cudf/copying.hpp>
#include <cudf/replace.hpp>
#include <cudf/detail/replace.hpp>
#include <rmm/rmm.h>
#include <cudf/types.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/copying.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/strings/replace.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/detail/copy.hpp>
#include <cudf/null_mask.hpp>
namespace { //anonymous
static constexpr int BLOCK_SIZE = 256;
// return the new_value for output column at index `idx`
template<class T, bool replacement_has_nulls>
__device__ auto get_new_value(cudf::size_type idx,
const T* __restrict__ input_data,
const T* __restrict__ values_to_replace_begin,
const T* __restrict__ values_to_replace_end,
const T* __restrict__ d_replacement_values,
cudf::bitmask_type const * __restrict__ replacement_valid)
{
auto found_ptr = thrust::find(thrust::seq, values_to_replace_begin,
values_to_replace_end,
input_data[idx]);
T new_value { 0 };
bool output_is_valid { true };
if (found_ptr != values_to_replace_end) {
auto d = thrust::distance(values_to_replace_begin, found_ptr);
new_value = d_replacement_values[d];
if (replacement_has_nulls) {
output_is_valid = cudf::bit_is_set(replacement_valid, d);
}
} else {
new_value = input_data[idx];
}
return thrust::make_pair(new_value, output_is_valid);
}
__device__ int get_new_string_value(cudf::size_type idx,
cudf::column_device_view& input,
cudf::column_device_view& values_to_replace,
cudf::column_device_view& replacement_values) {
cudf::string_view input_string = input.element<cudf::string_view>(idx);
int match = -1;
for (int i = 0; i < values_to_replace.size(); i++) {
cudf::string_view value_string = values_to_replace.element<cudf::string_view>(i);
if (input_string == value_string){
match = i;
break;
}
}
return match;
}
/*
* Kernel which does the first pass of strings replace. It computes the output null_mask, null_count,
* and the offsets.
*
* @param input The input column to replace strings in.
* @param values_to_replace The string values to replace.
* @param replacement The replacement values.
* @param offsets The column which will contain the offsets of the new string column
* @param indices Temporary column used to store the replacement indices
* @param output_valid The output null_mask
* @param output_valid_count The output valid count
*/
template<bool input_has_nulls, bool replacement_has_nulls>
__global__ void replace_strings_first_pass(cudf::column_device_view input,
cudf::column_device_view values_to_replace,
cudf::column_device_view replacement,
cudf::mutable_column_device_view offsets,
cudf::mutable_column_device_view indices,
cudf::bitmask_type * output_valid,
cudf::size_type* __restrict__ output_valid_count) {
cudf::size_type nrows = input.size();
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id { threadIdx.x % cudf::experimental::detail::warp_size };
uint32_t valid_sum { 0 };
while (i < nrows) {
bool input_is_valid = true;
if (input_has_nulls)
input_is_valid = input.is_valid_nocheck(i);
bool output_is_valid = input_is_valid;
if (input_is_valid){
int result = get_new_string_value(i, input, values_to_replace, replacement);
cudf::string_view output = (result == -1) ? input.element<cudf::string_view>(i) : replacement.element<cudf::string_view>(result);
offsets.data<cudf::size_type>()[i] = output.size_bytes();
indices.data<cudf::size_type>()[i] = result;
if (replacement_has_nulls && result != -1){
output_is_valid = replacement.is_valid_nocheck(result);
}
} else {
offsets.data<cudf::size_type>()[i] = 0;
indices.data<cudf::size_type>()[i] = -1;
}
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output_valid[cudf::word_index(i)] = bitmask;
valid_sum += __popc(bitmask);
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = cudf::experimental::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) {
atomicAdd(output_valid_count, block_valid_count);
}
}
/*
* Kernel which does the second pass of strings replace. It copies the string data needed from input and
* replacement into the new strings column chars column.
* @param input The input column
* @param replacement The replacement values
* @param offsets The offsets column of the new strings column
* @param strings The chars column of the new strings column
* @param indices Temporary column used to store the replacement indices.
*/
template<bool input_has_nulls, bool replacement_has_nulls>
__global__ void replace_strings_second_pass(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::mutable_column_device_view offsets,
cudf::mutable_column_device_view strings,
cudf::mutable_column_device_view indices) {
cudf::size_type nrows = input.size();
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < nrows) {
bool output_is_valid = true;
bool input_is_valid = true;
cudf::size_type idx = indices.element<cudf::size_type>(i);
if (input_has_nulls){
input_is_valid = input.is_valid_nocheck(i);
output_is_valid = input_is_valid;
}
if (replacement_has_nulls && idx != -1) {
output_is_valid = replacement.is_valid_nocheck(idx);
}
if (output_is_valid) {
cudf::string_view output = (idx == -1) ? input.element<cudf::string_view>(i) : replacement.element<cudf::string_view>(idx);
std::memcpy(strings.data<char>() + offsets.data<cudf::size_type>()[i],
output.data(),
output.size_bytes());
}
i += blockDim.x * gridDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Kernel that replaces elements from `output_data` given the following
* rule: replace all `values_to_replace[i]` in [values_to_replace_begin`,
* `values_to_replace_end`) present in `output_data` with `d_replacement_values[i]`.
*
* @tparam input_has_nulls `true` if output column has valid mask, `false` otherwise
* @tparam replacement_has_nulls `true` if replacement_values column has valid mask, `false` otherwise
* The input_has_nulls and replacement_has_nulls template parameters allows us to specialize
* this kernel for the different scenario for performance without writing different kernel.
*
* @param[in] input_data Device array with the data to be modified
* @param[in] input_valid Valid mask associated with input_data
* @param[out] output_data Device array to store the data from input_data
* @param[out] output_valid Valid mask associated with output_data
* @param[out] output_valid_count #valid in output column
* @param[in] nrows # rows in `output_data`
* @param[in] values_to_replace_begin Device pointer to the beginning of the sequence
* of old values to be replaced
* @param[in] values_to_replace_end Device pointer to the end of the sequence
* of old values to be replaced
* @param[in] d_replacement_values Device array with the new values
* @param[in] replacement_valid Valid mask associated with d_replacement_values
*
* @returns
*/
/* ----------------------------------------------------------------------------*/
template<class T,
bool input_has_nulls, bool replacement_has_nulls>
__global__
void replace_kernel(cudf::column_device_view input,
cudf::mutable_column_device_view output,
cudf::size_type * __restrict__ output_valid_count,
cudf::size_type nrows,
cudf::column_device_view values_to_replace,
cudf::column_device_view replacement)
{
T * __restrict__ output_data = output.data<T>();
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id { threadIdx.x % cudf::experimental::detail::warp_size };
uint32_t valid_sum { 0 };
while (i < nrows) {
bool output_is_valid{true};
bool input_is_valid{true};
if (input_has_nulls) {
input_is_valid = input.is_valid_nocheck(i);
output_is_valid = input_is_valid;
}
if (input_is_valid)
thrust::tie(output_data[i], output_is_valid) =
get_new_value<T, replacement_has_nulls>(i,
input.data<T>(),
values_to_replace.data<T>(),
values_to_replace.data<T>() + values_to_replace.size(),
replacement.data<T>(),
replacement.null_mask());
/* output valid counts calculations*/
if (input_has_nulls or replacement_has_nulls) {
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output.set_mask_word(cudf::word_index(i), bitmask);
valid_sum += __popc(bitmask);
}
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
if (input_has_nulls or replacement_has_nulls) {
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = cudf::experimental::detail::single_lane_block_sum_reduce<
BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) {
atomicAdd(output_valid_count, block_valid_count);
}
}
}
/*
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_kernel` with the appropriate data types.
*/
struct replace_kernel_forwarder {
template<typename col_type,
std::enable_if_t<cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::mr::device_memory_resource* mr,
hipStream_t stream = 0)
{
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type *valid_count = valid_counter.data();
auto replace = replace_kernel<col_type, true, true>;
if (input_col.has_nulls()) {
if (replacement_values.has_nulls()) {
replace = replace_kernel<col_type, true, true>;
} else {
replace = replace_kernel<col_type, true, false>;
}
} else {
if (replacement_values.has_nulls()) {
replace = replace_kernel<col_type, false, true>;
} else {
replace = replace_kernel<col_type, false, false>;
}
}
std::unique_ptr<cudf::column> output;
if (input_col.has_nulls() || replacement_values.has_nulls()) {
output = cudf::experimental::detail::allocate_like(input_col,
input_col.size(),
cudf::experimental::mask_allocation_policy::ALWAYS,
mr,
stream);
}
else {
output = cudf::experimental::detail::allocate_like(input_col,
input_col.size(),
cudf::experimental::mask_allocation_policy::NEVER,
mr,
stream);
}
cudf::mutable_column_view outputView = output->mutable_view();
cudf::experimental::detail::grid_1d grid { outputView.size(), BLOCK_SIZE, 1 };
auto device_in = cudf::column_device_view::create(input_col);
auto device_out = cudf::mutable_column_device_view::create(outputView);
auto device_values_to_replace = cudf::column_device_view::create(values_to_replace);
auto device_replacement_values = cudf::column_device_view::create(replacement_values);
hipLaunchKernelGGL(( replace), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream, *device_in,
*device_out,
valid_count,
outputView.size(),
*device_values_to_replace,
*device_replacement_values);
if (outputView.nullable()) {
output->set_null_count(output->size() - valid_counter.value(stream));
}
return output;
}
template<typename col_type,
std::enable_if_t<not cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::mr::device_memory_resource* mr,
hipStream_t stream = 0) {
CUDF_FAIL("No specialization exists for this type");
}
};
template<>
std::unique_ptr<cudf::column> replace_kernel_forwarder::operator()<cudf::string_view>(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::mr::device_memory_resource* mr,
hipStream_t stream){
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type *valid_count = valid_counter.data();
auto replace_first = replace_strings_first_pass<true, false>;
auto replace_second = replace_strings_second_pass<true, false>;
if (input_col.has_nulls()) {
if (replacement_values.has_nulls()) {
replace_first = replace_strings_first_pass<true, true>;
replace_second = replace_strings_second_pass<true,true>;
}
} else {
if (replacement_values.has_nulls()) {
replace_first = replace_strings_first_pass<false, true>;
replace_second = replace_strings_second_pass<false, true>;
} else {
replace_first = replace_strings_first_pass<false, false>;
replace_second = replace_strings_second_pass<false, false>;
}
}
// Create new offsets column to use in kernel
std::unique_ptr<cudf::column> sizes = cudf::make_numeric_column(cudf::data_type(cudf::type_id::INT32),
input_col.size(),
cudf::mask_state::UNALLOCATED,
stream,
mr);
std::unique_ptr<cudf::column> indices = cudf::make_numeric_column(cudf::data_type(cudf::type_id::INT32),
input_col.size(),
cudf::mask_state::UNALLOCATED,
stream);
auto sizes_view = sizes->mutable_view();
auto indices_view = indices->mutable_view();
auto device_in = cudf::column_device_view::create(input_col);
auto device_values_to_replace = cudf::column_device_view::create(values_to_replace);
auto device_replacement = cudf::column_device_view::create(replacement_values);
auto device_sizes = cudf::mutable_column_device_view::create(sizes_view);
auto device_indices = cudf::mutable_column_device_view::create(indices_view);
rmm::device_buffer valid_bits = cudf::create_null_mask(input_col.size(),
cudf::mask_state::UNINITIALIZED,
stream,
mr);
// Call first pass kernel to get sizes in offsets
cudf::experimental::detail::grid_1d grid { input_col.size(), BLOCK_SIZE, 1 };
hipLaunchKernelGGL(( replace_first), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream, *device_in,
*device_values_to_replace,
*device_replacement,
*device_sizes,
*device_indices,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
valid_count);
std::unique_ptr<cudf::column> offsets = cudf::strings::detail::make_offsets_child_column(sizes_view.begin<int32_t>(),
sizes_view.end<int32_t>(),
mr,
stream);
auto offsets_view = offsets->mutable_view();
auto device_offsets = cudf::mutable_column_device_view::create(offsets_view);
int32_t size;
CUDA_TRY(hipMemcpyAsync(&size, offsets_view.end<int32_t>() - 1, sizeof(int32_t), hipMemcpyDefault, stream));
// Allocate chars array and output null mask
cudf::size_type null_count = input_col.size() - valid_counter.value(stream);
std::unique_ptr<cudf::column> output_chars =
cudf::strings::detail::create_chars_child_column(input_col.size(), null_count, size, mr, stream);
auto output_chars_view = output_chars->mutable_view();
auto device_chars = cudf::mutable_column_device_view::create(output_chars_view);
hipLaunchKernelGGL(( replace_second), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream, *device_in,
*device_replacement,
*device_offsets,
*device_chars,
*device_indices);
std::unique_ptr<cudf::column> output = cudf::make_strings_column(input_col.size(),
std::move(offsets),
std::move(output_chars),
null_count,
std::move(valid_bits),
stream,
mr);
return output;
}
} //end anonymous namespace
namespace cudf {
namespace detail {
std::unique_ptr<cudf::column> find_and_replace_all(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::mr::device_memory_resource* mr,
hipStream_t stream) {
CUDF_EXPECTS(values_to_replace.size() == replacement_values.size(),
"values_to_replace and replacement_values size mismatch.");
CUDF_EXPECTS(input_col.type() == values_to_replace.type() &&
input_col.type() == replacement_values.type(),
"Columns type mismatch");
CUDF_EXPECTS(values_to_replace.has_nulls() == false,
"values_to_replace must not have nulls");
if (0 == input_col.size() || 0 == values_to_replace.size() || 0 == replacement_values.size()) {
return std::make_unique < cudf::column > (input_col);
}
return cudf::experimental::type_dispatcher(input_col.type(),
replace_kernel_forwarder { },
input_col,
values_to_replace,
replacement_values,
mr,
stream);
}
} //end details
namespace experimental {
/* --------------------------------------------------------------------------*/
/*
* @brief Replace elements from `input_col` according to the mapping `values_to_replace` to
* `replacement_values`, that is, replace all `values_to_replace[i]` present in `input_col`
* with `replacement_values[i]`.
*
* @param[in] col gdf_column with the data to be modified
* @param[in] values_to_replace gdf_column with the old values to be replaced
* @param[in] replacement_values gdf_column with the new values
*
* @returns output gdf_column with the modified data
*/
/* ----------------------------------------------------------------------------*/
std::unique_ptr<cudf::column> find_and_replace_all(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::mr::device_memory_resource* mr) {
return cudf::detail::find_and_replace_all(input_col, values_to_replace, replacement_values, mr, 0);
}
} //end experimental
} //end cudf
namespace { //anonymous
/*
 * @brief Two-phase kernel replacing null rows of a strings column with the
 * corresponding row of `replacement`.
 *
 * phase == 0: writes each output string's byte length into `offsets`, fills
 * `output_valid` one bitmask word per warp via ballot, and accumulates the
 * block's valid count into `valid_counter`. The caller passes `chars = nullptr`
 * in this phase.
 * phase == 1: copies each output string's bytes into `chars` using `offsets`,
 * which by then holds the prefix-summed character offsets.
 *
 * @tparam phase Which pass to run (0 = sizes/validity, 1 = char copy)
 * @tparam replacement_has_nulls `true` if `replacement` may contain nulls
 */
template<int phase, bool replacement_has_nulls>
__global__
void replace_nulls_strings(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::bitmask_type *output_valid,
cudf::size_type* offsets,
char* chars,
cudf::size_type* valid_counter){
cudf::size_type nrows = input.size();
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
// Grid-stride loop; the participant mask is recomputed every iteration so
// only in-bounds threads take part in the warp ballots.
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id { threadIdx.x % cudf::experimental::detail::warp_size };
uint32_t valid_sum { 0 };
while (i < nrows) {
bool input_is_valid = input.is_valid_nocheck(i);
bool output_is_valid = true;
// Output is null only when both input and replacement rows are null.
if(replacement_has_nulls && !input_is_valid) {
output_is_valid = replacement.is_valid_nocheck(i);
}
cudf::string_view out;
if (input_is_valid){
out = input.element<cudf::string_view>(i);
} else if (output_is_valid) {
out = replacement.element<cudf::string_view>(i);
}
bool nonzero_output = (input_is_valid || output_is_valid);
if (phase == 0) {
offsets[i] = nonzero_output ? out.size_bytes() : 0;
// One lane per warp writes the whole 32-bit mask word and counts bits.
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output_valid[cudf::word_index(i)] = bitmask;
valid_sum += __popc(bitmask);
}
} else if (phase == 1) {
if (nonzero_output)
std::memcpy(chars + offsets[i], out.data(), out.size_bytes());
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = cudf::experimental::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) {
atomicAdd(valid_counter, block_valid_count);
}
}
/*
 * @brief Kernel that copies `input` into `output`, substituting the
 * corresponding row of `replacement` wherever `input` is null.
 *
 * The output valid mask and valid count are only produced when
 * `replacement_has_nulls` is true; otherwise every output row is valid and
 * the caller allocates the output without a mask.
 *
 * @tparam Type Fixed-width element type of the columns
 * @tparam replacement_has_nulls `true` if `replacement` may contain nulls
 */
template<typename Type, bool replacement_has_nulls>
__global__
void replace_nulls(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::mutable_column_device_view output,
cudf::size_type* output_valid_count) {
cudf::size_type nrows = input.size();
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
// Grid-stride loop with a per-iteration ballot so only in-bounds threads
// participate in the warp votes.
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id { threadIdx.x % cudf::experimental::detail::warp_size };
uint32_t valid_sum { 0 };
while (i < nrows) {
bool input_is_valid = input.is_valid_nocheck(i);
bool output_is_valid = true;
if (input_is_valid) {
output.data<Type>()[i] = input.element<Type>(i);
}
else {
if (replacement_has_nulls) {
output_is_valid = replacement.is_valid_nocheck(i);
}
output.data<Type>()[i] = replacement.element<Type>(i);
}
/* output valid counts calculations*/
if (replacement_has_nulls) {
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output.set_mask_word(cudf::word_index(i), bitmask);
valid_sum += __popc(bitmask);
}
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
if (replacement_has_nulls) {
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = cudf::experimental::detail::single_lane_block_sum_reduce<
BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) {
atomicAdd(output_valid_count, block_valid_count);
}
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
/*
 * @brief Functor called by the `type_dispatcher` in order to invoke and
 * instantiate `replace_nulls` with the appropriate fixed-width data type.
 * Non-fixed-width types fail (strings are handled by the specialization below).
 */
struct replace_nulls_column_kernel_forwarder {
template<typename col_type,
std::enable_if_t<cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::mr::device_memory_resource* mr,
hipStream_t stream = 0)
{
cudf::size_type nrows = input.size();
cudf::experimental::detail::grid_1d grid { nrows, BLOCK_SIZE };
// Only allocate an output mask when the replacement can itself be null;
// otherwise every output row is guaranteed valid.
std::unique_ptr<cudf::column> output;
if (replacement.has_nulls())
output = cudf::experimental::detail::allocate_like(input,
input.size(),
cudf::experimental::mask_allocation_policy::ALWAYS,
mr,
stream);
else
output = cudf::experimental::detail::allocate_like(input,
input.size(),
cudf::experimental::mask_allocation_policy::NEVER,
mr,
stream);
auto output_view = output->mutable_view();
// Pick the kernel instantiation matching whether a mask must be written.
auto replace = replace_nulls<col_type, false>;
if (output_view.nullable())
replace = replace_nulls<col_type, true>;
auto device_in = cudf::column_device_view::create(input);
auto device_out = cudf::mutable_column_device_view::create(output_view);
auto device_replacement = cudf::column_device_view::create(replacement);
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type *valid_count = valid_counter.data();
hipLaunchKernelGGL(( replace), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream, *device_in,
*device_replacement,
*device_out,
valid_count);
// valid_counter.value(stream) synchronizes `stream` before reading.
if (output_view.nullable()) {
output->set_null_count(output->size() - valid_counter.value(stream));
}
return output;
}
template<typename col_type,
std::enable_if_t<not cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::mr::device_memory_resource* mr,
hipStream_t stream = 0) {
CUDF_FAIL("No specialization exists for the given type.");
}
};
/*
 * @brief Strings specialization: builds the output strings column in two
 * kernel passes (sizes/validity, then character copy) plus an offsets scan.
 */
template<>
std::unique_ptr<cudf::column> replace_nulls_column_kernel_forwarder::operator ()<cudf::string_view>(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::mr::device_memory_resource* mr,
hipStream_t stream) {
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type *valid_count = valid_counter.data();
// Select kernel instantiations once, based on whether replacement has nulls.
auto replace_first = replace_nulls_strings<0, false>;
auto replace_second = replace_nulls_strings<1, false>;
if (replacement.has_nulls()){
replace_first = replace_nulls_strings<0, true>;
replace_second = replace_nulls_strings<1, true>;
}
// Create new offsets column to use in kernel
std::unique_ptr<cudf::column> sizes = cudf::make_numeric_column(cudf::data_type(cudf::type_id::INT32),
input.size(),
cudf::mask_state::UNALLOCATED,
stream);
auto sizes_view = sizes->mutable_view();
auto device_in = cudf::column_device_view::create(input);
auto device_replacement = cudf::column_device_view::create(replacement);
rmm::device_buffer valid_bits = cudf::create_null_mask(input.size(),
cudf::mask_state::UNINITIALIZED,
stream,
mr);
// Call first pass kernel to get sizes in offsets
cudf::experimental::detail::grid_1d grid { input.size(), BLOCK_SIZE, 1 };
hipLaunchKernelGGL(( replace_first), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream, *device_in,
*device_replacement,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
sizes_view.begin<cudf::size_type>(),
nullptr,
valid_count);
// Exclusive scan of the sizes produces the offsets child column.
std::unique_ptr<cudf::column> offsets = cudf::strings::detail::make_offsets_child_column(sizes_view.begin<int32_t>(),
sizes_view.end<int32_t>(),
mr,
stream);
auto offsets_view = offsets->mutable_view();
int32_t size;
// Async copy of the total char count; safe to read after the stream sync
// performed by valid_counter.value(stream) on the next line.
CUDA_TRY(hipMemcpyAsync(&size, offsets_view.end<int32_t>() - 1, sizeof(int32_t), hipMemcpyDefault, stream));
// Allocate chars array and output null mask
cudf::size_type null_count = input.size() - valid_counter.value(stream);
std::unique_ptr<cudf::column> output_chars =
cudf::strings::detail::create_chars_child_column(input.size(), null_count, size, mr, stream);
auto output_chars_view = output_chars->mutable_view();
hipLaunchKernelGGL(( replace_second), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream, *device_in,
*device_replacement,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
offsets_view.begin<cudf::size_type>(),
output_chars_view.data<char>(),
valid_count);
std::unique_ptr<cudf::column> output = cudf::make_strings_column(input.size(),
std::move(offsets),
std::move(output_chars),
input.size() - valid_counter.value(stream),
std::move(valid_bits),
stream,
mr);
return output;
}
/**
 * @brief Device functor returning the input value when valid, otherwise the
 * replacement value read through `value_it`.
 */
template<typename T>
struct replace_nulls_functor {
  T* value_it;  // device pointer to the single replacement value

  replace_nulls_functor(T* replacement_ptr) : value_it(replacement_ptr) {}

  __device__ T operator()(T input, bool is_valid)
  {
    if (is_valid) { return input; }
    return *value_it;
  }
};
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
/*
 * @brief Functor called by the `type_dispatcher` in order to invoke and
 * instantiate the scalar-replacement path with the appropriate fixed-width
 * data type, via a thrust::transform over the input and its validity.
 */
struct replace_nulls_scalar_kernel_forwarder {
template<typename col_type,
std::enable_if_t<cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::mr::device_memory_resource* mr,
hipStream_t stream = 0)
{
// Output needs no mask: every row becomes valid (callers only reach here
// when the replacement scalar is valid).
std::unique_ptr<cudf::column> output = cudf::experimental::allocate_like(input,
cudf::experimental::mask_allocation_policy::NEVER,
mr);
auto output_view = output->mutable_view();
using ScalarType = cudf::experimental::scalar_type_t<col_type>;
// NOTE(review): `auto` drops the reference here, so `s1` copies the scalar;
// `auto const&` would avoid the copy — confirm scalar copy semantics.
auto s1 = static_cast<ScalarType const&>(replacement);
auto device_in = cudf::column_device_view::create(input);
replace_nulls_functor<col_type> func(s1.data());
thrust::transform(rmm::exec_policy(stream)->on(stream),
input.data<col_type>(),
input.data<col_type>() + input.size(),
cudf::experimental::detail::make_validity_iterator(*device_in),
output_view.data<col_type>(),
func);
return output;
}
template<typename col_type,
std::enable_if_t<not cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::mr::device_memory_resource* mr,
hipStream_t stream = 0) {
CUDF_FAIL("No specialization exists for the given type.");
}
};
/*
 * @brief Strings specialization: delegates to the strings API.
 * NOTE(review): the `stream` parameter is not forwarded —
 * cudf::strings::replace_nulls runs on its own/default stream; confirm
 * whether a stream-aware detail API is available in this version.
 */
template<>
std::unique_ptr<cudf::column> replace_nulls_scalar_kernel_forwarder::operator ()<cudf::string_view>(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::mr::device_memory_resource* mr,
hipStream_t stream) {
cudf::strings_column_view input_s(input);
const cudf::string_scalar& repl = static_cast<const cudf::string_scalar&>(replacement);
return cudf::strings::replace_nulls(input_s, repl, mr);
}
} //end anonymous namespace
namespace cudf {
namespace detail {
/**
 * @brief Replaces each null row of `input` with the corresponding row of
 * `replacement`.
 *
 * @param input Column whose nulls are to be replaced
 * @param replacement Column supplying values for the null rows (same type and
 * size as `input`)
 * @param mr Memory resource for the returned column
 * @param stream Stream on which all work is ordered
 * @returns New column with nulls replaced
 * @throws cudf::logic_error on type or size mismatch
 */
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
                                            cudf::column_view const& replacement,
                                            rmm::mr::device_memory_resource* mr,
                                            hipStream_t stream)
{
  CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
  CUDF_EXPECTS(replacement.size() == input.size(), "Column size mismatch");

  if (input.size() == 0) {
    return cudf::experimental::empty_like(input);
  }

  if (!input.has_nulls()) {
    // Nothing to replace: return a copy. Pass stream/mr so the copy is
    // stream-ordered and allocated from the caller's resource, consistent
    // with the scalar overload below (previously both were defaulted).
    return std::make_unique<cudf::column>(input, stream, mr);
  }

  return cudf::experimental::type_dispatcher(input.type(),
                                             replace_nulls_column_kernel_forwarder{},
                                             input,
                                             replacement,
                                             mr,
                                             stream);
}
/**
 * @brief Replaces each null row of `input` with the value of the scalar
 * `replacement`.
 *
 * @param input Column whose nulls are to be replaced
 * @param replacement Scalar supplying the value for the null rows
 * @param mr Memory resource for the returned column
 * @param stream Stream on which all work is ordered
 * @returns New column with nulls replaced; a plain copy when there is nothing
 * to replace or the replacement itself is null
 * @throws cudf::logic_error on type mismatch
 */
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
                                            cudf::scalar const& replacement,
                                            rmm::mr::device_memory_resource* mr,
                                            hipStream_t stream)
{
  // Validate the contract before any early return, matching the column
  // overload above. Previously a type mismatch went unreported whenever the
  // input had no nulls or the replacement scalar was null.
  CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");

  if (input.size() == 0) {
    return cudf::experimental::empty_like(input);
  }
  if (!input.has_nulls() || !replacement.is_valid()) {
    return std::make_unique<cudf::column>(input, stream, mr);
  }
  return cudf::experimental::type_dispatcher(input.type(),
                                             replace_nulls_scalar_kernel_forwarder{},
                                             input,
                                             replacement,
                                             mr,
                                             stream);
}
} //namespace detail
namespace experimental {
// Public (experimental) entry point: column-based null replacement on the
// default stream (0).
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::mr::device_memory_resource* mr)
{
return cudf::detail::replace_nulls(input, replacement, mr, 0);
}
// Public (experimental) entry point: scalar-based null replacement on the
// default stream (0).
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::mr::device_memory_resource* mr)
{
return cudf::detail::replace_nulls(input, replacement, mr, 0);
}
} //end experimental
} //namespace cudf
namespace { // anonymous
// Element-wise transform: any NaN is mapped to the canonical positive quiet
// NaN, -0.0 is mapped to +0.0 (0.0 == -0.0 compares equal), everything else
// passes through unchanged.
template<typename T>
struct normalize_nans_and_zeros_lambda {
  cudf::column_device_view in;  // device view of the column being read

  T __device__ operator()(cudf::size_type i)
  {
    T const value = in.element<T>(i);
    if (isnan(value)) {
      return std::numeric_limits<T>::quiet_NaN();
    }
    return (value == T{0.0}) ? T{0.0} : value;
  }
};
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `normalize_nans_and_zeros` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
struct normalize_nans_and_zeros_kernel_forwarder {
// floats and doubles. what we really care about.
template<typename T, std::enable_if_t<std::is_floating_point<T>::value>* = nullptr>
void operator()(cudf::column_device_view in,
cudf::mutable_column_device_view out,
hipStream_t stream)
{
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(in.size()),
out.head<T>(),
normalize_nans_and_zeros_lambda<T> { in });
}
// if we get in here for anything but a float or double, that's a problem.
template<typename T, std::enable_if_t<not std::is_floating_point<T>::value>* = nullptr>
void operator()(cudf::column_device_view in,
cudf::mutable_column_device_view out,
hipStream_t stream)
{
CUDF_FAIL("Unexpected non floating-point type.");
}
};
} // end anonymous namespace
namespace cudf {
namespace detail {
/*
 * @brief In-place normalization of -NaN -> NaN and -0.0 -> 0.0 over `in_out`.
 * Only FLOAT32/FLOAT64 columns are accepted. Empty input is a no-op.
 */
void normalize_nans_and_zeros(mutable_column_view in_out,
hipStream_t stream = 0)
{
if (in_out.size() == 0) {
return;
}
CUDF_EXPECTS(in_out.type() == data_type(FLOAT32) || in_out.type() == data_type(FLOAT64),
"Expects float or double input");
// wrapping the in_out data in a column_view so we can call the same lower level code.
// that we use for the non in-place version.
column_view input = in_out;
// to device. unique_ptr which gets automatically cleaned up when we leave
auto device_in = column_device_view::create(input);
// from device. unique_ptr which gets automatically cleaned up when we leave.
auto device_out = mutable_column_device_view::create(in_out);
// invoke the actual kernel.
cudf::experimental::type_dispatcher(input.type(),
normalize_nans_and_zeros_kernel_forwarder { },
*device_in,
*device_out,
stream);
}
} // namespace detail
/*
* @brief Makes all NaNs and zeroes positive.
*
* Converts floating point values from @p input using the following rules:
* Convert -NaN -> NaN
* Convert -0.0 -> 0.0
*
* @throws cudf::logic_error if column does not have floating point data type.
* @param[in] column_view representing input data
* @param[in] device_memory_resource allocator for allocating output data
*
* @returns new column with the modified data
*/
std::unique_ptr<column> normalize_nans_and_zeros(column_view const& input,
rmm::mr::device_memory_resource *mr)
{
  // Deep-copy the input (default stream, caller-supplied allocator) and then
  // normalize the copy in place via the detail implementation.
  auto output = std::make_unique<column>(input, (hipStream_t) 0, mr);
  auto output_view = output->mutable_view();
  detail::normalize_nans_and_zeros(output_view, 0);
  return output;
}
/*
* @brief Makes all Nans and zeroes positive.
*
* Converts floating point values from @p in_out using the following rules:
* Convert -NaN -> NaN
* Convert -0.0 -> 0.0
*
* @throws cudf::logic_error if column does not have floating point data type.
* @param[in, out] mutable_column_view representing input data. data is processed in-place
*/
// Public in-place entry point: forwards to detail:: on the default stream (0).
void normalize_nans_and_zeros(mutable_column_view& in_out)
{
detail::normalize_nans_and_zeros(in_out, 0);
}
}
| ea25f259f0507de6e5892f4f9d165b338d02bcd1.cu | /*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Cristhian Alberto Gonzales Castillo <cristhian@blazingdb.com>
* Copyright 2018 Alexander Ocsa <alexander@blazingdb.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_ptr.h>
#include <thrust/find.h>
#include <thrust/execution_policy.h>
#include <cub/cub.cuh>
#include <cudf/copying.hpp>
#include <cudf/replace.hpp>
#include <cudf/detail/replace.hpp>
#include <rmm/rmm.h>
#include <cudf/types.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/copying.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/strings/replace.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/detail/copy.hpp>
#include <cudf/null_mask.hpp>
namespace { //anonymous
static constexpr int BLOCK_SIZE = 256;
// return the new_value for output column at index `idx`
/*
 * @brief Device helper: looks up `input_data[idx]` in the values-to-replace
 * sequence and returns {new_value, output_is_valid}.
 *
 * When found at position d, the result is d_replacement_values[d] and, if
 * `replacement_has_nulls`, validity comes from bit d of `replacement_valid`.
 * When not found, the original value passes through as valid.
 */
template<class T, bool replacement_has_nulls>
__device__ auto get_new_value(cudf::size_type idx,
const T* __restrict__ input_data,
const T* __restrict__ values_to_replace_begin,
const T* __restrict__ values_to_replace_end,
const T* __restrict__ d_replacement_values,
cudf::bitmask_type const * __restrict__ replacement_valid)
{
// Sequential per-thread search over the (typically small) replace list.
auto found_ptr = thrust::find(thrust::seq, values_to_replace_begin,
values_to_replace_end,
input_data[idx]);
T new_value { 0 };
bool output_is_valid { true };
if (found_ptr != values_to_replace_end) {
auto d = thrust::distance(values_to_replace_begin, found_ptr);
new_value = d_replacement_values[d];
if (replacement_has_nulls) {
output_is_valid = cudf::bit_is_set(replacement_valid, d);
}
} else {
new_value = input_data[idx];
}
return thrust::make_pair(new_value, output_is_valid);
}
// Device helper: linearly scans `values_to_replace` for the string at row
// `idx` of `input`; returns the index of the first match, or -1 when the
// value is not scheduled for replacement. (`replacement_values` is accepted
// for signature parity but not read here.)
__device__ int get_new_string_value(cudf::size_type idx,
cudf::column_device_view& input,
cudf::column_device_view& values_to_replace,
cudf::column_device_view& replacement_values) {
  cudf::string_view const target = input.element<cudf::string_view>(idx);
  for (int i = 0; i < values_to_replace.size(); i++) {
    if (target == values_to_replace.element<cudf::string_view>(i)) {
      return i;
    }
  }
  return -1;
}
/*
* Kernel which does the first pass of strings replace. It computes the output null_mask, null_count,
* and the offsets.
*
* @param input The input column to replace strings in.
* @param values_to_replace The string values to replace.
* @param replacement The replacement values.
* @param offsets The column which will contain the offsets of the new string column
* @param indices Temporary column used to store the replacement indices
* @param output_valid The output null_mask
* @param output_valid_count The output valid count
*/
template<bool input_has_nulls, bool replacement_has_nulls>
__global__ void replace_strings_first_pass(cudf::column_device_view input,
cudf::column_device_view values_to_replace,
cudf::column_device_view replacement,
cudf::mutable_column_device_view offsets,
cudf::mutable_column_device_view indices,
cudf::bitmask_type * output_valid,
cudf::size_type* __restrict__ output_valid_count) {
cudf::size_type nrows = input.size();
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
// Grid-stride loop; the participant mask is recomputed each iteration so
// only in-bounds threads take part in the warp ballots.
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id { threadIdx.x % cudf::experimental::detail::warp_size };
uint32_t valid_sum { 0 };
while (i < nrows) {
bool input_is_valid = true;
if (input_has_nulls)
input_is_valid = input.is_valid_nocheck(i);
bool output_is_valid = input_is_valid;
if (input_is_valid){
// `result` is the replacement index, or -1 to keep the input string.
int result = get_new_string_value(i, input, values_to_replace, replacement);
cudf::string_view output = (result == -1) ? input.element<cudf::string_view>(i) : replacement.element<cudf::string_view>(result);
offsets.data<cudf::size_type>()[i] = output.size_bytes();
indices.data<cudf::size_type>()[i] = result;
if (replacement_has_nulls && result != -1){
output_is_valid = replacement.is_valid_nocheck(result);
}
} else {
offsets.data<cudf::size_type>()[i] = 0;
indices.data<cudf::size_type>()[i] = -1;
}
// One lane per warp writes the whole 32-bit mask word and counts bits.
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output_valid[cudf::word_index(i)] = bitmask;
valid_sum += __popc(bitmask);
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = cudf::experimental::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) {
atomicAdd(output_valid_count, block_valid_count);
}
}
/*
* Kernel which does the second pass of strings replace. It copies the string data needed from input and
* replacement into the new strings column chars column.
* @param input The input column
* @param replacement The replacement values
* @param offsets The offsets column of the new strings column
* @param strings The chars column of the new strings column
* @param indices Temporary column used to store the replacement indices.
*/
template<bool input_has_nulls, bool replacement_has_nulls>
__global__ void replace_strings_second_pass(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::mutable_column_device_view offsets,
cudf::mutable_column_device_view strings,
cudf::mutable_column_device_view indices) {
cudf::size_type nrows = input.size();
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
// Grid-stride loop; no warp ballots needed here, only per-row byte copies.
while (i < nrows) {
bool output_is_valid = true;
bool input_is_valid = true;
// idx was written by the first pass: replacement row index, or -1 to keep
// the original input string.
cudf::size_type idx = indices.element<cudf::size_type>(i);
if (input_has_nulls){
input_is_valid = input.is_valid_nocheck(i);
output_is_valid = input_is_valid;
}
if (replacement_has_nulls && idx != -1) {
output_is_valid = replacement.is_valid_nocheck(idx);
}
if (output_is_valid) {
cudf::string_view output = (idx == -1) ? input.element<cudf::string_view>(i) : replacement.element<cudf::string_view>(idx);
std::memcpy(strings.data<char>() + offsets.data<cudf::size_type>()[i],
output.data(),
output.size_bytes());
}
i += blockDim.x * gridDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
 * @brief Kernel that copies `input` into `output`, replacing every element
 * equal to some `values_to_replace[j]` with the corresponding `replacement[j]`.
 *
 * @tparam T element type of the column
 * @tparam input_has_nulls `true` if the input column has a valid mask, `false` otherwise
 * @tparam replacement_has_nulls `true` if the replacement column has a valid mask, `false` otherwise
 * The input_has_nulls and replacement_has_nulls template parameters allow us to
 * specialize this kernel for the different scenarios for performance without
 * writing different kernels.
 *
 * @param[in] input Device view of the column with the data to be modified
 * @param[out] output Mutable device view receiving the updated data (and, when
 * nullable, the updated valid mask)
 * @param[out] output_valid_count Number of valid elements in the output column
 * @param[in] nrows Number of rows in `output`
 * @param[in] values_to_replace Device view of the sequence of old values to be
 * replaced
 * @param[in] replacement Device view of the corresponding new values
 *
 * @returns
 */
/* ----------------------------------------------------------------------------*/
template<class T,
bool input_has_nulls, bool replacement_has_nulls>
__global__
void replace_kernel(cudf::column_device_view input,
cudf::mutable_column_device_view output,
cudf::size_type * __restrict__ output_valid_count,
cudf::size_type nrows,
cudf::column_device_view values_to_replace,
cudf::column_device_view replacement)
{
T * __restrict__ output_data = output.data<T>();
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
// Grid-stride loop; the participant mask is recomputed each iteration so
// only in-bounds threads take part in the warp ballots.
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id { threadIdx.x % cudf::experimental::detail::warp_size };
uint32_t valid_sum { 0 };
while (i < nrows) {
bool output_is_valid{true};
bool input_is_valid{true};
if (input_has_nulls) {
input_is_valid = input.is_valid_nocheck(i);
output_is_valid = input_is_valid;
}
// Null input rows are left untouched; valid rows get the looked-up value
// (or pass through when not present in values_to_replace).
if (input_is_valid)
thrust::tie(output_data[i], output_is_valid) =
get_new_value<T, replacement_has_nulls>(i,
input.data<T>(),
values_to_replace.data<T>(),
values_to_replace.data<T>() + values_to_replace.size(),
replacement.data<T>(),
replacement.null_mask());
/* output valid counts calculations*/
if (input_has_nulls or replacement_has_nulls) {
// One lane per warp writes the whole 32-bit mask word and counts bits.
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output.set_mask_word(cudf::word_index(i), bitmask);
valid_sum += __popc(bitmask);
}
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
if (input_has_nulls or replacement_has_nulls) {
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = cudf::experimental::detail::single_lane_block_sum_reduce<
BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) {
atomicAdd(output_valid_count, block_valid_count);
}
}
}
/*
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_kernel` with the appropriate data types.
*/
struct replace_kernel_forwarder {
template<typename col_type,
std::enable_if_t<cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream = 0)
{
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type *valid_count = valid_counter.data();
// Select the kernel instantiation matching the null-mask combination of
// input and replacement columns (four specializations).
auto replace = replace_kernel<col_type, true, true>;
if (input_col.has_nulls()) {
if (replacement_values.has_nulls()) {
replace = replace_kernel<col_type, true, true>;
} else {
replace = replace_kernel<col_type, true, false>;
}
} else {
if (replacement_values.has_nulls()) {
replace = replace_kernel<col_type, false, true>;
} else {
replace = replace_kernel<col_type, false, false>;
}
}
// Allocate an output mask only when one can actually be produced.
std::unique_ptr<cudf::column> output;
if (input_col.has_nulls() || replacement_values.has_nulls()) {
output = cudf::experimental::detail::allocate_like(input_col,
input_col.size(),
cudf::experimental::mask_allocation_policy::ALWAYS,
mr,
stream);
}
else {
output = cudf::experimental::detail::allocate_like(input_col,
input_col.size(),
cudf::experimental::mask_allocation_policy::NEVER,
mr,
stream);
}
cudf::mutable_column_view outputView = output->mutable_view();
cudf::experimental::detail::grid_1d grid { outputView.size(), BLOCK_SIZE, 1 };
auto device_in = cudf::column_device_view::create(input_col);
auto device_out = cudf::mutable_column_device_view::create(outputView);
auto device_values_to_replace = cudf::column_device_view::create(values_to_replace);
auto device_replacement_values = cudf::column_device_view::create(replacement_values);
replace<<<grid.num_blocks, BLOCK_SIZE, 0, stream>>>(*device_in,
*device_out,
valid_count,
outputView.size(),
*device_values_to_replace,
*device_replacement_values);
// valid_counter.value(stream) synchronizes `stream` before reading.
if (outputView.nullable()) {
output->set_null_count(output->size() - valid_counter.value(stream));
}
return output;
}
template<typename col_type,
std::enable_if_t<not cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream = 0) {
CUDF_FAIL("No specialization exists for this type");
}
};
/*
 * @brief Strings specialization: builds the output strings column in two
 * kernel passes (sizes/validity/replacement-indices, then character copy)
 * plus an offsets scan in between.
 */
template<>
std::unique_ptr<cudf::column> replace_kernel_forwarder::operator()<cudf::string_view>(cudf::column_view const& input_col,
cudf::column_view const& values_to_replace,
cudf::column_view const& replacement_values,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream){
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type *valid_count = valid_counter.data();
// Select kernel instantiations for the input/replacement null combination.
auto replace_first = replace_strings_first_pass<true, false>;
auto replace_second = replace_strings_second_pass<true, false>;
if (input_col.has_nulls()) {
if (replacement_values.has_nulls()) {
replace_first = replace_strings_first_pass<true, true>;
replace_second = replace_strings_second_pass<true,true>;
}
} else {
if (replacement_values.has_nulls()) {
replace_first = replace_strings_first_pass<false, true>;
replace_second = replace_strings_second_pass<false, true>;
} else {
replace_first = replace_strings_first_pass<false, false>;
replace_second = replace_strings_second_pass<false, false>;
}
}
// Create new offsets column to use in kernel
std::unique_ptr<cudf::column> sizes = cudf::make_numeric_column(cudf::data_type(cudf::type_id::INT32),
input_col.size(),
cudf::mask_state::UNALLOCATED,
stream,
mr);
// Temporary per-row replacement indices consumed by the second pass.
std::unique_ptr<cudf::column> indices = cudf::make_numeric_column(cudf::data_type(cudf::type_id::INT32),
input_col.size(),
cudf::mask_state::UNALLOCATED,
stream);
auto sizes_view = sizes->mutable_view();
auto indices_view = indices->mutable_view();
auto device_in = cudf::column_device_view::create(input_col);
auto device_values_to_replace = cudf::column_device_view::create(values_to_replace);
auto device_replacement = cudf::column_device_view::create(replacement_values);
auto device_sizes = cudf::mutable_column_device_view::create(sizes_view);
auto device_indices = cudf::mutable_column_device_view::create(indices_view);
rmm::device_buffer valid_bits = cudf::create_null_mask(input_col.size(),
cudf::mask_state::UNINITIALIZED,
stream,
mr);
// Call first pass kernel to get sizes in offsets
cudf::experimental::detail::grid_1d grid { input_col.size(), BLOCK_SIZE, 1 };
replace_first<<<grid.num_blocks, BLOCK_SIZE, 0, stream>>>(*device_in,
*device_values_to_replace,
*device_replacement,
*device_sizes,
*device_indices,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
valid_count);
// Exclusive scan of the sizes produces the offsets child column.
std::unique_ptr<cudf::column> offsets = cudf::strings::detail::make_offsets_child_column(sizes_view.begin<int32_t>(),
sizes_view.end<int32_t>(),
mr,
stream);
auto offsets_view = offsets->mutable_view();
auto device_offsets = cudf::mutable_column_device_view::create(offsets_view);
int32_t size;
// Async copy of the total char count; safe to read after the stream sync
// performed by valid_counter.value(stream) on the next line.
CUDA_TRY(cudaMemcpyAsync(&size, offsets_view.end<int32_t>() - 1, sizeof(int32_t), cudaMemcpyDefault, stream));
// Allocate chars array and output null mask
cudf::size_type null_count = input_col.size() - valid_counter.value(stream);
std::unique_ptr<cudf::column> output_chars =
cudf::strings::detail::create_chars_child_column(input_col.size(), null_count, size, mr, stream);
auto output_chars_view = output_chars->mutable_view();
auto device_chars = cudf::mutable_column_device_view::create(output_chars_view);
replace_second<<<grid.num_blocks, BLOCK_SIZE, 0, stream>>>(*device_in,
*device_replacement,
*device_offsets,
*device_chars,
*device_indices);
std::unique_ptr<cudf::column> output = cudf::make_strings_column(input_col.size(),
std::move(offsets),
std::move(output_chars),
null_count,
std::move(valid_bits),
stream,
mr);
return output;
}
} //end anonymous namespace
namespace cudf {
namespace detail {
/**
 * @brief Replaces every occurrence of `values_to_replace[i]` in `input_col`
 * with `replacement_values[i]`.
 *
 * @param input_col Column with the data to be modified
 * @param values_to_replace Old values to be replaced (must not contain nulls)
 * @param replacement_values New values (same size and type as values_to_replace)
 * @param mr Memory resource for the returned column
 * @param stream Stream on which all work is ordered
 * @returns New column with the modified data
 * @throws cudf::logic_error on size/type mismatch or nulls in values_to_replace
 */
std::unique_ptr<cudf::column> find_and_replace_all(cudf::column_view const& input_col,
                                                   cudf::column_view const& values_to_replace,
                                                   cudf::column_view const& replacement_values,
                                                   rmm::mr::device_memory_resource* mr,
                                                   cudaStream_t stream) {
  CUDF_EXPECTS(values_to_replace.size() == replacement_values.size(),
               "values_to_replace and replacement_values size mismatch.");
  CUDF_EXPECTS(input_col.type() == values_to_replace.type() &&
               input_col.type() == replacement_values.type(),
               "Columns type mismatch");
  CUDF_EXPECTS(values_to_replace.has_nulls() == false,
               "values_to_replace must not have nulls");

  if (0 == input_col.size() || 0 == values_to_replace.size() || 0 == replacement_values.size()) {
    // Nothing to do: return a copy. Pass stream/mr so the copy is
    // stream-ordered and allocated from the caller's resource (previously
    // both were defaulted, bypassing `mr`).
    return std::make_unique<cudf::column>(input_col, stream, mr);
  }

  return cudf::experimental::type_dispatcher(input_col.type(),
                                             replace_kernel_forwarder{},
                                             input_col,
                                             values_to_replace,
                                             replacement_values,
                                             mr,
                                             stream);
}
} //end details
namespace experimental {
/* --------------------------------------------------------------------------*/
/*
 * @brief Public entry point: replaces all `values_to_replace[i]` present in
 * `input_col` with `replacement_values[i]`.
 *
 * Forwards to the detail implementation on the default stream.
 *
 * @param[in] input_col          column with the data to be modified
 * @param[in] values_to_replace  column with the old values to be replaced
 * @param[in] replacement_values column with the new values
 *
 * @returns new column with the modified data
 */
/* ----------------------------------------------------------------------------*/
std::unique_ptr<cudf::column> find_and_replace_all(cudf::column_view const& input_col,
                                                   cudf::column_view const& values_to_replace,
                                                   cudf::column_view const& replacement_values,
                                                   rmm::mr::device_memory_resource* mr) {
  return cudf::detail::find_and_replace_all(input_col, values_to_replace,
                                            replacement_values, mr, 0);
}
} //end experimental
} //end cudf
namespace { //anonymous
/**
 * @brief Two-phase string null-replacement kernel (grid-stride loop).
 *
 * phase 0: writes each row's output byte length into `offsets` and builds the
 *          output validity mask word-by-word via warp ballots, accumulating a
 *          per-block valid count into `valid_counter`.
 * phase 1: copies each row's string bytes into `chars` using the (by now
 *          prefix-summed) `offsets`; `output_valid` may be unused here.
 *
 * Expected launch: 1-D grid/block; host passes `nullptr` for `chars` in
 * phase 0 and the real chars buffer in phase 1 (see the string specialization
 * of replace_nulls_column_kernel_forwarder).
 */
template<int phase, bool replacement_has_nulls>
__global__
void replace_nulls_strings(cudf::column_device_view input,
                           cudf::column_device_view replacement,
                           cudf::bitmask_type *output_valid,
                           cudf::size_type* offsets,
                           char* chars,
                           cudf::size_type* valid_counter){
  cudf::size_type nrows = input.size();
  cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
  // Mask of lanes still inside the row range; recomputed every stride so the
  // *_sync intrinsics only involve participating lanes.
  uint32_t active_mask = 0xffffffff;
  active_mask = __ballot_sync(active_mask, i < nrows);
  auto const lane_id { threadIdx.x % cudf::experimental::detail::warp_size };
  uint32_t valid_sum { 0 };
  while (i < nrows) {
    bool input_is_valid = input.is_valid_nocheck(i);
    bool output_is_valid = true;
    // Output is null only if the input is null AND the replacement is null.
    if(replacement_has_nulls && !input_is_valid) {
      output_is_valid = replacement.is_valid_nocheck(i);
    }
    cudf::string_view out;
    if (input_is_valid){
      out = input.element<cudf::string_view>(i);
    } else if (output_is_valid) {
      out = replacement.element<cudf::string_view>(i);
    }
    bool nonzero_output = (input_is_valid || output_is_valid);
    if (phase == 0) {
      // Row length for the later offsets scan; null rows contribute 0 bytes.
      offsets[i] = nonzero_output ? out.size_bytes() : 0;
      uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
      // One lane per warp writes the whole 32-row validity word.
      if (0 == lane_id) {
        output_valid[cudf::word_index(i)] = bitmask;
        valid_sum += __popc(bitmask);
      }
    } else if (phase == 1) {
      // std::memcpy is usable in device code here; copies the row's bytes.
      if (nonzero_output)
        std::memcpy(chars + offsets[i], out.data(), out.size_bytes());
    }
    i += blockDim.x * gridDim.x;
    active_mask = __ballot_sync(active_mask, i < nrows);
  }
  // Compute total valid count for this block and add it to global count
  uint32_t block_valid_count = cudf::experimental::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
  // one thread computes and adds to output_valid_count
  if (threadIdx.x == 0) {
    atomicAdd(valid_counter, block_valid_count);
  }
}
/**
 * @brief Fixed-width null-replacement kernel (grid-stride loop).
 *
 * Copies `input[i]` when valid, otherwise `replacement[i]`. When the
 * replacement column can itself contain nulls (`replacement_has_nulls`),
 * the kernel also builds the output validity mask via warp ballots and
 * accumulates the total valid count into `output_valid_count`; otherwise
 * the mask and counter are left untouched (output assumed fully valid by
 * the caller).
 */
template<typename Type, bool replacement_has_nulls>
__global__
void replace_nulls(cudf::column_device_view input,
                   cudf::column_device_view replacement,
                   cudf::mutable_column_device_view output,
                   cudf::size_type* output_valid_count) {
  cudf::size_type nrows = input.size();
  cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
  // Lanes still in range; recomputed per stride for the *_sync intrinsics.
  uint32_t active_mask = 0xffffffff;
  active_mask = __ballot_sync(active_mask, i < nrows);
  auto const lane_id { threadIdx.x % cudf::experimental::detail::warp_size };
  uint32_t valid_sum { 0 };
  while (i < nrows) {
    bool input_is_valid = input.is_valid_nocheck(i);
    bool output_is_valid = true;
    if (input_is_valid) {
      output.data<Type>()[i] = input.element<Type>(i);
    }
    else {
      if (replacement_has_nulls) {
        output_is_valid = replacement.is_valid_nocheck(i);
      }
      output.data<Type>()[i] = replacement.element<Type>(i);
    }
    /* output valid counts calculations*/
    if (replacement_has_nulls) {
      uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
      // One lane per warp writes the 32-row validity word and counts its bits.
      if (0 == lane_id) {
        output.set_mask_word(cudf::word_index(i), bitmask);
        valid_sum += __popc(bitmask);
      }
    }
    i += blockDim.x * gridDim.x;
    active_mask = __ballot_sync(active_mask, i < nrows);
  }
  if (replacement_has_nulls) {
    // Compute total valid count for this block and add it to global count
    uint32_t block_valid_count = cudf::experimental::detail::single_lane_block_sum_reduce<
        BLOCK_SIZE, 0>(valid_sum);
    // one thread computes and adds to output_valid_count
    if (threadIdx.x == 0) {
      atomicAdd(output_valid_count, block_valid_count);
    }
  }
}
/* --------------------------------------------------------------------------*/
/**
 * @brief Functor invoked by `type_dispatcher` to instantiate and launch the
 * `replace_nulls` kernel with the concrete column element type.
 */
/* ----------------------------------------------------------------------------*/
struct replace_nulls_column_kernel_forwarder {
  template<typename col_type,
           std::enable_if_t<cudf::is_fixed_width<col_type>()>* = nullptr>
  std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
                                           cudf::column_view const& replacement,
                                           rmm::mr::device_memory_resource* mr,
                                           cudaStream_t stream = 0)
  {
    cudf::size_type const num_rows = input.size();
    cudf::experimental::detail::grid_1d grid{num_rows, BLOCK_SIZE};

    // A null mask is only needed when the replacement column can itself be
    // null; otherwise every output row is valid.
    auto const mask_policy = replacement.has_nulls()
                                 ? cudf::experimental::mask_allocation_policy::ALWAYS
                                 : cudf::experimental::mask_allocation_policy::NEVER;
    std::unique_ptr<cudf::column> output =
        cudf::experimental::detail::allocate_like(input, input.size(), mask_policy, mr, stream);
    auto output_view = output->mutable_view();

    // Pick the kernel instantiation matching the output's nullability.
    auto kernel = output_view.nullable() ? replace_nulls<col_type, true>
                                         : replace_nulls<col_type, false>;

    auto device_in = cudf::column_device_view::create(input);
    auto device_out = cudf::mutable_column_device_view::create(output_view);
    auto device_replacement = cudf::column_device_view::create(replacement);

    rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
    kernel<<<grid.num_blocks, BLOCK_SIZE, 0, stream>>>(*device_in,
                                                       *device_replacement,
                                                       *device_out,
                                                       valid_counter.data());
    if (output_view.nullable()) {
      output->set_null_count(output->size() - valid_counter.value(stream));
    }
    return output;
  }

  // Non-fixed-width types (other than the string specialization below) are
  // unsupported and raise a dispatch error.
  template<typename col_type,
           std::enable_if_t<not cudf::is_fixed_width<col_type>()>* = nullptr>
  std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
                                           cudf::column_view const& replacement,
                                           rmm::mr::device_memory_resource* mr,
                                           cudaStream_t stream = 0) {
    CUDF_FAIL("No specialization exists for the given type.");
  }
};
/**
 * @brief String specialization: replaces nulls in `input` with the
 * corresponding row of `replacement` using the two-phase
 * `replace_nulls_strings` kernel (phase 0 sizes + validity, phase 1 bytes).
 */
template<>
std::unique_ptr<cudf::column> replace_nulls_column_kernel_forwarder::operator ()<cudf::string_view>(cudf::column_view const& input,
                                                                                                    cudf::column_view const& replacement,
                                                                                                    rmm::mr::device_memory_resource* mr,
                                                                                                    cudaStream_t stream) {
  rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
  cudf::size_type *valid_count = valid_counter.data();
  // Choose kernel instantiations based on the replacement's nullability.
  auto replace_first = replace_nulls_strings<0, false>;
  auto replace_second = replace_nulls_strings<1, false>;
  if (replacement.has_nulls()){
    replace_first = replace_nulls_strings<0, true>;
    replace_second = replace_nulls_strings<1, true>;
  }
  // Create new offsets column to use in kernel
  std::unique_ptr<cudf::column> sizes = cudf::make_numeric_column(cudf::data_type(cudf::type_id::INT32),
                                                                  input.size(),
                                                                  cudf::mask_state::UNALLOCATED,
                                                                  stream);
  auto sizes_view = sizes->mutable_view();
  auto device_in = cudf::column_device_view::create(input);
  auto device_replacement = cudf::column_device_view::create(replacement);
  rmm::device_buffer valid_bits = cudf::create_null_mask(input.size(),
                                                         cudf::mask_state::UNINITIALIZED,
                                                         stream,
                                                         mr);
  // Call first pass kernel to get sizes in offsets
  cudf::experimental::detail::grid_1d grid { input.size(), BLOCK_SIZE, 1 };
  replace_first<<<grid.num_blocks, BLOCK_SIZE, 0, stream>>>(*device_in,
                                                            *device_replacement,
                                                            reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
                                                            sizes_view.begin<cudf::size_type>(),
                                                            nullptr,
                                                            valid_count);
  // Prefix-sum the per-row sizes into the offsets child column.
  std::unique_ptr<cudf::column> offsets = cudf::strings::detail::make_offsets_child_column(sizes_view.begin<int32_t>(),
                                                                                           sizes_view.end<int32_t>(),
                                                                                           mr,
                                                                                           stream);
  auto offsets_view = offsets->mutable_view();
  // Total chars size = last offset. NOTE(review): `size` is copied
  // asynchronously into host stack memory; its use below appears to rely on
  // valid_counter.value(stream) synchronizing the stream first — confirm
  // that assumption holds for the rmm::device_scalar in use.
  int32_t size;
  CUDA_TRY(cudaMemcpyAsync(&size, offsets_view.end<int32_t>() - 1, sizeof(int32_t), cudaMemcpyDefault, stream));
  // Allocate chars array and output null mask
  cudf::size_type null_count = input.size() - valid_counter.value(stream);
  std::unique_ptr<cudf::column> output_chars =
      cudf::strings::detail::create_chars_child_column(input.size(), null_count, size, mr, stream);
  auto output_chars_view = output_chars->mutable_view();
  // Second pass: copy the string bytes into the chars buffer.
  replace_second<<<grid.num_blocks, BLOCK_SIZE, 0, stream>>>(*device_in,
                                                             *device_replacement,
                                                             reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
                                                             offsets_view.begin<cudf::size_type>(),
                                                             output_chars_view.data<char>(),
                                                             valid_count);
  std::unique_ptr<cudf::column> output = cudf::make_strings_column(input.size(),
                                                                   std::move(offsets),
                                                                   std::move(output_chars),
                                                                   input.size() - valid_counter.value(stream),
                                                                   std::move(valid_bits),
                                                                   stream,
                                                                   mr);
  return output;
}
/**
 * @brief Element-wise device functor: keeps valid input values and
 * substitutes the scalar replacement (read through a device pointer)
 * for invalid ones.
 */
template<typename T>
struct replace_nulls_functor {
  T* value_it;  // device pointer to the single replacement value
  replace_nulls_functor(T* _value_it) : value_it(_value_it) {}
  __device__
  T operator()(T input, bool is_valid) {
    if (is_valid) { return input; }
    return *value_it;
  }
};
/* --------------------------------------------------------------------------*/
/**
 * @brief Functor invoked by `type_dispatcher` to replace nulls with a single
 * scalar value via a thrust::transform over the input column.
 */
/* ----------------------------------------------------------------------------*/
struct replace_nulls_scalar_kernel_forwarder {
  template<typename col_type,
           std::enable_if_t<cudf::is_fixed_width<col_type>()>* = nullptr>
  std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
                                           cudf::scalar const& replacement,
                                           rmm::mr::device_memory_resource* mr,
                                           cudaStream_t stream = 0)
  {
    // No null mask needed: every null becomes the (valid) scalar value.
    std::unique_ptr<cudf::column> output =
        cudf::experimental::allocate_like(input,
                                          cudf::experimental::mask_allocation_policy::NEVER,
                                          mr);
    auto output_view = output->mutable_view();

    using ScalarType = cudf::experimental::scalar_type_t<col_type>;
    auto const& typed_repl = static_cast<ScalarType const&>(replacement);
    auto device_in = cudf::column_device_view::create(input);

    replace_nulls_functor<col_type> func(typed_repl.data());
    // Pairwise: (value, validity) -> value or scalar replacement.
    thrust::transform(rmm::exec_policy(stream)->on(stream),
                      input.data<col_type>(),
                      input.data<col_type>() + input.size(),
                      cudf::experimental::detail::make_validity_iterator(*device_in),
                      output_view.data<col_type>(),
                      func);
    return output;
  }

  // Non-fixed-width types (other than the string specialization below) are
  // unsupported and raise a dispatch error.
  template<typename col_type,
           std::enable_if_t<not cudf::is_fixed_width<col_type>()>* = nullptr>
  std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
                                           cudf::scalar const& replacement,
                                           rmm::mr::device_memory_resource* mr,
                                           cudaStream_t stream = 0) {
    CUDF_FAIL("No specialization exists for the given type.");
  }
};
/**
 * @brief String specialization: delegates to the dedicated strings API.
 * The `stream` parameter is accepted but not forwarded (API takes none).
 */
template<>
std::unique_ptr<cudf::column> replace_nulls_scalar_kernel_forwarder::operator ()<cudf::string_view>(cudf::column_view const& input,
                                                                                                    cudf::scalar const& replacement,
                                                                                                    rmm::mr::device_memory_resource* mr,
                                                                                                    cudaStream_t stream) {
  auto const& repl = static_cast<cudf::string_scalar const&>(replacement);
  return cudf::strings::replace_nulls(cudf::strings_column_view(input), repl, mr);
}
} //end anonymous namespace
namespace cudf {
namespace detail {
/**
 * @brief Replaces nulls in `input` with the corresponding rows of
 * `replacement`; dispatches on the element type.
 *
 * @throws cudf::logic_error on type or size mismatch
 */
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
                                            cudf::column_view const& replacement,
                                            rmm::mr::device_memory_resource* mr,
                                            cudaStream_t stream)
{
  CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
  CUDF_EXPECTS(replacement.size() == input.size(), "Column size mismatch");

  if (input.size() == 0) {
    return cudf::experimental::empty_like(input);
  }
  // No nulls present: a plain copy suffices.
  if (!input.has_nulls()) {
    return std::make_unique<cudf::column>(input);
  }

  return cudf::experimental::type_dispatcher(input.type(),
                                             replace_nulls_column_kernel_forwarder{},
                                             input,
                                             replacement,
                                             mr,
                                             stream);
}
/**
 * @brief Replaces nulls in `input` with the scalar `replacement`;
 * dispatches on the element type.
 *
 * Note: empty/no-null/invalid-scalar early-outs happen *before* the type
 * check, matching the original call contract.
 */
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
                                            cudf::scalar const& replacement,
                                            rmm::mr::device_memory_resource* mr,
                                            cudaStream_t stream)
{
  if (input.size() == 0) {
    return cudf::experimental::empty_like(input);
  }
  // Nothing to do if there are no nulls, or the scalar itself is null.
  if (!input.has_nulls() || !replacement.is_valid()) {
    return std::make_unique<cudf::column>(input, stream, mr);
  }
  CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");

  return cudf::experimental::type_dispatcher(input.type(),
                                             replace_nulls_scalar_kernel_forwarder{},
                                             input,
                                             replacement,
                                             mr,
                                             stream);
}
} //namespace detail
namespace experimental {
// Public entry points: forward to the detail implementations on the
// default stream.
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
                                            cudf::column_view const& replacement,
                                            rmm::mr::device_memory_resource* mr)
{
  return cudf::detail::replace_nulls(input, replacement, mr, 0);
}

std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
                                            cudf::scalar const& replacement,
                                            rmm::mr::device_memory_resource* mr)
{
  return cudf::detail::replace_nulls(input, replacement, mr, 0);
}
} //end experimental
} //namespace cudf
namespace { // anonymous
/**
 * @brief Device functor that canonicalizes floating-point values:
 * any NaN (including -NaN) maps to the quiet NaN, and -0.0 maps to +0.0;
 * every other value is returned unchanged.
 */
template<typename T>
struct normalize_nans_and_zeros_lambda {
  cudf::column_device_view in;
  T __device__ operator()(cudf::size_type i)
  {
    auto const v = in.element<T>(i);
    if (isnan(v)) {
      return std::numeric_limits<T>::quiet_NaN();
    }
    // -0.0 compares equal to 0.0, so both map to the positive zero literal.
    return (v == T{0.0}) ? T{0.0} : v;
  }
};
/* --------------------------------------------------------------------------*/
/**
 * @brief Functor invoked by `type_dispatcher` to run the elementwise
 * NaN/zero normalization for the column's floating-point type.
 */
/* ----------------------------------------------------------------------------*/
struct normalize_nans_and_zeros_kernel_forwarder {
  // Floating-point types: the only types we actually handle.
  template<typename T, std::enable_if_t<std::is_floating_point<T>::value>* = nullptr>
  void operator()(cudf::column_device_view in,
                  cudf::mutable_column_device_view out,
                  cudaStream_t stream)
  {
    thrust::transform(rmm::exec_policy(stream)->on(stream),
                      thrust::make_counting_iterator(0),
                      thrust::make_counting_iterator(in.size()),
                      out.head<T>(),
                      normalize_nans_and_zeros_lambda<T>{in});
  }

  // Any non-floating-point type reaching this dispatch is a logic error.
  template<typename T, std::enable_if_t<not std::is_floating_point<T>::value>* = nullptr>
  void operator()(cudf::column_device_view in,
                  cudf::mutable_column_device_view out,
                  cudaStream_t stream)
  {
    CUDF_FAIL("Unexpected non floating-point type.");
  }
};
} // end anonymous namespace
namespace cudf {
namespace detail {
/**
 * @brief In-place NaN/zero normalization over a mutable column view.
 *
 * @throws cudf::logic_error if the column is not FLOAT32/FLOAT64
 */
void normalize_nans_and_zeros(mutable_column_view in_out,
                              cudaStream_t stream = 0)
{
  if (in_out.size() == 0) {
    return;
  }
  CUDF_EXPECTS(in_out.type() == data_type(FLOAT32) || in_out.type() == data_type(FLOAT64),
               "Expects float or double input");

  // View the same buffer as an immutable input so the out-of-place
  // machinery can be reused for the in-place case.
  column_view input = in_out;
  auto device_in = column_device_view::create(input);       // freed on scope exit
  auto device_out = mutable_column_device_view::create(in_out);

  cudf::experimental::type_dispatcher(input.type(),
                                      normalize_nans_and_zeros_kernel_forwarder{},
                                      *device_in,
                                      *device_out,
                                      stream);
}
} // namespace detail
/*
 * @brief Makes all NaNs and zeroes positive.
 *
 * Converts floating point values from @p input using the following rules:
 *        Convert  -NaN  -> NaN
 *        Convert  -0.0  -> 0.0
 *
 * @throws cudf::logic_error if column does not have floating point data type.
 * @param[in] column_view representing input data
 * @param[in] device_memory_resource allocator for allocating output data
 *
 * @returns new column with the modified data
 */
std::unique_ptr<column> normalize_nans_and_zeros(column_view const& input,
                                                 rmm::mr::device_memory_resource *mr)
{
  // Deep-copy the input, then normalize the copy in place.
  auto out = std::make_unique<column>(input, (cudaStream_t)0, mr);
  auto out_view = out->mutable_view();
  detail::normalize_nans_and_zeros(out_view, 0);
  return out;
}
/*
 * @brief Makes all NaNs and zeroes positive, in place.
 *
 * Converts floating point values from @p in_out using the following rules:
 *        Convert  -NaN  -> NaN
 *        Convert  -0.0  -> 0.0
 *
 * @throws cudf::logic_error if column does not have floating point data type.
 * @param[in, out] mutable_column_view representing input data. data is processed in-place
 */
void normalize_nans_and_zeros(mutable_column_view& in_out)
{
  detail::normalize_nans_and_zeros(in_out, 0);
}
}
|
a29f878e2ca7869c6d3fffa0a9212643c9cd0d15.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "math.h"
#include <string>
// Image IO
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <sstream>
#include <fstream>
#include "common_hip.cuh"
#include "triangle.cuh"
// Output generated from Teg
#include "linear.cuh"
// End Temporary placeholder.
//#define ALPHA_COLOR 0.001
//#define ALPHA_VERTEX 0.08
#define ALPHA_COLOR 0.3
#define ALPHA_VERTEX 10000
/**
 * @brief Fits an nx-by-ny triangle grid (two triangles per cell, linear color
 * per triangle) to an input image by gradient descent (150 iterations),
 * writing per-iteration renders (`iter-N.png`), loss images (`loss-N.png`)
 * and a loss trace (`out.loss`).
 *
 * Usage: ./triangulate_linear <image_file> <tri-grid nx> <tri-grid ny> <use-deltas:y/n>
 */
int main(int argc, char** argv)
{
    // ---- Command-line parsing --------------------------------------------
    if (argc != 5) {
        std::cout << "Usage: ./triangulate_linear <image_file> <tri-grid nx> <tri-grid ny> <use-deltas:y/n>" << std::endl;
        exit(1);
    }
    std::stringstream ss_nx(argv[2]);
    std::stringstream ss_ny(argv[3]);
    std::stringstream ss_delta(argv[4]);
    int nx;
    int ny;
    ss_nx >> nx;
    ss_ny >> ny;
    char c_use_deltas;
    ss_delta >> c_use_deltas;
    if (c_use_deltas != 'y' && c_use_deltas != 'n') {
        std::cout << "Please specify y/n for 4th argument" << std::endl;
        return -1;
    }
    bool use_deltas = c_use_deltas == 'y';

    // ---- Load the target image -------------------------------------------
    cv::Mat image;
    image = cv::imread(argv[1], cv::IMREAD_COLOR);
    if( !image.data ) {
        std::cout << "Could not open or find the image" << std::endl;
        return -1;
    }
    std::cout << "Fitting " << image.rows << "x" << image.cols << " image" << std::endl;

    // Flattened 3-channel float render buffer sizes.
    auto image_pcs = image.rows * image.cols * 3;
    auto image_sz = image_pcs * sizeof(float);

    // ---- Build mesh, derivative mesh and per-triangle linear colors ------
    Image* tri_image;
    LinearFragment* colors;
    DLinearFragment* d_colors;
    TriMesh* mesh;
    DTriMesh* d_mesh;
    hipMallocManaged(&tri_image, sizeof(Image));
    hipMallocManaged(&mesh, sizeof(TriMesh));
    build_initial_triangles(mesh, nx, ny, image.rows, image.cols);
    build_d_mesh(mesh, &d_mesh);
    std::cout << "Build meshes" << std::endl;
    build_linear_colors(mesh, &colors);
    build_d_linear_colors(mesh, &d_colors);
    std::cout << "Build colors" << std::endl;

    // Render/loss buffers (float on device-managed memory, byte on host).
    float* triangle_image;
    char* triangle_bimage = (char*) malloc(image_pcs * 1);
    hipMallocManaged(&triangle_image, image_sz);
    float* loss_image;
    char* loss_bimage = (char*) malloc(image.rows * image.cols * 1);
    hipMallocManaged(&loss_image, sizeof(float) * image.rows * image.cols);

    // NOTE(review): max_jobs is a job *count*, yet it is scaled by
    // sizeof(int); this over-allocates tids/pids 4x and loosens the assert
    // below. Kept as-is to preserve the original allocation behavior.
    int max_jobs = image.rows * image.cols * 10 * sizeof(int);
    int* tids;
    int* pids;
    hipMallocManaged(&tids, max_jobs * sizeof(int));
    hipMallocManaged(&pids, max_jobs * sizeof(int));
    std::cout << type2str(image.type()) << std::endl;

    // Copy the image into managed memory, normalized to [0, 1] per channel.
    tri_image->rows = image.rows;
    tri_image->cols = image.cols;
    hipMallocManaged(&(tri_image->colors), sizeof(Color) * image.rows * image.cols);
    for(int i = 0; i < image.rows; i++)
        for(int j = 0; j < image.cols; j++){
            cv::Vec3b v = image.at<cv::Vec3b>(i, j);
            tri_image->colors[(image.cols * i + j)].r = ((float)v[0]) / 255.0;
            tri_image->colors[(image.cols * i + j)].g = ((float)v[1]) / 255.0;
            tri_image->colors[(image.cols * i + j)].b = ((float)v[2]) / 255.0;
        }

    // ---- Gradient-descent loop -------------------------------------------
    int num_jobs = 0;
    std::stringstream loss_string;
    for (int iter = 0; iter < 150; iter ++){
        printf("Iteration %d", iter);
        // Zero the per-iteration gradient and render accumulators.
        hipLaunchKernelGGL(( set_zero<DLinearFragment>), dim3(((mesh->num_triangles) / 256 + 1)), dim3(256), 0, 0, d_colors, mesh->num_triangles);
        // BUGFIX: the element count for d_vertices must be num_vertices
        // (was num_triangles), which under- or over-ran the vertex
        // derivative array depending on the grid shape.
        hipLaunchKernelGGL(( set_zero<DVertex>), dim3(((mesh->num_vertices) / 256 + 1)), dim3(256), 0, 0, d_mesh->d_vertices, mesh->num_vertices);
        hipLaunchKernelGGL(( set_zero<float>), dim3((image_pcs / 256) + 1), dim3(256), 0, 0, triangle_image, image_pcs);
        hipLaunchKernelGGL(( set_zero<float>), dim3((image.rows * image.cols / 256) + 1), dim3(256), 0, 0, loss_image, image.rows * image.cols);

        num_jobs = generate_jobs(image.rows, image.cols, mesh, tids, pids);
        printf("jobs: %d\n", num_jobs);
        assert(num_jobs <= max_jobs);
        hipDeviceSynchronize();

        // Derivative pass (with or without delta terms).
        if (use_deltas) {
            hipLaunchKernelGGL(( linear_deriv_kernel), dim3((num_jobs / 256) + 1), dim3(256), 0, 0,
                        tids,
                        pids,
                        num_jobs,
                        tri_image,
                        mesh,
                        d_mesh,
                        colors,
                        d_colors);
        } else {
            hipLaunchKernelGGL(( linear_deriv_kernel_nodelta), dim3((num_jobs / 256) + 1), dim3(256), 0, 0,
                        tids,
                        pids,
                        num_jobs,
                        tri_image,
                        mesh,
                        d_mesh,
                        colors,
                        d_colors);
        }
        hipDeviceSynchronize();

        // Loss pass.
        hipLaunchKernelGGL(( linear_loss_kernel), dim3((num_jobs / 256) + 1), dim3(256), 0, 0,
                    tids,
                    pids,
                    num_jobs,
                    tri_image,
                    mesh,
                    colors,
                    loss_image);
        hipDeviceSynchronize();

        // Regularize and step; learning rates scaled by average triangle
        // footprint so they are resolution-independent.
        compute_triangle_regularization(mesh, d_mesh, 30);
        float avg_total_pixel_area = image.rows * image.cols / (nx * ny);
        float avg_triangle_surface_area = image.rows * image.cols / (sqrt(nx * ny));
        hipLaunchKernelGGL(( update_vertices), dim3((mesh->num_vertices) / 256 + 1), dim3(256) , 0, 0,
            mesh, d_mesh, ALPHA_VERTEX / avg_triangle_surface_area
        );
        hipLaunchKernelGGL(( update_linear_colors), dim3((mesh->num_triangles) / 256 + 1), dim3(256) , 0, 0,
            mesh->num_triangles, colors, d_colors, ALPHA_COLOR / avg_total_pixel_area
        );
        hipDeviceSynchronize();

        // Render the current triangulation.
        hipLaunchKernelGGL(( linear_integral_kernel), dim3((num_jobs / 256) + 1), dim3(256), 0, 0,
                    tids,
                    pids,
                    num_jobs,
                    mesh,
                    colors,
                    triangle_image,
                    image.rows, image.cols);
        hipDeviceSynchronize();

        // Quantize render and loss to clamped 8-bit images.
        for(int idx = 0; idx < image_pcs; idx ++){
            int _val = (int)(triangle_image[idx] * 256);
            triangle_bimage[idx] = (char) ((_val < 0) ? 0 : (_val > 255 ? 255 : _val));
        }
        float total_loss = 0.f;
        for(int idx = 0; idx < image.rows * image.cols; idx ++){
            int _val = (int)(loss_image[idx] * 256);
            total_loss += loss_image[idx];
            loss_bimage[idx] = (char) ((_val < 0) ? 0 : (_val > 255 ? 255 : _val));
        }
        loss_string << total_loss << std::endl;
        std::cout << "Loss: " << total_loss << std::endl;

        std::stringstream ss;
        ss << "iter-" << iter << ".png";
        cv::imwrite(ss.str(), cv::Mat(image.rows, image.cols, CV_8UC3, triangle_bimage));
        std::stringstream ss_loss;
        ss_loss << "loss-" << iter << ".png";
        cv::imwrite(ss_loss.str(), cv::Mat(image.rows, image.cols, CV_8UC1, loss_bimage));
    }

    // Persist the loss trace.
    std::ofstream outfile("out.loss");
    outfile << loss_string.str();
    outfile.close();

    // Cleanup (previously leaked: host byte images and the loss buffer).
    free(triangle_bimage);
    free(loss_bimage);
    hipFree(loss_image);
    hipFree(triangle_image);
    hipFree(tids);
    hipFree(pids);
} | a29f878e2ca7869c6d3fffa0a9212643c9cd0d15.cu | #include <stdio.h>
#include "math.h"
#include <string>
// Image IO
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <sstream>
#include <fstream>
#include "common.cuh"
#include "triangle.cuh"
// Output generated from Teg
#include "linear.cuh"
// End Temporary placeholder.
//#define ALPHA_COLOR 0.001
//#define ALPHA_VERTEX 0.08
#define ALPHA_COLOR 0.3
#define ALPHA_VERTEX 10000
/**
 * @brief Fits an nx-by-ny triangle grid (two triangles per cell, linear color
 * per triangle) to an input image by gradient descent (150 iterations),
 * writing per-iteration renders (`iter-N.png`), loss images (`loss-N.png`)
 * and a loss trace (`out.loss`).
 *
 * Usage: ./triangulate_linear <image_file> <tri-grid nx> <tri-grid ny> <use-deltas:y/n>
 */
int main(int argc, char** argv)
{
    // ---- Command-line parsing --------------------------------------------
    if (argc != 5) {
        std::cout << "Usage: ./triangulate_linear <image_file> <tri-grid nx> <tri-grid ny> <use-deltas:y/n>" << std::endl;
        exit(1);
    }
    std::stringstream ss_nx(argv[2]);
    std::stringstream ss_ny(argv[3]);
    std::stringstream ss_delta(argv[4]);
    int nx;
    int ny;
    ss_nx >> nx;
    ss_ny >> ny;
    char c_use_deltas;
    ss_delta >> c_use_deltas;
    if (c_use_deltas != 'y' && c_use_deltas != 'n') {
        std::cout << "Please specify y/n for 4th argument" << std::endl;
        return -1;
    }
    bool use_deltas = c_use_deltas == 'y';

    // ---- Load the target image -------------------------------------------
    cv::Mat image;
    image = cv::imread(argv[1], cv::IMREAD_COLOR);
    if( !image.data ) {
        std::cout << "Could not open or find the image" << std::endl;
        return -1;
    }
    std::cout << "Fitting " << image.rows << "x" << image.cols << " image" << std::endl;

    // Flattened 3-channel float render buffer sizes.
    auto image_pcs = image.rows * image.cols * 3;
    auto image_sz = image_pcs * sizeof(float);

    // ---- Build mesh, derivative mesh and per-triangle linear colors ------
    Image* tri_image;
    LinearFragment* colors;
    DLinearFragment* d_colors;
    TriMesh* mesh;
    DTriMesh* d_mesh;
    cudaMallocManaged(&tri_image, sizeof(Image));
    cudaMallocManaged(&mesh, sizeof(TriMesh));
    build_initial_triangles(mesh, nx, ny, image.rows, image.cols);
    build_d_mesh(mesh, &d_mesh);
    std::cout << "Build meshes" << std::endl;
    build_linear_colors(mesh, &colors);
    build_d_linear_colors(mesh, &d_colors);
    std::cout << "Build colors" << std::endl;

    // Render/loss buffers (float in managed memory, byte on host).
    float* triangle_image;
    char* triangle_bimage = (char*) malloc(image_pcs * 1);
    cudaMallocManaged(&triangle_image, image_sz);
    float* loss_image;
    char* loss_bimage = (char*) malloc(image.rows * image.cols * 1);
    cudaMallocManaged(&loss_image, sizeof(float) * image.rows * image.cols);

    // NOTE(review): max_jobs is a job *count*, yet it is scaled by
    // sizeof(int); this over-allocates tids/pids 4x and loosens the assert
    // below. Kept as-is to preserve the original allocation behavior.
    int max_jobs = image.rows * image.cols * 10 * sizeof(int);
    int* tids;
    int* pids;
    cudaMallocManaged(&tids, max_jobs * sizeof(int));
    cudaMallocManaged(&pids, max_jobs * sizeof(int));
    std::cout << type2str(image.type()) << std::endl;

    // Copy the image into managed memory, normalized to [0, 1] per channel.
    tri_image->rows = image.rows;
    tri_image->cols = image.cols;
    cudaMallocManaged(&(tri_image->colors), sizeof(Color) * image.rows * image.cols);
    for(int i = 0; i < image.rows; i++)
        for(int j = 0; j < image.cols; j++){
            cv::Vec3b v = image.at<cv::Vec3b>(i, j);
            tri_image->colors[(image.cols * i + j)].r = ((float)v[0]) / 255.0;
            tri_image->colors[(image.cols * i + j)].g = ((float)v[1]) / 255.0;
            tri_image->colors[(image.cols * i + j)].b = ((float)v[2]) / 255.0;
        }

    // ---- Gradient-descent loop -------------------------------------------
    int num_jobs = 0;
    std::stringstream loss_string;
    for (int iter = 0; iter < 150; iter ++){
        printf("Iteration %d", iter);
        // Zero the per-iteration gradient and render accumulators.
        set_zero<DLinearFragment><<<((mesh->num_triangles) / 256 + 1), 256>>>(d_colors, mesh->num_triangles);
        // BUGFIX: the element count for d_vertices must be num_vertices
        // (was num_triangles), which under- or over-ran the vertex
        // derivative array depending on the grid shape.
        set_zero<DVertex><<<((mesh->num_vertices) / 256 + 1), 256>>>(d_mesh->d_vertices, mesh->num_vertices);
        set_zero<float><<<(image_pcs / 256) + 1, 256>>>(triangle_image, image_pcs);
        set_zero<float><<<(image.rows * image.cols / 256) + 1, 256>>>(loss_image, image.rows * image.cols);

        num_jobs = generate_jobs(image.rows, image.cols, mesh, tids, pids);
        printf("jobs: %d\n", num_jobs);
        assert(num_jobs <= max_jobs);
        cudaDeviceSynchronize();

        // Derivative pass (with or without delta terms).
        if (use_deltas) {
            linear_deriv_kernel<<<(num_jobs / 256) + 1, 256>>>(
                        tids,
                        pids,
                        num_jobs,
                        tri_image,
                        mesh,
                        d_mesh,
                        colors,
                        d_colors);
        } else {
            linear_deriv_kernel_nodelta<<<(num_jobs / 256) + 1, 256>>>(
                        tids,
                        pids,
                        num_jobs,
                        tri_image,
                        mesh,
                        d_mesh,
                        colors,
                        d_colors);
        }
        cudaDeviceSynchronize();

        // Loss pass.
        linear_loss_kernel<<<(num_jobs / 256) + 1, 256>>>(
                    tids,
                    pids,
                    num_jobs,
                    tri_image,
                    mesh,
                    colors,
                    loss_image);
        cudaDeviceSynchronize();

        // Regularize and step; learning rates scaled by average triangle
        // footprint so they are resolution-independent.
        compute_triangle_regularization(mesh, d_mesh, 30);
        float avg_total_pixel_area = image.rows * image.cols / (nx * ny);
        float avg_triangle_surface_area = image.rows * image.cols / (sqrt(nx * ny));
        update_vertices<<< (mesh->num_vertices) / 256 + 1, 256 >>>(
            mesh, d_mesh, ALPHA_VERTEX / avg_triangle_surface_area
        );
        update_linear_colors<<< (mesh->num_triangles) / 256 + 1, 256 >>>(
            mesh->num_triangles, colors, d_colors, ALPHA_COLOR / avg_total_pixel_area
        );
        cudaDeviceSynchronize();

        // Render the current triangulation.
        linear_integral_kernel<<<(num_jobs / 256) + 1, 256>>>(
                    tids,
                    pids,
                    num_jobs,
                    mesh,
                    colors,
                    triangle_image,
                    image.rows, image.cols);
        cudaDeviceSynchronize();

        // Quantize render and loss to clamped 8-bit images.
        for(int idx = 0; idx < image_pcs; idx ++){
            int _val = (int)(triangle_image[idx] * 256);
            triangle_bimage[idx] = (char) ((_val < 0) ? 0 : (_val > 255 ? 255 : _val));
        }
        float total_loss = 0.f;
        for(int idx = 0; idx < image.rows * image.cols; idx ++){
            int _val = (int)(loss_image[idx] * 256);
            total_loss += loss_image[idx];
            loss_bimage[idx] = (char) ((_val < 0) ? 0 : (_val > 255 ? 255 : _val));
        }
        loss_string << total_loss << std::endl;
        std::cout << "Loss: " << total_loss << std::endl;

        std::stringstream ss;
        ss << "iter-" << iter << ".png";
        cv::imwrite(ss.str(), cv::Mat(image.rows, image.cols, CV_8UC3, triangle_bimage));
        std::stringstream ss_loss;
        ss_loss << "loss-" << iter << ".png";
        cv::imwrite(ss_loss.str(), cv::Mat(image.rows, image.cols, CV_8UC1, loss_bimage));
    }

    // Persist the loss trace.
    std::ofstream outfile("out.loss");
    outfile << loss_string.str();
    outfile.close();

    // Cleanup (previously leaked: host byte images and the loss buffer).
    free(triangle_bimage);
    free(loss_bimage);
    cudaFree(loss_image);
    cudaFree(triangle_image);
    cudaFree(tids);
    cudaFree(pids);
}
histogramFixedWidth.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 31.08.2018
//
#include <ops/declarable/helpers/histogramFixedWidth.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
/**
 * @brief Grid-stride histogram kernel: bins each element of `vx` into one of
 * the fixed-width bins of `vz` over [leftEdge, rightEdge), clamping
 * out-of-range values into the first/last bin. Bin counts are accumulated
 * with atomic adds, so `vz` must be zero-initialized by the caller.
 */
template<typename X, typename Z>
__global__ static void histogramFixedWidthCuda( const void* vx, const Nd4jLong* xShapeInfo,
                                                void* vz, const Nd4jLong* zShapeInfo,
                                                const X leftEdge, const X rightEdge) {
    const auto x = reinterpret_cast<const X*>(vx);
    auto z = reinterpret_cast<Z*>(vz);
    // Thread 0 computes the shared bin geometry once per block.
    // (zLen is declared but never assigned or read.)
    __shared__ Nd4jLong xLen, zLen, totalThreads, nbins;
    __shared__ X binWidth, secondEdge, lastButOneEdge;
    if (threadIdx.x == 0) {
        xLen = shape::length(xShapeInfo);
        nbins = shape::length(zShapeInfo);  // nbins = zLen
        totalThreads = gridDim.x * blockDim.x;
        binWidth = (rightEdge - leftEdge ) / nbins;
        secondEdge = leftEdge + binWidth;
        lastButOneEdge = rightEdge - binWidth;
    }
    // All threads must see the shared values before reading them.
    __syncthreads();
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (Nd4jLong i = tid; i < xLen; i += totalThreads) {
        const X value = x[shape::getIndexOffset(i, xShapeInfo)];
        Nd4jLong zIndex;
        // Values below the second edge (incl. anything < leftEdge) go to bin 0;
        // values at/above the last-but-one edge go to the final bin.
        if(value < secondEdge)
            zIndex = 0;
        else if(value >= lastButOneEdge)
            zIndex = nbins - 1;
        else
            zIndex = static_cast<Nd4jLong>((value - leftEdge) / binWidth);
        sd::math::atomics::nd4j_atomicAdd<Z>(&z[shape::getIndexOffset(zIndex, zShapeInfo)], 1);
    }
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Z>
__host__ static void histogramFixedWidthCudaLauncher(const hipStream_t *stream, const NDArray& input, const NDArray& range, NDArray& output) {
const X leftEdge = range.e<X>(0);
const X rightEdge = range.e<X>(1);
hipLaunchKernelGGL(( histogramFixedWidthCuda<X, Z>), dim3(256), dim3(256), 1024, *stream, input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftEdge, rightEdge);
}
////////////////////////////////////////////////////////////////////////
void histogramFixedWidth(sd::LaunchContext* context, const NDArray& input, const NDArray& range, NDArray& output) {
// firstly initialize output with zeros
output.nullify();
PointersManager manager(context, "histogramFixedWidth");
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_DOUBLE_SELECTOR(input.dataType(), output.dataType(), histogramFixedWidthCudaLauncher, (context->getCudaStream(), input, range, output), LIBND4J_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
// template <typename T>
// __global__ static void copyBuffers(Nd4jLong* destination, void const* source, Nd4jLong* sourceShape, Nd4jLong bufferLength) {
// const auto tid = blockIdx.x * gridDim.x + threadIdx.x;
// const auto step = gridDim.x * blockDim.x;
// for (int t = tid; t < bufferLength; t += step) {
// destination[t] = reinterpret_cast<T const*>(source)[shape::getIndexOffset(t, sourceShape)];
// }
// }
// template <typename T>
// __global__ static void returnBuffers(void* destination, Nd4jLong const* source, Nd4jLong* destinationShape, Nd4jLong bufferLength) {
// const auto tid = blockIdx.x * gridDim.x + threadIdx.x;
// const auto step = gridDim.x * blockDim.x;
// for (int t = tid; t < bufferLength; t += step) {
// reinterpret_cast<T*>(destination)[shape::getIndexOffset(t, destinationShape)] = source[t];
// }
// }
// template <typename T>
// static __global__ void histogramFixedWidthKernel(void* outputBuffer, Nd4jLong outputLength, void const* inputBuffer, Nd4jLong* inputShape, Nd4jLong inputLength, double const leftEdge, double binWidth, double secondEdge, double lastButOneEdge) {
// __shared__ T const* x;
// __shared__ Nd4jLong* z; // output buffer
// if (threadIdx.x == 0) {
// z = reinterpret_cast<Nd4jLong*>(outputBuffer);
// x = reinterpret_cast<T const*>(inputBuffer);
// }
// __syncthreads();
// auto tid = blockIdx.x * gridDim.x + threadIdx.x;
// auto step = blockDim.x * gridDim.x;
// for(auto i = tid; i < inputLength; i += step) {
// const T value = x[shape::getIndexOffset(i, inputShape)];
// Nd4jLong currInd = static_cast<Nd4jLong>((value - leftEdge) / binWidth);
// if(value < secondEdge)
// currInd = 0;
// else if(value >= lastButOneEdge)
// currInd = outputLength - 1;
// sd::math::atomics::nd4j_atomicAdd(&z[currInd], 1LL);
// }
// }
// template <typename T>
// void histogramFixedWidth_(sd::LaunchContext * context, const NDArray& input, const NDArray& range, NDArray& output) {
// const int nbins = output.lengthOf();
// auto stream = context->getCudaStream();
// // firstly initialize output with zeros
// //if(output.ews() == 1)
// // memset(output.buffer(), 0, nbins * output.sizeOfT());
// //else
// output.assign(0);
// if (!input.isActualOnDeviceSide())
// input.syncToDevice();
// const double leftEdge = range.e<double>(0);
// const double rightEdge = range.e<double>(1);
// const double binWidth = (rightEdge - leftEdge ) / nbins;
// const double secondEdge = leftEdge + binWidth;
// double lastButOneEdge = rightEdge - binWidth;
// Nd4jLong* outputBuffer;
// hipError_t err = hipMalloc(&outputBuffer, output.lengthOf() * sizeof(Nd4jLong));
// if (err != 0)
// throw cuda_exception::build("helpers::histogramFixedWidth: Cannot allocate memory for output", err);
// copyBuffers<Nd4jLong ><<<256, 512, 8192, *stream>>>(outputBuffer, output.getSpecialBuffer(), output.getSpecialShapeInfo(), output.lengthOf());
// histogramFixedWidthKernel<T><<<256, 512, 8192, *stream>>>(outputBuffer, output.lengthOf(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), input.lengthOf(), leftEdge, binWidth, secondEdge, lastButOneEdge);
// returnBuffers<Nd4jLong><<<256, 512, 8192, *stream>>>(output.specialBuffer(), outputBuffer, output.specialShapeInfo(), output.lengthOf());
// //cudaSyncStream(*stream);
// err = hipFree(outputBuffer);
// if (err != 0)
// throw cuda_exception::build("helpers::histogramFixedWidth: Cannot deallocate memory for output buffer", err);
// output.tickWriteDevice();
// //#pragma omp parallel for schedule(guided)
// // for(Nd4jLong i = 0; i < input.lengthOf(); ++i) {
// //
// // const T value = input.e<T>(i);
// //
// // if(value < secondEdge)
// //#pragma omp critical
// // output.p<Nd4jLong>(0, output.e<Nd4jLong>(0) + 1);
// // else if(value >= lastButOneEdge)
// //#pragma omp critical
// // output.p<Nd4jLong>(nbins-1, output.e<Nd4jLong>(nbins-1) + 1);
// // else {
// // Nd4jLong currInd = static_cast<Nd4jLong>((value - leftEdge) / binWidth);
// //#pragma omp critical
// // output.p<Nd4jLong>(currInd, output.e<Nd4jLong>(currInd) + 1);
// // }
// // }
// }
// void histogramFixedWidth(sd::LaunchContext * context, const NDArray& input, const NDArray& range, NDArray& output) {
// BUILD_SINGLE_SELECTOR(input.dataType(), histogramFixedWidth_, (context, input, range, output), LIBND4J_TYPES);
// }
// BUILD_SINGLE_TEMPLATE(template void histogramFixedWidth_, (sd::LaunchContext * context, const NDArray& input, const NDArray& range, NDArray& output), LIBND4J_TYPES);
}
}
} | histogramFixedWidth.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 31.08.2018
//
#include <ops/declarable/helpers/histogramFixedWidth.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename X, typename Z>
__global__ static void histogramFixedWidthCuda( const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const X leftEdge, const X rightEdge) {
const auto x = reinterpret_cast<const X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
__shared__ Nd4jLong xLen, zLen, totalThreads, nbins;
__shared__ X binWidth, secondEdge, lastButOneEdge;
if (threadIdx.x == 0) {
xLen = shape::length(xShapeInfo);
nbins = shape::length(zShapeInfo); // nbins = zLen
totalThreads = gridDim.x * blockDim.x;
binWidth = (rightEdge - leftEdge ) / nbins;
secondEdge = leftEdge + binWidth;
lastButOneEdge = rightEdge - binWidth;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < xLen; i += totalThreads) {
const X value = x[shape::getIndexOffset(i, xShapeInfo)];
Nd4jLong zIndex;
if(value < secondEdge)
zIndex = 0;
else if(value >= lastButOneEdge)
zIndex = nbins - 1;
else
zIndex = static_cast<Nd4jLong>((value - leftEdge) / binWidth);
sd::math::atomics::nd4j_atomicAdd<Z>(&z[shape::getIndexOffset(zIndex, zShapeInfo)], 1);
}
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Z>
__host__ static void histogramFixedWidthCudaLauncher(const cudaStream_t *stream, const NDArray& input, const NDArray& range, NDArray& output) {
const X leftEdge = range.e<X>(0);
const X rightEdge = range.e<X>(1);
histogramFixedWidthCuda<X, Z><<<256, 256, 1024, *stream>>>(input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftEdge, rightEdge);
}
////////////////////////////////////////////////////////////////////////
void histogramFixedWidth(sd::LaunchContext* context, const NDArray& input, const NDArray& range, NDArray& output) {
// firstly initialize output with zeros
output.nullify();
PointersManager manager(context, "histogramFixedWidth");
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_DOUBLE_SELECTOR(input.dataType(), output.dataType(), histogramFixedWidthCudaLauncher, (context->getCudaStream(), input, range, output), LIBND4J_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
// template <typename T>
// __global__ static void copyBuffers(Nd4jLong* destination, void const* source, Nd4jLong* sourceShape, Nd4jLong bufferLength) {
// const auto tid = blockIdx.x * gridDim.x + threadIdx.x;
// const auto step = gridDim.x * blockDim.x;
// for (int t = tid; t < bufferLength; t += step) {
// destination[t] = reinterpret_cast<T const*>(source)[shape::getIndexOffset(t, sourceShape)];
// }
// }
// template <typename T>
// __global__ static void returnBuffers(void* destination, Nd4jLong const* source, Nd4jLong* destinationShape, Nd4jLong bufferLength) {
// const auto tid = blockIdx.x * gridDim.x + threadIdx.x;
// const auto step = gridDim.x * blockDim.x;
// for (int t = tid; t < bufferLength; t += step) {
// reinterpret_cast<T*>(destination)[shape::getIndexOffset(t, destinationShape)] = source[t];
// }
// }
// template <typename T>
// static __global__ void histogramFixedWidthKernel(void* outputBuffer, Nd4jLong outputLength, void const* inputBuffer, Nd4jLong* inputShape, Nd4jLong inputLength, double const leftEdge, double binWidth, double secondEdge, double lastButOneEdge) {
// __shared__ T const* x;
// __shared__ Nd4jLong* z; // output buffer
// if (threadIdx.x == 0) {
// z = reinterpret_cast<Nd4jLong*>(outputBuffer);
// x = reinterpret_cast<T const*>(inputBuffer);
// }
// __syncthreads();
// auto tid = blockIdx.x * gridDim.x + threadIdx.x;
// auto step = blockDim.x * gridDim.x;
// for(auto i = tid; i < inputLength; i += step) {
// const T value = x[shape::getIndexOffset(i, inputShape)];
// Nd4jLong currInd = static_cast<Nd4jLong>((value - leftEdge) / binWidth);
// if(value < secondEdge)
// currInd = 0;
// else if(value >= lastButOneEdge)
// currInd = outputLength - 1;
// sd::math::atomics::nd4j_atomicAdd(&z[currInd], 1LL);
// }
// }
// template <typename T>
// void histogramFixedWidth_(sd::LaunchContext * context, const NDArray& input, const NDArray& range, NDArray& output) {
// const int nbins = output.lengthOf();
// auto stream = context->getCudaStream();
// // firstly initialize output with zeros
// //if(output.ews() == 1)
// // memset(output.buffer(), 0, nbins * output.sizeOfT());
// //else
// output.assign(0);
// if (!input.isActualOnDeviceSide())
// input.syncToDevice();
// const double leftEdge = range.e<double>(0);
// const double rightEdge = range.e<double>(1);
// const double binWidth = (rightEdge - leftEdge ) / nbins;
// const double secondEdge = leftEdge + binWidth;
// double lastButOneEdge = rightEdge - binWidth;
// Nd4jLong* outputBuffer;
// cudaError_t err = cudaMalloc(&outputBuffer, output.lengthOf() * sizeof(Nd4jLong));
// if (err != 0)
// throw cuda_exception::build("helpers::histogramFixedWidth: Cannot allocate memory for output", err);
// copyBuffers<Nd4jLong ><<<256, 512, 8192, *stream>>>(outputBuffer, output.getSpecialBuffer(), output.getSpecialShapeInfo(), output.lengthOf());
// histogramFixedWidthKernel<T><<<256, 512, 8192, *stream>>>(outputBuffer, output.lengthOf(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), input.lengthOf(), leftEdge, binWidth, secondEdge, lastButOneEdge);
// returnBuffers<Nd4jLong><<<256, 512, 8192, *stream>>>(output.specialBuffer(), outputBuffer, output.specialShapeInfo(), output.lengthOf());
// //cudaSyncStream(*stream);
// err = cudaFree(outputBuffer);
// if (err != 0)
// throw cuda_exception::build("helpers::histogramFixedWidth: Cannot deallocate memory for output buffer", err);
// output.tickWriteDevice();
// //#pragma omp parallel for schedule(guided)
// // for(Nd4jLong i = 0; i < input.lengthOf(); ++i) {
// //
// // const T value = input.e<T>(i);
// //
// // if(value < secondEdge)
// //#pragma omp critical
// // output.p<Nd4jLong>(0, output.e<Nd4jLong>(0) + 1);
// // else if(value >= lastButOneEdge)
// //#pragma omp critical
// // output.p<Nd4jLong>(nbins-1, output.e<Nd4jLong>(nbins-1) + 1);
// // else {
// // Nd4jLong currInd = static_cast<Nd4jLong>((value - leftEdge) / binWidth);
// //#pragma omp critical
// // output.p<Nd4jLong>(currInd, output.e<Nd4jLong>(currInd) + 1);
// // }
// // }
// }
// void histogramFixedWidth(sd::LaunchContext * context, const NDArray& input, const NDArray& range, NDArray& output) {
// BUILD_SINGLE_SELECTOR(input.dataType(), histogramFixedWidth_, (context, input, range, output), LIBND4J_TYPES);
// }
// BUILD_SINGLE_TEMPLATE(template void histogramFixedWidth_, (sd::LaunchContext * context, const NDArray& input, const NDArray& range, NDArray& output), LIBND4J_TYPES);
}
}
} |
6a8c8c2e257b15a813f782046928ea334bbe44e0.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <stdlib.h>
#include <stdio.h>
int imin(int a, int b)
{
return (a < b ? a : b);
}
/* Number of samples per spectrum */
#define NX 1024
/* Number of frequencies per spectrum */
#define NF (NX/2+1)
/* Total number of samples in a time bin */
#define NTOT 20000000
/* Number of samples to average per channel per time bin
* remaining samples are skipped. */
#define N ((NTOT / NX) * NX)
/* Number of FFTs to perform in one batch */
#define BATCH (N / NX)
/* Number of samples after FFT */
#define NS (NF*BATCH)
const int threadsPerBlock = 32;
const int blocksPerGrid = imin(32, (NF + threadsPerBlock-1) / threadsPerBlock);
__global__ void correlate(float *c, float *s, hipfftComplex *a, hipfftComplex *b)
{
float temp_c = 0;
float temp_s = 0;
hipfftComplex ccorr;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
/* We launch more threads then needed, so some do nothing */
if (tid < NF) {
while (tid < NS) {
/* Normalize FFT */
a[tid].x /= NX;
a[tid].y /= NX;
b[tid].x /= NX;
b[tid].y /= NX;
/* Correlate */
ccorr = cuCmulf(a[tid], cuConjf(b[tid]));
/* Sum channel over time */
temp_c += cuCrealf(ccorr);
temp_s += cuCimagf(ccorr);
/* Go to next time step, NF frequencies away */
tid += NF;
}
c[threadIdx.x + blockIdx.x * blockDim.x] = temp_c;
s[threadIdx.x + blockIdx.x * blockDim.x] = temp_s;
}
}
int main(int argc, char* argv[])
{
int i, j;
FILE *fp, *fo;
char *buffer;
float *c, *s, *dev_c, *dev_s;
float *a, *b, *dev_a, *dev_b;
hipfftComplex *cdev_a, *cdev_b;
hipError_t err;
hipfftHandle plan;
/* Allocate memory on host */
buffer = (char*) malloc(2*NTOT*sizeof(char));
a = (float*) malloc(N*sizeof(float));
b = (float*) malloc(N*sizeof(float));
c = (float*) malloc(NF*sizeof(float));
s = (float*) malloc(NF*sizeof(float));
/* Allocate memory on device */
err = hipMalloc(&dev_c, NF*sizeof(float));
if (err != hipSuccess) {
fprintf(stderr, "Error %s\n", hipGetErrorString(err));
return 1;
}
err = hipMalloc(&dev_s, NF*sizeof(float));
if (err != hipSuccess) {
fprintf(stderr, "Error %s\n", hipGetErrorString(err));
return 1;
}
err = hipMalloc(&dev_a, N*sizeof(float));
if (err != hipSuccess) {
fprintf(stderr, "Error %s\n", hipGetErrorString(err));
return 1;
}
err = hipMalloc(&dev_b, N*sizeof(float));
if (err != hipSuccess) {
fprintf(stderr, "Error %s\n", hipGetErrorString(err));
return 1;
}
err = hipMalloc(&cdev_a, NF*BATCH*sizeof(hipfftComplex));
if (err != hipSuccess) {
fprintf(stderr, "Error %s\n", hipGetErrorString(err));
return 1;
}
err = hipMalloc(&cdev_b, NF*BATCH*sizeof(hipfftComplex));
if (err != hipSuccess) {
fprintf(stderr, "Error %s\n", hipGetErrorString(err));
return 1;
}
/* Create FFT plan */
if (hipfftPlan1d(&plan, NX, HIPFFT_R2C, BATCH) != HIPFFT_SUCCESS) {
fprintf(stderr, "CUFFT error: Plan creation failed");
return 1;
}
/* Open input file */
fp = fopen(argv[1], "rb");
if (fp == NULL) {
fprintf(stderr, "Error: could not open input file!\n");
return 1;
}
/* Open output file */
fo = fopen(argv[2], "w");
if (fo == NULL) {
fprintf(stderr, "Error: could not open output file!\n");
return 1;
}
i = 0;
while (fread(buffer, sizeof(char), 2*NTOT, fp) == 2*NTOT*sizeof(char)) {
printf("%d\n", i);
/* Copy data to device */
for (j=0; j<N; j++) {
a[j] = (float) buffer[2*j];
b[j] = (float) buffer[2*j+1];
}
err = hipMemcpy(dev_a, a, N*sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("Error %s\n", hipGetErrorString(err));
}
err = hipMemcpy(dev_b, b, N*sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("Error %s\n", hipGetErrorString(err));
}
/* Perform FFT on device */
if (hipfftExecR2C(plan, dev_a, cdev_a) != HIPFFT_SUCCESS){
fprintf(stderr, "CUFFT error: ExecC2C Forward failed");
return 1;
}
if (hipDeviceSynchronize() != hipSuccess){
fprintf(stderr, "Cuda error: Failed to synchronize\n");
return 1;
}
/* Perform FFT on device */
if (hipfftExecR2C(plan, dev_b, cdev_b) != HIPFFT_SUCCESS){
fprintf(stderr, "CUFFT error: ExecC2C Forward failed");
return 1;
}
if (hipDeviceSynchronize() != hipSuccess){
fprintf(stderr, "Cuda error: Failed to synchronize\n");
return 1;
}
hipLaunchKernelGGL(( correlate), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, dev_c, dev_s, cdev_a, cdev_b);
err = hipMemcpy(c, dev_c, NF*sizeof(float), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
printf("Error %s\n", hipGetErrorString(err));
}
err = hipMemcpy(s, dev_s, NF*sizeof(float), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
printf("Error %s\n", hipGetErrorString(err));
}
/* From sum to average on the CPU */
for (j=0; j<NF; j++) {
c[j] /= BATCH;
s[j] /= BATCH;
fprintf(fo, "%d %.3f\t%.3f\n", j, c[j], s[j]);
}
i++;
}
/* Cleanup */
free(a);
free(b);
free(buffer);
hipfftDestroy(plan);
hipFree(dev_a);
hipFree(dev_b);
hipFree(cdev_a);
hipFree(cdev_b);
/* Close file */
fclose(fp);
fclose(fo);
return 0;
}
| 6a8c8c2e257b15a813f782046928ea334bbe44e0.cu | #include <cuda_runtime.h>
#include <cufft.h>
#include <stdlib.h>
#include <stdio.h>
int imin(int a, int b)
{
return (a < b ? a : b);
}
/* Number of samples per spectrum */
#define NX 1024
/* Number of frequencies per spectrum */
#define NF (NX/2+1)
/* Total number of samples in a time bin */
#define NTOT 20000000
/* Number of samples to average per channel per time bin
* remaining samples are skipped. */
#define N ((NTOT / NX) * NX)
/* Number of FFTs to perform in one batch */
#define BATCH (N / NX)
/* Number of samples after FFT */
#define NS (NF*BATCH)
const int threadsPerBlock = 32;
const int blocksPerGrid = imin(32, (NF + threadsPerBlock-1) / threadsPerBlock);
__global__ void correlate(float *c, float *s, cufftComplex *a, cufftComplex *b)
{
float temp_c = 0;
float temp_s = 0;
cufftComplex ccorr;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
/* We launch more threads then needed, so some do nothing */
if (tid < NF) {
while (tid < NS) {
/* Normalize FFT */
a[tid].x /= NX;
a[tid].y /= NX;
b[tid].x /= NX;
b[tid].y /= NX;
/* Correlate */
ccorr = cuCmulf(a[tid], cuConjf(b[tid]));
/* Sum channel over time */
temp_c += cuCrealf(ccorr);
temp_s += cuCimagf(ccorr);
/* Go to next time step, NF frequencies away */
tid += NF;
}
c[threadIdx.x + blockIdx.x * blockDim.x] = temp_c;
s[threadIdx.x + blockIdx.x * blockDim.x] = temp_s;
}
}
int main(int argc, char* argv[])
{
int i, j;
FILE *fp, *fo;
char *buffer;
float *c, *s, *dev_c, *dev_s;
float *a, *b, *dev_a, *dev_b;
cufftComplex *cdev_a, *cdev_b;
cudaError_t err;
cufftHandle plan;
/* Allocate memory on host */
buffer = (char*) malloc(2*NTOT*sizeof(char));
a = (float*) malloc(N*sizeof(float));
b = (float*) malloc(N*sizeof(float));
c = (float*) malloc(NF*sizeof(float));
s = (float*) malloc(NF*sizeof(float));
/* Allocate memory on device */
err = cudaMalloc(&dev_c, NF*sizeof(float));
if (err != cudaSuccess) {
fprintf(stderr, "Error %s\n", cudaGetErrorString(err));
return 1;
}
err = cudaMalloc(&dev_s, NF*sizeof(float));
if (err != cudaSuccess) {
fprintf(stderr, "Error %s\n", cudaGetErrorString(err));
return 1;
}
err = cudaMalloc(&dev_a, N*sizeof(float));
if (err != cudaSuccess) {
fprintf(stderr, "Error %s\n", cudaGetErrorString(err));
return 1;
}
err = cudaMalloc(&dev_b, N*sizeof(float));
if (err != cudaSuccess) {
fprintf(stderr, "Error %s\n", cudaGetErrorString(err));
return 1;
}
err = cudaMalloc(&cdev_a, NF*BATCH*sizeof(cufftComplex));
if (err != cudaSuccess) {
fprintf(stderr, "Error %s\n", cudaGetErrorString(err));
return 1;
}
err = cudaMalloc(&cdev_b, NF*BATCH*sizeof(cufftComplex));
if (err != cudaSuccess) {
fprintf(stderr, "Error %s\n", cudaGetErrorString(err));
return 1;
}
/* Create FFT plan */
if (cufftPlan1d(&plan, NX, CUFFT_R2C, BATCH) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT error: Plan creation failed");
return 1;
}
/* Open input file */
fp = fopen(argv[1], "rb");
if (fp == NULL) {
fprintf(stderr, "Error: could not open input file!\n");
return 1;
}
/* Open output file */
fo = fopen(argv[2], "w");
if (fo == NULL) {
fprintf(stderr, "Error: could not open output file!\n");
return 1;
}
i = 0;
while (fread(buffer, sizeof(char), 2*NTOT, fp) == 2*NTOT*sizeof(char)) {
printf("%d\n", i);
/* Copy data to device */
for (j=0; j<N; j++) {
a[j] = (float) buffer[2*j];
b[j] = (float) buffer[2*j+1];
}
err = cudaMemcpy(dev_a, a, N*sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("Error %s\n", cudaGetErrorString(err));
}
err = cudaMemcpy(dev_b, b, N*sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("Error %s\n", cudaGetErrorString(err));
}
/* Perform FFT on device */
if (cufftExecR2C(plan, dev_a, cdev_a) != CUFFT_SUCCESS){
fprintf(stderr, "CUFFT error: ExecC2C Forward failed");
return 1;
}
if (cudaThreadSynchronize() != cudaSuccess){
fprintf(stderr, "Cuda error: Failed to synchronize\n");
return 1;
}
/* Perform FFT on device */
if (cufftExecR2C(plan, dev_b, cdev_b) != CUFFT_SUCCESS){
fprintf(stderr, "CUFFT error: ExecC2C Forward failed");
return 1;
}
if (cudaThreadSynchronize() != cudaSuccess){
fprintf(stderr, "Cuda error: Failed to synchronize\n");
return 1;
}
correlate<<<blocksPerGrid,threadsPerBlock>>>(dev_c, dev_s, cdev_a, cdev_b);
err = cudaMemcpy(c, dev_c, NF*sizeof(float), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
printf("Error %s\n", cudaGetErrorString(err));
}
err = cudaMemcpy(s, dev_s, NF*sizeof(float), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
printf("Error %s\n", cudaGetErrorString(err));
}
/* From sum to average on the CPU */
for (j=0; j<NF; j++) {
c[j] /= BATCH;
s[j] /= BATCH;
fprintf(fo, "%d %.3f\t%.3f\n", j, c[j], s[j]);
}
i++;
}
/* Cleanup */
free(a);
free(b);
free(buffer);
cufftDestroy(plan);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(cdev_a);
cudaFree(cdev_b);
/* Close file */
fclose(fp);
fclose(fo);
return 0;
}
|
edee87e013692cf92731817bb084372566c04331.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "config.cuh"
extern __device__ void mapper(const input_type* input, value_type *value);
/*
Mapping Kernel: Since each mapper runs independently of each other, we can
give each thread its own input to process and a disjoint space where it can`
store the key/value pairs it produces.
*/
__global__ void mapKernel(const input_type* input, value_type *values) {
size_t threadId = blockIdx.x * blockDim.x + threadIdx.x; // Global id of the thread
// Total number of threads, by jumping this much, it ensures that no thread gets the same data
size_t jump = blockDim.x * gridDim.x;
for (size_t i=threadId; i<NUM_INPUT; i+=jump) {
// Input data to run mapper on, and the location to place the output
mapper(&input[i], &values[i]);
}
}
/*
Call Mapper kernel with the required grid, blocks
TODO: Err checking
*/
void runMapper(const input_type* dev_input, value_type *dev_values) {
hipLaunchKernelGGL(( mapKernel), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, dev_input, dev_values);
hipDeviceSynchronize();
}
/*
Main function to run Map-Reduce program
*/
void runMapReduce(input_type *input, output_type *output) {
// 1. Allocate memory on GPU for inputs
// 2. Allocate momory for Value array
// 3. Copy inputs to GPU
// 4. Run Mapper kernel, which calls mapper function for the inputs decided for that thread
// 5. Free input memory
// 6. Reduce using thrust to find total points
// Calculate Pi and assign to output. Done!
// Pointers for input & value arrays
input_type *dev_input;
value_type *dev_values;
// Allocate memory on GPU for input
size_t input_size = NUM_INPUT * sizeof(input_type);
hipMalloc(&dev_input, input_size);
// Allocate memory for value array
size_t value_size = NUM_INPUT * sizeof(value_type);
hipMalloc(&dev_values, value_size);
// Copy input data to device
hipMemcpy(dev_input, input, input_size, hipMemcpyHostToDevice);
// Run mapper
// This will run mapper kernel on all the inputs, and produces the key-value pairs
runMapper(dev_input, dev_values);
// Sum up the array using thrust::reduce
thrust::device_ptr<value_type> dev_value_thrust_ptr = thrust::device_pointer_cast(dev_values);
uint64_cu total_points = thrust::count(thrust::device, dev_value_thrust_ptr, dev_value_thrust_ptr + NUM_INPUT, true);
// std::cout << "Total points: " << total_points << std::endl;
*output = 4.0 * (double(total_points) / NUM_INPUT);
// Free all memory
hipFree(dev_input);
hipFree(dev_values);
}
| edee87e013692cf92731817bb084372566c04331.cu | #include <iostream>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "config.cuh"
extern __device__ void mapper(const input_type* input, value_type *value);
/*
Mapping Kernel: Since each mapper runs independently of each other, we can
give each thread its own input to process and a disjoint space where it can`
store the key/value pairs it produces.
*/
__global__ void mapKernel(const input_type* input, value_type *values) {
size_t threadId = blockIdx.x * blockDim.x + threadIdx.x; // Global id of the thread
// Total number of threads, by jumping this much, it ensures that no thread gets the same data
size_t jump = blockDim.x * gridDim.x;
for (size_t i=threadId; i<NUM_INPUT; i+=jump) {
// Input data to run mapper on, and the location to place the output
mapper(&input[i], &values[i]);
}
}
/*
Call Mapper kernel with the required grid, blocks
TODO: Err checking
*/
void runMapper(const input_type* dev_input, value_type *dev_values) {
mapKernel<<<GRID_SIZE, BLOCK_SIZE>>>(dev_input, dev_values);
cudaDeviceSynchronize();
}
/*
Main function to run Map-Reduce program
*/
void runMapReduce(input_type *input, output_type *output) {
// 1. Allocate memory on GPU for inputs
// 2. Allocate momory for Value array
// 3. Copy inputs to GPU
// 4. Run Mapper kernel, which calls mapper function for the inputs decided for that thread
// 5. Free input memory
// 6. Reduce using thrust to find total points
// Calculate Pi and assign to output. Done!
// Pointers for input & value arrays
input_type *dev_input;
value_type *dev_values;
// Allocate memory on GPU for input
size_t input_size = NUM_INPUT * sizeof(input_type);
cudaMalloc(&dev_input, input_size);
// Allocate memory for value array
size_t value_size = NUM_INPUT * sizeof(value_type);
cudaMalloc(&dev_values, value_size);
// Copy input data to device
cudaMemcpy(dev_input, input, input_size, cudaMemcpyHostToDevice);
// Run mapper
// This will run mapper kernel on all the inputs, and produces the key-value pairs
runMapper(dev_input, dev_values);
// Sum up the array using thrust::reduce
thrust::device_ptr<value_type> dev_value_thrust_ptr = thrust::device_pointer_cast(dev_values);
uint64_cu total_points = thrust::count(thrust::device, dev_value_thrust_ptr, dev_value_thrust_ptr + NUM_INPUT, true);
// std::cout << "Total points: " << total_points << std::endl;
*output = 4.0 * (double(total_points) / NUM_INPUT);
// Free all memory
cudaFree(dev_input);
cudaFree(dev_values);
}
|
8ee8443e8ed162dcfcb6eb57908e93371c565951.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "malloc.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c,int *col) {
// *c = *a + *b
int id=blockIdx.x;
int start=id*col[0];
for(int i=0;i<col[0];i++){
c[start+i]=a[start+i]+b[start+i];
}
}
int main(void) {
int *a, *b, *c;
int numR,numC;
printf("Enter number of rows and columns :\n");
scanf("%d",&numR);
scanf("%d",&numC);
int size = sizeof(int);
// a=(int*)malloc(numC*numR*size);
// b=(int*)malloc(numC*numR*size);
// c=(int*)malloc(numC*numR*size);
hipMalloc((void **)&b, numC*numR*size);
hipMalloc((void **)&a, numC*numR*size);
hipMalloc((void **)&c, numC*numR*size);
printf("Enter A\n");
int temp;
for (int i = 0; i < numC*numR; ++i)
{
scanf("%d",&temp);
a[i]=temp;
}
printf("Enter B\n");
for (int i = 0; i < numC*numR; ++i)
{
scanf("%d",&temp);
b[i]=temp;
}
// host copies of variables a, b & c
int *d_a, *d_b, *d_c,*col; // device copies of variables a, b & c
// Allocate space for device copies of a, b, c
hipMalloc((void **)&d_a, numC*numR*size);
hipMalloc((void **)&d_b, numC*numR*size);
hipMalloc((void **)&d_c, numC*numR*size);
hipMalloc((void **)&col, size);
// Setup input values
// Copy inputs to device
hipMemcpy(d_a, &a, numC*numR*size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, numC*numR*size, hipMemcpyHostToDevice);
hipMemcpy(col,&numC,size,hipMemcpyHostToDevice);
// Launch add() kernel on GPU
printf("LOOOL\n");
hipLaunchKernelGGL(( add), dim3(numR),dim3(1), 0, 0, d_a, d_b, d_c,col);
// Copy result back to host
hipMemcpy(&c, d_c,numC*numR*size, hipMemcpyDeviceToHost);
printf("LOOOL\n");
for (int i = 0; i < numC*numR; ++i)
{
if(i%numC==0)
printf("wot\n");
printf("%d ",c[i]);
}
printf("LOOOL\n");
// printf("ANS %d\t%d\n",c[0],c[1] );
// Cleanup
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
} | 8ee8443e8ed162dcfcb6eb57908e93371c565951.cu | #include "malloc.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c,int *col) {
// *c = *a + *b
int id=blockIdx.x;
int start=id*col[0];
for(int i=0;i<col[0];i++){
c[start+i]=a[start+i]+b[start+i];
}
}
// Reads two numR x numC integer matrices from stdin, adds them on the GPU
// (one block per row via add<<<numR,1>>>), and prints the result.
int main(void) {
	int *a, *b, *c;
	int numR,numC;
	printf("Enter number of rows and columns :\n");
	scanf("%d",&numR);
	scanf("%d",&numC);
	int size = sizeof(int);
	// Fix: a, b, c are host buffers. The original cudaMalloc'd them and then
	// wrote a[i]/b[i] (and read c[i]) on the host, which is undefined behavior.
	a=(int*)malloc(numC*numR*size);
	b=(int*)malloc(numC*numR*size);
	c=(int*)malloc(numC*numR*size);
	printf("Enter A\n");
	int temp;
	for (int i = 0; i < numC*numR; ++i)
	{
		scanf("%d",&temp);
		a[i]=temp;
	}
	printf("Enter B\n");
	for (int i = 0; i < numC*numR; ++i)
	{
		scanf("%d",&temp);
		b[i]=temp;
	}
	int *d_a, *d_b, *d_c,*col; // device copies of variables a, b & c
	// Allocate space for device copies of a, b, c
	cudaMalloc((void **)&d_a, numC*numR*size);
	cudaMalloc((void **)&d_b, numC*numR*size);
	cudaMalloc((void **)&d_c, numC*numR*size);
	cudaMalloc((void **)&col, size);
	// Copy inputs to device.
	// Fix: pass the pointers themselves; the original passed &a/&b, copying
	// the addresses of the host pointers instead of the matrix data.
	cudaMemcpy(d_a, a, numC*numR*size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b, numC*numR*size, cudaMemcpyHostToDevice);
	cudaMemcpy(col,&numC,size,cudaMemcpyHostToDevice);
	// Launch add() kernel on GPU: one block per row
	printf("LOOOL\n");
	add<<<numR,1>>>(d_a, d_b, d_c,col);
	// Copy result back to host (fix: destination is c, not &c)
	cudaMemcpy(c, d_c,numC*numR*size, cudaMemcpyDeviceToHost);
	printf("LOOOL\n");
	for (int i = 0; i < numC*numR; ++i)
	{
		if(i%numC==0)
			printf("wot\n");
		printf("%d ",c[i]);
	}
	printf("LOOOL\n");
	// Cleanup (fix: also release col and the host buffers)
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	cudaFree(col);
	free(a);
	free(b);
	free(c);
	return 0;
}
5097388c448c846a833c0797dfe53b3177494ee1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include "Static/TriangleCounting/triangle.cuh"
#include <StandardAPI.hpp>
#include <Device/Util/Timer.cuh>
#include <Graph/GraphStd.hpp>
using namespace std;
using namespace timer;
using namespace hornets_nest;
#define STAND_PRINTF(sys, time, triangles) printf("%s : \t%ld \t%f\n", sys,triangles, time);
// int arrayBlocks[]={16000};
// int arrayBlockSize[]={32,64,96,128,192,256};
// int arrayThreadPerIntersection[]={1,2,4,8,16,32};
// int arrayThreadShift[]={0,1,2,3,4,5};
// int arrayBlocks[]={16000};
// int arrayBlockSize[]={256};
// int arrayThreadPerIntersection[]={32};
// int arrayThreadShift[]={5};
// int arrayBlocks[]={96000};
// int arrayBlockSize[]={128,192,256};
// int arrayThreadPerIntersection[]={8,16,32};
// int arrayThreadShift[]={3,4,5};
int arrayBlocks[]={96000};
int arrayBlockSize[]={192};
int arrayThreadPerIntersection[]={16};
int arrayThreadShift[]={3};
int cutoff[]={-1, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300,
1400, 1500, 1600, 1700, 1800, 1900, 2000,
2100, 2200, 2300, 2400,
2500, 2600, 2700
};
// Zero out the per-vertex triangle counters for all nv vertices.
void initHostTriangleArray(triangle_t* h_triangles, vid_t nv){
    vid_t v = 0;
    while (v < nv) {
        h_triangles[v] = 0;
        ++v;
    }
}
// Totals the per-vertex triangle counters over all nv vertices.
int64_t sumTriangleArray(triangle_t* h_triangles, vid_t nv){
    int64_t total = 0;
    for (vid_t v = 0; v < nv; ++v)
        total += h_triangles[v];
    return total;
}
/**
 * Benchmarks GPU triangle counting over the configured launch-parameter
 * grids (arrayBlocks x arrayBlockSize x arrayThreadPerIntersection) and,
 * for each cutoff value, reports the per-bucket processing rate derived
 * from `histogram` plus run-time mean / standard deviation over 10 runs.
 * NOTE(review): minTime is never updated, so the final line always prints
 * its 10e9 sentinel — confirm whether it should track another timer.
 */
void testTriangleCountingConfigurations(HornetGraph& hornet, vid_t nv,degree_t ne, int *histogram)
{
    float minTime=10e9,time,minTimeHornet=10e9;
    int blocksToTest=sizeof(arrayBlocks)/sizeof(int);
    int blockSizeToTest=sizeof(arrayBlockSize)/sizeof(int);
    int tSPToTest=sizeof(arrayThreadPerIntersection)/sizeof(int);
    for(int b=0;b<blocksToTest; b++){
        int blocks=arrayBlocks[b];
        for(int bs=0; bs<blockSizeToTest; bs++){
            int sps=arrayBlockSize[bs];
            for(int t=0; t<tSPToTest;t++){
                int tsp=arrayThreadPerIntersection[t];
                double prev_average = 0;
                for (auto cutoff_id : cutoff) {
                    double running_time[10];
                    double average=0, stddev=0;
                    // 10 timed runs per configuration/cutoff
                    for (int q=0; q<10; q++) {
                        Timer<DEVICE> TM;
                        TriangleCounting tc(hornet);
                        tc.setInitParameters(blocks,sps,tsp);
                        tc.init();
                        tc.reset();
                        TM.start();
                        tc.run(cutoff_id);
                        TM.stop();
                        time = TM.duration();
                        triangle_t sumDevice = 0;
                        sumDevice = tc.countTriangles();
                        if(time<minTimeHornet) minTimeHornet=time;
                        tc.release();
                        int shifter=arrayThreadShift[t];
                        int nbl=sps/tsp;
                        running_time[q] = time;
                        printf("### %d %d %d %d %d \t\t %u \t %f\n", blocks,sps, tsp, nbl, shifter, sumDevice, time);
                        average += time;
                    }
                    average = average/10;
                    for (int q=0; q<10; q++) {
                        stddev += (running_time[q] - average) * (running_time[q] - average);
                    }
                    stddev = sqrt(stddev/10);
                    auto diff = cutoff_id/100;
                    // Fix: diff has already been divided by 100, so clamp to the
                    // last histogram bucket index (26). The old check
                    // `diff > 2600` never fired, and cutoff 2700 then read
                    // histogram[27] — one past the end of the 27-element array.
                    if (diff > 26) diff = 26;
                    double rate = histogram[diff]/(average-prev_average);
                    prev_average = average;
                    printf("cutoff = %d, rate = %lf, average = %lf , standard deviation = %lf\n", cutoff_id, rate, average, stddev);
                }
            }
        }
    }
    cout << nv << ", " << ne << ", "<< minTime << ", " << minTimeHornet<< endl;
}
// CPU Version - assume sorted index lists.
int hostSingleIntersection (const vid_t ai, const degree_t alen, const vid_t * a,
const vid_t bi, const degree_t blen, const vid_t * b){
//int32_t ka = 0, kb = 0;
int32_t out = 0;
if (!alen || !blen || a[alen-1] < b[0] || b[blen-1] < a[0])
return 0;
const vid_t *aptr=a, *aend=a+alen;
const vid_t *bptr=b, *bend=b+blen;
while(aptr< aend && bptr<bend){
if(*aptr==*bptr){
aptr++, bptr++, out++;
}
else if(*aptr<*bptr){
aptr++;
}
else {
bptr++;
}
}
return out;
}
// Scans every (src,dest) edge of the CSR graph, building a 27-bucket
// histogram of srcLen+destLen (bucketed by 100, clamped into bucket 26)
// and counting edges whose endpoint degrees differ by more than 1380.
// The actual triangle intersection is commented out, so *allTriangles is
// always reported as 0.
// Returns the heap-allocated histogram; the caller owns it (release with
// delete[]).
int* hostCountTriangles (const vid_t nv, const vid_t ne, const eoff_t * off,
    const vid_t * ind, int64_t* allTriangles)
{
    //int32_t edge=0;
    int64_t sum=0;
    int count = 0;
    int *histogram = new int[27]();  // zero-initialized buckets
    degree_t maxd = 0;               // largest srcLen+destLen observed
    for (vid_t src = 0; src < nv; src++)
    {
        degree_t srcLen=off[src+1]-off[src];
        for(int iter=off[src]; iter<off[src+1]; iter++)
        {
            vid_t dest=ind[iter];
            degree_t destLen=off[dest+1]-off[dest];
            if (destLen+srcLen > maxd) maxd = destLen+srcLen;
            // NOTE(review): abs() of a sum of two non-negative degrees is a
            // no-op — presumably abs(destLen - srcLen) was intended; confirm.
            size_t diff = abs(destLen+srcLen);
            if (diff > 2600) diff = 2600;  // clamp into the last bucket
            histogram[diff/100] ++;
            if((destLen < srcLen - 1380) || destLen > srcLen + 1380) {
                count ++;
            }
            //int64_t tris= hostSingleIntersection (src, srcLen, ind+off[src],
            //    dest, destLen, ind+off[dest]);
            //sum+=tris;
        }
    }
    printf("max: %d\n", maxd);
    for(int i=0; i<27; i++)
        printf("histogram %d: %d\n", i, histogram[i]);
    *allTriangles=sum;
    printf("count number %d for distance bigger than\n", count);
    printf("Sequential number of triangles %ld\n",sum);
    return histogram;
}
/**
 * Loads the graph named by argv[1], builds the Hornet GPU graph, runs the
 * host histogram/degree pass, then benchmarks GPU triangle counting.
 */
int exec(const int argc, char *argv[]){
    using namespace graph::structure_prop;
    using namespace graph::parsing_prop;
    int device=0;
    hipSetDevice(device);
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, device);
    graph::GraphStd<vid_t, eoff_t> graph(UNDIRECTED);
    graph.read(argv[1], SORT | PRINT_INFO);
    HornetInit hornet_init(graph.nV(), graph.nE(),
            graph.csr_out_offsets(),
            graph.csr_out_edges());
    std::cout << "Initializing GPU graph" << std::endl;
    HornetGraph hornet_graph(hornet_init);
    std::cout << "Checking sortd adj" << std::endl;
    hornet_graph.check_sorted_adjs();
    int64_t hostTris;
    std::cout << "Starting host triangle counting" << std::endl;
    int *histogram = hostCountTriangles(graph.nV(), graph.nE(),graph.csr_out_offsets(), graph.csr_out_edges(),&hostTris);
    testTriangleCountingConfigurations(hornet_graph,graph.nV(),graph.nE(),histogram);
    // Fix: histogram was allocated with new[], so release it with delete[]
    // (plain `delete` on a new[] array is undefined behavior).
    delete[] histogram;
    return 0;
}
// Program entry point. Optionally wraps exec() in an RMM pool-allocator
// scope when built with RMM_WRAPPER; argv[1] must name the input graph file.
int main(int argc, char* argv[]) {
    int ret = 0;
#if defined(RMM_WRAPPER)
    hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory.
    {//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
#endif
    ret = exec(argc, argv);
#if defined(RMM_WRAPPER)
    }//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
    hornets_nest::gpu::finalizeRMMPoolAllocation();
#endif
    return ret;
}
| 5097388c448c846a833c0797dfe53b3177494ee1.cu |
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include "Static/TriangleCounting/triangle.cuh"
#include <StandardAPI.hpp>
#include <Device/Util/Timer.cuh>
#include <Graph/GraphStd.hpp>
using namespace std;
using namespace timer;
using namespace hornets_nest;
#define STAND_PRINTF(sys, time, triangles) printf("%s : \t%ld \t%f\n", sys,triangles, time);
// int arrayBlocks[]={16000};
// int arrayBlockSize[]={32,64,96,128,192,256};
// int arrayThreadPerIntersection[]={1,2,4,8,16,32};
// int arrayThreadShift[]={0,1,2,3,4,5};
// int arrayBlocks[]={16000};
// int arrayBlockSize[]={256};
// int arrayThreadPerIntersection[]={32};
// int arrayThreadShift[]={5};
// int arrayBlocks[]={96000};
// int arrayBlockSize[]={128,192,256};
// int arrayThreadPerIntersection[]={8,16,32};
// int arrayThreadShift[]={3,4,5};
int arrayBlocks[]={96000};
int arrayBlockSize[]={192};
int arrayThreadPerIntersection[]={16};
int arrayThreadShift[]={3};
int cutoff[]={-1, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300,
1400, 1500, 1600, 1700, 1800, 1900, 2000,
2100, 2200, 2300, 2400,
2500, 2600, 2700
};
// Zero out the per-vertex triangle counters for all nv vertices.
void initHostTriangleArray(triangle_t* h_triangles, vid_t nv){
    vid_t v = 0;
    while (v < nv) {
        h_triangles[v] = 0;
        ++v;
    }
}
// Totals the per-vertex triangle counters over all nv vertices.
int64_t sumTriangleArray(triangle_t* h_triangles, vid_t nv){
    int64_t total = 0;
    for (vid_t v = 0; v < nv; ++v)
        total += h_triangles[v];
    return total;
}
/**
 * Benchmarks GPU triangle counting over the configured launch-parameter
 * grids (arrayBlocks x arrayBlockSize x arrayThreadPerIntersection) and,
 * for each cutoff value, reports the per-bucket processing rate derived
 * from `histogram` plus run-time mean / standard deviation over 10 runs.
 * NOTE(review): minTime is never updated, so the final line always prints
 * its 10e9 sentinel — confirm whether it should track another timer.
 */
void testTriangleCountingConfigurations(HornetGraph& hornet, vid_t nv,degree_t ne, int *histogram)
{
    float minTime=10e9,time,minTimeHornet=10e9;
    int blocksToTest=sizeof(arrayBlocks)/sizeof(int);
    int blockSizeToTest=sizeof(arrayBlockSize)/sizeof(int);
    int tSPToTest=sizeof(arrayThreadPerIntersection)/sizeof(int);
    for(int b=0;b<blocksToTest; b++){
        int blocks=arrayBlocks[b];
        for(int bs=0; bs<blockSizeToTest; bs++){
            int sps=arrayBlockSize[bs];
            for(int t=0; t<tSPToTest;t++){
                int tsp=arrayThreadPerIntersection[t];
                double prev_average = 0;
                for (auto cutoff_id : cutoff) {
                    double running_time[10];
                    double average=0, stddev=0;
                    // 10 timed runs per configuration/cutoff
                    for (int q=0; q<10; q++) {
                        Timer<DEVICE> TM;
                        TriangleCounting tc(hornet);
                        tc.setInitParameters(blocks,sps,tsp);
                        tc.init();
                        tc.reset();
                        TM.start();
                        tc.run(cutoff_id);
                        TM.stop();
                        time = TM.duration();
                        triangle_t sumDevice = 0;
                        sumDevice = tc.countTriangles();
                        if(time<minTimeHornet) minTimeHornet=time;
                        tc.release();
                        int shifter=arrayThreadShift[t];
                        int nbl=sps/tsp;
                        running_time[q] = time;
                        printf("### %d %d %d %d %d \t\t %u \t %f\n", blocks,sps, tsp, nbl, shifter, sumDevice, time);
                        average += time;
                    }
                    average = average/10;
                    for (int q=0; q<10; q++) {
                        stddev += (running_time[q] - average) * (running_time[q] - average);
                    }
                    stddev = sqrt(stddev/10);
                    auto diff = cutoff_id/100;
                    // Fix: diff has already been divided by 100, so clamp to the
                    // last histogram bucket index (26). The old check
                    // `diff > 2600` never fired, and cutoff 2700 then read
                    // histogram[27] — one past the end of the 27-element array.
                    if (diff > 26) diff = 26;
                    double rate = histogram[diff]/(average-prev_average);
                    prev_average = average;
                    printf("cutoff = %d, rate = %lf, average = %lf , standard deviation = %lf\n", cutoff_id, rate, average, stddev);
                }
            }
        }
    }
    cout << nv << ", " << ne << ", "<< minTime << ", " << minTimeHornet<< endl;
}
// CPU Version - assume sorted index lists.
int hostSingleIntersection (const vid_t ai, const degree_t alen, const vid_t * a,
const vid_t bi, const degree_t blen, const vid_t * b){
//int32_t ka = 0, kb = 0;
int32_t out = 0;
if (!alen || !blen || a[alen-1] < b[0] || b[blen-1] < a[0])
return 0;
const vid_t *aptr=a, *aend=a+alen;
const vid_t *bptr=b, *bend=b+blen;
while(aptr< aend && bptr<bend){
if(*aptr==*bptr){
aptr++, bptr++, out++;
}
else if(*aptr<*bptr){
aptr++;
}
else {
bptr++;
}
}
return out;
}
// Scans every (src,dest) edge of the CSR graph, building a 27-bucket
// histogram of srcLen+destLen (bucketed by 100, clamped into bucket 26)
// and counting edges whose endpoint degrees differ by more than 1380.
// The actual triangle intersection is commented out, so *allTriangles is
// always reported as 0.
// Returns the heap-allocated histogram; the caller owns it (release with
// delete[]).
int* hostCountTriangles (const vid_t nv, const vid_t ne, const eoff_t * off,
    const vid_t * ind, int64_t* allTriangles)
{
    //int32_t edge=0;
    int64_t sum=0;
    int count = 0;
    int *histogram = new int[27]();  // zero-initialized buckets
    degree_t maxd = 0;               // largest srcLen+destLen observed
    for (vid_t src = 0; src < nv; src++)
    {
        degree_t srcLen=off[src+1]-off[src];
        for(int iter=off[src]; iter<off[src+1]; iter++)
        {
            vid_t dest=ind[iter];
            degree_t destLen=off[dest+1]-off[dest];
            if (destLen+srcLen > maxd) maxd = destLen+srcLen;
            // NOTE(review): abs() of a sum of two non-negative degrees is a
            // no-op — presumably abs(destLen - srcLen) was intended; confirm.
            size_t diff = abs(destLen+srcLen);
            if (diff > 2600) diff = 2600;  // clamp into the last bucket
            histogram[diff/100] ++;
            if((destLen < srcLen - 1380) || destLen > srcLen + 1380) {
                count ++;
            }
            //int64_t tris= hostSingleIntersection (src, srcLen, ind+off[src],
            //    dest, destLen, ind+off[dest]);
            //sum+=tris;
        }
    }
    printf("max: %d\n", maxd);
    for(int i=0; i<27; i++)
        printf("histogram %d: %d\n", i, histogram[i]);
    *allTriangles=sum;
    printf("count number %d for distance bigger than\n", count);
    printf("Sequential number of triangles %ld\n",sum);
    return histogram;
}
/**
 * Loads the graph named by argv[1], builds the Hornet GPU graph, runs the
 * host histogram/degree pass, then benchmarks GPU triangle counting.
 */
int exec(const int argc, char *argv[]){
    using namespace graph::structure_prop;
    using namespace graph::parsing_prop;
    int device=0;
    cudaSetDevice(device);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    graph::GraphStd<vid_t, eoff_t> graph(UNDIRECTED);
    graph.read(argv[1], SORT | PRINT_INFO);
    HornetInit hornet_init(graph.nV(), graph.nE(),
            graph.csr_out_offsets(),
            graph.csr_out_edges());
    std::cout << "Initializing GPU graph" << std::endl;
    HornetGraph hornet_graph(hornet_init);
    std::cout << "Checking sortd adj" << std::endl;
    hornet_graph.check_sorted_adjs();
    int64_t hostTris;
    std::cout << "Starting host triangle counting" << std::endl;
    int *histogram = hostCountTriangles(graph.nV(), graph.nE(),graph.csr_out_offsets(), graph.csr_out_edges(),&hostTris);
    testTriangleCountingConfigurations(hornet_graph,graph.nV(),graph.nE(),histogram);
    // Fix: histogram was allocated with new[], so release it with delete[]
    // (plain `delete` on a new[] array is undefined behavior).
    delete[] histogram;
    return 0;
}
// Program entry point. Optionally wraps exec() in an RMM pool-allocator
// scope when built with RMM_WRAPPER; argv[1] must name the input graph file.
int main(int argc, char* argv[]) {
    int ret = 0;
#if defined(RMM_WRAPPER)
    hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory.
    {//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
#endif
    ret = exec(argc, argv);
#if defined(RMM_WRAPPER)
    }//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
    hornets_nest::gpu::finalizeRMMPoolAllocation();
#endif
    return ret;
}
|
d5887d443bb4e4cfdf95a1eae82c28cf45e86953.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define UPPERTHRESHOLD 90
#define LOWERTHRESHOLD 30
// Sobel horizontal-gradient kernel (3x3).
const float G_x[3 * 3] = {
    -1, 0, 1,
    -2, 0, 2,
    -1, 0, 1
};
// Sobel vertical-gradient kernel (3x3).
const float G_y[3 * 3] = {
    1, 2, 1,
    0, 0, 0,
    -1, -2, -1
};
// 5x5 Gaussian smoothing kernel used by Canny, normalized by 1/159 so the
// weights sum to 1.
// Fix: the last entry of the middle row was 2.f/159; the symmetric kernel
// requires 5.f/159 (otherwise the weights sum to 156/159 and the filtered
// image is dimmed).
const float gaussian[5 * 5] = {
    2.f/159, 4.f/159, 5.f/159, 4.f/159, 2.f/159,
    4.f/159, 9.f/159, 12.f/159, 9.f/159, 4.f/159,
    5.f/159, 12.f/159, 15.f/159, 12.f/159, 5.f/159,
    4.f/159, 9.f/159, 12.f/159, 9.f/159, 4.f/159,
    2.f/159, 4.f/159, 5.f/159, 4.f/159, 2.f/159
};
// Canny non-maximum suppression. `in` holds the quantized direction
// (0/45/90/135) for each pixel and `out` holds the magnitude; pixels that
// are not a local maximum against their two neighbors along that direction
// are zeroed.
// NOTE(review): suppression is done in place on `out`, so a neighbor may
// already have been zeroed by another thread — confirm whether a separate
// output buffer is intended.
__global__ void nonMaxSuppression(int N, int width, int height, unsigned char * in, unsigned char * out) {
    int D = 1;
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x >= width || y >= height) {
        return;
    }
    // Fix: skip the one-pixel border so the +/-D neighbor accesses below
    // stay inside the image (the original indexed out of bounds at edges).
    if (x < D || x >= width - D || y < D || y >= height - D) {
        return;
    }
    int angle = in[y * width + x];
    switch(angle) {
        case 0:
            if (out[y * width + x] < out[(y + D) * width + x] || out[y * width + x] < out[(y - D) * width + x]) {
                out[y * width + x] = 0;
            }
            break;
        case 45:
            if (out[y * width + x] < out[(y + D) * width + x - D] || out[y * width + x] < out[(y - D) * width + x + D]) {
                out[y * width + x] = 0;
            }
            break;
        case 90:
            if (out[y * width + x] < out[y * width + x + D] || out[y * width + x] < out[y * width + x - D]) {
                out[y * width + x] = 0;
            }
            break;
        case 135:
            if (out[y * width + x] < out[(y + D) * width + x + D] || out[y * width + x] < out[(y - D) * width + x - D]) {
                out[y * width + x] = 0;
            }
            break;
        default:
            break;
    }
}
#define UPPERTHRESHOLD 90
#define LOWERTHRESHOLD 30
// Sobel horizontal-gradient kernel (3x3).
const float G_x[3 * 3] = {
    -1, 0, 1,
    -2, 0, 2,
    -1, 0, 1
};
// Sobel vertical-gradient kernel (3x3).
const float G_y[3 * 3] = {
    1, 2, 1,
    0, 0, 0,
    -1, -2, -1
};
// 5x5 Gaussian smoothing kernel used by Canny, normalized by 1/159 so the
// weights sum to 1.
// Fix: the last entry of the middle row was 2.f/159; the symmetric kernel
// requires 5.f/159 (otherwise the weights sum to 156/159 and the filtered
// image is dimmed).
const float gaussian[5 * 5] = {
    2.f/159, 4.f/159, 5.f/159, 4.f/159, 2.f/159,
    4.f/159, 9.f/159, 12.f/159, 9.f/159, 4.f/159,
    5.f/159, 12.f/159, 15.f/159, 12.f/159, 5.f/159,
    4.f/159, 9.f/159, 12.f/159, 9.f/159, 4.f/159,
    2.f/159, 4.f/159, 5.f/159, 4.f/159, 2.f/159
};
// Canny non-maximum suppression. `in` holds the quantized direction
// (0/45/90/135) for each pixel and `out` holds the magnitude; pixels that
// are not a local maximum against their two neighbors along that direction
// are zeroed.
// NOTE(review): suppression is done in place on `out`, so a neighbor may
// already have been zeroed by another thread — confirm whether a separate
// output buffer is intended.
__global__ void nonMaxSuppression(int N, int width, int height, unsigned char * in, unsigned char * out) {
    int D = 1;
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x >= width || y >= height) {
        return;
    }
    // Fix: skip the one-pixel border so the +/-D neighbor accesses below
    // stay inside the image (the original indexed out of bounds at edges).
    if (x < D || x >= width - D || y < D || y >= height - D) {
        return;
    }
    int angle = in[y * width + x];
    switch(angle) {
        case 0:
            if (out[y * width + x] < out[(y + D) * width + x] || out[y * width + x] < out[(y - D) * width + x]) {
                out[y * width + x] = 0;
            }
            break;
        case 45:
            if (out[y * width + x] < out[(y + D) * width + x - D] || out[y * width + x] < out[(y - D) * width + x + D]) {
                out[y * width + x] = 0;
            }
            break;
        case 90:
            if (out[y * width + x] < out[y * width + x + D] || out[y * width + x] < out[y * width + x - D]) {
                out[y * width + x] = 0;
            }
            break;
        case 135:
            if (out[y * width + x] < out[(y + D) * width + x + D] || out[y * width + x] < out[(y - D) * width + x - D]) {
                out[y * width + x] = 0;
            }
            break;
        default:
            break;
    }
}
0351dc58dfe990d3bf56e35cf868e23fca85a9e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include < cuda_runtime.h>
#include <d:/book.h>
#define N 1000
#define M 128
#define K 2
#define threadsPerBlock 64
// Per-camera 8x8 J^T*J block accumulation: each block processes one
// camera's slice of the Jacobian J (entries [cameraID[b], cameraCount[b])),
// accumulates 8-wide partial rows per thread, then tree-reduces them in
// shared memory into the 64 output values for that camera.
// NOTE(review): there is no __syncthreads() between the shared-memory
// writes and reads below; with threadsPerBlock = 64 (two warps) this
// appears to rely on implicit warp synchrony — confirm on the target GPU.
__global__ void jtj_cam( int * J, int* blocks ,int* cameraID ,int *cameraCount)
{
    // shared staging area: 8 partial-row slots per thread
    __shared__ int value[threadsPerBlock * 8];
    int index=threadIdx.x+blockIdx.x*blockDim.x;
    // column position within the 8-wide tile (threadIdx.x mod 8)
    int colpos= threadIdx.x & 0x7;
    // start of this thread's 8-aligned row group
    int rowpos = threadIdx.x - colpos;
    int row[8] = {0, 0, 0, 0, 0, 0, 0, 0};
    // walk this camera's Jacobian entries, strided by the block width
    int tid =threadIdx.x+cameraID[blockIdx.x];
    while (tid < cameraCount[blockIdx.x])
    {
        // stage one J value per thread
        value[threadIdx.x] = J[tid];
        for(int j = 0; j < 8; ++j)
            if(j>colpos||j==colpos) // upper triangle only: product is symmetric
                row[j] += (value[threadIdx.x] * value[rowpos + j]);
        tid += blockDim.x;
    }
    // per-thread accumulation done; publish partial rows to shared memory
    for(int i = 0; i < 8; ++i)
        value[threadIdx.x * 8 + i] = row[i];
    int i=8*threadsPerBlock;
    while (i != 64)
    {
        tid =threadIdx.x;
        while(tid < i/2)
        {
            // halving tree reduction in shared memory
            value[tid] += value[tid + i/2];
            tid += 64;
        }
        i /= 2;
    }
    // write out this camera's reduced 64-value block
    blocks[index]=value[threadIdx.x];
}
// Host driver: reads a Jacobian vector from disk, runs the per-camera
// J^T*J accumulation kernel for K cameras, and prints the M result values
// 8 per row.
int main( void )
{
	int j[N], jtj[M],cameraID[K],cameraCount[K];
	int *dev_j, *dev_jtj ,*dev_cameraID,*dev_cameraCount;
	// allocate GPU memory
	HANDLE_ERROR( hipMalloc( (void**)&dev_j, N * sizeof(int) ) );
	HANDLE_ERROR( hipMalloc( (void**)&dev_jtj, N * sizeof(int) ) );
	HANDLE_ERROR( hipMalloc( (void**)&dev_cameraID, K * sizeof(int) ) );
	HANDLE_ERROR( hipMalloc( (void**)&dev_cameraCount, K * sizeof(int) ) );
	// read the Jacobian values
	const char *rpc1Filename = "D:\\in.txt";
	FILE* fid21 = fopen(rpc1Filename, "rt");
	for (int i = 0; i < N; i++)
	{
		fscanf(fid21, "%d ", &j[i]);
	}
	fclose(fid21);  // fix: close before reopening (stream was leaked)
	// read initial output values
	rpc1Filename = "D:\\f.txt";
	fid21 = fopen(rpc1Filename, "rt");
	// Fix: jtj has only M elements; the old loop ran to N (= 1000) and
	// overflowed the stack buffer.
	for (int i = 0; i < M; i++)
	{
		fscanf(fid21, "%d ", &jtj[i]);
	}
	fclose(fid21);
	cameraID[0]=0;
	cameraID[1]=256;
	cameraCount[0]=256;
	cameraCount[1]=1000;
	// copy inputs to the GPU
	HANDLE_ERROR( hipMemcpy( dev_j, j, N * sizeof(int),
			hipMemcpyHostToDevice ) );
	HANDLE_ERROR( hipMemcpy( dev_jtj, jtj, M * sizeof(int),
			hipMemcpyHostToDevice ) );
	HANDLE_ERROR( hipMemcpy( dev_cameraID, cameraID, K * sizeof(int),
			hipMemcpyHostToDevice ) );
	HANDLE_ERROR( hipMemcpy( dev_cameraCount, cameraCount, K * sizeof(int),
			hipMemcpyHostToDevice ) );
	// one block per camera, threadsPerBlock threads each
	dim3 grid(2), block(threadsPerBlock);
	hipLaunchKernelGGL(( jtj_cam), dim3(grid), dim3(block), 0, 0, dev_j, dev_jtj,dev_cameraID,dev_cameraCount);
	// copy results back to the host
	HANDLE_ERROR( hipMemcpy( j, dev_j, N * sizeof(int),hipMemcpyDeviceToHost ) );
	HANDLE_ERROR( hipMemcpy( jtj, dev_jtj, M * sizeof(int),hipMemcpyDeviceToHost ) );
	HANDLE_ERROR( hipMemcpy( cameraID, dev_cameraID, K * sizeof(int),hipMemcpyDeviceToHost ) );
	HANDLE_ERROR( hipMemcpy(cameraCount, dev_cameraCount, K * sizeof(int),hipMemcpyDeviceToHost ) );
	// display the results
	for (int i=0; i<M; i++)
	{
		printf( "%d ", jtj[i] );
		if((i+1)%8==0)
			printf( "\n");
	}
	// free the memory allocated on the GPU
	HANDLE_ERROR( hipFree( dev_j ) );
	HANDLE_ERROR( hipFree( dev_jtj ) );
	HANDLE_ERROR( hipFree( dev_cameraID ) );
	HANDLE_ERROR( hipFree( dev_cameraCount ) );
	return 0;
}
| 0351dc58dfe990d3bf56e35cf868e23fca85a9e5.cu | #include<stdio.h>
#include < cuda_runtime.h>
#include <d:/book.h>
#define N 1000
#define M 128
#define K 2
#define threadsPerBlock 64
// Per-camera 8x8 J^T*J block accumulation: each block processes one
// camera's slice of the Jacobian J (entries [cameraID[b], cameraCount[b])),
// accumulates 8-wide partial rows per thread, then tree-reduces them in
// shared memory into the 64 output values for that camera.
// NOTE(review): there is no __syncthreads() between the shared-memory
// writes and reads below; with threadsPerBlock = 64 (two warps) this
// appears to rely on implicit warp synchrony — confirm on the target GPU.
__global__ void jtj_cam( int * J, int* blocks ,int* cameraID ,int *cameraCount)
{
	// allocate shared memory: 8 partial-row slots per thread
	__shared__ int value[threadsPerBlock * 8];
	int index=threadIdx.x+blockIdx.x*blockDim.x;
	// column index within the 8-wide tile (threadIdx.x mod 8)
	int colpos= threadIdx.x & 0x7;
	// row-group base index
	int rowpos = threadIdx.x - colpos;
	int row[8] = {0, 0, 0, 0, 0, 0, 0, 0};
	// fetch the Jacobian entries belonging to this block's camera
	int tid =threadIdx.x+cameraID[blockIdx.x];
	while (tid < cameraCount[blockIdx.x])
	{
		// stage one J value per thread in shared memory
		value[threadIdx.x] = J[tid];
		for(int j = 0; j < 8; ++j)
			if(j>colpos||j==colpos) // upper triangle only: avoids duplicate symmetric work
				row[j] += (value[threadIdx.x] * value[rowpos + j]);
		tid += blockDim.x;
	}
	// per-thread accumulation done; publish partial rows to shared memory
	for(int i = 0; i < 8; ++i)
		value[threadIdx.x * 8 + i] = row[i];
	int i=8*threadsPerBlock;
	while (i != 64)
	{
		tid =threadIdx.x;
		while(tid < i/2)
		{
			// halving tree reduction in shared memory
			value[tid] += value[tid + i/2];
			tid += 64;
		}
		i /= 2;
	}
	// write out this camera's reduced block (64 values)
	blocks[index]=value[threadIdx.x];
}
// Host driver: reads a Jacobian vector from disk, runs the per-camera
// J^T*J accumulation kernel for K cameras, and prints the M result values
// 8 per row.
int main( void )
{
	int j[N], jtj[M],cameraID[K],cameraCount[K];
	int *dev_j, *dev_jtj ,*dev_cameraID,*dev_cameraCount;
	// allocate GPU memory
	HANDLE_ERROR( cudaMalloc( (void**)&dev_j, N * sizeof(int) ) );
	HANDLE_ERROR( cudaMalloc( (void**)&dev_jtj, N * sizeof(int) ) );
	HANDLE_ERROR( cudaMalloc( (void**)&dev_cameraID, K * sizeof(int) ) );
	HANDLE_ERROR( cudaMalloc( (void**)&dev_cameraCount, K * sizeof(int) ) );
	// read the Jacobian values
	const char *rpc1Filename = "D:\\in.txt";
	FILE* fid21 = fopen(rpc1Filename, "rt");
	for (int i = 0; i < N; i++)
	{
		fscanf(fid21, "%d ", &j[i]);
	}
	fclose(fid21);  // fix: close before reopening (stream was leaked)
	// read initial output values
	rpc1Filename = "D:\\f.txt";
	fid21 = fopen(rpc1Filename, "rt");
	// Fix: jtj has only M elements; the old loop ran to N (= 1000) and
	// overflowed the stack buffer.
	for (int i = 0; i < M; i++)
	{
		fscanf(fid21, "%d ", &jtj[i]);
	}
	fclose(fid21);
	cameraID[0]=0;
	cameraID[1]=256;
	cameraCount[0]=256;
	cameraCount[1]=1000;
	// copy inputs to the GPU
	HANDLE_ERROR( cudaMemcpy( dev_j, j, N * sizeof(int),
			cudaMemcpyHostToDevice ) );
	HANDLE_ERROR( cudaMemcpy( dev_jtj, jtj, M * sizeof(int),
			cudaMemcpyHostToDevice ) );
	HANDLE_ERROR( cudaMemcpy( dev_cameraID, cameraID, K * sizeof(int),
			cudaMemcpyHostToDevice ) );
	HANDLE_ERROR( cudaMemcpy( dev_cameraCount, cameraCount, K * sizeof(int),
			cudaMemcpyHostToDevice ) );
	// one block per camera, threadsPerBlock threads each
	dim3 grid(2), block(threadsPerBlock);
	jtj_cam<<<grid, block>>>(dev_j, dev_jtj,dev_cameraID,dev_cameraCount);
	// copy results back to the host
	HANDLE_ERROR( cudaMemcpy( j, dev_j, N * sizeof(int),cudaMemcpyDeviceToHost ) );
	HANDLE_ERROR( cudaMemcpy( jtj, dev_jtj, M * sizeof(int),cudaMemcpyDeviceToHost ) );
	HANDLE_ERROR( cudaMemcpy( cameraID, dev_cameraID, K * sizeof(int),cudaMemcpyDeviceToHost ) );
	HANDLE_ERROR( cudaMemcpy(cameraCount, dev_cameraCount, K * sizeof(int),cudaMemcpyDeviceToHost ) );
	// display the results
	for (int i=0; i<M; i++)
	{
		printf( "%d ", jtj[i] );
		if((i+1)%8==0)
			printf( "\n");
	}
	// free the memory allocated on the GPU
	HANDLE_ERROR( cudaFree( dev_j ) );
	HANDLE_ERROR( cudaFree( dev_jtj ) );
	HANDLE_ERROR( cudaFree( dev_cameraID ) );
	HANDLE_ERROR( cudaFree( dev_cameraCount ) );
	return 0;
}
|
8fc43a6edfcf7b4441ede88ba5dbb98f6247b82f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
============================================================================
Name : cuda_knn.cu
Author : Tyler Ouyang
Version :
Copyright : Copyright 2016 Tyler Ouyang. All rights reserved.
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream>
#include <stdlib.h>
#include "common_hip.cuh"
#include "common.h"
#include "utils.cuh"
#include "thrust_utils.h"
using namespace std;
#define FREEMEM_SCALER 50;
/**
 * LIRA (likelihood-ratio) similarity between two users' sparse rating
 * vectors, each sorted by item id (x). Builds a histogram of |r1 - r2|
 * over co-rated items, then returns the log10 likelihood ratio of a
 * "similar users" model against a uniform-random rating model.
 * NOTE(review): deltaCounter has 5 bins, but ratings are stored doubled
 * elsewhere in this file (y = raw * 2, see moveRatingsToDevice), so
 * |r1->y - r2->y| could exceed 4 and index out of bounds — confirm the
 * rating scale feeding this function.
 */
__device__
float calculateLiraDistance(
    Rating *r1Start,
    Rating *r1End,
    Rating *r2Start,
    Rating *r2End) {
  int maxRating = 5;
  // TODO: change 10 to MACRO
  int deltaCounter[5] = {0};
  // merge the two sorted lists, counting rating deltas on common items
  while (r1Start < r1End && r2Start < r2End) {
    if (r1Start->x < r2Start->x) {
      r1Start++;
    }
    else if (r1Start->x == r2Start->x) {
      deltaCounter[abs(r1Start->y - r2Start->y)]++;
      r1Start++;
      r2Start++;
    }
    else {
      r2Start++;
    }
  }
  // accumulate P(delta | similar) / P(delta | random) per delta bucket
  float accumulator = 1;
  accumulator *= pow(0.5, deltaCounter[0]) / pow(1.0 / (float)maxRating, deltaCounter[0]);
  for (int i = 1; i < maxRating - 1; i++) {
    float pA = pow(2, -i - 1),
        pRand = 2 * (maxRating - i) / (float)(maxRating * maxRating);
    accumulator *= pow(pA, deltaCounter[i]) / pow(pRand, deltaCounter[i]);
  }
  float pRand = 2 / (float)(maxRating * maxRating);
  accumulator *= pow(pow(2, -maxRating + 1), deltaCounter[maxRating-1]) / pow(pRand, deltaCounter[maxRating-1]);
  return log10(accumulator);
}
// Cosine similarity between two sparse rating vectors, each sorted by
// item id (x). Items present in only one vector contribute to that
// vector's norm but not to the dot product.
__device__
float calculateCOSDistance(
    Rating *r1Start,
    Rating *r1End,
    Rating *r2Start,
    Rating *r2End) {
  float dot = 0.0, normSq1 = 0.0, normSq2 = 0.0;
  // merge the two sorted lists
  while (r1Start < r1End && r2Start < r2End) {
    if (r1Start->x == r2Start->x) {
      // co-rated item: contributes to the dot product and both norms
      dot += r1Start->y * r2Start->y;
      normSq1 += r1Start->y * r1Start->y;
      normSq2 += r2Start->y * r2Start->y;
      ++r1Start;
      ++r2Start;
    } else if (r1Start->x > r2Start->x) {
      // item rated only by the neighbor
      normSq2 += r2Start->y * r2Start->y;
      ++r2Start;
    } else {
      // item rated only by the base user
      normSq1 += r1Start->y * r1Start->y;
      ++r1Start;
    }
  }
  // drain whichever tail remains
  for (; r1Start < r1End; ++r1Start)
    normSq1 += r1Start->y * r1Start->y;
  for (; r2Start < r2End; ++r2Start)
    normSq2 += r2Start->y * r2Start->y;
  return dot / (sqrt(normSq1) * sqrt(normSq2));
}
/**
* CUDA kernel that computes distances between every two users in d_trainUsers
*/
// Tiled all-pairs distance kernel. Each block handles TILE_SIZE "base"
// users starting at stageStartUser; thread columns (threadIdx.x) own one
// base user each, thread rows (threadIdx.y) sweep neighbors in strides of
// TILE_SIZE. Distances land in d_distances[localUserId * numUsers + i].
__global__
void calculateAllDistance(
    int stageStartUser,
    int numUsers,
    int *d_trainUsers,
    Rating *allRatings,
    float *d_distances) {
  // absolute id of the base user this thread column works on
  int globalUserId = stageStartUser + blockIdx.x * TILE_SIZE + threadIdx.x;
  // user id relative in stage
  int localUserId = blockIdx.x * TILE_SIZE + threadIdx.x;
  // TODO: experimental, need optimization
  // space for TILE_SIZE * 2 users, each one has at most TILE_DEPTH ratings;
  // first half holds the current neighbor tile, second half the base users
  __shared__ Rating ratings[TILE_DEPTH * TILE_SIZE * 2];
  int numRatings = d_trainUsers[globalUserId];
  Rating *baseStart = ratings + (threadIdx.x + TILE_SIZE) * TILE_DEPTH,
      *baseEnd = baseStart + numRatings;
  // copy data to shared memory, base users are the last TILE_SIZE users in ratings
  Rating *copyFrom = allRatings + globalUserId * TILE_DEPTH;
#pragma unroll
  for (int i = threadIdx.y; i < numRatings; i += TILE_SIZE)
    baseStart[i] = copyFrom[i];
  __syncthreads();
  // TILE_SIZE users per iteration for now
  for (int i = threadIdx.y; i < numUsers; i += TILE_SIZE) {
    int nbNumRatings = d_trainUsers[i];
    Rating *neighborStart = ratings + threadIdx.y * TILE_DEPTH,
        *neighborEnd = neighborStart + nbNumRatings;
    copyFrom = allRatings + i * TILE_DEPTH;
    // copy data to shared memory, neighbors are the first TILE_SIZE users
#pragma unroll
    for (int j = threadIdx.x; j < nbNumRatings; j += TILE_SIZE)
      neighborStart[j] = copyFrom[j];
    __syncthreads();
    // one distance per (base user, neighbor) pair
    d_distances[localUserId * numUsers + i]
        = calculateCOSDistance(baseStart, baseEnd, neighborStart, neighborEnd);
    // if (globalUserId == 0) {
    //   printf("%d, %.10lf\n", i+1,
    //       d_distances[localUserId * numUsers + i]);
    // }
    __syncthreads();
  }
}
/**
* CUDA kernel that computes KNN
*/
/**
 * KNN rating prediction for one test user. Each thread column
 * (threadIdx.x) owns one test item; its NUM_NEIGHBORS thread rows scan the
 * neighbor list in similarity order (via idxIdMap), looking the item up in
 * each neighbor's ratings. Row y==0 collects the per-round lookups and,
 * once k raters are found, flags the column done and writes the final
 * sum/count to ratingSums/ratingCounts.
 */
__global__
void knn_8(int numUsers, int k,
    int *idxIdMap,
    Rating *trainRatings, Rating *testRatings,
    int *trainUser,
    int *ratingSums, int *ratingCounts) {
  // space to store ratings found by each thread
  extern __shared__ short foundRatings[];
  // per-item "done" flags live right after the foundRatings area
  short *finished = (short*) &foundRatings[blockDim.x * NUM_NEIGHBORS];
  int threadId = threadIdx.x * NUM_NEIGHBORS + threadIdx.y;
  // initialize shared memory
  foundRatings[threadId] = 0;
  if (threadIdx.y == 0 ) finished[threadIdx.x] = 0;
  int sumOfRatings = 0;
  int numOfMatchedNeighbors = 0;
  int testItemId = testRatings[threadIdx.x].x;
  // TODO: consider stopping at 20*K instead of numUsers
  for (int neighborIdx = threadIdx.y; neighborIdx < numUsers; neighborIdx += NUM_NEIGHBORS) {
    // load ratings of NUM_NEIGHBORS users to shared memory
    int nbNumRatings = trainUser[idxIdMap[neighborIdx]];
    Rating *neighborStart = trainRatings + idxIdMap[neighborIdx] * TILE_DEPTH;
    __syncthreads();
    // NOTE(review): the __syncthreads() below sits inside a branch on
    // finished[threadIdx.x], which can diverge across thread columns —
    // confirm all columns finish in the same round, otherwise this risks
    // undefined behavior.
    if (!finished[threadIdx.x]) {
      foundRatings[threadId] = findItemRating(testItemId, neighborStart, nbNumRatings);
      __syncthreads();
      // thread 0 of each movie collects information
      if (threadIdx.y == 0) {
        int count = min(numUsers - neighborIdx, NUM_NEIGHBORS);
        for (int i = 0; i < count; i++) {
          if (numOfMatchedNeighbors == k) {
            finished[threadIdx.x] = 1;
            break;
          }
          int rate = foundRatings[threadId + i];
          if (rate > 0) {
            sumOfRatings += rate;
            numOfMatchedNeighbors++;
          }
        }
      }
    }
  }
  if (threadIdx.y == 0) {
    ratingSums[threadIdx.x] = sumOfRatings;
    ratingCounts[threadIdx.x] = numOfMatchedNeighbors;
  }
}
/**
 * Packs per-user training ratings into a dense [numUsers x TILE_DEPTH]
 * device array (*d_ratings) and uploads each user's rating count
 * (*d_users). The user count is rounded down to a multiple of TILE_SIZE
 * and per-user ratings are truncated at TILE_DEPTH. Ratings are doubled
 * so half-star values become integers.
 */
void moveRatingsToDevice(
    H_Users h_trainUsers,
    int **d_users,
    Rating **d_ratings) {
  // make numTrainUsers a multiple of TILE_SIZE
  int numUsers = h_trainUsers.size() / TILE_SIZE * TILE_SIZE;
  int totalNumRatings = numUsers * TILE_DEPTH;
  // value-initialized to all zeros (replaces the explicit zeroing loop)
  int *h_users = new int[numUsers]();
  // Fix: allocate totalNumRatings elements, not
  // sizeof(Rating) * totalNumRatings elements — the original over-allocated
  // the host staging buffer by a factor of sizeof(Rating).
  Rating *h_ratings = new Rating[totalNumRatings];
  checkCudaErrors(hipMalloc((void **) d_ratings, sizeof(Rating) * totalNumRatings));
  for (int i = 0; i < numUsers; i++) {
    int numRatings = min((int)h_trainUsers[i].size(), TILE_DEPTH);
    // copy vector to intermediate host array; ratings are doubled
    for (int j = 0; j < numRatings; j++) {
      h_ratings[i * TILE_DEPTH + j].x = h_trainUsers[i][j].first;
      h_ratings[i * TILE_DEPTH + j].y = h_trainUsers[i][j].second * 2;
    }
    h_users[i] = numRatings;
  }
  // move data from host to device
  checkCudaErrors(hipMemcpy(*d_ratings, h_ratings, sizeof(Rating) * totalNumRatings, hipMemcpyHostToDevice));
  checkCudaErrors(hipMalloc((void **) d_users, sizeof(int) * numUsers));
  checkCudaErrors(hipMemcpy(*d_users, h_users, sizeof(int) * numUsers, hipMemcpyHostToDevice));
  delete[] h_ratings;
  delete[] h_users;
}
// Mark every slot of `users` as empty (no ratings pointer, zero count).
void initUsers(User *users, int num) {
  for (User *slot = users, *end = users + num; slot < end; ++slot)
    *slot = {NULL, 0};
}
/**
 * Flattens test users' ratings into one contiguous device array and
 * records, in h_users, each user's pointer into that device array plus
 * rating count. All h_users slots are reset first; users with no ratings
 * stay empty. Ratings are doubled so half-star values become integers.
 */
void moveTestRatingsToDevice(
    H_Users h_testUsers,
    User *h_users,
    Rating **d_ratings,
    int numUsers,
    int testUserRatingCount) {
  initUsers(h_users, numUsers);
  numUsers = min(numUsers, (int) h_testUsers.size());
  // Fix: allocate testUserRatingCount elements, not
  // sizeof(Rating) * testUserRatingCount elements — the original
  // over-allocated the host staging buffer by a factor of sizeof(Rating).
  Rating *h_ratings = new Rating[testUserRatingCount];
  checkCudaErrors(hipMalloc((void **) d_ratings, sizeof(Rating) * testUserRatingCount));
  int ratingsSoFar = 0;
  for (int i = 0; i < numUsers; i++) {
    int numRatings = h_testUsers[i].size();
    if (numRatings < 1) continue;
    // copy vector to intermediate host array; ratings are doubled
    for (int j = 0; j < numRatings; j++) {
      h_ratings[ratingsSoFar + j].x = h_testUsers[i][j].first;
      h_ratings[ratingsSoFar + j].y = h_testUsers[i][j].second * 2;
    }
    // save this user's device pointer and count
    h_users[i].ratings = *d_ratings + ratingsSoFar;
    h_users[i].numRatings = numRatings;
    ratingsSoFar += numRatings;
  }
  // move data from host to device
  checkCudaErrors(hipMemcpy(*d_ratings, h_ratings, sizeof(Rating) * testUserRatingCount, hipMemcpyHostToDevice));
  delete[] h_ratings;
}
void cudaCore(
int trainUserRatingCount,
int testUserRatingCount,
H_Users h_trainUsers,
H_Users h_testUsers,
int k) {
int *d_trainUsers, *d_ratingSums, *d_ratingCounts;
int h_ratingCounts[CONC_ITEMS_NUM] = { 0 }, h_ratingSums[CONC_ITEMS_NUM] = { 0 };
Rating *d_trainRatings, *d_testRatings;
int numTrainUsers = h_trainUsers.size() / TILE_SIZE * TILE_SIZE;
User *h_testUsersIdx = new User[numTrainUsers];
float *d_distances;
int *d_idxIdMap;
int predictedCount = 0, validTestSize = 0;
double errorSum = 0, errorSumSq = 0;
// cout << "trainUserRatingCount: " << trainUserRatingCount << endl;
// cout << "number of users: " << h_trainUsers.size() << "; effective user: " << numTrainUsers << endl;
// cout << "testUserRatingCount: " << testUserRatingCount << endl;
// cout << "number of test users: " << h_testUsers.size() << endl;
moveRatingsToDevice(h_trainUsers, &d_trainUsers, &d_trainRatings);
moveTestRatingsToDevice(h_testUsers, h_testUsersIdx, &d_testRatings, numTrainUsers, testUserRatingCount);
// cout << "data moved to device\n";
// get free memory
size_t freeMemSize, totalMemSize;
checkCudaErrors(hipMemGetInfo(&freeMemSize, &totalMemSize));
// cout << "device has " << freeMemSize << " free global memory\n";
checkCudaErrors(hipMalloc((void **) &d_ratingSums, CONC_ITEMS_NUM * sizeof(int)));
checkCudaErrors(hipMalloc((void **) &d_ratingCounts, CONC_ITEMS_NUM * sizeof(int)));
checkCudaErrors(hipMalloc((void **) &d_idxIdMap, numTrainUsers * sizeof(int)));
// calculate how many distances GPU can store, e.g. size of stage
size_t ratingsSize = numTrainUsers * TILE_DEPTH * sizeof(Rating);
freeMemSize -= ratingsSize * FREEMEM_SCALER;
// cout << "train rating size " << ratingsSize << "\nfreeMemSize is " << freeMemSize << endl;
int stageHeight = min(freeMemSize / (numTrainUsers * sizeof(float)) / TILE_SIZE, (long) numTrainUsers / TILE_SIZE);
// allocate memory for distances
checkCudaErrors(hipMalloc((void **) &d_distances, sizeof(float) * numTrainUsers * stageHeight * TILE_SIZE));
hipDeviceSynchronize();
dim3 threadsPerBlock(TILE_SIZE, TILE_SIZE);
// cout << "each stage has " << stageHeight << " blocks\n";
// cout << (numTrainUsers + stageHeight * TILE_SIZE - 1) / (stageHeight * TILE_SIZE) << " stages will be launched\n";
hipEvent_t start, stop;
float milliseconds = 0, distanceCalTime = 0, knnCalTime = 0;
hipEventCreate(&start);
hipEventCreate(&stop);
for (int stageStartUser = 0; stageStartUser < numTrainUsers; stageStartUser += stageHeight * TILE_SIZE) {
hipEventRecord(start);
hipEventSynchronize(start);
int effectiveStageHeight = min(stageHeight, (numTrainUsers - stageStartUser) / TILE_SIZE);
hipLaunchKernelGGL(( calculateAllDistance), dim3(effectiveStageHeight), dim3(threadsPerBlock), 0, 0,
stageStartUser, numTrainUsers, d_trainUsers, d_trainRatings, d_distances);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
distanceCalTime += milliseconds;
// KNN
hipEventRecord(start);
hipEventSynchronize(start);
for (int testUserIdOffset = 0; testUserIdOffset < effectiveStageHeight * TILE_SIZE; testUserIdOffset++) {
int numTestItems = h_testUsersIdx[stageStartUser + testUserIdOffset].numRatings;
if (numTestItems < 1) continue;
validTestSize += numTestItems;
// sort
sortNeighbors(d_distances + testUserIdOffset * numTrainUsers, numTrainUsers, d_idxIdMap);
// predict
int numBlocks = (numTestItems + CONC_ITEMS_NUM - 1) / CONC_ITEMS_NUM;
int remaining = numTestItems;
for (int block = 0; block < numBlocks; block++) {
int itemsInBlock = min(remaining, CONC_ITEMS_NUM);
remaining -= CONC_ITEMS_NUM;
dim3 threadsPerBlock(itemsInBlock, NUM_NEIGHBORS);
hipLaunchKernelGGL(( knn_8), dim3(1), dim3(threadsPerBlock), (itemsInBlock*(NUM_NEIGHBORS+1))*sizeof(short), 0,
numTrainUsers, k,
d_idxIdMap,
d_trainRatings, h_testUsersIdx[stageStartUser + testUserIdOffset].ratings + block * CONC_ITEMS_NUM,
d_trainUsers,
d_ratingSums, d_ratingCounts);
checkCudaErrors(hipMemcpy(h_ratingSums, d_ratingSums, sizeof(int) * itemsInBlock, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_ratingCounts, d_ratingCounts, sizeof(int) * itemsInBlock, hipMemcpyDeviceToHost));
for (int i = 0; i < itemsInBlock; i++) {
if (h_ratingCounts[i] == 0)
continue;
double actual = h_testUsers[stageStartUser + testUserIdOffset][i + block * CONC_ITEMS_NUM].second;
double prediction = (double)h_ratingSums[i] / 2 / h_ratingCounts[i];
// cout << "user: " << stageStartUser + testUserIdOffset + 1
// << " item: " << h_testUsers[stageStartUser + testUserIdOffset][i+block * itemsInBlock].first
// << " actual = " << actual << " predicted = "<< prediction << "\n";// " based on " << h_ratingCounts[i] << " ratings\n";
// if (stageStartUser + testUserIdOffset + 1 < 10) {
// cout << stageStartUser + testUserIdOffset + 1
// << ", " << h_testUsers[stageStartUser + testUserIdOffset][i+block * CONC_ITEMS_NUM].first
// << ", " << actual << ", "<< prediction << ", " << h_ratingSums[i] << ", " << h_ratingCounts[i] << "\n";
// }
errorSum += fabs(actual - prediction);
errorSumSq += pow(actual - prediction, 2);
predictedCount++;
}
}
}
// cout << "\nerror sum so far: " << errorSum << ", error sum squared so far " << errorSumSq << endl;
// double mae = errorSum / predictedCount,
// rmse = sqrt(errorSumSq / predictedCount);
// cout << "MAE = " << mae << endl;
// cout << "RMSE = " << rmse << endl;
// cout << "Predicted count so far = " << predictedCount << endl;
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
knnCalTime += milliseconds;
}
// printptr<<<1,1>>>(d_idxIdMap, numTrainUsers);
// cout << "\ndistance calculation took " << distanceCalTime << "ms\n";
// cout << "knn took " << knnCalTime << "ms\n";
cout << distanceCalTime << " " << knnCalTime << " ";
double mae = errorSum / predictedCount,
rmse = sqrt(errorSumSq / predictedCount);
// cout << "MAE = " << mae << endl;
// cout << "RMSE = " << rmse << endl;
// cout << "Predicted count = " << predictedCount << endl;
cout << mae << " " << rmse << " " << (double)predictedCount / (double)validTestSize << endl;
hipDeviceSynchronize();
/* Free memory */
hipEventDestroy(start);
hipEventDestroy(stop);
checkCudaErrors(hipFree(d_trainRatings));
checkCudaErrors(hipFree(d_testRatings));
checkCudaErrors(hipFree(d_trainUsers));
checkCudaErrors(hipFree(d_distances));
checkCudaErrors(hipFree(d_ratingSums));
checkCudaErrors(hipFree(d_ratingCounts));
checkCudaErrors(hipFree(d_idxIdMap));
hipDeviceReset();
delete[] h_testUsersIdx;
}
| 8fc43a6edfcf7b4441ede88ba5dbb98f6247b82f.cu | /*
============================================================================
Name : cuda_knn.cu
Author : Tyler Ouyang
Version :
Copyright : Copyright © 2016 Tyler Ouyang. All rights reserved.
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream>
#include <stdlib.h>
#include "common.cuh"
#include "common.h"
#include "utils.cuh"
#include "thrust_utils.h"
using namespace std;
#define FREEMEM_SCALER 50;
// Likelihood-ratio ("LIRA") similarity between two users' rating lists.
// Both lists must be sorted ascending by item id (Rating.x); Rating.y is the
// rating value. The merge loop counts, per absolute rating delta, how many
// items both users rated; the tail turns those counts into a log-likelihood
// ratio of "same taste" vs "random" delta distributions.
// NOTE(review): deltaCounter has 5 slots (indices 0..4) but ratings are
// stored doubled elsewhere in this file (see moveRatingsToDevice's `* 2`),
// so |r1->y - r2->y| can reach 9 and index out of bounds — the "TODO: change
// 10 to MACRO" comment suggests the array was meant to be larger. Verify
// before using this function (the distance kernel currently uses COS).
__device__
float calculateLiraDistance(
    Rating *r1Start,
    Rating *r1End,
    Rating *r2Start,
    Rating *r2End) {
  int maxRating = 5;
  // TODO: change 10 to MACRO
  // histogram of |rating difference| over co-rated items
  int deltaCounter[5] = {0};
  // standard sorted-list intersection: advance whichever side has the
  // smaller item id; on a match, record the rating delta
  while (r1Start < r1End && r2Start < r2End) {
    if (r1Start->x < r2Start->x) {
      r1Start++;
    }
    else if (r1Start->x == r2Start->x) {
      deltaCounter[abs(r1Start->y - r2Start->y)]++;
      r1Start++;
      r2Start++;
    }
    else {
      r2Start++;
    }
  }
  // accumulate P(delta | similar) / P(delta | random) over all delta bins
  float accumulator = 1;
  accumulator *= pow(0.5, deltaCounter[0]) / pow(1.0 / (float)maxRating, deltaCounter[0]);
  for (int i = 1; i < maxRating - 1; i++) {
    float pA = pow(2, -i - 1),
        pRand = 2 * (maxRating - i) / (float)(maxRating * maxRating);
    accumulator *= pow(pA, deltaCounter[i]) / pow(pRand, deltaCounter[i]);
  }
  // last bin handled separately (its "random" probability has no factor 2 range)
  float pRand = 2 / (float)(maxRating * maxRating);
  accumulator *= pow(pow(2, -maxRating + 1), deltaCounter[maxRating-1]) / pow(pRand, deltaCounter[maxRating-1]);
  return log10(accumulator);
}
// Cosine similarity between two users' sparse rating vectors.
// Each list is sorted ascending by item id (Rating.x); Rating.y holds the
// rating. Items rated by only one side contribute to that side's norm but
// not to the dot product (the other side's rating is treated as 0).
// Returns dot(r1, r2) / (||r1|| * ||r2||).
// NOTE(review): no guard against a zero norm — a user whose visible ratings
// are all zero would divide by zero; confirm callers exclude empty users.
__device__
float calculateCOSDistance(
    Rating *r1Start,
    Rating *r1End,
    Rating *r2Start,
    Rating *r2End) {
  float dotProduct = 0.0, r1NormSQ = 0.0, r2NormSQ = 0.0;
  // merge the two sorted lists, accumulating the dot product on matches
  while (r1Start < r1End && r2Start < r2End) {
    if (r1Start->x > r2Start->x) {
      // treat r1Start->rating as 0
      r2NormSQ += r2Start->y * r2Start->y;
      r2Start++;
    } else if (r1Start->x == r2Start->x) {
      dotProduct += r1Start->y * r2Start->y;
      r1NormSQ += r1Start->y * r1Start->y;
      r2NormSQ += r2Start->y * r2Start->y;
      r1Start++;
      r2Start++;
    } else {
      // treat r2Start->y as 0
      r1NormSQ += r1Start->y * r1Start->y;
      r1Start++;
    }
  }
  // finish baseUser tail, if any
  while (r1Start < r1End) {
    r1NormSQ += r1Start->y * r1Start->y;
    r1Start++;
  }
  // finish neighbor tail, if any
  while (r2Start < r2End) {
    r2NormSQ += r2Start->y * r2Start->y;
    r2Start++;
  }
  // distance
  return dotProduct / (sqrt(r1NormSQ) * sqrt(r2NormSQ));
}
/**
* CUDA kernel that computes distances between every two users in d_trainUsers
*/
// Compute cosine distances from a stage of "base" users to every train user.
// Launch: grid = stageHeight blocks, block = (TILE_SIZE, TILE_SIZE).
// threadIdx.x selects this block's base user; threadIdx.y strides over
// neighbors. Results land in d_distances[localUserId * numUsers + neighbor].
// Shared memory: 2 * TILE_SIZE users * TILE_DEPTH ratings (neighbors in the
// first half, base users in the second half).
// Barrier safety note: the caller makes numUsers a multiple of TILE_SIZE, so
// every thread runs the same number of loop iterations and the in-loop
// __syncthreads() calls are reached uniformly.
__global__
void calculateAllDistance(
    int stageStartUser,
    int numUsers,
    int *d_trainUsers,
    Rating *allRatings,
    float *d_distances) {
  int globalUserId = stageStartUser + blockIdx.x * TILE_SIZE + threadIdx.x;
  // user id relative in stage
  int localUserId = blockIdx.x * TILE_SIZE + threadIdx.x;
  // TODO: experimental, need optimization
  // space for TILE_SIZE * 2 users, each one has at most TILE_DEPTH ratings
  __shared__ Rating ratings[TILE_DEPTH * TILE_SIZE * 2];
  int numRatings = d_trainUsers[globalUserId];
  Rating *baseStart = ratings + (threadIdx.x + TILE_SIZE) * TILE_DEPTH,
      *baseEnd = baseStart + numRatings;
  // copy data to shared memory, base users are the last TILE_SIZE users in ratings
  // (cooperative copy: the TILE_SIZE threads sharing this threadIdx.x split the rows)
  Rating *copyFrom = allRatings + globalUserId * TILE_DEPTH;
#pragma unroll
  for (int i = threadIdx.y; i < numRatings; i += TILE_SIZE)
    baseStart[i] = copyFrom[i];
  __syncthreads();
  // TILE_SIZE users per iteration for now
  for (int i = threadIdx.y; i < numUsers; i += TILE_SIZE) {
    int nbNumRatings = d_trainUsers[i];
    Rating *neighborStart = ratings + threadIdx.y * TILE_DEPTH,
        *neighborEnd = neighborStart + nbNumRatings;
    copyFrom = allRatings + i * TILE_DEPTH;
    // copy data to shared memory, neighbors are the first TILE_SIZE users
#pragma unroll
    for (int j = threadIdx.x; j < nbNumRatings; j += TILE_SIZE)
      neighborStart[j] = copyFrom[j];
    __syncthreads();
    d_distances[localUserId * numUsers + i]
        = calculateCOSDistance(baseStart, baseEnd, neighborStart, neighborEnd);
    // barrier before the next iteration overwrites the neighbor tiles
    __syncthreads();
  }
}
/**
* CUDA kernel that computes KNN
*/
// For each of blockDim.x test items, accumulate the ratings of the first k
// nearest neighbors (in idxIdMap order, already sorted by distance) that
// actually rated that item. Launch: one block of (itemsInBlock, NUM_NEIGHBORS)
// threads; dynamic shared memory = itemsInBlock*(NUM_NEIGHBORS+1)*sizeof(short)
// (NUM_NEIGHBORS lookup slots per item + one "finished" flag per item).
// Outputs: ratingSums[item] = sum of matched neighbor ratings (doubled scale),
// ratingCounts[item] = number of matched neighbors (<= k).
// NOTE(review): the second __syncthreads() below sits inside
// `if (!finished[threadIdx.x])`, which can diverge between items (different
// threadIdx.x) once some items finish early — a divergent barrier is
// undefined behavior; confirm on the target architectures or hoist it.
__global__
void knn_8(int numUsers, int k,
    int *idxIdMap,
    Rating *trainRatings, Rating *testRatings,
    int *trainUser,
    int *ratingSums, int *ratingCounts) {
  // space to store ratings found by each thread
  extern __shared__ short foundRatings[];
  // per-item early-exit flags live right after the rating slots
  short *finished = (short*) &foundRatings[blockDim.x * NUM_NEIGHBORS];
  // flat slot index: row = test item (x), column = neighbor lane (y)
  int threadId = threadIdx.x * NUM_NEIGHBORS + threadIdx.y;
  // initialize shared memory
  foundRatings[threadId] = 0;
  if (threadIdx.y == 0 ) finished[threadIdx.x] = 0;
  int sumOfRatings = 0;
  int numOfMatchedNeighbors = 0;
  int testItemId = testRatings[threadIdx.x].x;
  // TODO: consider stopping at 20*K instead of numUsers
  // each y-lane probes every NUM_NEIGHBORS-th neighbor in distance order
  for (int neighborIdx = threadIdx.y; neighborIdx < numUsers; neighborIdx += NUM_NEIGHBORS) {
    // load ratings of NUM_NEIGHBORS users to shared memory
    int nbNumRatings = trainUser[idxIdMap[neighborIdx]];
    Rating *neighborStart = trainRatings + idxIdMap[neighborIdx] * TILE_DEPTH;
    __syncthreads();
    if (!finished[threadIdx.x]) {
      foundRatings[threadId] = findItemRating(testItemId, neighborStart, nbNumRatings);
      __syncthreads();
      // thread 0 of each movie collects information
      if (threadIdx.y == 0) {
        int count = min(numUsers - neighborIdx, NUM_NEIGHBORS);
        for (int i = 0; i < count; i++) {
          // stop once k rated neighbors have been found for this item
          if (numOfMatchedNeighbors == k) {
            finished[threadIdx.x] = 1;
            break;
          }
          int rate = foundRatings[threadId + i];
          if (rate > 0) {
            sumOfRatings += rate;
            numOfMatchedNeighbors++;
          }
        }
      }
    }
  }
  // lane 0 of each item publishes the accumulated result
  if (threadIdx.y == 0) {
    ratingSums[threadIdx.x] = sumOfRatings;
    ratingCounts[threadIdx.x] = numOfMatchedNeighbors;
  }
}
// Pack the training users' ratings into a dense [numUsers x TILE_DEPTH]
// device matrix plus a per-user rating-count array.
//
// h_trainUsers: host-side per-user (item, rating) lists, each sorted by item.
// d_users:      out — device array of per-user rating counts.
// d_ratings:    out — device matrix, user i's ratings at rows
//               [i*TILE_DEPTH, i*TILE_DEPTH + count). Slots past `count`
//               are uninitialized; kernels must respect d_users[i].
// Ratings are doubled on upload so half-star values stay integral.
void moveRatingsToDevice(
    H_Users h_trainUsers,
    int **d_users,
    Rating **d_ratings) {
  // make numTrainUsers a multiple of TILE_SIZE so the distance kernel's
  // tiling never needs edge handling
  int numUsers = h_trainUsers.size() / TILE_SIZE * TILE_SIZE;
  int totalNumRatings = numUsers * TILE_DEPTH;
  // value-initialized: all counts start at 0
  int *h_users = new int[numUsers]();
  // BUGFIX: element count, not byte count — the old code allocated
  // new Rating[sizeof(Rating) * totalNumRatings], over-allocating the
  // host staging buffer by a factor of sizeof(Rating).
  Rating *h_ratings = new Rating[totalNumRatings];
  checkCudaErrors(cudaMalloc((void **) d_ratings, sizeof(Rating) * totalNumRatings));
  for (int i = 0; i < numUsers; i++) {
    // clamp each user's list to the fixed per-user capacity
    int numRatings = min((int)h_trainUsers[i].size(), TILE_DEPTH);
    // copy vector to intermediate host array
    for (int j = 0; j < numRatings; j++) {
      h_ratings[i * TILE_DEPTH + j].x = h_trainUsers[i][j].first;
      h_ratings[i * TILE_DEPTH + j].y = h_trainUsers[i][j].second * 2;
    }
    h_users[i] = numRatings;
  }
  // move data from host to device
  checkCudaErrors(cudaMemcpy(*d_ratings, h_ratings, sizeof(Rating) * totalNumRatings, cudaMemcpyHostToDevice));
  checkCudaErrors(cudaMalloc((void **) d_users, sizeof(int) * numUsers));
  checkCudaErrors(cudaMemcpy(*d_users, h_users, sizeof(int) * numUsers, cudaMemcpyHostToDevice));
  delete[] h_ratings;
  delete[] h_users;
}
// Reset an array of User records to the empty state: no ratings buffer,
// zero rating count.
void initUsers(User *users, int num) {
  for (User *cur = users, *end = users + num; cur != end; ++cur) {
    *cur = {NULL, 0};
  }
}
// Upload all test ratings as one packed device array and record, per test
// user, a host-side {device pointer, count} index into that array.
//
// h_testUsers:         host-side per-user (item, rating) lists.
// h_users:             out — host index of numUsers entries; users beyond
//                      h_testUsers.size() (or with no ratings) stay {NULL, 0}.
// d_ratings:           out — packed device array of all test ratings.
// numUsers:            capacity of h_users (train-user count, TILE_SIZE multiple).
// testUserRatingCount: total number of test ratings across all users.
// Ratings are doubled on upload, matching the train-side encoding.
void moveTestRatingsToDevice(
    H_Users h_testUsers,
    User *h_users,
    Rating **d_ratings,
    int numUsers,
    int testUserRatingCount) {
  initUsers(h_users, numUsers);
  numUsers = min(numUsers, (int) h_testUsers.size());
  // BUGFIX: element count, not byte count — the old code allocated
  // new Rating[sizeof(Rating) * testUserRatingCount], over-allocating the
  // host staging buffer by a factor of sizeof(Rating).
  Rating *h_ratings = new Rating[testUserRatingCount];
  checkCudaErrors(cudaMalloc((void **) d_ratings, sizeof(Rating) * testUserRatingCount));
  int ratingsSoFar = 0;
  for (int i = 0; i < numUsers; i++) {
    int numRatings = h_testUsers[i].size();
    if (numRatings < 1) continue;
    // copy vector to intermediate host array
    for (int j = 0; j < numRatings; j++) {
      h_ratings[ratingsSoFar + j].x = h_testUsers[i][j].first;
      h_ratings[ratingsSoFar + j].y = h_testUsers[i][j].second * 2;
    }
    // save index: pointer into the (not-yet-filled) device array
    h_users[i].ratings = *d_ratings + ratingsSoFar;
    h_users[i].numRatings = numRatings;
    ratingsSoFar += numRatings;
  }
  // move data from host to device
  checkCudaErrors(cudaMemcpy(*d_ratings, h_ratings, sizeof(Rating) * testUserRatingCount, cudaMemcpyHostToDevice));
  delete[] h_ratings;
}
// End-to-end GPU KNN recommender: upload data, compute all user-user
// distances in memory-bounded stages, then for each test user sort neighbors
// and predict each held-out item's rating from its k nearest raters.
// Prints: distance-time knn-time MAE RMSE coverage (prediction rate).
// trainUserRatingCount is currently unused beyond logging.
void cudaCore(
    int trainUserRatingCount,
    int testUserRatingCount,
    H_Users h_trainUsers,
    H_Users h_testUsers,
    int k) {
  int *d_trainUsers, *d_ratingSums, *d_ratingCounts;
  int h_ratingCounts[CONC_ITEMS_NUM] = { 0 }, h_ratingSums[CONC_ITEMS_NUM] = { 0 };
  Rating *d_trainRatings, *d_testRatings;
  // round down to a TILE_SIZE multiple to match the distance kernel's tiling
  int numTrainUsers = h_trainUsers.size() / TILE_SIZE * TILE_SIZE;
  User *h_testUsersIdx = new User[numTrainUsers];
  float *d_distances;
  int *d_idxIdMap;
  int predictedCount = 0, validTestSize = 0;
  double errorSum = 0, errorSumSq = 0;
  moveRatingsToDevice(h_trainUsers, &d_trainUsers, &d_trainRatings);
  moveTestRatingsToDevice(h_testUsers, h_testUsersIdx, &d_testRatings, numTrainUsers, testUserRatingCount);
  // get free memory to size the staged distance matrix
  size_t freeMemSize, totalMemSize;
  checkCudaErrors(cudaMemGetInfo(&freeMemSize, &totalMemSize));
  checkCudaErrors(cudaMalloc((void **) &d_ratingSums, CONC_ITEMS_NUM * sizeof(int)));
  checkCudaErrors(cudaMalloc((void **) &d_ratingCounts, CONC_ITEMS_NUM * sizeof(int)));
  checkCudaErrors(cudaMalloc((void **) &d_idxIdMap, numTrainUsers * sizeof(int)));
  // calculate how many distances GPU can store, e.g. size of stage
  size_t ratingsSize = numTrainUsers * TILE_DEPTH * sizeof(Rating);
  // reserve headroom for other allocations (NB: FREEMEM_SCALER is defined
  // with a trailing semicolon — presumably intentional-by-luck; verify)
  freeMemSize -= ratingsSize * FREEMEM_SCALER;
  // stage height in blocks: rows of the distance matrix computed per launch
  int stageHeight = min(freeMemSize / (numTrainUsers * sizeof(float)) / TILE_SIZE, (long) numTrainUsers / TILE_SIZE);
  // allocate memory for distances
  checkCudaErrors(cudaMalloc((void **) &d_distances, sizeof(float) * numTrainUsers * stageHeight * TILE_SIZE));
  cudaDeviceSynchronize();
  dim3 threadsPerBlock(TILE_SIZE, TILE_SIZE);
  cudaEvent_t start, stop;
  float milliseconds = 0, distanceCalTime = 0, knnCalTime = 0;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  // process base users stage by stage, reusing d_distances each time
  for (int stageStartUser = 0; stageStartUser < numTrainUsers; stageStartUser += stageHeight * TILE_SIZE) {
    cudaEventRecord(start);
    cudaEventSynchronize(start);
    // the last stage may be shorter
    int effectiveStageHeight = min(stageHeight, (numTrainUsers - stageStartUser) / TILE_SIZE);
    calculateAllDistance<<<effectiveStageHeight, threadsPerBlock>>>
        (stageStartUser, numTrainUsers, d_trainUsers, d_trainRatings, d_distances);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    distanceCalTime += milliseconds;
    // KNN: predict every test item of every user in this stage
    cudaEventRecord(start);
    cudaEventSynchronize(start);
    for (int testUserIdOffset = 0; testUserIdOffset < effectiveStageHeight * TILE_SIZE; testUserIdOffset++) {
      int numTestItems = h_testUsersIdx[stageStartUser + testUserIdOffset].numRatings;
      if (numTestItems < 1) continue;
      validTestSize += numTestItems;
      // sort this user's neighbor row by distance; d_idxIdMap gets the order
      sortNeighbors(d_distances + testUserIdOffset * numTrainUsers, numTrainUsers, d_idxIdMap);
      // predict up to CONC_ITEMS_NUM held-out items per kernel launch
      int numBlocks = (numTestItems + CONC_ITEMS_NUM - 1) / CONC_ITEMS_NUM;
      int remaining = numTestItems;
      for (int block = 0; block < numBlocks; block++) {
        int itemsInBlock = min(remaining, CONC_ITEMS_NUM);
        remaining -= CONC_ITEMS_NUM;
        dim3 threadsPerBlock(itemsInBlock, NUM_NEIGHBORS);
        knn_8<<<1, threadsPerBlock, (itemsInBlock*(NUM_NEIGHBORS+1))*sizeof(short)>>>
            (numTrainUsers, k,
             d_idxIdMap,
             d_trainRatings, h_testUsersIdx[stageStartUser + testUserIdOffset].ratings + block * CONC_ITEMS_NUM,
             d_trainUsers,
             d_ratingSums, d_ratingCounts);
        checkCudaErrors(cudaMemcpy(h_ratingSums, d_ratingSums, sizeof(int) * itemsInBlock, cudaMemcpyDeviceToHost));
        checkCudaErrors(cudaMemcpy(h_ratingCounts, d_ratingCounts, sizeof(int) * itemsInBlock, cudaMemcpyDeviceToHost));
        for (int i = 0; i < itemsInBlock; i++) {
          // no neighbor rated this item: no prediction possible
          if (h_ratingCounts[i] == 0)
            continue;
          double actual = h_testUsers[stageStartUser + testUserIdOffset][i + block * CONC_ITEMS_NUM].second;
          // "/ 2" undoes the doubled-rating encoding used on upload
          double prediction = (double)h_ratingSums[i] / 2 / h_ratingCounts[i];
          errorSum += fabs(actual - prediction);
          errorSumSq += pow(actual - prediction, 2);
          predictedCount++;
        }
      }
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    knnCalTime += milliseconds;
  }
  cout << distanceCalTime << " " << knnCalTime << " ";
  double mae = errorSum / predictedCount,
      rmse = sqrt(errorSumSq / predictedCount);
  cout << mae << " " << rmse << " " << (double)predictedCount / (double)validTestSize << endl;
  cudaDeviceSynchronize();
  /* Free memory */
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  checkCudaErrors(cudaFree(d_trainRatings));
  checkCudaErrors(cudaFree(d_testRatings));
  checkCudaErrors(cudaFree(d_trainUsers));
  checkCudaErrors(cudaFree(d_distances));
  checkCudaErrors(cudaFree(d_ratingSums));
  checkCudaErrors(cudaFree(d_ratingCounts));
  checkCudaErrors(cudaFree(d_idxIdMap));
  cudaDeviceReset();
  delete[] h_testUsersIdx;
}
|
ff379ef3954e8568f83f0cf20a61612c39f0534f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/argsort/argsort.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./argsort.cuh"
#include "./bitonic_sort.cuh"
#include "megdnn/basic_types.h"
#include "src/cuda/utils.cuh"
#include "src/cuda/hipcub/hipcub.hpp"
#include "src/cuda/cub/device/device_segmented_radix_sort.cuh"
using namespace megdnn;
using namespace cuda;
namespace {
// Random-access offset generator handed to CUB's segmented radix sort:
// element i maps to bias + stride * i. With (0, N) it enumerates segment
// begin offsets and with (N, N) segment end offsets for M rows of length N,
// without materializing an offsets array in memory.
struct StridedOffsetIterator {
    int bias, stride;
    StridedOffsetIterator(int bias_, int stride_)
            : bias(bias_), stride(stride_) {}
    __device__ __forceinline__ int operator[](int i) const {
        return stride * i + bias;
    }
};
// True when a row of length N should go through the in-house bitonic kernel
// (always faster than radix sort for rows short enough to fit it). M unused.
bool use_bitonic(uint32_t /*M*/, uint32_t N) {
    const bool fits_bitonic = (N <= BITONIC_SORT_MAX_LENGTH);
    return fits_bitonic;
}
// Choose between one segmented-sort launch and M independent per-row sorts.
// Empirical timings (N ignored):
//   sort(1, 1e6): 0.574ms      segsort({1,2,8,16}, 1e6): 7-8ms
//   sort(1, 1e7): 3.425ms      segsort({1,2,8,16}, 1e7): 71-84ms
// Segmented sort is roughly 7x-10x slower than a plain sort on small
// batches, so it only pays off once the batch M is large enough.
bool use_segmented(uint32_t M, uint32_t /*N*/) {
    constexpr uint32_t kMinBatchForSegmented = 8;
    return M >= kMinBatchForSegmented;
}
// Fill dst[0..n) with the repeating ramp 0, 1, ..., mod-1 (dst[i] = i % mod).
// 1-D launch; used to synthesize the per-row index sequence that the sort
// carries along as values when the caller supplies no indices.
__global__ void kern_arange(int* dst, uint32_t n, uint32_t mod) {
    uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) {
        dst[i] = i % mod;
    }
}
// Workspace bytes needed to sort M rows of length N with element type ctype.
// The bitonic path is in-place (0 bytes); otherwise forward the NULL-pointer
// size query to cub_sort_pairs, which returns the radix-sort temp size.
template <typename ctype>
size_t get_sort_workspace(uint32_t M, uint32_t N, bool is_ascending) {
    if (use_bitonic(M, N)) {
        return 0;
    }
    return argsort::cub_sort_pairs<ctype, int>(is_ascending, NULL, 0, NULL, NULL, NULL, NULL,
                                               M, N, 0, sizeof(float)*8, NULL);
}
} // anonymous namespace
// Sort M independent rows of N (key, value) pairs with CUB radix sort.
// Dual-mode, following CUB's convention: when the data pointers are NULL the
// underlying calls only compute the required temp-storage size, which is
// returned; otherwise the sort runs on `stream` and the same size is
// returned. Large batches use one segmented-sort launch (row boundaries via
// StridedOffsetIterator); small batches issue one full sort per row.
// begin_bit/end_bit restrict the radix passes to that key-bit range.
template <typename KeyType, typename ValueType>
MEGDNN_NOINLINE size_t argsort::cub_sort_pairs(
        bool is_ascending, void* workspace, size_t workspace_size,
        const KeyType* keys_in, KeyType* keys_out, const ValueType* values_in,
        ValueType* values_out, uint32_t M, uint32_t N, int begin_bit, int end_bit, hipStream_t stream){
    hipError_t err;
    if (use_segmented(M, N)) {
        if (is_ascending) {
            err = hipcub::DeviceSegmentedRadixSort::SortPairs(
                    workspace, workspace_size, keys_in, keys_out, values_in,
                    values_out, N * M, M, StridedOffsetIterator(0, N),
                    StridedOffsetIterator(N, N), begin_bit, end_bit, stream);
            cuda_check(err);
        } else {
            err = hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
                    workspace, workspace_size, keys_in, keys_out, values_in,
                    values_out, N * M, M, StridedOffsetIterator(0, N),
                    StridedOffsetIterator(N, N), begin_bit, end_bit, stream);
            cuda_check(err);
        }
    } else {
        if (is_ascending) {
            for (size_t i = 0; i < M; ++i) {
                err = hipcub::DeviceRadixSort::SortPairs(
                        workspace, workspace_size, keys_in + N * i,
                        keys_out + N * i, values_in + N * i, values_out + N * i,
                        N, begin_bit, end_bit, stream);
                cuda_check(err);
                // size-query mode: one call suffices, bail out early
                if (!keys_in) {
                    return workspace_size;
                }
            }
        } else {
            for (size_t i = 0; i < M; ++i) {
                err = hipcub::DeviceRadixSort::SortPairsDescending(
                        workspace, workspace_size, keys_in + N * i,
                        keys_out + N * i, values_in + N * i, values_out + N * i,
                        N, begin_bit, end_bit, stream);
                cuda_check(err);
                // size-query mode: one call suffices, bail out early
                if (!keys_in) {
                    return workspace_size;
                }
            }
        }
    }
    return workspace_size;
}
// Public workspace query for the forward sort: dispatch on the element dtype
// to the templated get_sort_workspace. If the caller will not supply a source
// index array, reserve extra space after the (float-aligned) sort workspace
// for the M*N int indices that forward() generates itself.
size_t argsort::get_fwd_workspace_in_bytes(uint32_t M, uint32_t N, DType dtype,
                                           bool is_ascending,
                                           bool iptr_src_given) {
    size_t size = 0;
    switch (dtype.enumv().ev) {
#define cb(ctype)                                                 \
    case DTypeTrait<ctype>::enumv:                                \
        size = get_sort_workspace<ctype>(M, N, is_ascending);     \
        break;
        ARGSORT_FOREACH_CTYPE(cb)
#undef cb
        default:
            megdnn_throw("argsort only supports float, int32 and float16");
    }
    if (!iptr_src_given) {
        size = DIVUP(size, sizeof(float)) * sizeof(float) + M * N * sizeof(int);
    }
    return size;
}
// Argsort forward: sort each of the M rows of sptr (length N) into dptr and
// write the permuted indices to iptr. If iptr_src is NULL, an index ramp
// 0..N-1 per row is generated into the tail of `workspace` (after the
// float-aligned sort scratch — layout matches get_fwd_workspace_in_bytes).
// Short rows take the in-place bitonic path; everything else goes through
// CUB radix sort on `stream`.
template <typename dtype>
void argsort::forward(const dtype* sptr, dtype* dptr, int* iptr,
                      void* workspace, uint32_t M, uint32_t N,
                      bool is_ascending, hipStream_t stream,
                      const int* iptr_src) {
    size_t wk_size = get_sort_workspace<dtype>(M, N, is_ascending);
    if (!iptr_src) {
        // index buffer lives right after the sort scratch, float-aligned
        int* ptr = reinterpret_cast<int*>(static_cast<uint8_t*>(workspace) +
                                          DIVUP(wk_size, sizeof(float)) *
                                                  sizeof(float));
        hipLaunchKernelGGL(( kern_arange), dim3(DIVUP(N * M, 512)), dim3(512), 0, stream, ptr, M * N, N);
        iptr_src = ptr;
    }
    if (use_bitonic(M, N)) {
        cuda_check(bitonic_sort(M, N, sptr, iptr_src, dptr, iptr, is_ascending,
                                stream));
    } else {
        cub_sort_pairs(is_ascending, workspace, wk_size, sptr, dptr, iptr_src,
                       iptr, M, N, 0, sizeof(float)*8, stream);
    }
}
namespace megdnn {
namespace cuda {
#define INST_CUB_SORT(dtype) \
template MEGDNN_NOINLINE size_t argsort::cub_sort_pairs<dtype, dtype>(bool, \
void*, size_t, const dtype*, dtype*, \
const dtype*, dtype*, uint32_t, uint32_t,\
int, int, hipStream_t);
#define INST_FORWARD(dtype) \
template void argsort::forward<dtype>(const dtype*, dtype*, int*, void*, \
uint32_t, uint32_t, bool, hipStream_t, \
const int*);
ARGSORT_FOREACH_CTYPE(INST_FORWARD)
INST_CUB_SORT(uint32_t)
INST_CUB_SORT(uint64_t)
#undef INST_CUB_SORT
#undef INST_FORWARD
}
} // namespace megdnn
// vim: ft=cuda syntax=cuda.doxygen
| ff379ef3954e8568f83f0cf20a61612c39f0534f.cu | /**
* \file dnn/src/cuda/argsort/argsort.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./argsort.cuh"
#include "./bitonic_sort.cuh"
#include "megdnn/basic_types.h"
#include "src/cuda/utils.cuh"
#include "src/cuda/cub/device/device_radix_sort.cuh"
#include "src/cuda/cub/device/device_segmented_radix_sort.cuh"
using namespace megdnn;
using namespace cuda;
namespace {
struct StridedOffsetIterator {
int bias, stride;
StridedOffsetIterator(int bias_, int stride_)
: bias(bias_), stride(stride_) {}
__device__ __forceinline__ int operator[](int i) const {
return stride * i + bias;
}
};
bool use_bitonic(uint32_t /*M*/, uint32_t N) {
// bitonic sort is preferred when N is small (alwyas faster than radix sort)
return N <= BITONIC_SORT_MAX_LENGTH;
}
bool use_segmented(uint32_t M, uint32_t /*N*/) {
// an empirical value:
// sort(1, 1e6): 0.574ms
// segsort({1,2,8,16}, 1e6): 7-8ms
// sort(1, 1e7): 3.425ms
// segsort({1,2,8,16}, 1e7): 71-84ms
//
// segsort is about 7x-10x slower than sort on small batches, so we can
// expect it to be faster than sort when batch is large enough.
return M >= 8;
}
__global__ void kern_arange(int* dst, uint32_t n, uint32_t mod) {
uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n) {
dst[i] = i % mod;
}
}
template <typename ctype>
size_t get_sort_workspace(uint32_t M, uint32_t N, bool is_ascending) {
if (use_bitonic(M, N)) {
return 0;
}
return argsort::cub_sort_pairs<ctype, int>(is_ascending, NULL, 0, NULL, NULL, NULL, NULL,
M, N, 0, sizeof(float)*8, NULL);
}
} // anonymous namespace
template <typename KeyType, typename ValueType>
MEGDNN_NOINLINE size_t argsort::cub_sort_pairs(
bool is_ascending, void* workspace, size_t workspace_size,
const KeyType* keys_in, KeyType* keys_out, const ValueType* values_in,
ValueType* values_out, uint32_t M, uint32_t N, int begin_bit, int end_bit,cudaStream_t stream){
cudaError_t err;
if (use_segmented(M, N)) {
if (is_ascending) {
err = cub::DeviceSegmentedRadixSort::SortPairs(
workspace, workspace_size, keys_in, keys_out, values_in,
values_out, N * M, M, StridedOffsetIterator(0, N),
StridedOffsetIterator(N, N), begin_bit, end_bit, stream);
cuda_check(err);
} else {
err = cub::DeviceSegmentedRadixSort::SortPairsDescending(
workspace, workspace_size, keys_in, keys_out, values_in,
values_out, N * M, M, StridedOffsetIterator(0, N),
StridedOffsetIterator(N, N), begin_bit, end_bit, stream);
cuda_check(err);
}
} else {
if (is_ascending) {
for (size_t i = 0; i < M; ++i) {
err = cub::DeviceRadixSort::SortPairs(
workspace, workspace_size, keys_in + N * i,
keys_out + N * i, values_in + N * i, values_out + N * i,
N, begin_bit, end_bit, stream);
cuda_check(err);
if (!keys_in) {
return workspace_size;
}
}
} else {
for (size_t i = 0; i < M; ++i) {
err = cub::DeviceRadixSort::SortPairsDescending(
workspace, workspace_size, keys_in + N * i,
keys_out + N * i, values_in + N * i, values_out + N * i,
N, begin_bit, end_bit, stream);
cuda_check(err);
if (!keys_in) {
return workspace_size;
}
}
}
}
return workspace_size;
}
size_t argsort::get_fwd_workspace_in_bytes(uint32_t M, uint32_t N, DType dtype,
bool is_ascending,
bool iptr_src_given) {
size_t size = 0;
switch (dtype.enumv().ev) {
#define cb(ctype) \
case DTypeTrait<ctype>::enumv: \
size = get_sort_workspace<ctype>(M, N, is_ascending); \
break;
ARGSORT_FOREACH_CTYPE(cb)
#undef cb
default:
megdnn_throw("argsort only supports float, int32 and float16");
}
if (!iptr_src_given) {
size = DIVUP(size, sizeof(float)) * sizeof(float) + M * N * sizeof(int);
}
return size;
}
template <typename dtype>
void argsort::forward(const dtype* sptr, dtype* dptr, int* iptr,
void* workspace, uint32_t M, uint32_t N,
bool is_ascending, cudaStream_t stream,
const int* iptr_src) {
size_t wk_size = get_sort_workspace<dtype>(M, N, is_ascending);
if (!iptr_src) {
int* ptr = reinterpret_cast<int*>(static_cast<uint8_t*>(workspace) +
DIVUP(wk_size, sizeof(float)) *
sizeof(float));
kern_arange<<<DIVUP(N * M, 512), 512, 0, stream>>>(ptr, M * N, N);
iptr_src = ptr;
}
if (use_bitonic(M, N)) {
cuda_check(bitonic_sort(M, N, sptr, iptr_src, dptr, iptr, is_ascending,
stream));
} else {
cub_sort_pairs(is_ascending, workspace, wk_size, sptr, dptr, iptr_src,
iptr, M, N, 0, sizeof(float)*8, stream);
}
}
namespace megdnn {
namespace cuda {
#define INST_CUB_SORT(dtype) \
template MEGDNN_NOINLINE size_t argsort::cub_sort_pairs<dtype, dtype>(bool, \
void*, size_t, const dtype*, dtype*, \
const dtype*, dtype*, uint32_t, uint32_t,\
int, int, cudaStream_t);
#define INST_FORWARD(dtype) \
template void argsort::forward<dtype>(const dtype*, dtype*, int*, void*, \
uint32_t, uint32_t, bool, cudaStream_t, \
const int*);
ARGSORT_FOREACH_CTYPE(INST_FORWARD)
INST_CUB_SORT(uint32_t)
INST_CUB_SORT(uint64_t)
#undef INST_CUB_SORT
#undef INST_FORWARD
}
} // namespace megdnn
// vim: ft=cuda syntax=cuda.doxygen
|
e14ba83bcf85f8cd34a9cd69bc637bee3b13ef80.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "utils.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
// Volumetric (3D) average pooling, forward pass: one thread per output voxel.
// Grid: x/y cover output columns/rows; blockIdx.z encodes (slice, frame) as
// slice * outputTime + frame. Kernel window kT x kH x kW, strides dT/dH/dW;
// the window is clipped at the input boundary but the sum is still scaled by
// the fixed normFactor — presumably 1/(kT*kH*kW) from the caller (not
// visible here); confirm whether clipped windows should divide by the
// clipped count instead.
__global__ void cuda_VolumetricAveragePooling_updateOutput(
    THCDeviceTensor<float, 4> input, THCDeviceTensor<float, 4> output,
    int kT, int kH, int kW, int dT, int dH, int dW, float normFactor) {
  int oCol = blockIdx.x * blockDim.x + threadIdx.x;
  int oRow = blockIdx.y * blockDim.y + threadIdx.y;
  int oFrame = blockIdx.z % output.getSize(1); // output frame/time
  int slice = blockIdx.z / output.getSize(1); // output slice/feature
  if (oRow < output.getSize(2) && oCol < output.getSize(3)) {
    float sum = 0.0;
    // top-left-front corner of the pooling window in input coordinates
    int iColumn = oCol * dW;
    int iRow = oRow * dH;
    int iFrame = oFrame * dT;
    for (int frame = 0; frame < kT; ++frame) {
      if (iFrame + frame < input.getSize(1)) {
        for (int row = 0; row < kH; ++row) {
          if (iRow + row < input.getSize(2)) {
            for (int column = 0; column < kW; ++column) {
              if (iColumn + column < input.getSize(3)) {
                float val = input[slice][iFrame + frame][iRow + row][iColumn + column];
                sum += val;
              }
            }
          }
        }
      }
    }
    output[slice][oFrame][oRow][oCol] = sum * normFactor;
  }
}
// Inner-most loop size (kW) passed as template parameter for
// performance reasons.
//
// Templated variant of the volumetric average-pooling forward kernel: the
// inner-most loop bound (kernel width) is a compile-time constant so the
// compiler can fully unroll it. Otherwise identical to the runtime-kW
// version above: one thread per output voxel, blockIdx.z = slice * time +
// frame, window clipped at the input edge, result scaled by normFactor.
template<int KERNEL_WIDTH>
__global__ void cuda_VolumetricAveragePooling_updateOutput(
    THCDeviceTensor<float, 4> input, THCDeviceTensor<float, 4> output,
    int kT, int kH, int dT, int dH, int dW, float normFactor) {
  int oCol = blockIdx.x * blockDim.x + threadIdx.x;
  int oRow = blockIdx.y * blockDim.y + threadIdx.y;
  int oFrame = blockIdx.z % output.getSize(1); // output frame/time
  int slice = blockIdx.z / output.getSize(1); // output slice/feature
  if (oRow < output.getSize(2) && oCol < output.getSize(3)) {
    float sum = 0.0;
    // top-left-front corner of the pooling window in input coordinates
    int iColumn = oCol * dW;
    int iRow = oRow * dH;
    int iFrame = oFrame * dT;
    for (int frame = 0; frame < kT; ++frame) {
      if (iFrame + frame < input.getSize(1)) {
        for (int row = 0; row < kH; ++row) {
          if (iRow + row < input.getSize(2)) {
            for (int column = 0; column < KERNEL_WIDTH; ++column) {
              if (iColumn + column < input.getSize(3)) {
                float val = input[slice][iFrame + frame][iRow + row][iColumn + column];
                sum += val;
              }
            }
          }
        }
      }
    }
    output[slice][oFrame][oRow][oCol] = sum * normFactor;
  }
}
#define LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateOutput<KW>), dim3(grid), dim3(block), 0, 0, \
cudaInput, cudaOutput, kT, kH, dT, dH, dW, normFactor); \
break
// Lua binding: forward pass of volumetric (3D) average pooling.
// Lua stack: (1) module table carrying stride (dT,dH,dW), kernel size
// (kT,kH,kW) and an `output` CudaTensor field; (2) the input CudaTensor,
// 4D (slices x time x height x width) or 5D (batch x slices x time x
// height x width). Resizes and fills `output`; returns 1 Lua value.
static int cunn_VolumetricAveragePooling_updateOutput(lua_State *L) {
  // State
  THCState *state = getCutorchState(L);
  // Input
  THCudaTensor* input = static_cast<THCudaTensor*>(
    luaT_checkudata(L, 2, "torch.CudaTensor"));
  // Params:
  int dT = luaT_getfieldcheckint(L, 1, "dT");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int kT = luaT_getfieldcheckint(L, 1, "kT");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  THCudaTensor *output = static_cast<THCudaTensor*>(
    luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"));
  int batchSize;
  int inputSlices;
  int inputTime;
  int inputHeight;
  int inputWidth;
  // Query dimensionality once through the THC API. (The original mixed
  // THCudaTensor_nDimension() with a raw input->nDimension access below;
  // unified here for consistency. newContiguous preserves the rank, so
  // caching the value is safe.)
  const int nDim = THCudaTensor_nDimension(state, input);
  if (nDim == 4) {
    luaL_argcheck(L,
                  THCudaTensor_size(state, input, 1) >= kT &&
                  THCudaTensor_size(state, input, 2) >= kH &&
                  THCudaTensor_size(state, input, 3) >= kW, 2,
                  "input image smaller than kernel size");
    /* sizes */
    batchSize   = 1;
    inputSlices = THCudaTensor_size(state, input, 0);
    inputTime   = THCudaTensor_size(state, input, 1);
    inputHeight = THCudaTensor_size(state, input, 2);
    inputWidth  = THCudaTensor_size(state, input, 3);
  } else if (nDim == 5) {
    luaL_argcheck(L,
                  THCudaTensor_size(state, input, 2) >= kT &&
                  THCudaTensor_size(state, input, 3) >= kH &&
                  THCudaTensor_size(state, input, 4) >= kW, 2,
                  "input image smaller than kernel size");
    /* sizes */
    batchSize   = THCudaTensor_size(state, input, 0);
    inputSlices = THCudaTensor_size(state, input, 1);
    inputTime   = THCudaTensor_size(state, input, 2);
    inputHeight = THCudaTensor_size(state, input, 3);
    inputWidth  = THCudaTensor_size(state, input, 4);
  } else {
    // raises a Lua error (does not return), so the sizes above are never
    // read uninitialized
    luaL_argcheck(L, 0, 2, "4D or 5D tensor expected");
  }
  // Valid (no-padding) pooling output extents.
  int outputTime   = (inputTime - kT) / dT + 1;
  int outputHeight = (inputHeight - kH) / dH + 1;
  int outputWidth  = (inputWidth - kW) / dW + 1;
  if (nDim == 4) { /* 4D */
    /* resize output */
    THCudaTensor_resize4d(state, output, inputSlices,
                          outputTime, outputHeight, outputWidth);
  } else { /* 5D */
    THCudaTensor_resize5d(state, output, batchSize, inputSlices,
                          outputTime, outputHeight, outputWidth);
  }
  input = THCudaTensor_newContiguous(state, input);
  // Collapse batch and feature dimensions so the kernels see 4D tensors.
  THCDeviceTensor<float, 4> cudaInput;
  THCDeviceTensor<float, 4> cudaOutput;
  if (nDim == 4) {
    cudaInput  = toDeviceTensor<float, 4>(state, input);
    cudaOutput = toDeviceTensor<float, 4>(state, output);
  } else {
    cudaInput  = toDeviceTensor<float, 5>(state, input).downcastOuter<4>();
    cudaOutput = toDeviceTensor<float, 5>(state, output).downcastOuter<4>();
  }
  dim3 block(32, 8);
  dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
            THCCeilDiv(outputHeight, static_cast<int>(block.y)),
            outputTime * inputSlices * batchSize);
  float normFactor = 1.0f / static_cast<float>(kT * kH * kW);
  // Small kernel widths (<= 7) dispatch to a width-specialized template;
  // anything else falls back to the generic kernel.
  switch (kW) {
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(1);
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2);
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3);
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(4);
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(5);
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(6);
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(7);
    default:
      hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateOutput), dim3(grid), dim3(block), 0, 0, cudaInput,
                         cudaOutput,
                         kT, kH, kW,
                         dT, dH, dW,
                         normFactor);
      break;
  }
  // Release the contiguous copy created above.
  THCudaTensor_free(state, input);
  return 1;
}
// Backward pass specialized for stride-1 pooling: one thread per gradInput
// element GATHERS the gradOutput values of every window containing it
// (no atomics needed). Launch layout: x -> input column, y -> input row,
// z -> slice * inputTime + frame. normFactor is expected to be
// 1/(kT*kH*kW) (see the host wrapper).
__global__ void cuda_VolumetricAveragePooling_updateGradInput_Stride1(
THCDeviceTensor<float, 4> gradOutput,
THCDeviceTensor<float, 4> gradInput,
int kT, int kH, int kW, float normFactor) {
int iCol = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = blockIdx.z % gradInput.getSize(1); // input frame/time
int slice = blockIdx.z / gradInput.getSize(1); // input slice/feature
// guard against over-tiled threads
if (iRow < gradInput.getSize(2) && iCol < gradInput.getSize(3)) {
float sum = 0.0;
// Pointer to the first contributing gradOutput element; the loops below
// walk from it via raw offsets, which requires gradOutput to be
// contiguous (the host wrapper calls newContiguous before launching).
float *gOut = &gradOutput[slice][max(0, iFrame - kT + 1)]
[max(0, iRow - kH + 1)][max(0, iCol - kW + 1)];
int frameOffset = 0;
// Windows containing (iFrame,iRow,iCol) start at index-k+1 .. index,
// clamped to the gradOutput bounds in every dimension.
for (int oFrame = max(0, iFrame - kT + 1);
oFrame < min(iFrame + 1, gradOutput.getSize(1));
++oFrame) {
int rowOffset = frameOffset;
for (int oRow = max(0, iRow - kH + 1);
oRow < min(iRow + 1, gradOutput.getSize(2));
++oRow) {
int colOffset = rowOffset;
for (int oCol = max(0, iCol - kW + 1);
oCol < min(iCol + 1, gradOutput.getSize(3));
++oCol) {
sum += gOut[colOffset];
++colOffset;
}
// advance one gradOutput row (width elements)
rowOffset += gradOutput.getSize(3);
}
// advance one gradOutput frame (height * width elements)
frameOffset += gradOutput.getSize(2) * gradOutput.getSize(3);
}
gradInput[slice][iFrame][iRow][iCol] = sum * normFactor;
}
}
// Backward pass for overlapping pooling windows: one thread per gradOutput
// element SCATTERS its equally-weighted share (grad / kernel volume) into
// every gradInput cell of its window. Overlap means several threads may hit
// the same gradInput cell, hence atomicAdd (gradInput is zeroed by the host
// wrapper). Launch layout: x -> output column, y -> output row,
// z -> slice * outputTime + frame.
__global__ void cuda_VolumetricAveragePooling_updateGradInput_atomicAdd(
THCDeviceTensor<float, 4> gradOutput,
THCDeviceTensor<float, 4> gradInput,
int kT, int kH, int kW, int dT, int dH, int dW) {
const int oCol   = blockIdx.x * blockDim.x + threadIdx.x;
const int oRow   = blockIdx.y * blockDim.y + threadIdx.y;
const int oFrame = blockIdx.z % gradOutput.getSize(1); // gradOutput frame/time
const int slice  = blockIdx.z / gradOutput.getSize(1); // gradOutput slice/feature
// guard against over-tiled threads
if (oRow >= gradOutput.getSize(2) || oCol >= gradOutput.getSize(3))
return;
const float val = gradOutput[slice][oFrame][oRow][oCol] / (kT * kH * kW);
const int tBegin = oFrame * dT;
const int hBegin = oRow * dH;
const int wBegin = oCol * dW;
for (int t = tBegin; t < tBegin + kT; ++t)
for (int h = hBegin; h < hBegin + kH; ++h)
for (int w = wBegin; w < wBegin + kW; ++w)
atomicAdd(&gradInput[slice][t][h][w], val);
}
// Backward pass for NON-overlapping pooling windows: each gradInput cell
// belongs to at most one window, so each thread (one per gradOutput
// element) can write its equally-weighted share with plain stores — no
// atomics required. Launch layout: x -> output column, y -> output row,
// z -> slice * outputTime + frame.
__global__ void cuda_VolumetricAveragePooling_updateGradInput(
THCDeviceTensor<float, 4> gradOutput,
THCDeviceTensor<float, 4> gradInput,
int kT, int kH, int kW, int dT, int dH, int dW) {
const int oCol   = blockIdx.x * blockDim.x + threadIdx.x;
const int oRow   = blockIdx.y * blockDim.y + threadIdx.y;
const int oFrame = blockIdx.z % gradOutput.getSize(1); // gradOutput frame/time
const int slice  = blockIdx.z / gradOutput.getSize(1); // gradOutput slice/feature
// guard against over-tiled threads
if (oRow >= gradOutput.getSize(2) || oCol >= gradOutput.getSize(3))
return;
const float val = gradOutput[slice][oFrame][oRow][oCol] / (kT * kH * kW);
const int tBegin = oFrame * dT;
const int hBegin = oRow * dH;
const int wBegin = oCol * dW;
for (int t = tBegin; t < tBegin + kT; ++t)
for (int h = hBegin; h < hBegin + kH; ++h)
for (int w = wBegin; w < wBegin + kW; ++w)
gradInput[slice][t][h][w] = val;
}
// Lua binding: backward pass of volumetric (3D) average pooling.
// Lua stack: (1) module table with dT/dH/dW, kT/kH/kW and a `gradInput`
// CudaTensor field; (2) input CudaTensor (4D or 5D); (3) gradOutput.
// Resizes gradInput to match input, zeroes it, then dispatches to one of
// three kernels: a stride-1 gather kernel, an atomicAdd scatter kernel
// (overlapping windows), or a plain scatter kernel. Returns 1 Lua value.
static int cunn_VolumetricAveragePooling_updateGradInput(lua_State *L) {
// State
THCState *state = getCutorchState(L);
// Input
THCudaTensor* input = static_cast<THCudaTensor*>(
luaT_checkudata(L, 2, "torch.CudaTensor"));
// gradOutput
THCudaTensor* gradOutput = static_cast<THCudaTensor*>(
luaT_checkudata(L, 3, "torch.CudaTensor"));
// Params
int dT = luaT_getfieldcheckint(L, 1, "dT");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int kT = luaT_getfieldcheckint(L, 1, "kT");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int kW = luaT_getfieldcheckint(L, 1, "kW");
// Any stride smaller than the kernel extent means adjacent pooling
// windows overlap, so the scatter kernel must use atomics.
bool kernelsOverlap = (dT < kT) || (dH < kH) || (dW < kW);
// gradInput
THCudaTensor* gradInput = static_cast<THCudaTensor*>(
luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"));
// Resize and initialize result tensor.
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
int batchSize;
int inputSlices;
int inputTime;
int inputHeight;
int inputWidth;
int outputTime;
int outputHeight;
int outputWidth;
if (THCudaTensor_nDimension(state, input) == 4) { /* 4D */
batchSize = 1;
inputSlices = THCudaTensor_size(state, input, 0);
inputTime = THCudaTensor_size(state, input, 1);
inputHeight = THCudaTensor_size(state, input, 2);
inputWidth = THCudaTensor_size(state, input, 3);
outputTime = THCudaTensor_size(state, gradOutput, 1);
outputHeight = THCudaTensor_size(state, gradOutput, 2);
outputWidth = THCudaTensor_size(state, gradOutput, 3);
} else {
batchSize = THCudaTensor_size(state, input, 0);
inputSlices = THCudaTensor_size(state, input, 1);
inputTime = THCudaTensor_size(state, input, 2);
inputHeight = THCudaTensor_size(state, input, 3);
inputWidth = THCudaTensor_size(state, input, 4);
outputTime = THCudaTensor_size(state, gradOutput, 2);
outputHeight = THCudaTensor_size(state, gradOutput, 3);
outputWidth = THCudaTensor_size(state, gradOutput, 4);
}
// The Stride1 kernel walks gradOutput with raw offsets, so it must be
// contiguous.
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
// Collapse batch and feature dimensions
THCDeviceTensor<float, 4> cudaGradInput;
THCDeviceTensor<float, 4> cudaGradOutput;
if (THCudaTensor_nDimension(state, input) == 4) {
cudaGradInput = toDeviceTensor<float, 4>(state, gradInput);
cudaGradOutput = toDeviceTensor<float, 4>(state, gradOutput);
} else {
cudaGradInput =
toDeviceTensor<float, 5>(state, gradInput).downcastOuter<4>();
cudaGradOutput =
toDeviceTensor<float, 5>(state, gradOutput).downcastOuter<4>();
}
dim3 block(32, 8);
// Optimizing for stride 1 is probably only of limited value, but this
// specialization yields 3x speedup over the atomicAdd implementation.
if (dT == 1 && dH == 1 && dW == 1) {
// Gather kernel: one thread per gradINPUT element.
dim3 grid(THCCeilDiv(inputWidth, static_cast<int>(block.x)),
THCCeilDiv(inputHeight, static_cast<int>(block.y)),
inputTime * inputSlices * batchSize);
hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateGradInput_Stride1), dim3(grid), dim3(block), 0, 0,
cudaGradOutput, cudaGradInput, kT, kH, kW, 1.0f/(kT * kH * kW));
} else {
// Scatter kernels: one thread per gradOUTPUT element.
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
outputTime * inputSlices * batchSize);
if (kernelsOverlap) {
hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateGradInput_atomicAdd), dim3(grid), dim3(block), 0, 0,
cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW);
} else {
hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateGradInput), dim3(grid), dim3(block), 0, 0,
cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW);
}
}
// Release the contiguous copy created above.
THCudaTensor_free(state, gradOutput);
return 1;
}
// Lua registration table mapping nn method names to the C implementations
// above; terminated by the mandatory {NULL, NULL} sentinel.
static const struct luaL_Reg cunn_VolumetricAveragePooling__ [] = {
{"VolumetricAveragePooling_updateOutput",
cunn_VolumetricAveragePooling_updateOutput},
{"VolumetricAveragePooling_updateGradInput",
cunn_VolumetricAveragePooling_updateGradInput},
{NULL, NULL}
};
// Registers this module's functions under the "nn" field of the
// torch.CudaTensor metatable, then pops the metatable to restore the stack.
void cunn_VolumetricAveragePooling_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_VolumetricAveragePooling__, "nn");
lua_pop(L,1);
}
| e14ba83bcf85f8cd34a9cd69bc637bee3b13ef80.cu | #include "common.h"
#include "utils.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
// Volumetric (3D) average pooling, forward pass (generic kernel width).
// One thread per output element. Launch layout: x -> output column,
// y -> output row, z -> slice * outputTime + frame.
// normFactor is expected to be 1/(kT*kH*kW); taps that fall outside the
// input contribute 0 (the divisor is not clipped at the borders).
__global__ void cuda_VolumetricAveragePooling_updateOutput(
  THCDeviceTensor<float, 4> input, THCDeviceTensor<float, 4> output,
  int kT, int kH, int kW, int dT, int dH, int dW, float normFactor) {
  int oCol   = blockIdx.x * blockDim.x + threadIdx.x;
  int oRow   = blockIdx.y * blockDim.y + threadIdx.y;
  int oFrame = blockIdx.z % output.getSize(1); // output frame/time
  int slice  = blockIdx.z / output.getSize(1); // output slice/feature
  if (oRow < output.getSize(2) && oCol < output.getSize(3)) {
    float sum = 0.0f;  // float literal: avoid accidental double accumulation
    int iColumn = oCol * dW;
    int iRow    = oRow * dH;
    int iFrame  = oFrame * dT;
    for (int frame = 0; frame < kT; ++frame) {
      if (iFrame + frame < input.getSize(1)) {
        for (int row = 0; row < kH; ++row) {
          if (iRow + row < input.getSize(2)) {
            for (int column = 0; column < kW; ++column) {
              if (iColumn + column < input.getSize(3)) {
                sum += input[slice][iFrame + frame][iRow + row][iColumn + column];
              }
            }
          }
        }
      }
    }
    output[slice][oFrame][oRow][oCol] = sum * normFactor;
  }
}
// Inner-most loop size (kW) passed as template parameter for
// performance reasons.
//
// Volumetric (3D) average pooling, forward pass, specialized on the kernel
// width so the inner-most loop has a compile-time trip count and can be
// unrolled (the stated reason this template exists).
// Launch layout: x -> output column, y -> output row,
//                z -> slice * outputTime + frame.
// normFactor is expected to be 1/(kT*kH*KERNEL_WIDTH); taps that fall
// outside the input simply contribute 0 (the divisor is not clipped).
template<int KERNEL_WIDTH>
__global__ void cuda_VolumetricAveragePooling_updateOutput(
  THCDeviceTensor<float, 4> input, THCDeviceTensor<float, 4> output,
  int kT, int kH, int dT, int dH, int dW, float normFactor) {
  int oCol   = blockIdx.x * blockDim.x + threadIdx.x;
  int oRow   = blockIdx.y * blockDim.y + threadIdx.y;
  int oFrame = blockIdx.z % output.getSize(1); // output frame/time
  int slice  = blockIdx.z / output.getSize(1); // output slice/feature
  if (oRow < output.getSize(2) && oCol < output.getSize(3)) {
    float sum = 0.0f;  // float literal: avoid accidental double accumulation
    int iColumn = oCol * dW;
    int iRow    = oRow * dH;
    int iFrame  = oFrame * dT;
    for (int frame = 0; frame < kT; ++frame) {
      if (iFrame + frame < input.getSize(1)) {
        for (int row = 0; row < kH; ++row) {
          if (iRow + row < input.getSize(2)) {
            #pragma unroll
            for (int column = 0; column < KERNEL_WIDTH; ++column) {
              if (iColumn + column < input.getSize(3)) {
                sum += input[slice][iFrame + frame][iRow + row][iColumn + column];
              }
            }
          }
        }
      }
    }
    output[slice][oFrame][oRow][oCol] = sum * normFactor;
  }
}
// Dispatch helper: one switch `case` per supported kernel width, launching
// the width-specialized kernel. Relies on `grid`, `block`, `cudaInput`,
// `cudaOutput`, kT/kH/dT/dH/dW and `normFactor` being in the calling scope.
#define LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
cuda_VolumetricAveragePooling_updateOutput<KW><<<grid, block>>>( \
cudaInput, cudaOutput, kT, kH, dT, dH, dW, normFactor); \
break
// Lua binding: forward pass of volumetric (3D) average pooling.
// Lua stack: (1) module table carrying stride (dT,dH,dW), kernel size
// (kT,kH,kW) and an `output` CudaTensor field; (2) the input CudaTensor,
// 4D (slices x time x height x width) or 5D (batch x slices x time x
// height x width). Resizes and fills `output`; returns 1 Lua value.
static int cunn_VolumetricAveragePooling_updateOutput(lua_State *L) {
  // State
  THCState *state = getCutorchState(L);
  // Input
  THCudaTensor* input = static_cast<THCudaTensor*>(
    luaT_checkudata(L, 2, "torch.CudaTensor"));
  // Params:
  int dT = luaT_getfieldcheckint(L, 1, "dT");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int kT = luaT_getfieldcheckint(L, 1, "kT");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  THCudaTensor *output = static_cast<THCudaTensor*>(
    luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"));
  int batchSize;
  int inputSlices;
  int inputTime;
  int inputHeight;
  int inputWidth;
  // Query dimensionality once through the THC API. (The original mixed
  // THCudaTensor_nDimension() with a raw input->nDimension access below;
  // unified here for consistency. newContiguous preserves the rank, so
  // caching the value is safe.)
  const int nDim = THCudaTensor_nDimension(state, input);
  if (nDim == 4) {
    luaL_argcheck(L,
                  THCudaTensor_size(state, input, 1) >= kT &&
                  THCudaTensor_size(state, input, 2) >= kH &&
                  THCudaTensor_size(state, input, 3) >= kW, 2,
                  "input image smaller than kernel size");
    /* sizes */
    batchSize   = 1;
    inputSlices = THCudaTensor_size(state, input, 0);
    inputTime   = THCudaTensor_size(state, input, 1);
    inputHeight = THCudaTensor_size(state, input, 2);
    inputWidth  = THCudaTensor_size(state, input, 3);
  } else if (nDim == 5) {
    luaL_argcheck(L,
                  THCudaTensor_size(state, input, 2) >= kT &&
                  THCudaTensor_size(state, input, 3) >= kH &&
                  THCudaTensor_size(state, input, 4) >= kW, 2,
                  "input image smaller than kernel size");
    /* sizes */
    batchSize   = THCudaTensor_size(state, input, 0);
    inputSlices = THCudaTensor_size(state, input, 1);
    inputTime   = THCudaTensor_size(state, input, 2);
    inputHeight = THCudaTensor_size(state, input, 3);
    inputWidth  = THCudaTensor_size(state, input, 4);
  } else {
    // raises a Lua error (does not return), so the sizes above are never
    // read uninitialized
    luaL_argcheck(L, 0, 2, "4D or 5D tensor expected");
  }
  // Valid (no-padding) pooling output extents.
  int outputTime   = (inputTime - kT) / dT + 1;
  int outputHeight = (inputHeight - kH) / dH + 1;
  int outputWidth  = (inputWidth - kW) / dW + 1;
  if (nDim == 4) { /* 4D */
    /* resize output */
    THCudaTensor_resize4d(state, output, inputSlices,
                          outputTime, outputHeight, outputWidth);
  } else { /* 5D */
    THCudaTensor_resize5d(state, output, batchSize, inputSlices,
                          outputTime, outputHeight, outputWidth);
  }
  input = THCudaTensor_newContiguous(state, input);
  // Collapse batch and feature dimensions so the kernels see 4D tensors.
  THCDeviceTensor<float, 4> cudaInput;
  THCDeviceTensor<float, 4> cudaOutput;
  if (nDim == 4) {
    cudaInput  = toDeviceTensor<float, 4>(state, input);
    cudaOutput = toDeviceTensor<float, 4>(state, output);
  } else {
    cudaInput  = toDeviceTensor<float, 5>(state, input).downcastOuter<4>();
    cudaOutput = toDeviceTensor<float, 5>(state, output).downcastOuter<4>();
  }
  dim3 block(32, 8);
  dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
            THCCeilDiv(outputHeight, static_cast<int>(block.y)),
            outputTime * inputSlices * batchSize);
  float normFactor = 1.0f / static_cast<float>(kT * kH * kW);
  // Small kernel widths (<= 7) dispatch to a width-specialized template;
  // anything else falls back to the generic kernel.
  switch (kW) {
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(1);
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2);
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3);
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(4);
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(5);
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(6);
    LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(7);
    default:
      cuda_VolumetricAveragePooling_updateOutput<<<grid, block>>>(cudaInput,
                                                                  cudaOutput,
                                                                  kT, kH, kW,
                                                                  dT, dH, dW,
                                                                  normFactor);
      break;
  }
  // Release the contiguous copy created above.
  THCudaTensor_free(state, input);
  return 1;
}
// Backward pass specialized for stride-1 pooling: one thread per gradInput
// element GATHERS the gradOutput values of every window containing it
// (no atomics needed). Launch layout: x -> input column, y -> input row,
// z -> slice * inputTime + frame. normFactor is expected to be
// 1/(kT*kH*kW) (see the host wrapper).
__global__ void cuda_VolumetricAveragePooling_updateGradInput_Stride1(
THCDeviceTensor<float, 4> gradOutput,
THCDeviceTensor<float, 4> gradInput,
int kT, int kH, int kW, float normFactor) {
int iCol = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = blockIdx.z % gradInput.getSize(1); // input frame/time
int slice = blockIdx.z / gradInput.getSize(1); // input slice/feature
// guard against over-tiled threads
if (iRow < gradInput.getSize(2) && iCol < gradInput.getSize(3)) {
float sum = 0.0;
// Pointer to the first contributing gradOutput element; the loops below
// walk from it via raw offsets, which requires gradOutput to be
// contiguous (the host wrapper calls newContiguous before launching).
float *gOut = &gradOutput[slice][max(0, iFrame - kT + 1)]
[max(0, iRow - kH + 1)][max(0, iCol - kW + 1)];
int frameOffset = 0;
// Windows containing (iFrame,iRow,iCol) start at index-k+1 .. index,
// clamped to the gradOutput bounds in every dimension.
for (int oFrame = max(0, iFrame - kT + 1);
oFrame < min(iFrame + 1, gradOutput.getSize(1));
++oFrame) {
int rowOffset = frameOffset;
for (int oRow = max(0, iRow - kH + 1);
oRow < min(iRow + 1, gradOutput.getSize(2));
++oRow) {
int colOffset = rowOffset;
for (int oCol = max(0, iCol - kW + 1);
oCol < min(iCol + 1, gradOutput.getSize(3));
++oCol) {
sum += gOut[colOffset];
++colOffset;
}
// advance one gradOutput row (width elements)
rowOffset += gradOutput.getSize(3);
}
// advance one gradOutput frame (height * width elements)
frameOffset += gradOutput.getSize(2) * gradOutput.getSize(3);
}
gradInput[slice][iFrame][iRow][iCol] = sum * normFactor;
}
}
// Backward pass for overlapping pooling windows: one thread per gradOutput
// element SCATTERS its equally-weighted share (grad / kernel volume) into
// every gradInput cell of its window. Overlap means several threads may hit
// the same gradInput cell, hence atomicAdd (gradInput is zeroed by the host
// wrapper). Launch layout: x -> output column, y -> output row,
// z -> slice * outputTime + frame.
__global__ void cuda_VolumetricAveragePooling_updateGradInput_atomicAdd(
THCDeviceTensor<float, 4> gradOutput,
THCDeviceTensor<float, 4> gradInput,
int kT, int kH, int kW, int dT, int dH, int dW) {
const int oCol   = blockIdx.x * blockDim.x + threadIdx.x;
const int oRow   = blockIdx.y * blockDim.y + threadIdx.y;
const int oFrame = blockIdx.z % gradOutput.getSize(1); // gradOutput frame/time
const int slice  = blockIdx.z / gradOutput.getSize(1); // gradOutput slice/feature
// guard against over-tiled threads
if (oRow >= gradOutput.getSize(2) || oCol >= gradOutput.getSize(3))
return;
const float val = gradOutput[slice][oFrame][oRow][oCol] / (kT * kH * kW);
const int tBegin = oFrame * dT;
const int hBegin = oRow * dH;
const int wBegin = oCol * dW;
for (int t = tBegin; t < tBegin + kT; ++t)
for (int h = hBegin; h < hBegin + kH; ++h)
for (int w = wBegin; w < wBegin + kW; ++w)
atomicAdd(&gradInput[slice][t][h][w], val);
}
// Backward pass for NON-overlapping pooling windows: each gradInput cell
// belongs to at most one window, so each thread (one per gradOutput
// element) can write its equally-weighted share with plain stores — no
// atomics required. Launch layout: x -> output column, y -> output row,
// z -> slice * outputTime + frame.
__global__ void cuda_VolumetricAveragePooling_updateGradInput(
THCDeviceTensor<float, 4> gradOutput,
THCDeviceTensor<float, 4> gradInput,
int kT, int kH, int kW, int dT, int dH, int dW) {
const int oCol   = blockIdx.x * blockDim.x + threadIdx.x;
const int oRow   = blockIdx.y * blockDim.y + threadIdx.y;
const int oFrame = blockIdx.z % gradOutput.getSize(1); // gradOutput frame/time
const int slice  = blockIdx.z / gradOutput.getSize(1); // gradOutput slice/feature
// guard against over-tiled threads
if (oRow >= gradOutput.getSize(2) || oCol >= gradOutput.getSize(3))
return;
const float val = gradOutput[slice][oFrame][oRow][oCol] / (kT * kH * kW);
const int tBegin = oFrame * dT;
const int hBegin = oRow * dH;
const int wBegin = oCol * dW;
for (int t = tBegin; t < tBegin + kT; ++t)
for (int h = hBegin; h < hBegin + kH; ++h)
for (int w = wBegin; w < wBegin + kW; ++w)
gradInput[slice][t][h][w] = val;
}
// Lua binding: backward pass of volumetric (3D) average pooling.
// Lua stack: (1) module table with dT/dH/dW, kT/kH/kW and a `gradInput`
// CudaTensor field; (2) input CudaTensor (4D or 5D); (3) gradOutput.
// Resizes gradInput to match input, zeroes it, then dispatches to one of
// three kernels: a stride-1 gather kernel, an atomicAdd scatter kernel
// (overlapping windows), or a plain scatter kernel. Returns 1 Lua value.
static int cunn_VolumetricAveragePooling_updateGradInput(lua_State *L) {
// State
THCState *state = getCutorchState(L);
// Input
THCudaTensor* input = static_cast<THCudaTensor*>(
luaT_checkudata(L, 2, "torch.CudaTensor"));
// gradOutput
THCudaTensor* gradOutput = static_cast<THCudaTensor*>(
luaT_checkudata(L, 3, "torch.CudaTensor"));
// Params
int dT = luaT_getfieldcheckint(L, 1, "dT");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int kT = luaT_getfieldcheckint(L, 1, "kT");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int kW = luaT_getfieldcheckint(L, 1, "kW");
// Any stride smaller than the kernel extent means adjacent pooling
// windows overlap, so the scatter kernel must use atomics.
bool kernelsOverlap = (dT < kT) || (dH < kH) || (dW < kW);
// gradInput
THCudaTensor* gradInput = static_cast<THCudaTensor*>(
luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"));
// Resize and initialize result tensor.
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
int batchSize;
int inputSlices;
int inputTime;
int inputHeight;
int inputWidth;
int outputTime;
int outputHeight;
int outputWidth;
if (THCudaTensor_nDimension(state, input) == 4) { /* 4D */
batchSize = 1;
inputSlices = THCudaTensor_size(state, input, 0);
inputTime = THCudaTensor_size(state, input, 1);
inputHeight = THCudaTensor_size(state, input, 2);
inputWidth = THCudaTensor_size(state, input, 3);
outputTime = THCudaTensor_size(state, gradOutput, 1);
outputHeight = THCudaTensor_size(state, gradOutput, 2);
outputWidth = THCudaTensor_size(state, gradOutput, 3);
} else {
batchSize = THCudaTensor_size(state, input, 0);
inputSlices = THCudaTensor_size(state, input, 1);
inputTime = THCudaTensor_size(state, input, 2);
inputHeight = THCudaTensor_size(state, input, 3);
inputWidth = THCudaTensor_size(state, input, 4);
outputTime = THCudaTensor_size(state, gradOutput, 2);
outputHeight = THCudaTensor_size(state, gradOutput, 3);
outputWidth = THCudaTensor_size(state, gradOutput, 4);
}
// The Stride1 kernel walks gradOutput with raw offsets, so it must be
// contiguous.
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
// Collapse batch and feature dimensions
THCDeviceTensor<float, 4> cudaGradInput;
THCDeviceTensor<float, 4> cudaGradOutput;
if (THCudaTensor_nDimension(state, input) == 4) {
cudaGradInput = toDeviceTensor<float, 4>(state, gradInput);
cudaGradOutput = toDeviceTensor<float, 4>(state, gradOutput);
} else {
cudaGradInput =
toDeviceTensor<float, 5>(state, gradInput).downcastOuter<4>();
cudaGradOutput =
toDeviceTensor<float, 5>(state, gradOutput).downcastOuter<4>();
}
dim3 block(32, 8);
// Optimizing for stride 1 is probably only of limited value, but this
// specialization yields 3x speedup over the atomicAdd implementation.
if (dT == 1 && dH == 1 && dW == 1) {
// Gather kernel: one thread per gradINPUT element.
dim3 grid(THCCeilDiv(inputWidth, static_cast<int>(block.x)),
THCCeilDiv(inputHeight, static_cast<int>(block.y)),
inputTime * inputSlices * batchSize);
cuda_VolumetricAveragePooling_updateGradInput_Stride1<<<grid, block>>>(
cudaGradOutput, cudaGradInput, kT, kH, kW, 1.0f/(kT * kH * kW));
} else {
// Scatter kernels: one thread per gradOUTPUT element.
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
outputTime * inputSlices * batchSize);
if (kernelsOverlap) {
cuda_VolumetricAveragePooling_updateGradInput_atomicAdd<<<grid, block>>>(
cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW);
} else {
cuda_VolumetricAveragePooling_updateGradInput<<<grid, block>>>(
cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW);
}
}
// Release the contiguous copy created above.
THCudaTensor_free(state, gradOutput);
return 1;
}
// Lua registration table mapping nn method names to the C implementations
// above; terminated by the mandatory {NULL, NULL} sentinel.
static const struct luaL_Reg cunn_VolumetricAveragePooling__ [] = {
{"VolumetricAveragePooling_updateOutput",
cunn_VolumetricAveragePooling_updateOutput},
{"VolumetricAveragePooling_updateGradInput",
cunn_VolumetricAveragePooling_updateGradInput},
{NULL, NULL}
};
// Registers this module's functions under the "nn" field of the
// torch.CudaTensor metatable, then pops the metatable to restore the stack.
void cunn_VolumetricAveragePooling_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_VolumetricAveragePooling__, "nn");
lua_pop(L,1);
}
|
478352b3f37573be9f01afd9955569f88c7b80fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2010-2012, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id: $
* @authors: Cedric Cagniart, Koen Buys, Anatoly Baksheev
*
*/
#include <pcl/gpu/people/tree.h>
#include <pcl/gpu/people/label_common.h>
#include <pcl/gpu/utils/device/limits.hpp>
#include <pcl/gpu/utils/safe_call.hpp>
#include <pcl/gpu/utils/texture_binder.hpp>
#include <stdio.h>
#include <limits>
#include <assert.h>
#include "internal.h"
using pcl::gpu::people::trees::Node;
using pcl::gpu::people::trees::Label;
using pcl::gpu::people::trees::AttribLocation;
using pcl::gpu::people::trees::Attrib;
using pcl::gpu::people::trees::focal;
using pcl::gpu::people::trees::NUM_LABELS;
using namespace std;
using uint = unsigned int;
#ifdef __CDT_PARSER__ // This is an eclipse specific hack, does nothing to the code
#define __global__
#define __device__
#define __shared__
#define __forceinline__
#define __constant__
#endif
namespace pcl
{
namespace device
{
// 2D texture over the per-pixel depth map (unsigned short; compared against
// short's max as a "no data" sentinel below — units presumably millimeters,
// TODO confirm against the callers).
texture<unsigned short, 2, hipReadModeElementType> depthTex;
// 2D texture over the per-pixel char4 of labels, one byte per tree.
texture<char4, 2, hipReadModeElementType> multilabelTex;
// Foreground depth threshold read by evaluateTree<true>; set from the host
// via hipMemcpyToSymbol in CUDA_runMultiTreePass.
__constant__ int constFGThresh;
// Walks one random decision forest tree for pixel (u,v) and returns the leaf
// label. Each node compares the depths of two probe offsets, scaled by
// f/depth so offsets are (roughly) metric rather than pixel sized. The tree
// is stored as a flat array in breadth-first order (children of nid are
// 2*nid+1 / 2*nid+2); leaves are indexed relative to numNodes.
// When testFG is set, probes that lie more than constFGThresh behind the
// center pixel are clamped to short's max, i.e. treated as background.
template<bool testFG> __device__ __forceinline__ Label
evaluateTree(int u, int v, float f, int treeHeight, int numNodes, const Node* nodes, const Label* leaves)
{
int depth = tex2D(depthTex, u, v);
// depth-dependent scale for the probe offsets (NOTE: no guard against
// depth == 0 here — presumably excluded by the callers, TODO confirm)
float scale = f / depth;
// go down the tree
int nid = 0;
for(int nodeDepth = 0; nodeDepth < treeHeight; ++nodeDepth)
{
const Node node = nodes[nid];
const AttribLocation& loc = node.loc;
int d1 = tex2D (depthTex, u + loc.du1 * scale, v + loc.dv1 * scale);
int d2 = tex2D (depthTex, u + loc.du2 * scale, v + loc.dv2 * scale);
if (testFG)
{
if( d1 - depth > constFGThresh )
d1 = numeric_limits<short>::max();
if( d2 - depth > constFGThresh )
d2 = numeric_limits<short>::max();
}
int delta = d1-d2;
bool test = delta > (int)node.thresh;
// right child on "greater", left child otherwise
if( test ) nid = nid*2+2;
else nid = nid*2+1;
}
// after treeHeight steps nid indexes into the leaf layer
return leaves[nid-numNodes];
}
/** \brief This is the CUDA kernel doing the actual RDF evaluation */
/** \brief One thread per pixel: evaluate the single decision tree (without
 *  the foreground test) and store the resulting label. Threads outside the
 *  label image exit immediately. */
__global__ void
KernelCUDA_runTree( const float f,
const int treeHeight,
const int numNodes,
const Node* nodes,
const Label* leaves,
PtrStepSz<Label> labels)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= labels.cols || y >= labels.rows)
return;
labels.ptr(y)[x] = evaluateTree<false>(x, y, f, treeHeight, numNodes, nodes, leaves);
}
// One forest-tree evaluation pass: writes tree `treeId`'s label for every
// pixel into byte `treeId` of the per-pixel char4 in multiLabels. Pixels
// whose depth equals the unsigned-short max sentinel are labelled
// Background (29) directly. testFG selects the foreground-thresholded
// variant of evaluateTree.
template<bool testFG> __global__ void
KernelCUDA_MultiTreePass( const int treeId,
const float f,
const int treeHeight,
const int numNodes,
const Node* nodes,
const Label* leaves,
PtrStepSz<unsigned short> depth,
PtrStepSz<char4> multiLabels)
{
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if(u < multiLabels.cols && v < multiLabels.rows)
{
// This maps a char4 pointer on a char pointer
char* pixel = (char*)&multiLabels.ptr(v)[u];
// This test assures that in next iterations the FGPreperation is taking into account see utils.cu
if(depth.ptr(v)[u] == numeric_limits<unsigned short>::max())
pixel[treeId] = 29; // see label_common.h for Background label (=29)
// TODO remove this hardcoded label with enum part_t label
else
pixel[treeId] = evaluateTree<testFG>(u, v, f, treeHeight, numNodes, nodes, leaves);
}
}
/** \brief This function wraps the actual CUDA kernel doing the RDF evaluation */
// Host wrapper for KernelCUDA_runTree: sizes the label image to match the
// depth map, binds the depth texture (clamped addressing), launches one
// thread per pixel, and synchronizes so kernel errors surface here.
void CUDA_runTree ( float focal, int treeHeight, int numNodes, const Node* nodes, const Label* leaves, const Depth& depth, Labels& labels )
{
labels.create( depth.rows(), depth.cols() );
depthTex.addressMode[0] = hipAddressModeClamp;
// RAII binder: unbinds the texture when the scope exits
TextureBinder binder(depth, depthTex);
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
hipLaunchKernelGGL(( KernelCUDA_runTree), dim3(grid), dim3(block) , 0, 0, focal, treeHeight, numNodes, nodes, leaves, labels);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
// Host wrapper for KernelCUDA_MultiTreePass: binds the depth texture and
// launches one evaluation pass for tree `treeId` over the whole image.
// Passing FGThresh == INT_MAX disables the foreground test; otherwise the
// threshold is copied into the __constant__ symbol before launching the
// testFG variant.
void CUDA_runMultiTreePass ( int FGThresh,
int treeId,
float focal,
int treeHeight,
int numNodes,
const Node* nodes_device,
const Label* leaves_device,
const Depth& depth,
MultiLabels& multilabel )
{
//std::cout << "(I) : CUDA_runMultiTreePass() called" << std::endl;
depthTex.addressMode[0] = hipAddressModeClamp;
// RAII binder: unbinds the texture when the scope exits
TextureBinder binder(depth, depthTex);
dim3 block(32, 8);
dim3 grid( divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
if(FGThresh == std::numeric_limits<int>::max())
{
hipLaunchKernelGGL(( KernelCUDA_MultiTreePass<false>), dim3(grid), dim3(block) , 0, 0, treeId, focal, treeHeight,
numNodes, nodes_device, leaves_device, depth, multilabel);
}
else
{
cudaSafeCall( hipMemcpyToSymbol(constFGThresh, &FGThresh, sizeof(FGThresh)) );
hipLaunchKernelGGL(( KernelCUDA_MultiTreePass<true>), dim3(grid), dim3(block) , 0, 0, treeId, focal, treeHeight,
numNodes, nodes_device, leaves_device, depth, multilabel);
}
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////////////
// Linear argmax over a char histogram: returns the index of the largest
// bin; on ties the FIRST maximal index wins. Like the original, numBins is
// assumed to be >= 1 (bins[0] is read unconditionally).
__device__ int findMaxId( int numBins, char* bins )
{
int best = 0;
for (int i = 1; i < numBins; ++i)
{
if (bins[i] > bins[best])
best = i;
}
return best;
}
//this will find the max Index but return -1 if there is a tie
// Argmax over a char histogram that detects ties: returns the index of the
// unique maximum, or -1 when a later bin equals the current maximum
// (used to fall back to a neighbourhood vote). Assumes numBins >= 1.
__device__ int findMaxId_testTie(int numBins, char* bins)
{
int best = 0;
int rival = -1;            // index of a bin tying the current maximum
char bestVal = bins[0];
for (int i = 1; i < numBins; ++i)
{
const char v = bins[i];
if (v > bestVal) { best = i; rival = -1; bestVal = v; }
else if (v == bestVal) { rival = i; }
}
return (rival == -1) ? best : -1;
}
// Merges the per-tree labels (multilabelTex, one byte per tree) into a
// single label per pixel by majority vote. If the vote ties, the votes of
// the 3x3 neighbourhood are added, weighted by depth similarity, and the
// argmax (first index on ties) is taken instead.
__global__ void KernelCUDA_MultiTreeMerge( const int numTrees, PtrStepSz<Label> labels )
{
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if( u >= labels.cols || v >= labels.rows)
return;
// reset the bins
char bins[NUM_LABELS];
for(int li = 0; li < NUM_LABELS; ++li)
bins[li] = 0;
// find a consensus with the current trees
{
char4 pixlabels = tex2D(multilabelTex, u ,v);
char* bob = (char*)&pixlabels; //horrible but char4's have xyzw members
for(int ti = 0; ti < numTrees; ++ti)
bins[ bob[ti] ]++;
}
// -1 signals a tie for the maximum
int res = findMaxId_testTie(NUM_LABELS, bins);
// if this fails... find a consensus in a 1 neighbourhood
if( res < 0 )
{
int depth = tex2D(depthTex, u,v);
for(int i = -1 ; i <= 1; ++i)
{
for(int j = -1; j <= 1; ++j)
{
// clamped texture addressing keeps border reads in range
int depth_neighbor = tex2D(depthTex,u+i,v+j);
char4 labels_neighbor = tex2D(multilabelTex, u+i,v+j);
char* bob = (char*)&labels_neighbor; //horrible but char4's have xyzw members
//TODO: redo this part
// only neighbours at similar depth get a vote
int weight = abs(depth-depth_neighbor) < 50 ? 1:0; // 5cms
for(int ti = 0; ti < numTrees; ++ti)
bins[ bob[ti] ] += weight;
}
}
res = findMaxId( NUM_LABELS, bins );
}
labels.ptr(v)[u] = res;
}
/** \brief This merges the labels from all trees into a histogram of probabilities **/
// One thread per pixel: clears the pixel's label histogram, then lets each
// tree add an equal-weight vote (1/numTrees) to the bin of the label it
// predicted. The previous code hard-coded 0.25 (a double literal assuming
// exactly four trees) despite taking numTrees as a parameter; this keeps
// the same result for the standard 4-tree forest and is correct for any
// numTrees (at most 4 labels fit in the packed char4).
__global__ void KernelCUDA_MultiTreeCreateProb (const int numTrees, PtrStepSz<prob_histogram> prob)
{
// map block and thread onto image coordinates
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if( u >= prob.cols || v >= prob.rows )
return;
char4 pixlabels = tex2D (multilabelTex, u ,v);
char* bob = (char*)&pixlabels; //horrible but char4's have xyzw members
// Reset the whole histogram first.
for(int in = 0; in < NUM_LABELS; in++)
{
prob.ptr(v)[u].probs[in] = 0;
}
// Each tree casts one equal-weight vote for its predicted label.
// TODO: replace this with a histogram copy
const float vote = 1.f / (float)numTrees;
for(int ti = 0; ti < numTrees; ++ti)
{
prob.ptr(v)[u].probs[bob[ti]] += vote;
}
}
/** \brief This will merge the votes from the different trees into one final vote */
void CUDA_runMultiTreeMerge( int numTrees, const Depth& depth, const MultiLabels& multilabel, Labels& labels)
{
//std::cout << "(I) : CUDA_runMultiTreeMerge() called" << std::endl;
labels.create(depth.rows(), depth.cols());
depthTex.addressMode[0] = hipAddressModeClamp;
TextureBinder binder(depth, depthTex);
multilabelTex.addressMode[0] = hipAddressModeClamp;
TextureBinder mlabels_binder(multilabel, multilabelTex);
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
hipLaunchKernelGGL(( KernelCUDA_MultiTreeMerge), dim3(grid), dim3(block) , 0, 0, numTrees, labels );
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
/** \brief This will merge the votes from the different trees into one final vote, including probabilistic's */
void CUDA_runMultiTreeProb ( int numTrees,
const Depth& depth,
const MultiLabels& multilabel,
Labels& labels,
LabelProbability& probabilities)
{
std::cout << "(I) : CUDA_runMultiTreeProb() called" << std::endl;
//labels.create(depth.rows(), depth.cols());
//depthTex.addressMode[0] = hipAddressModeClamp;
//TextureBinder binder(depth, depthTex);
multilabelTex.addressMode[0] = hipAddressModeClamp;
TextureBinder mlabels_binder(multilabel, multilabelTex);
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
hipLaunchKernelGGL(( KernelCUDA_MultiTreeCreateProb), dim3(grid), dim3(block) , 0, 0, numTrees, probabilities);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
pcl::device::CUDATree::CUDATree (int treeHeight_arg, const std::vector<Node>& nodes, const std::vector<Label>& leaves)
{
treeHeight = treeHeight_arg;
numNodes = (1 << treeHeight) - 1;
assert (static_cast<int> (nodes.size ()) == numNodes );
assert (static_cast<int> (leaves.size ()) == (1 << treeHeight) );
nodes_device.upload(nodes);
leaves_device.upload(leaves);
}
void
pcl::device::MultiTreeLiveProc::process (const Depth& dmap, Labels& lmap)
{
// TODO: is this assert needed if we only call process?
//assert(!trees.empty());
// TODO is this iteration needed when we call multitreepass in the process step?
/* if (trees.size() == 1)
{
const CUDATree& t = trees[0];
CUDA_runTree( focal, t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, lmap );
return;
}
*/
process(dmap, lmap, std::numeric_limits<int>::max());
}
void
pcl::device::MultiTreeLiveProc::process (const Depth& dmap, Labels& lmap, int FGThresh)
{
assert(!trees.empty());
unsigned int numTrees = static_cast<int> (trees.size ());
multilmap.create(dmap.rows(), dmap.cols());
// 1 - run the multi passes
for( int ti = 0; ti < numTrees; ++ti )
{
const CUDATree& t = trees[ti];
CUDA_runMultiTreePass ( FGThresh, ti, static_cast<float> (focal), t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, multilmap );
}
// 2 - run the merging
assert( numTrees <= 4 );
device::CUDA_runMultiTreeMerge(numTrees, dmap, multilmap, lmap);
}
void
pcl::device::MultiTreeLiveProc::processProb (const Depth& dmap, Labels& lmap, LabelProbability& prob, int FGThresh)
{
assert(!trees.empty());
unsigned int numTrees = static_cast<unsigned int> (trees.size ());
assert( numTrees <= 4 );
multilmap.create(dmap.rows(), dmap.cols());
// 1 - run the multi passes
for( int ti = 0; ti < numTrees; ++ti )
{
const CUDATree& t = trees[ti];
CUDA_runMultiTreePass ( FGThresh, ti, static_cast<float> (focal), t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, multilmap );
}
device::CUDA_runMultiTreeProb(numTrees, dmap, multilmap, lmap, prob);
}
| 478352b3f37573be9f01afd9955569f88c7b80fa.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2010-2012, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id: $
* @authors: Cedric Cagniart, Koen Buys, Anatoly Baksheev
*
*/
#include <pcl/gpu/people/tree.h>
#include <pcl/gpu/people/label_common.h>
#include <pcl/gpu/utils/device/limits.hpp>
#include <pcl/gpu/utils/safe_call.hpp>
#include <pcl/gpu/utils/texture_binder.hpp>
#include <stdio.h>
#include <limits>
#include <assert.h>
#include "internal.h"
using pcl::gpu::people::trees::Node;
using pcl::gpu::people::trees::Label;
using pcl::gpu::people::trees::AttribLocation;
using pcl::gpu::people::trees::Attrib;
using pcl::gpu::people::trees::focal;
using pcl::gpu::people::trees::NUM_LABELS;
using namespace std;
using uint = unsigned int;
#ifdef __CDT_PARSER__ // This is an eclipse specific hack, does nothing to the code
#define __global__
#define __device__
#define __shared__
#define __forceinline__
#define __constant__
#endif
namespace pcl
{
namespace device
{
texture<unsigned short, 2, cudaReadModeElementType> depthTex;
texture<char4, 2, cudaReadModeElementType> multilabelTex;
__constant__ int constFGThresh;
template<bool testFG> __device__ __forceinline__ Label
evaluateTree(int u, int v, float f, int treeHeight, int numNodes, const Node* nodes, const Label* leaves)
{
int depth = tex2D(depthTex, u, v);
float scale = f / depth;
// go down the tree
int nid = 0;
for(int nodeDepth = 0; nodeDepth < treeHeight; ++nodeDepth)
{
const Node node = nodes[nid];
const AttribLocation& loc = node.loc;
int d1 = tex2D (depthTex, u + loc.du1 * scale, v + loc.dv1 * scale);
int d2 = tex2D (depthTex, u + loc.du2 * scale, v + loc.dv2 * scale);
if (testFG)
{
if( d1 - depth > constFGThresh )
d1 = numeric_limits<short>::max();
if( d2 - depth > constFGThresh )
d2 = numeric_limits<short>::max();
}
int delta = d1-d2;
bool test = delta > (int)node.thresh;
if( test ) nid = nid*2+2;
else nid = nid*2+1;
}
return leaves[nid-numNodes];
}
/** \brief This is the CUDA kernel doing the actual RDF evaluation */
__global__ void
KernelCUDA_runTree( const float f,
const int treeHeight,
const int numNodes,
const Node* nodes,
const Label* leaves,
PtrStepSz<Label> labels)
{
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if( u < labels.cols && v < labels.rows)
labels.ptr(v)[u] = evaluateTree<false>(u, v, f, treeHeight, numNodes, nodes, leaves);
}
template<bool testFG> __global__ void
KernelCUDA_MultiTreePass( const int treeId,
const float f,
const int treeHeight,
const int numNodes,
const Node* nodes,
const Label* leaves,
PtrStepSz<unsigned short> depth,
PtrStepSz<char4> multiLabels)
{
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if(u < multiLabels.cols && v < multiLabels.rows)
{
// This maps a char4 pointer on a char pointer
char* pixel = (char*)&multiLabels.ptr(v)[u];
// This test assures that in next iterations the FGPreperation is taking into account see utils.cu
if(depth.ptr(v)[u] == numeric_limits<unsigned short>::max())
pixel[treeId] = 29; // see label_common.h for Background label (=29)
// TODO remove this hardcoded label with enum part_t label
else
pixel[treeId] = evaluateTree<testFG>(u, v, f, treeHeight, numNodes, nodes, leaves);
}
}
/** \brief This function wraps the actual CUDA kernel doing the RDF evaluation */
void CUDA_runTree ( float focal, int treeHeight, int numNodes, const Node* nodes, const Label* leaves, const Depth& depth, Labels& labels )
{
labels.create( depth.rows(), depth.cols() );
depthTex.addressMode[0] = cudaAddressModeClamp;
TextureBinder binder(depth, depthTex);
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
KernelCUDA_runTree<<< grid, block >>>( focal, treeHeight, numNodes, nodes, leaves, labels);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
void CUDA_runMultiTreePass ( int FGThresh,
int treeId,
float focal,
int treeHeight,
int numNodes,
const Node* nodes_device,
const Label* leaves_device,
const Depth& depth,
MultiLabels& multilabel )
{
//std::cout << "(I) : CUDA_runMultiTreePass() called" << std::endl;
depthTex.addressMode[0] = cudaAddressModeClamp;
TextureBinder binder(depth, depthTex);
dim3 block(32, 8);
dim3 grid( divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
if(FGThresh == std::numeric_limits<int>::max())
{
KernelCUDA_MultiTreePass<false><<< grid, block >>>( treeId, focal, treeHeight,
numNodes, nodes_device, leaves_device, depth, multilabel);
}
else
{
cudaSafeCall( cudaMemcpyToSymbol(constFGThresh, &FGThresh, sizeof(FGThresh)) );
KernelCUDA_MultiTreePass<true><<< grid, block >>>( treeId, focal, treeHeight,
numNodes, nodes_device, leaves_device, depth, multilabel);
}
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////////////
__device__ int findMaxId( int numBins, char* bins )
{
// HACK .. not testing against numBins = 0
int maxId = 0;
char maxVal = bins[0];
for(int i=1;i<numBins;++i)
{
char val = bins[i];
if( val > maxVal ) { maxId = i; maxVal = val; }
}
return maxId;
}
//this will find the max Index but return -1 if there is a tie
__device__ int findMaxId_testTie(int numBins, char* bins)
{
int maxId = 0;
int maxId_other = -1;
char maxVal = bins[0];
for(int i=1;i<numBins;++i) {
char val = bins[i];
if( val == maxVal ) { maxId_other = i; }
if( val > maxVal ) { maxId = i; maxId_other = -1; maxVal = val; }
}
if( maxId_other != -1) return -1;
else return maxId;
}
__global__ void KernelCUDA_MultiTreeMerge( const int numTrees, PtrStepSz<Label> labels )
{
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if( u >= labels.cols || v >= labels.rows)
return;
// reset the bins
char bins[NUM_LABELS];
for(int li = 0; li < NUM_LABELS; ++li)
bins[li] = 0;
// find a consensus with the current trees
{
char4 pixlabels = tex2D(multilabelTex, u ,v);
char* bob = (char*)&pixlabels; //horrible but char4's have xyzw members
for(int ti = 0; ti < numTrees; ++ti)
bins[ bob[ti] ]++;
}
int res = findMaxId_testTie(NUM_LABELS, bins);
// if this fails... find a consensus in a 1 neighbourhood
if( res < 0 )
{
int depth = tex2D(depthTex, u,v);
for(int i = -1 ; i <= 1; ++i)
{
for(int j = -1; j <= 1; ++j)
{
int depth_neighbor = tex2D(depthTex,u+i,v+j);
char4 labels_neighbor = tex2D(multilabelTex, u+i,v+j);
char* bob = (char*)&labels_neighbor; //horrible but char4's have xyzw members
//TODO: redo this part
int weight = abs(depth-depth_neighbor) < 50 ? 1:0; // 5cms
for(int ti = 0; ti < numTrees; ++ti)
bins[ bob[ti] ] += weight;
}
}
res = findMaxId( NUM_LABELS, bins );
}
labels.ptr(v)[u] = res;
}
/** \brief This merges the labels from all trees into a histogram of probabilities **/
__global__ void KernelCUDA_MultiTreeCreateProb (const int numTrees, PtrStepSz<prob_histogram> prob)
{
// map block and thread onto image coordinates
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if( u >= prob.cols || v >= prob.rows )
return;
char4 pixlabels = tex2D (multilabelTex, u ,v);
char* bob = (char*)&pixlabels; //horrible but char4's have xyzw members
// Reset prob first, this should become NUM_LABELS
for(int in = 0; in < NUM_LABELS; in++)
{
prob.ptr(v)[u].probs[in] = 0;
}
for(int ti = 0; ti < numTrees; ++ti)
{
// Each tree casts a vote to the probability
// TODO: replace this with a histogram copy
prob.ptr(v)[u].probs[bob[ti]] += 0.25;
}
}
/** \brief This will merge the votes from the different trees into one final vote */
void CUDA_runMultiTreeMerge( int numTrees, const Depth& depth, const MultiLabels& multilabel, Labels& labels)
{
//std::cout << "(I) : CUDA_runMultiTreeMerge() called" << std::endl;
labels.create(depth.rows(), depth.cols());
depthTex.addressMode[0] = cudaAddressModeClamp;
TextureBinder binder(depth, depthTex);
multilabelTex.addressMode[0] = cudaAddressModeClamp;
TextureBinder mlabels_binder(multilabel, multilabelTex);
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
KernelCUDA_MultiTreeMerge<<< grid, block >>>( numTrees, labels );
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
/** \brief This will merge the votes from the different trees into one final vote, including probabilistic's */
void CUDA_runMultiTreeProb ( int numTrees,
const Depth& depth,
const MultiLabels& multilabel,
Labels& labels,
LabelProbability& probabilities)
{
std::cout << "(I) : CUDA_runMultiTreeProb() called" << std::endl;
//labels.create(depth.rows(), depth.cols());
//depthTex.addressMode[0] = cudaAddressModeClamp;
//TextureBinder binder(depth, depthTex);
multilabelTex.addressMode[0] = cudaAddressModeClamp;
TextureBinder mlabels_binder(multilabel, multilabelTex);
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
KernelCUDA_MultiTreeCreateProb<<< grid, block >>>( numTrees, probabilities);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
pcl::device::CUDATree::CUDATree (int treeHeight_arg, const std::vector<Node>& nodes, const std::vector<Label>& leaves)
{
treeHeight = treeHeight_arg;
numNodes = (1 << treeHeight) - 1;
assert (static_cast<int> (nodes.size ()) == numNodes );
assert (static_cast<int> (leaves.size ()) == (1 << treeHeight) );
nodes_device.upload(nodes);
leaves_device.upload(leaves);
}
void
pcl::device::MultiTreeLiveProc::process (const Depth& dmap, Labels& lmap)
{
// TODO: is this assert needed if we only call process?
//assert(!trees.empty());
// TODO is this iteration needed when we call multitreepass in the process step?
/* if (trees.size() == 1)
{
const CUDATree& t = trees[0];
CUDA_runTree( focal, t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, lmap );
return;
}
*/
process(dmap, lmap, std::numeric_limits<int>::max());
}
void
pcl::device::MultiTreeLiveProc::process (const Depth& dmap, Labels& lmap, int FGThresh)
{
assert(!trees.empty());
unsigned int numTrees = static_cast<int> (trees.size ());
multilmap.create(dmap.rows(), dmap.cols());
// 1 - run the multi passes
for( int ti = 0; ti < numTrees; ++ti )
{
const CUDATree& t = trees[ti];
CUDA_runMultiTreePass ( FGThresh, ti, static_cast<float> (focal), t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, multilmap );
}
// 2 - run the merging
assert( numTrees <= 4 );
device::CUDA_runMultiTreeMerge(numTrees, dmap, multilmap, lmap);
}
void
pcl::device::MultiTreeLiveProc::processProb (const Depth& dmap, Labels& lmap, LabelProbability& prob, int FGThresh)
{
assert(!trees.empty());
unsigned int numTrees = static_cast<unsigned int> (trees.size ());
assert( numTrees <= 4 );
multilmap.create(dmap.rows(), dmap.cols());
// 1 - run the multi passes
for( int ti = 0; ti < numTrees; ++ti )
{
const CUDATree& t = trees[ti];
CUDA_runMultiTreePass ( FGThresh, ti, static_cast<float> (focal), t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, multilmap );
}
device::CUDA_runMultiTreeProb(numTrees, dmap, multilmap, lmap, prob);
}
|
173ca168a9ad088f6941197db0aea5d3759bc762.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "kernel.hip"
#include "traj_scatter.hh"
#include "mol_param.hh" // So we have Ele to use
#include "scat_param.hh" // So we have q to use
#include "env_param.hh" // So we have c2 to use
#include "WaasKirf.hh"
int main() {
// Parameters
//int num_q = 98;
//int num_atom = 3649;
int frames_to_average = 500; // Will be the last n frames of the traj
int frames_total = 2001; // Look at the xyz file
float *coord, *S_calc, *S_calc_tot;
float *d_coord, *d_S_calc;
int *d_Ele;
float *d_q_S_ref_dS, *d_Force, *d_FF;
float *d_Aq;
float *d_S_calcc, *d_f_ptxc, *d_f_ptyc, *d_f_ptzc;
float *d_raster, *d_V, *d_V_s;
float *d_WK;
int *d_close_flag, *d_close_num, *d_close_idx;
float *d_vdW;
int *close_num, *close_idx;
float *V;
float *d_FF_table, *d_FF_full;
float *d_c2;
int size_coord = 3 * num_atom * sizeof(float);
int size_atom = num_atom * sizeof(int);
int size_atom2 = num_atom2 * sizeof(int);
int size_atom2f = num_atom2 * sizeof(float);
int size_atomxatom = num_atom * num_atom * sizeof(float);
int size_atom2xatom2 = 1024 * num_atom2 * sizeof(int);
int size_q = num_q * sizeof(float);
int size_FF = num_ele * num_q * sizeof(float);
int size_qxatom2 = num_q2 * num_atom2 * sizeof(float); // check if overflow
int size_FF_table = (num_ele+1) * num_q * sizeof(float);
int size_surf = num_atom * num_raster * 3 * sizeof(float);
int size_WK = 11 * num_ele * sizeof(float);
int size_vdW = (num_ele+1) * sizeof(float);
int size_c2 = 10 * sizeof(float);
// Allocate cuda memories
hipMalloc((void **)&d_Aq, size_q);
hipMalloc((void **)&d_coord, size_coord); // 40 KB
hipMalloc((void **)&d_Force, size_coord); // 40 KB
hipMalloc((void **)&d_Ele, size_atom);
hipMalloc((void **)&d_q_S_ref_dS, 3 * size_q);
hipMalloc((void **)&d_S_calc, size_q); // Will be computed on GPU
hipMalloc((void **)&d_f_ptxc, size_qxatom2);
hipMalloc((void **)&d_f_ptyc, size_qxatom2);
hipMalloc((void **)&d_f_ptzc, size_qxatom2);
hipMalloc((void **)&d_S_calcc, size_qxatom2);
hipMalloc((void **)&d_V, size_atom2f);
hipMalloc((void **)&d_V_s, size_atom2f);
hipMalloc((void **)&d_close_flag, size_atom2xatom2);
hipMalloc((void **)&d_close_num, size_atom2);
hipMalloc((void **)&d_close_idx, size_atom2xatom2);
hipMalloc((void **)&d_vdW, size_vdW);
hipMalloc((void **)&d_FF_table, size_FF_table);
hipMalloc((void **)&d_FF_full, size_qxatom2);
hipMalloc((void **)&d_WK, size_WK);
hipMalloc((void **)&d_c2, size_c2);
// Allocate local memory
coord = (float *)malloc(size_coord);
S_calc = (float *)malloc(size_q);
S_calc_tot = (float *)malloc(size_q);
char* buf[100], buf1[100], buf2[100], buf3[100];
float f1, f2, f3;
// Initialize cuda matrices
hipMemset(d_Aq, 0.0, size_q);
hipMemset(d_S_calc, 0.0, size_q);
hipMemset(d_f_ptxc,0.0, size_qxatom2);
hipMemset(d_f_ptyc,0.0, size_qxatom2);
hipMemset(d_f_ptzc,0.0, size_qxatom2);
hipMemset(d_S_calcc,0.0, size_qxatom2);
hipMemset(d_close_flag, 0, size_qxatom2);
hipMemset(d_close_num, 0, size_atom2);
hipMemset(d_close_idx, 0, size_atom2xatom2);
// Copy necessary data
// hipMemcpy(d_coord, coord, size_coord, hipMemcpyHostToDevice);
hipMemcpy(d_vdW, vdW, size_vdW, hipMemcpyHostToDevice);
hipMemcpy(d_Ele, Ele, size_atom, hipMemcpyHostToDevice);
hipMemcpy(d_q_S_ref_dS, q_S_ref_dS, 3 * size_q, hipMemcpyHostToDevice);
hipMemcpy(d_WK, WK, size_WK, hipMemcpyHostToDevice);
//hipMemcpy(d_c2, c2, size_c2, hipMemcpyHostToDevice);
// Initialize local matrices
for (int ii = 0; ii < 3 * num_atom; ii++) coord[ii] = 0.0;
for (int ii = 0; ii < num_q; ii++) {
S_calc[ii] = 0.0;
S_calc_tot[ii] = 0.0;
}
float sigma2 = 1.0;
float alpha = 1.0;
FILE *fp = fopen("../test.txt","r");
if (fp == NULL) {
printf("Opening file failed.\n");
return 1;
} else {
printf("Opened file.\n");
}
// Read file by num_atom
for (int ii = 0; ii < frames_total; ii++) {
fscanf(fp,"%*s",buf);
fscanf(fp,"%*s %d",buf);
printf("Read the first two lines, ii = %d\n", ii);
for (int jj = 0; jj < num_atom; jj++) {
fscanf(fp,"%s %f %f %f",buf, &f1, &f2, &f3);
//printf("Readed line %d\n", jj);
coord[3*jj] = f1;
coord[3*jj+1] = f2;
coord[3*jj+2] = f3;
//printf("Coord[jj] = %.3f, Coord[jj+1] = %.3f, Coord[jj+2] = %.3f\n",coord[3*jj], coord[3*jj+1], coord[3*jj+2]);
}
if (ii >= frames_total - frames_to_average) {
printf("Calculating frame %d...\n", ii);
hipMemcpy(d_coord, coord, size_coord, hipMemcpyHostToDevice);
hipMemset(d_Aq, 0.0, size_q);
hipMemset(d_S_calc, 0.0, size_q);
hipMemset(d_f_ptxc,0.0, size_qxatom2);
hipMemset(d_f_ptyc,0.0, size_qxatom2);
hipMemset(d_f_ptzc,0.0, size_qxatom2);
hipMemset(d_S_calcc,0.0, size_qxatom2);
hipMemset(d_close_flag, 0, size_qxatom2);
hipMemset(d_close_num, 0, size_atom2);
hipMemset(d_close_idx, 0, size_atom2xatom2);
hipLaunchKernelGGL(( dist_calc), dim3(1024), dim3(1024), 0, 0, d_coord, //d_dx, d_dy, d_dz,
d_close_num, d_close_flag, d_close_idx, num_atom, num_atom2);
hipLaunchKernelGGL(( surf_calc), dim3(1024),dim3(512), 0, 0, d_coord, d_Ele, d_close_num, d_close_idx, d_vdW,
num_atom, num_atom2, num_raster, sol_s, d_V);
hipLaunchKernelGGL(( sum_V), dim3(1),dim3(1024), 0, 0, d_V, d_V_s, num_atom, num_atom2, d_Ele, d_vdW);
hipLaunchKernelGGL(( FF_calc), dim3(320), dim3(32), 0, 0, d_q_S_ref_dS, d_WK, d_vdW, num_q, num_ele, c1, r_m, d_FF_table);
hipLaunchKernelGGL(( create_FF_full_FoXS), dim3(320), dim3(1024), 0, 0, d_FF_table, d_V, c2, d_Ele, d_FF_full,
num_q, num_ele, num_atom, num_atom2);
hipLaunchKernelGGL(( scat_calc), dim3(320), dim3(1024), 0, 0, d_coord,
d_Ele,
d_q_S_ref_dS,
d_S_calc, num_atom, num_q, num_ele, d_Aq,
alpha, k_chi, sigma2, d_f_ptxc, d_f_ptyc,
d_f_ptzc, d_S_calcc, num_atom2,
d_FF_full);
hipMemcpy(S_calc ,d_S_calc, size_q, hipMemcpyDeviceToHost);
for (int jj = 0; jj < num_q; jj++) {
S_calc_tot[jj] += S_calc[jj];
}
}
}
fclose(fp);
for (int ii = 0; ii < num_q; ii++) {
S_calc_tot[ii] /= float(frames_to_average);
printf("q = %.3f, S(q) = %.5f \n", q_S_ref_dS[ii], S_calc_tot[ii]);
}
// Free cuda and local memories
hipFree(d_coord); hipFree(d_Force);
hipFree(d_Ele);
hipFree(d_q_S_ref_dS);
hipFree(d_S_calc); hipFree(d_Aq);
hipFree(d_f_ptxc); hipFree(d_f_ptyc); hipFree(d_f_ptzc);
hipFree(d_S_calcc); hipFree(d_WK);
hipFree(d_V);
hipFree(d_close_flag); hipFree(d_close_num); hipFree(d_close_idx);
hipFree(d_vdW);
return 0;
}
| 173ca168a9ad088f6941197db0aea5d3759bc762.cu |
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include "kernel.cu"
#include "traj_scatter.hh"
#include "mol_param.hh" // So we have Ele to use
#include "scat_param.hh" // So we have q to use
#include "env_param.hh" // So we have c2 to use
#include "WaasKirf.hh"
int main() {
// Parameters
//int num_q = 98;
//int num_atom = 3649;
int frames_to_average = 500; // Will be the last n frames of the traj
int frames_total = 2001; // Look at the xyz file
float *coord, *S_calc, *S_calc_tot;
float *d_coord, *d_S_calc;
int *d_Ele;
float *d_q_S_ref_dS, *d_Force, *d_FF;
float *d_Aq;
float *d_S_calcc, *d_f_ptxc, *d_f_ptyc, *d_f_ptzc;
float *d_raster, *d_V, *d_V_s;
float *d_WK;
int *d_close_flag, *d_close_num, *d_close_idx;
float *d_vdW;
int *close_num, *close_idx;
float *V;
float *d_FF_table, *d_FF_full;
float *d_c2;
int size_coord = 3 * num_atom * sizeof(float);
int size_atom = num_atom * sizeof(int);
int size_atom2 = num_atom2 * sizeof(int);
int size_atom2f = num_atom2 * sizeof(float);
int size_atomxatom = num_atom * num_atom * sizeof(float);
int size_atom2xatom2 = 1024 * num_atom2 * sizeof(int);
int size_q = num_q * sizeof(float);
int size_FF = num_ele * num_q * sizeof(float);
int size_qxatom2 = num_q2 * num_atom2 * sizeof(float); // check if overflow
int size_FF_table = (num_ele+1) * num_q * sizeof(float);
int size_surf = num_atom * num_raster * 3 * sizeof(float);
int size_WK = 11 * num_ele * sizeof(float);
int size_vdW = (num_ele+1) * sizeof(float);
int size_c2 = 10 * sizeof(float);
// Allocate cuda memories
cudaMalloc((void **)&d_Aq, size_q);
cudaMalloc((void **)&d_coord, size_coord); // 40 KB
cudaMalloc((void **)&d_Force, size_coord); // 40 KB
cudaMalloc((void **)&d_Ele, size_atom);
cudaMalloc((void **)&d_q_S_ref_dS, 3 * size_q);
cudaMalloc((void **)&d_S_calc, size_q); // Will be computed on GPU
cudaMalloc((void **)&d_f_ptxc, size_qxatom2);
cudaMalloc((void **)&d_f_ptyc, size_qxatom2);
cudaMalloc((void **)&d_f_ptzc, size_qxatom2);
cudaMalloc((void **)&d_S_calcc, size_qxatom2);
cudaMalloc((void **)&d_V, size_atom2f);
cudaMalloc((void **)&d_V_s, size_atom2f);
cudaMalloc((void **)&d_close_flag, size_atom2xatom2);
cudaMalloc((void **)&d_close_num, size_atom2);
cudaMalloc((void **)&d_close_idx, size_atom2xatom2);
cudaMalloc((void **)&d_vdW, size_vdW);
cudaMalloc((void **)&d_FF_table, size_FF_table);
cudaMalloc((void **)&d_FF_full, size_qxatom2);
cudaMalloc((void **)&d_WK, size_WK);
cudaMalloc((void **)&d_c2, size_c2);
// Allocate local memory
coord = (float *)malloc(size_coord);
S_calc = (float *)malloc(size_q);
S_calc_tot = (float *)malloc(size_q);
char* buf[100], buf1[100], buf2[100], buf3[100];
float f1, f2, f3;
// Initialize cuda matrices
cudaMemset(d_Aq, 0.0, size_q);
cudaMemset(d_S_calc, 0.0, size_q);
cudaMemset(d_f_ptxc,0.0, size_qxatom2);
cudaMemset(d_f_ptyc,0.0, size_qxatom2);
cudaMemset(d_f_ptzc,0.0, size_qxatom2);
cudaMemset(d_S_calcc,0.0, size_qxatom2);
cudaMemset(d_close_flag, 0, size_qxatom2);
cudaMemset(d_close_num, 0, size_atom2);
cudaMemset(d_close_idx, 0, size_atom2xatom2);
// Copy necessary data
// cudaMemcpy(d_coord, coord, size_coord, cudaMemcpyHostToDevice);
cudaMemcpy(d_vdW, vdW, size_vdW, cudaMemcpyHostToDevice);
cudaMemcpy(d_Ele, Ele, size_atom, cudaMemcpyHostToDevice);
cudaMemcpy(d_q_S_ref_dS, q_S_ref_dS, 3 * size_q, cudaMemcpyHostToDevice);
cudaMemcpy(d_WK, WK, size_WK, cudaMemcpyHostToDevice);
//cudaMemcpy(d_c2, c2, size_c2, cudaMemcpyHostToDevice);
// Initialize local matrices
for (int ii = 0; ii < 3 * num_atom; ii++) coord[ii] = 0.0;
for (int ii = 0; ii < num_q; ii++) {
S_calc[ii] = 0.0;
S_calc_tot[ii] = 0.0;
}
float sigma2 = 1.0;
float alpha = 1.0;
FILE *fp = fopen("../test.txt","r");
if (fp == NULL) {
printf("Opening file failed.\n");
return 1;
} else {
printf("Opened file.\n");
}
// Read file by num_atom
for (int ii = 0; ii < frames_total; ii++) {
fscanf(fp,"%*s",buf);
fscanf(fp,"%*s %d",buf);
printf("Read the first two lines, ii = %d\n", ii);
for (int jj = 0; jj < num_atom; jj++) {
fscanf(fp,"%s %f %f %f",buf, &f1, &f2, &f3);
//printf("Readed line %d\n", jj);
coord[3*jj] = f1;
coord[3*jj+1] = f2;
coord[3*jj+2] = f3;
//printf("Coord[jj] = %.3f, Coord[jj+1] = %.3f, Coord[jj+2] = %.3f\n",coord[3*jj], coord[3*jj+1], coord[3*jj+2]);
}
if (ii >= frames_total - frames_to_average) {
printf("Calculating frame %d...\n", ii);
cudaMemcpy(d_coord, coord, size_coord, cudaMemcpyHostToDevice);
cudaMemset(d_Aq, 0.0, size_q);
cudaMemset(d_S_calc, 0.0, size_q);
cudaMemset(d_f_ptxc,0.0, size_qxatom2);
cudaMemset(d_f_ptyc,0.0, size_qxatom2);
cudaMemset(d_f_ptzc,0.0, size_qxatom2);
cudaMemset(d_S_calcc,0.0, size_qxatom2);
cudaMemset(d_close_flag, 0, size_qxatom2);
cudaMemset(d_close_num, 0, size_atom2);
cudaMemset(d_close_idx, 0, size_atom2xatom2);
dist_calc<<<1024, 1024>>>(d_coord, //d_dx, d_dy, d_dz,
d_close_num, d_close_flag, d_close_idx, num_atom, num_atom2);
surf_calc<<<1024,512>>>(d_coord, d_Ele, d_close_num, d_close_idx, d_vdW,
num_atom, num_atom2, num_raster, sol_s, d_V);
sum_V<<<1,1024>>>(d_V, d_V_s, num_atom, num_atom2, d_Ele, d_vdW);
FF_calc<<<320, 32>>>(d_q_S_ref_dS, d_WK, d_vdW, num_q, num_ele, c1, r_m, d_FF_table);
create_FF_full_FoXS<<<320, 1024>>>(d_FF_table, d_V, c2, d_Ele, d_FF_full,
num_q, num_ele, num_atom, num_atom2);
scat_calc<<<320, 1024>>>(d_coord,
d_Ele,
d_q_S_ref_dS,
d_S_calc, num_atom, num_q, num_ele, d_Aq,
alpha, k_chi, sigma2, d_f_ptxc, d_f_ptyc,
d_f_ptzc, d_S_calcc, num_atom2,
d_FF_full);
cudaMemcpy(S_calc ,d_S_calc, size_q, cudaMemcpyDeviceToHost);
for (int jj = 0; jj < num_q; jj++) {
S_calc_tot[jj] += S_calc[jj];
}
}
}
fclose(fp);
for (int ii = 0; ii < num_q; ii++) {
S_calc_tot[ii] /= float(frames_to_average);
printf("q = %.3f, S(q) = %.5f \n", q_S_ref_dS[ii], S_calc_tot[ii]);
}
// Free cuda and local memories
cudaFree(d_coord); cudaFree(d_Force);
cudaFree(d_Ele);
cudaFree(d_q_S_ref_dS);
cudaFree(d_S_calc); cudaFree(d_Aq);
cudaFree(d_f_ptxc); cudaFree(d_f_ptyc); cudaFree(d_f_ptzc);
cudaFree(d_S_calcc); cudaFree(d_WK);
cudaFree(d_V);
cudaFree(d_close_flag); cudaFree(d_close_num); cudaFree(d_close_idx);
cudaFree(d_vdW);
return 0;
}
|
4b7bc6710c9917ba8c398b9da861e6969b26e197.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <cub_helper.cuh>
namespace quda {
using namespace gauge;
#ifdef GPU_GAUGE_TOOLS
template <typename Mom>
struct MomActionArg : public ReduceArg<double> {
int threads; // number of active threads required
Mom mom;
int X[4]; // grid dimensions
MomActionArg(const Mom &mom, const GaugeField &meta)
: ReduceArg<double>(), mom(mom) {
threads = meta.VolumeCB();
for(int dir=0; dir<4; ++dir) X[dir] = meta.X()[dir];
}
};
template<int blockSize, typename Float, typename Mom>
__global__ void computeMomAction(MomActionArg<Mom> arg){
int x = threadIdx.x + blockIdx.x*blockDim.x;
int parity = threadIdx.y;
double action = 0.0;
if(x < arg.threads) {
// loop over direction
for (int mu=0; mu<4; mu++) {
Float v[10];
arg.mom.load(v, x, mu, parity);
double local_sum = 0.0;
for (int j=0; j<6; j++) local_sum += v[j]*v[j];
for (int j=6; j<9; j++) local_sum += 0.5*v[j]*v[j];
local_sum -= 4.0;
action += local_sum;
}
}
// perform final inter-block reduction and write out result
reduce2d<blockSize,2>(arg, action);
}
template<typename Float, typename Mom>
class MomAction : TunableLocalParity {
MomActionArg<Mom> arg;
const QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
MomAction(MomActionArg<Mom> &arg)
: arg(arg), location(QUDA_CUDA_FIELD_LOCATION) {}
~MomAction () { }
void apply(const hipStream_t &stream){
if(location == QUDA_CUDA_FIELD_LOCATION){
arg.result_h[0] = 0.0;
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL_LOCAL_PARITY(computeMomAction, tp, stream, arg, Float, Mom);
} else {
errorQuda("CPU not supported yet\n");
}
}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3];
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
long long flops() const { return 4*2*arg.threads*23; }
long long bytes() const { return 4*2*arg.threads*arg.mom.Bytes(); }
};
template<typename Float, typename Mom>
void momAction(const Mom mom, const GaugeField& meta, double &action) {
MomActionArg<Mom> arg(mom, meta);
MomAction<Float,Mom> momAction(arg);
momAction.apply(0);
hipDeviceSynchronize();
comm_allreduce((double*)arg.result_h);
action = arg.result_h[0];
}
// Dispatch on the momentum field's data order and reconstruction type.
// Only FLOAT2 order with 10-real reconstruction is implemented.
template<typename Float>
double momAction(const GaugeField& mom) {
double action = 0.0;
if (mom.Order() != QUDA_FLOAT2_GAUGE_ORDER) {
errorQuda("Gauge Field order %d not supported", mom.Order());
} else if (mom.Reconstruct() != QUDA_RECONSTRUCT_10) {
errorQuda("Reconstruction type %d not supported", mom.Reconstruct());
} else {
momAction<Float>(FloatNOrder<Float,10,2,10>(mom), mom, action);
}
return action;
}
#endif
// Public entry point: compute the total momentum action of a momentum field,
// dispatching on field precision.  Aborts via errorQuda when gauge tools are
// not compiled in or the precision is unsupported.
double computeMomAction(const GaugeField& mom) {
double action = 0.0;
#ifdef GPU_GAUGE_TOOLS
if (mom.Precision() == QUDA_DOUBLE_PRECISION) {
action = momAction<double>(mom);
} else if(mom.Precision() == QUDA_SINGLE_PRECISION) {
action = momAction<float>(mom);
} else {
errorQuda("Precision %d not supported", mom.Precision());
}
#else
// fixed typo: "not build" -> "not built" (matches updateMomentum's message)
errorQuda("%s not built", __func__);
#endif
return action;
}
#ifdef GPU_GAUGE_TOOLS
// Kernel argument for UpdateMom: checkerboard volume plus the momentum field
// accessor (updated in place), the read-only force accessor, and the step
// coefficient multiplying the force.
template<typename Float, typename Mom, typename Force>
struct UpdateMomArg {
int volumeCB; // checkerboard (half-lattice) volume
Mom mom; // momentum field accessor, written back by the kernel
Float coeff; // multiplier applied to the force contribution
Force force; // force field accessor, read only
UpdateMomArg(Mom &mom, const Float &coeff, Force &force, GaugeField &meta)
: volumeCB(meta.VolumeCB()), mom(mom), coeff(coeff), force(force) {}
};
// Momentum update kernel: for every site and direction, mom += coeff * force,
// with the result projected back onto the (traceless) anti-hermitian algebra
// by makeAntiHerm.  Grid-stride loop over the checkerboard volume in x;
// blockIdx.y indexes parity (launched with gridDim.y == 2).
template<typename Float, typename Mom, typename Force>
__global__ void UpdateMom(UpdateMomArg<Float, Mom, Force> arg) {
const int parity = blockIdx.y;
const int stride = gridDim.x*blockDim.x;
for (int x = blockIdx.x*blockDim.x + threadIdx.x; x < arg.volumeCB; x += stride) {
for (int d=0; d<4; d++) {
Matrix<complex<Float>,3> p, g;
arg.mom.load(reinterpret_cast<Float*>(p.data), x, d, parity);
arg.force.load(reinterpret_cast<Float*>(g.data), x, d, parity);
p = p + arg.coeff * g;
makeAntiHerm(p);
arg.mom.save(reinterpret_cast<Float*>(p.data), x, d, parity);
}
}
} // UpdateMom
// Host-side launcher for UpdateMom: fixed 128-thread blocks, grid covering
// the checkerboard volume in x and the two parities in y.  Runs on the
// default stream; no explicit launch-error check here (caller checks).
template<typename Float, typename Mom, typename Force>
void updateMomentum(Mom mom, Float coeff, Force force, GaugeField &meta) {
UpdateMomArg<Float,Mom,Force> arg(mom, coeff, force, meta);
dim3 block(128, 1, 1);
dim3 grid((arg.volumeCB + block.x - 1)/ block.x, 2, 1); // y dimension is parity
hipLaunchKernelGGL(( UpdateMom<Float,Mom,Force>), dim3(grid),dim3(block), 0, 0, arg);
}
// Instantiate field accessors based on the force field's reconstruction type
// and forward to the kernel launcher.  The momentum must use RECONSTRUCT_10.
// NOTE(review): the template arguments <Float, 18, 2, 11> / <..., 18, 2, 18>
// select specific FloatNOrder layouts; 11 appears to denote the momentum
// packing and 18 the full-matrix packing — confirm against gauge_field_order.h.
template <typename Float>
void updateMomentum(cudaGaugeField &mom, double coeff, cudaGaugeField &force) {
if (mom.Reconstruct() != QUDA_RECONSTRUCT_10)
errorQuda("Momentum field with reconstruct %d not supported", mom.Reconstruct())
;
if (force.Reconstruct() == QUDA_RECONSTRUCT_10) {
updateMomentum<Float>(FloatNOrder<Float, 18, 2, 11>(mom), static_cast<Float>(coeff),
FloatNOrder<Float, 18, 2, 11>(force), force);
} else if (force.Reconstruct() == QUDA_RECONSTRUCT_NO) {
updateMomentum<Float>(FloatNOrder<Float, 18, 2, 11>(mom), static_cast<Float>(coeff),
FloatNOrder<Float, 18, 2, 18>(force), force);
} else {
errorQuda("Unsupported force reconstruction: %d", force.Reconstruct());
}
}
#endif // GPU_GAUGE_TOOLS
// Public entry point: mom += coeff * force (anti-hermitian projected).
// Validates field order and precision before dispatching; only FLOAT2 order
// and double precision are currently accepted.
void updateMomentum(cudaGaugeField &mom, double coeff, cudaGaugeField &force) {
#ifdef GPU_GAUGE_TOOLS
if(mom.Order() != QUDA_FLOAT2_GAUGE_ORDER)
errorQuda("Unsupported output ordering: %d\n", mom.Order());
if (mom.Precision() != force.Precision())
errorQuda("Mixed precision not supported: %d %d\n", mom.Precision(), force.Precision());
if (mom.Precision() == QUDA_DOUBLE_PRECISION) {
updateMomentum<double>(mom, coeff, force);
} else {
errorQuda("Unsupported precision: %d", mom.Precision());
}
checkCudaError(); // surface any kernel launch/execution error
#else
errorQuda("%s not built", __func__);
#endif // GPU_GAUGE_TOOLS
return;
}
} // namespace quda
| 4b7bc6710c9917ba8c398b9da861e6969b26e197.cu | #include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <cub_helper.cuh>
namespace quda {
using namespace gauge;
#ifdef GPU_GAUGE_TOOLS
template <typename Mom>
struct MomActionArg : public ReduceArg<double> {
int threads; // number of active threads required
Mom mom;
int X[4]; // grid dimensions
MomActionArg(const Mom &mom, const GaugeField &meta)
: ReduceArg<double>(), mom(mom) {
threads = meta.VolumeCB();
for(int dir=0; dir<4; ++dir) X[dir] = meta.X()[dir];
}
};
template<int blockSize, typename Float, typename Mom>
__global__ void computeMomAction(MomActionArg<Mom> arg){
int x = threadIdx.x + blockIdx.x*blockDim.x;
int parity = threadIdx.y;
double action = 0.0;
if(x < arg.threads) {
// loop over direction
for (int mu=0; mu<4; mu++) {
Float v[10];
arg.mom.load(v, x, mu, parity);
double local_sum = 0.0;
for (int j=0; j<6; j++) local_sum += v[j]*v[j];
for (int j=6; j<9; j++) local_sum += 0.5*v[j]*v[j];
local_sum -= 4.0;
action += local_sum;
}
}
// perform final inter-block reduction and write out result
reduce2d<blockSize,2>(arg, action);
}
// Tunable wrapper around the computeMomAction reduction kernel.  Supplies the
// autotuner with launch metadata (shared-memory needs, tune key, flop/byte
// counts) and launches the kernel via LAUNCH_KERNEL_LOCAL_PARITY.
template<typename Float, typename Mom>
class MomAction : TunableLocalParity {
MomActionArg<Mom> arg;
const QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
// fixed: "¶m" was an HTML-entity-garbled "&param"
unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
MomAction(MomActionArg<Mom> &arg)
: arg(arg), location(QUDA_CUDA_FIELD_LOCATION) {}
~MomAction () { }
// Zero the host-visible partial result, then launch the tuned reduction.
void apply(const cudaStream_t &stream){
if(location == QUDA_CUDA_FIELD_LOCATION){
arg.result_h[0] = 0.0;
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL_LOCAL_PARITY(computeMomAction, tp, stream, arg, Float, Mom);
} else {
errorQuda("CPU not supported yet\n");
}
}
// Key used by the autotuner cache: lattice volume + type name + aux string.
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3];
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
long long flops() const { return 4*2*arg.threads*23; }
long long bytes() const { return 4*2*arg.threads*arg.mom.Bytes(); }
};
template<typename Float, typename Mom>
void momAction(const Mom mom, const GaugeField& meta, double &action) {
MomActionArg<Mom> arg(mom, meta);
MomAction<Float,Mom> momAction(arg);
momAction.apply(0);
cudaDeviceSynchronize();
comm_allreduce((double*)arg.result_h);
action = arg.result_h[0];
}
template<typename Float>
double momAction(const GaugeField& mom) {
double action = 0.0;
if (mom.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (mom.Reconstruct() == QUDA_RECONSTRUCT_10) {
momAction<Float>(FloatNOrder<Float,10,2,10>(mom), mom, action);
} else {
errorQuda("Reconstruction type %d not supported", mom.Reconstruct());
}
} else {
errorQuda("Gauge Field order %d not supported", mom.Order());
}
return action;
}
#endif
// Public entry point: compute the total momentum action of a momentum field,
// dispatching on field precision.  Aborts via errorQuda when gauge tools are
// not compiled in or the precision is unsupported.
double computeMomAction(const GaugeField& mom) {
double action = 0.0;
#ifdef GPU_GAUGE_TOOLS
if (mom.Precision() == QUDA_DOUBLE_PRECISION) {
action = momAction<double>(mom);
} else if(mom.Precision() == QUDA_SINGLE_PRECISION) {
action = momAction<float>(mom);
} else {
errorQuda("Precision %d not supported", mom.Precision());
}
#else
// fixed typo: "not build" -> "not built" (matches updateMomentum's message)
errorQuda("%s not built", __func__);
#endif
return action;
}
#ifdef GPU_GAUGE_TOOLS
template<typename Float, typename Mom, typename Force>
struct UpdateMomArg {
int volumeCB;
Mom mom;
Float coeff;
Force force;
UpdateMomArg(Mom &mom, const Float &coeff, Force &force, GaugeField &meta)
: volumeCB(meta.VolumeCB()), mom(mom), coeff(coeff), force(force) {}
};
template<typename Float, typename Mom, typename Force>
__global__ void UpdateMom(UpdateMomArg<Float, Mom, Force> arg) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int parity = blockIdx.y;
Matrix<complex<Float>,3> m, f;
while(x<arg.volumeCB){
for (int d=0; d<4; d++) {
arg.mom.load(reinterpret_cast<Float*>(m.data), x, d, parity);
arg.force.load(reinterpret_cast<Float*>(f.data), x, d, parity);
m = m + arg.coeff * f;
makeAntiHerm(m);
arg.mom.save(reinterpret_cast<Float*>(m.data), x, d, parity);
}
x += gridDim.x*blockDim.x;
}
return;
} // UpdateMom
template<typename Float, typename Mom, typename Force>
void updateMomentum(Mom mom, Float coeff, Force force, GaugeField &meta) {
UpdateMomArg<Float,Mom,Force> arg(mom, coeff, force, meta);
dim3 block(128, 1, 1);
dim3 grid((arg.volumeCB + block.x - 1)/ block.x, 2, 1); // y dimension is parity
UpdateMom<Float,Mom,Force><<<grid,block>>>(arg);
}
template <typename Float>
void updateMomentum(cudaGaugeField &mom, double coeff, cudaGaugeField &force) {
if (mom.Reconstruct() != QUDA_RECONSTRUCT_10)
errorQuda("Momentum field with reconstruct %d not supported", mom.Reconstruct());
if (force.Reconstruct() == QUDA_RECONSTRUCT_10) {
updateMomentum<Float>(FloatNOrder<Float, 18, 2, 11>(mom), static_cast<Float>(coeff),
FloatNOrder<Float, 18, 2, 11>(force), force);
} else if (force.Reconstruct() == QUDA_RECONSTRUCT_NO) {
updateMomentum<Float>(FloatNOrder<Float, 18, 2, 11>(mom), static_cast<Float>(coeff),
FloatNOrder<Float, 18, 2, 18>(force), force);
} else {
errorQuda("Unsupported force reconstruction: %d", force.Reconstruct());
}
}
#endif // GPU_GAUGE_TOOLS
void updateMomentum(cudaGaugeField &mom, double coeff, cudaGaugeField &force) {
#ifdef GPU_GAUGE_TOOLS
if(mom.Order() != QUDA_FLOAT2_GAUGE_ORDER)
errorQuda("Unsupported output ordering: %d\n", mom.Order());
if (mom.Precision() != force.Precision())
errorQuda("Mixed precision not supported: %d %d\n", mom.Precision(), force.Precision());
if (mom.Precision() == QUDA_DOUBLE_PRECISION) {
updateMomentum<double>(mom, coeff, force);
} else {
errorQuda("Unsupported precision: %d", mom.Precision());
}
checkCudaError();
#else
errorQuda("%s not built", __func__);
#endif // GPU_GAUGE_TOOLS
return;
}
} // namespace quda
|
2123fa0ad7532d6fe9020822f5982434a897f6c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <string>
#include <random>
#include <cstdlib>
#include <cmath>
#include <limits>
#include <opencv.hpp>
#include "residualMaker.cuh"
#include "168modelMaker.cuh"
using namespace cv;
using namespace std;
// Release every device-side histogram buffer owned by the feature struct.
void free_hists(PSRM_Features &host_features)
{
for (size_t idx = 0; idx < HIST_COUNT; ++idx)
hipFree(host_features.hists[idx]);
}
// Write the extracted features for one image to "<file_path>_Features.fea".
// Format: first line is the class id; then one line per submodel, each bin
// normalised by (sum of bins) / 4.  Returns 0 on success, -1 if the output
// file could not be opened.
int save_features(string file_path, int class_id, int** hists)
{
int ret_val = 0;
file_path += "_Features.fea";
FILE* fp_result = fopen(file_path.c_str(), "wb");
if (fp_result)
{
std::fprintf(fp_result, "%d\n", class_id);
float sum0 = 0;
for (size_t i = 0; i < COUNT_OF_SUBMODELS; i++)
{
sum0 = 0;
for (size_t j = 0; j < SPAM_SYM_COUNT * 2; j++)
{
sum0 += hists[i][j];
}
sum0 /= 4.0f;
for (size_t j = 0; j < SPAM_SYM_COUNT * 2; j++)
{
if (sum0 > 0)
std::fprintf(fp_result, "\t%f", hists[i][j] / sum0);
else
// was `0` (int): passing an int for %f is undefined behaviour
std::fprintf(fp_result, "\t%f", 0.0);
}
std::fprintf(fp_result, "\n");
}
fclose(fp_result);
}
else
{
ret_val = -1;
}
return ret_val;
}
int MINMAXsymmCoord[ 2 * FULL_DIM];
int SPAMsymmCoord[FULL_DIM];
// Precompute the bin-merging lookup tables used to symmetrise co-occurrence
// features (SRM-style).  Each co-occurrence coordinate (a base-B number with
// ORDER digits, B = 2*T+1) is mapped to a symmetrised bin index:
//  - MINMAXsymmCoord: [0, FULL_DIM) for "min" residuals, [FULL_DIM, 2*FULL_DIM)
//    for "max"; sign flip and digit reversal fall into the same bin.
//  - SPAMsymmCoord: sign and reversal symmetry for SPAM-type residuals.
// Fills the file-scope arrays MINMAXsymmCoord / SPAMsymmCoord in place.
void ComputeSymmCoords()
{
bool symmSign = true, symmReverse = true, symmMinMax = true;
// Preparation of inSymCoord matrix for co-occurrence and symmetrization
int B = 2 * T + 1;
int alreadyUsed;
// MINMAX
alreadyUsed = 0;
//MINMAXsymmCoord = new int[2 * FULL_DIM]; // [0, FULL_DIM-1] = min; [FULL_DIM, 2*FULL_DIM-1] = max
for (int i = 0; i<2 * FULL_DIM; i++) MINMAXsymmCoord[i] = -1;
for (int numIter = 0; numIter < FULL_DIM; numIter++)
{
if (MINMAXsymmCoord[numIter] == -1)
{
// coordReverse = the coordinate with its base-B digits reversed
int coordReverse = 0;
int num = numIter;
for (int i = 0; i<ORDER; i++)
{
coordReverse += (num % B) * ((int)::pow((float)B, ORDER - i - 1));
num = num / B;
}
// To the same bin: min(X), max(-X), min(Xreverse), max(-Xreverse)
if (MINMAXsymmCoord[numIter] == -1)
{
MINMAXsymmCoord[numIter] = alreadyUsed; // min(X)
if (symmMinMax) MINMAXsymmCoord[2 * FULL_DIM - numIter - 1] = alreadyUsed; // max(-X)
if (symmReverse) MINMAXsymmCoord[coordReverse] = alreadyUsed; // min(Xreverse)
if ((symmMinMax) && (symmReverse)) MINMAXsymmCoord[2 * FULL_DIM - coordReverse - 1] = alreadyUsed; // max(-Xreverse)
alreadyUsed++;
}
}
}
// second pass: assign bins for coordinates in the "max" half not yet covered
for (int numIter = 0; numIter < FULL_DIM; numIter++)
{
if (MINMAXsymmCoord[FULL_DIM + numIter] == -1)
{
int coordReverse = 0;
int num = numIter;
for (int i = 0; i<ORDER; i++)
{
coordReverse += (num % B) * ((int)::pow((float)B, ORDER - i - 1));
num = num / B;
}
// To the same bin: max(X), min(-X), max(Xreverse), min(-Xreverse)
if (MINMAXsymmCoord[FULL_DIM + numIter] == -1)
{
MINMAXsymmCoord[FULL_DIM + numIter] = alreadyUsed; // max(X)
if (symmMinMax) MINMAXsymmCoord[FULL_DIM - numIter - 1] = alreadyUsed; // min(-X)
if (symmReverse) MINMAXsymmCoord[FULL_DIM + coordReverse] = alreadyUsed; // max(Xreverse)
if ((symmMinMax) && (symmReverse)) MINMAXsymmCoord[FULL_DIM - coordReverse - 1] = alreadyUsed; // min(-Xreverse)
alreadyUsed++;
}
}
}
// SPAM
alreadyUsed = 0;
//SPAMsymmCoord = new int[FULL_DIM];
for (int i = 0; i<FULL_DIM; i++) SPAMsymmCoord[i] = -1;
for (int numIter = 0; numIter < FULL_DIM; numIter++)
{
if (SPAMsymmCoord[numIter] == -1)
{
int coordReverse = 0;
int num = numIter;
for (int i = 0; i<ORDER; i++)
{
coordReverse += (num % B) * ((int)::pow((float)B, ORDER - i - 1));
num = num / B;
}
// To the same bin: X, -X, Xreverse, -Xreverse
SPAMsymmCoord[numIter] = alreadyUsed; // X
if (symmSign) SPAMsymmCoord[FULL_DIM - numIter - 1] = alreadyUsed; // -X
if (symmReverse) SPAMsymmCoord[coordReverse] = alreadyUsed; // Xreverse
if ((symmSign) && (symmReverse)) SPAMsymmCoord[FULL_DIM - coordReverse - 1] = alreadyUsed; // -Xreverse
alreadyUsed++;
}
}
// In order to have the same order of the features as the matlab SRM - shift +1
for (int i = 0; i<FULL_DIM; i++)
{
if (SPAMsymmCoord[i] == alreadyUsed - 1) SPAMsymmCoord[i] = 0;
else SPAMsymmCoord[i]++;
}
}
int main(int argc, char*argv[])
{
if (argc < 2)
{
printf("Please, Enter the List File Path as first argument");
getchar();
return -1;
}
printf("file list is %s\n", argv[1]);
FILE* fp_list = fopen(argv[1], "r");
FILE *fp_existance = 0;
if (!fp_list)
{
printf("the your Entered List File Path is not exist");
getchar();
return -2;
}
hipError_t cudaStatus;
ComputeSymmCoords();
int *dev_MINMAXsymmCoord;
int *dev_SPAMsymmCoord;
cudaStatus = hipMalloc(&dev_MINMAXsymmCoord, FULL_DIM * 2 * sizeof(int));
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, hipGetErrorString(cudaStatus), __LINE__);
}
cudaStatus = hipMemcpy(dev_MINMAXsymmCoord, MINMAXsymmCoord, FULL_DIM * 2 * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, hipGetErrorString(cudaStatus), __LINE__);
}
cudaStatus = hipMalloc(&dev_SPAMsymmCoord, FULL_DIM * sizeof(int));
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, hipGetErrorString(cudaStatus), __LINE__);
}
cudaStatus = hipMemcpy(dev_SPAMsymmCoord, SPAMsymmCoord, FULL_DIM * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, hipGetErrorString(cudaStatus), __LINE__);
}
PSRM_Features host_features = {};
int *hists[COUNT_OF_SUBMODELS];
for (size_t i = 0; i < COUNT_OF_SUBMODELS; i++)
{
hists[i] = new int[SPAM_SYM_COUNT * 2];
}
for (size_t i = 0; i < HIST_COUNT; i++)
{
cudaStatus = hipMalloc(&(host_features.hists[i]), SPAM_SYM_COUNT * 2 * sizeof(int));
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipMalloc failed!", __LINE__);
return 1;
}
}
int class_id = 0;
int dim1 = 0, dim2 = 0;
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
char str_file_path[_MAX_PATH];
float* dev_src = NULL, *host_dev_residuals[KERNELS_COUNT] = {};
float* host_src[KERNELS_COUNT] = {};
hipStream_t streams[STREAM_COUNT + 5];
cudaStatus = hipSetDevice(0);
hipEvent_t start[STREAM_COUNT], stop[STREAM_COUNT];
float **dev_residuals;
int* residuals[HIST_COUNT];
const int MAX_COLS = 1024 + (1024 / 7.0f + KERNEL_RIGHT_BOTTOM_PADD) * KERNEL_RIGHT_BOTTOM_PADD;
const int MAX_ROWS = 1024 + (1024 / 7.0f + KERNEL_RIGHT_BOTTOM_PADD) * KERNEL_RIGHT_BOTTOM_PADD;
const int TILE_HEIGHT = 8;
const int TILE_WEIGHT = 8;
uint3 blocks_res = { 128, 128, 1 }, threads_res = { 2, 2, 1 };
for (size_t i = 0; i < STREAM_COUNT + 5; i++)
{
hipStreamCreate(&streams[i]);
if (i >= STREAM_COUNT)continue;
hipEventCreate(&start[i]);
hipEventCreate(&stop[i]);
}
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipSetDevice failed! Do you have a CUDA-capable GPU installed?", __LINE__);
}
cudaStatus = hipMalloc((void**)&dev_src, MAX_COLS * MAX_ROWS * sizeof(float));
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipMalloc failed!", __LINE__);
return 1;
}
for (size_t i = 0; i < KERNELS_COUNT; i++)
{
cudaStatus = hipHostMalloc((void**)&host_src[i], MAX_COLS * MAX_ROWS * sizeof(float), hipHostMallocMapped | hipHostMallocWriteCombined);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, hipGetErrorString(cudaStatus), __LINE__);
return 1;
}
cudaStatus = hipHostGetDevicePointer(&host_dev_residuals[i], host_src[i], 0);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, hipGetErrorString(cudaStatus), __LINE__);
return 1;
}
}
for (int i = 0; i < HIST_COUNT; i++)
{
cudaStatus = hipMalloc((void**)&residuals[i], 5 * sizeof(int));
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipMalloc for failed!", __LINE__);
}
}
cudaStatus = hipMalloc((void**)&dev_residuals, KERNELS_COUNT * sizeof(float*));
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipMalloc failed!", __LINE__);
}
cudaStatus = hipMemcpy(dev_residuals, host_dev_residuals, KERNELS_COUNT * sizeof(float*), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipMemcpy failed!", __LINE__);
}
//std::fprintf(stderr, "\nline %d: hipMemcpy failed!", __LINE__);
while (!feof(fp_list))
{
host_features.last_index = 0;
host_features.submodel_index = 0;
if (!fscanf(fp_list, "%d\t%s\n", &class_id, str_file_path))
{
printf("\nProcessing is finished");
break;
}
printf("\nclassID = %d, path = %s", class_id, str_file_path);
fp_existance = fopen((string(str_file_path) + string("_Features.fea")).c_str(), "rb");
if (fp_existance)
{
fclose(fp_existance);
printf("\tProcessed in the Past");
//continue;
}
Mat img = imread(str_file_path, CV_LOAD_IMAGE_GRAYSCALE);
if (!img.data || !img.cols || !img.rows)
{
printf("\nNULL image, %s", str_file_path);
continue;
}
if (img.cols > 1018 && img.rows > 1018)
img = img(Rect(0, 0, 1018, 1018));
else if (img.cols > 1018)
img = img(Rect(0, 0, 1018, img.rows));
else if (img.rows > 1018)
img = img(Rect(0, 0, img.cols, 1018));
//copyMakeBorder(img, img, 2, 2, 2, 2, BORDER_CONSTANT, Scalar(0, 0, 0, 0));
dim1 = (int)ceil(sqrt(img.cols / 8.0f));
blocks_res.x = dim1;
threads_res.x = (unsigned int)ceil(img.cols / 8.0f / dim1);
dim2 = (int)ceil(sqrt(img.rows / 8.0f));
blocks_res.y = dim2;
threads_res.y = (unsigned int)ceil(img.rows / 8.0f / dim2);
blocks = blocks_res;
threads = threads_res;
//imshow("1", img); cvWaitKey();
img.convertTo(img, CV_32FC1);
cudaStatus = hipMemcpy(dev_src, img.data, img.rows * img.cols * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, hipGetErrorString(cudaStatus), __LINE__);
continue;
}
cudaStatus = hipDeviceSynchronize();
// do some work on the GPU
float t1 = (float)clock();
int i = 0;
int kernel_index = 1;
hipEventRecord(start[i], streams[STREAM_COUNT + 0]);
for (; i < 8; i++)
{
make_res_1st << <blocks_res, threads_res, 0, streams[STREAM_COUNT + 0] >> >(dev_src, host_dev_residuals[i], img.cols, img.rows, kernel_index++, TILE_WEIGHT, TILE_HEIGHT);
}
cudaStatus = hipStreamSynchronize(streams[STREAM_COUNT + 0]);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, hipGetErrorString(cudaStatus), __LINE__);
//return 1;
}
/*
Mat newImg = Mat(img.rows + 3 * (blocks.y + 1), img.cols + 3 * (blocks.x+1), CV_32F);
cudaStatus = hipMemcpy(newImg.data, host_dev_residuals[0], (img.cols + 3 * (blocks.x + 1)) * (img.rows + 3 * (blocks.y + 1)) * sizeof(float), hipMemcpyDeviceToHost);
*/
/*Mat newImg = Mat(img.rows + 3 * (img.rows / 8.0 + 1), img.cols + 3 * (img.cols / 8.0 + 1), CV_32F);
cudaStatus = hipMemcpy(newImg.data, host_dev_residuals[0], (newImg.cols * newImg.rows) * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, hipGetErrorString(cudaStatus), __LINE__);
continue;
}
//newImg.convertTo(newImg, CV_8UC1);
imwrite("e:\\0--1024.bmp", newImg);*/
/*imshow("2", img); cvWaitKey();
*/
hipEventRecord(stop[0], streams[STREAM_COUNT + 0]); hipEventSynchronize(stop[0]);
make_models_1st(dev_residuals, residuals, host_dev_residuals, dev_MINMAXsymmCoord, dev_SPAMsymmCoord, streams, host_features, img.cols, img.rows);
printf("\n make_models_1st is done\n");
kernel_index = 1; hipEventRecord(start[1], streams[STREAM_COUNT + 1]);
for (; i < 12; i++)
{
make_res_2st << <blocks_res, threads_res, 0, streams[STREAM_COUNT + 1] >> >(dev_src, host_dev_residuals[i], img.cols, img.rows, kernel_index++, TILE_WEIGHT, TILE_HEIGHT);
}
cudaStatus = hipStreamSynchronize(streams[STREAM_COUNT + 1]);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, hipGetErrorString(cudaStatus), __LINE__);
//return 1;
}
hipEventRecord(stop[1], streams[STREAM_COUNT + 1]); hipEventSynchronize(stop[1]);
make_models_2st(dev_residuals, residuals, host_dev_residuals, dev_MINMAXsymmCoord, dev_SPAMsymmCoord, streams, host_features, img.cols, img.rows);
printf("\n make_models_2st is done\n");
kernel_index = 1; hipEventRecord(start[2], streams[STREAM_COUNT + 2]);
for (; i < 20; i++)
{
make_res_3st << <blocks_res, threads_res, 0, streams[STREAM_COUNT + 2] >> >(dev_src, host_dev_residuals[i], img.cols, img.rows, kernel_index++, TILE_WEIGHT, TILE_HEIGHT);
}
cudaStatus = hipStreamSynchronize(streams[STREAM_COUNT + 2]);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, hipGetErrorString(cudaStatus), __LINE__);
//return 1;
}
hipEventRecord(stop[2], streams[STREAM_COUNT + 2]); hipEventSynchronize(stop[2]);
make_models_3st(dev_residuals, residuals, host_dev_residuals, dev_MINMAXsymmCoord, dev_SPAMsymmCoord, streams, host_features, img.cols, img.rows);
printf("\n make_models_3st is done\n");
kernel_index = 1; hipEventRecord(start[3], streams[STREAM_COUNT + 3]);
for (; i < 25; i++)
{
make_res_3x3 << <blocks_res, threads_res, 0, streams[STREAM_COUNT + 3] >> >(dev_src, host_dev_residuals[i], img.cols, img.rows, kernel_index++, TILE_WEIGHT, TILE_HEIGHT);
}
cudaStatus = hipStreamSynchronize(streams[STREAM_COUNT + 3]);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, hipGetErrorString(cudaStatus), __LINE__);
//return 1;
}
hipEventRecord(stop[3], streams[STREAM_COUNT + 3]); hipEventSynchronize(stop[3]);
make_models_3x3(dev_residuals, residuals, host_dev_residuals, dev_MINMAXsymmCoord, dev_SPAMsymmCoord, streams, host_features, img.cols, img.rows);
printf("\n make_models_3x3 is done\n");
kernel_index = 1; hipEventRecord(start[4], streams[STREAM_COUNT + 4]);
for (; i < KERNELS_COUNT; i++)
{
make_res_5x5 << <blocks_res, threads_res, 0, streams[STREAM_COUNT + 4] >> >(dev_src, host_dev_residuals[i], img.cols, img.rows, kernel_index++, TILE_WEIGHT, TILE_HEIGHT);
}
cudaStatus = hipStreamSynchronize(streams[STREAM_COUNT + 4]);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, hipGetErrorString(cudaStatus), __LINE__);
//return 1;
}
hipEventRecord(stop[4], streams[STREAM_COUNT + 4]); hipEventSynchronize(stop[4]);
make_models_5x5(dev_residuals, residuals, host_dev_residuals, dev_MINMAXsymmCoord, dev_SPAMsymmCoord, streams, host_features, img.cols, img.rows);
printf("\n make_models_5x5 is done\n");
for (size_t i = 0; i < STREAM_COUNT; i++)
{
hipStreamSynchronize(streams[i]);
}
/*
*/
compute_submodels(host_features);
hipDeviceSynchronize();
float t2 = (clock() - t1) / CLOCKS_PER_SEC;
printf("\n %d\n", host_features.submodel_index);
for (size_t i = 0; i < COUNT_OF_SUBMODELS; i++)
{
printf("\n%d", host_features.sub_model_index[i]);
cudaStatus = hipMemcpy(hists[i], host_features.hists[host_features.sub_model_index[i]], SPAM_SYM_COUNT * 2 * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: %s\n", __LINE__, hipGetErrorString(cudaStatus) );
break;
}
}
if (cudaStatus != hipSuccess)
{
printf("\nfeature extracting is not successfully");
continue;
}
float sum0 = 0;
for (size_t i = 0; i < 1; i++)
{
printf("\n\n\n\n");
sum0 = 0;
for (size_t j = 0; j < SPAM_SYM_COUNT * 2; j++)
{
sum0 += hists[i][j];
printf(" %d", hists[i][j]);
}
printf("\n\n\n\n");
sum0 /= 4.0f;
for (size_t j = 0; j < SPAM_SYM_COUNT * 2; j++)
{
if (sum0 > 0)
printf(" %f", hists[i][j] / sum0);
}
}
/*for (size_t i = 38; i < 39; i++)
{
printf("\n\n\n\n");
for (size_t j = 0; j < SPAM_SYM_COUNT * 2; j++)
{
printf(" %d", hists[i][j]);
}
}
printf("\n\n", sum);*/
/*for (size_t i = 0; i < STREAM_COUNT; i++)
{
hipEventElapsedTime(&elapsedTime,
start[i], stop[i]);
std::fprintf(stderr, "\nline %d: \nGPU Elapsed Time is %f Second", elapsedTime/1000, t2);
}*/
std::fprintf(stderr, "\nCPU Elapsed Time is %f Seconds\n", t2);
if (save_features(str_file_path, class_id, hists))
{
printf("Saving in file is not successfully \n");
}
}
hipFree(dev_src);
hipFree(dev_MINMAXsymmCoord);
hipFree(dev_SPAMsymmCoord);
for (int i = 0; i < HIST_COUNT; i++)
{
cudaStatus = hipFree(residuals[i]);
}
cudaStatus = hipFree(dev_residuals);
//hipFree(dev_residuals);
for (size_t i = 0; i < HIST_COUNT; i++)
{
hipFree(host_features.hists[i]);
}
for (size_t i = 0; i < COUNT_OF_SUBMODELS; i++)
{
delete hists[i];
}
for (size_t i = 0; i < KERNELS_COUNT; i++)
{
hipHostFree(host_src[i]);
}
for (size_t i = 0; i < STREAM_COUNT + 5; i++)
{
hipStreamDestroy(streams[i]);
}
fclose(fp_list);
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Template helper for elementwise GPU vector addition: allocates device
// buffers, copies inputs, (would) launch a kernel, and copies the result back.
// NOTE(review): the kernel launch below is commented out, so dev_c is never
// written and `c` receives uninitialised device memory — confirm intent.
// Returns the first failing cudaError_t, or hipSuccess.
hipError_t addWithCuda(unsigned int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
unsigned int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipSetDevice failed! Do you have a CUDA-capable GPU installed?", __LINE__);
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipMalloc failed!", __LINE__);
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipMalloc failed!", __LINE__);
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipMalloc failed!", __LINE__);
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipMemcpy failed!", __LINE__);
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipMemcpy failed!", __LINE__);
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
//PreFeature<<<1, 2>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: addKernel launch failed: %s\n", __LINE__, hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipDeviceSynchronize returned error code %d after launching addKernel!\n", __LINE__, cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
std::fprintf(stderr, "\nline %d: hipMemcpy failed!", __LINE__);
goto Error;
}
Error:
// shared cleanup path; hipFree(nullptr) is a no-op for unallocated buffers
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| 2123fa0ad7532d6fe9020822f5982434a897f6c6.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <string>
#include <random>
#include <cstdlib>
#include <cmath>
#include <limits>
#include <opencv.hpp>
#include "residualMaker.cuh"
#include "168modelMaker.cuh"
using namespace cv;
using namespace std;
void free_hists(PSRM_Features &host_features)
{
for (size_t i = 0; i < HIST_COUNT; i++)
{
cudaFree(host_features.hists[i]);
}
}
// Write the extracted features for one image to "<file_path>_Features.fea".
// Format: first line is the class id; then one line per submodel, each bin
// normalised by (sum of bins) / 4.  Returns 0 on success, -1 if the output
// file could not be opened.
int save_features(string file_path, int class_id, int** hists)
{
int ret_val = 0;
file_path += "_Features.fea";
FILE* fp_result = fopen(file_path.c_str(), "wb");
if (fp_result)
{
std::fprintf(fp_result, "%d\n", class_id);
float sum0 = 0;
for (size_t i = 0; i < COUNT_OF_SUBMODELS; i++)
{
sum0 = 0;
for (size_t j = 0; j < SPAM_SYM_COUNT * 2; j++)
{
sum0 += hists[i][j];
}
sum0 /= 4.0f;
for (size_t j = 0; j < SPAM_SYM_COUNT * 2; j++)
{
if (sum0 > 0)
std::fprintf(fp_result, "\t%f", hists[i][j] / sum0);
else
// was `0` (int): passing an int for %f is undefined behaviour
std::fprintf(fp_result, "\t%f", 0.0);
}
std::fprintf(fp_result, "\n");
}
fclose(fp_result);
}
else
{
ret_val = -1;
}
return ret_val;
}
int MINMAXsymmCoord[ 2 * FULL_DIM];
int SPAMsymmCoord[FULL_DIM];
void ComputeSymmCoords()
{
bool symmSign = true, symmReverse = true, symmMinMax = true;
// Preparation of inSymCoord matrix for co-occurrence and symmetrization
int B = 2 * T + 1;
int alreadyUsed;
// MINMAX
alreadyUsed = 0;
//MINMAXsymmCoord = new int[2 * FULL_DIM]; // [0, FULL_DIM-1] = min; [FULL_DIM, 2*FULL_DIM-1] = max
for (int i = 0; i<2 * FULL_DIM; i++) MINMAXsymmCoord[i] = -1;
for (int numIter = 0; numIter < FULL_DIM; numIter++)
{
if (MINMAXsymmCoord[numIter] == -1)
{
int coordReverse = 0;
int num = numIter;
for (int i = 0; i<ORDER; i++)
{
coordReverse += (num % B) * ((int)std::pow((float)B, ORDER - i - 1));
num = num / B;
}
// To the same bin: min(X), max(-X), min(Xreverse), max(-Xreverse)
if (MINMAXsymmCoord[numIter] == -1)
{
MINMAXsymmCoord[numIter] = alreadyUsed; // min(X)
if (symmMinMax) MINMAXsymmCoord[2 * FULL_DIM - numIter - 1] = alreadyUsed; // max(-X)
if (symmReverse) MINMAXsymmCoord[coordReverse] = alreadyUsed; // min(Xreverse)
if ((symmMinMax) && (symmReverse)) MINMAXsymmCoord[2 * FULL_DIM - coordReverse - 1] = alreadyUsed; // max(-Xreverse)
alreadyUsed++;
}
}
}
for (int numIter = 0; numIter < FULL_DIM; numIter++)
{
if (MINMAXsymmCoord[FULL_DIM + numIter] == -1)
{
int coordReverse = 0;
int num = numIter;
for (int i = 0; i<ORDER; i++)
{
coordReverse += (num % B) * ((int)std::pow((float)B, ORDER - i - 1));
num = num / B;
}
// To the same bin: max(X), min(-X), max(Xreverse), min(-Xreverse)
if (MINMAXsymmCoord[FULL_DIM + numIter] == -1)
{
MINMAXsymmCoord[FULL_DIM + numIter] = alreadyUsed; // max(X)
if (symmMinMax) MINMAXsymmCoord[FULL_DIM - numIter - 1] = alreadyUsed; // min(-X)
if (symmReverse) MINMAXsymmCoord[FULL_DIM + coordReverse] = alreadyUsed; // max(Xreverse)
if ((symmMinMax) && (symmReverse)) MINMAXsymmCoord[FULL_DIM - coordReverse - 1] = alreadyUsed; // min(-Xreverse)
alreadyUsed++;
}
}
}
// SPAM
alreadyUsed = 0;
//SPAMsymmCoord = new int[FULL_DIM];
for (int i = 0; i<FULL_DIM; i++) SPAMsymmCoord[i] = -1;
for (int numIter = 0; numIter < FULL_DIM; numIter++)
{
if (SPAMsymmCoord[numIter] == -1)
{
int coordReverse = 0;
int num = numIter;
for (int i = 0; i<ORDER; i++)
{
coordReverse += (num % B) * ((int)std::pow((float)B, ORDER - i - 1));
num = num / B;
}
// To the same bin: X, -X, Xreverse, -Xreverse
SPAMsymmCoord[numIter] = alreadyUsed; // X
if (symmSign) SPAMsymmCoord[FULL_DIM - numIter - 1] = alreadyUsed; // -X
if (symmReverse) SPAMsymmCoord[coordReverse] = alreadyUsed; // Xreverse
if ((symmSign) && (symmReverse)) SPAMsymmCoord[FULL_DIM - coordReverse - 1] = alreadyUsed; // -Xreverse
alreadyUsed++;
}
}
// In order to have the same order of the features as the matlab SRM - shift +1
for (int i = 0; i<FULL_DIM; i++)
{
if (SPAMsymmCoord[i] == alreadyUsed - 1) SPAMsymmCoord[i] = 0;
else SPAMsymmCoord[i]++;
}
}
// Entry point: walks a list file of "<class_id>\t<image_path>" lines,
// computes SRM-style residual histograms for each image on the GPU
// (5 residual-kernel stages, each feeding a make_models_* pass), and
// saves the resulting feature submodels next to the image.
// Returns 0 on success, negative on bad arguments, 1 on fatal CUDA errors.
int main(int argc, char* argv[])
{
    if (argc < 2)
    {
        printf("Please, Enter the List File Path as first argument");
        getchar();
        return -1;
    }
    printf("file list is %s\n", argv[1]);
    FILE* fp_list = fopen(argv[1], "r");
    FILE* fp_existance = 0;
    if (!fp_list)
    {
        printf("the your Entered List File Path is not exist");
        getchar();
        return -2;
    }
    cudaError_t cudaStatus;
    // Build the MINMAX/SPAM symmetry lookup tables on the host, then push
    // them to the device for the histogramming kernels.
    ComputeSymmCoords();
    int* dev_MINMAXsymmCoord;
    int* dev_SPAMsymmCoord;
    cudaStatus = cudaMalloc(&dev_MINMAXsymmCoord, FULL_DIM * 2 * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        // BUGFIX (applies to every error report below): cudaGetErrorString()
        // was previously passed as the printf FORMAT string with __LINE__ as
        // a stray argument -- any '%' in the driver message would have been
        // interpreted as a conversion specifier (undefined behavior), and the
        // line number was never printed.
        std::fprintf(stderr, "\nline %d: %s", __LINE__, cudaGetErrorString(cudaStatus));
    }
    cudaStatus = cudaMemcpy(dev_MINMAXsymmCoord, MINMAXsymmCoord, FULL_DIM * 2 * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: %s", __LINE__, cudaGetErrorString(cudaStatus));
    }
    cudaStatus = cudaMalloc(&dev_SPAMsymmCoord, FULL_DIM * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: %s", __LINE__, cudaGetErrorString(cudaStatus));
    }
    cudaStatus = cudaMemcpy(dev_SPAMsymmCoord, SPAMsymmCoord, FULL_DIM * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: %s", __LINE__, cudaGetErrorString(cudaStatus));
    }
    PSRM_Features host_features = {};
    // Host-side copies of the per-submodel histograms.
    int* hists[COUNT_OF_SUBMODELS];
    for (size_t i = 0; i < COUNT_OF_SUBMODELS; i++)
    {
        hists[i] = new int[SPAM_SYM_COUNT * 2];
    }
    // Device-side histogram buffers, one per histogram slot.
    for (size_t i = 0; i < HIST_COUNT; i++)
    {
        cudaStatus = cudaMalloc(&(host_features.hists[i]), SPAM_SYM_COUNT * 2 * sizeof(int));
        if (cudaStatus != cudaSuccess) {
            std::fprintf(stderr, "\nline %d: cudaMalloc failed!", __LINE__);
            return 1;
        }
    }
    int class_id = 0;
    int dim1 = 0, dim2 = 0;
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    char str_file_path[_MAX_PATH];
    float* dev_src = NULL, *host_dev_residuals[KERNELS_COUNT] = {};
    float* host_src[KERNELS_COUNT] = {};
    cudaStream_t streams[STREAM_COUNT + 5];
    cudaStatus = cudaSetDevice(0);
    cudaEvent_t start[STREAM_COUNT], stop[STREAM_COUNT];
    float** dev_residuals;
    int* residuals[HIST_COUNT];
    // Padded maximum residual-plane size (input images are clamped to 1018x1018).
    const int MAX_COLS = 1024 + (1024 / 7.0f + KERNEL_RIGHT_BOTTOM_PADD) * KERNEL_RIGHT_BOTTOM_PADD;
    const int MAX_ROWS = 1024 + (1024 / 7.0f + KERNEL_RIGHT_BOTTOM_PADD) * KERNEL_RIGHT_BOTTOM_PADD;
    const int TILE_HEIGHT = 8;
    const int TILE_WEIGHT = 8;
    uint3 blocks_res = { 128, 128, 1 }, threads_res = { 2, 2, 1 };
    for (size_t i = 0; i < STREAM_COUNT + 5; i++)
    {
        cudaStreamCreate(&streams[i]);
        if (i >= STREAM_COUNT) continue; // timing events only for the worker streams
        cudaEventCreate(&start[i]);
        cudaEventCreate(&stop[i]);
    }
    if (cudaStatus != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: cudaSetDevice failed! Do you have a CUDA-capable GPU installed?", __LINE__);
    }
    cudaStatus = cudaMalloc((void**)&dev_src, MAX_COLS * MAX_ROWS * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: cudaMalloc failed!", __LINE__);
        return 1;
    }
    // Pinned, mapped host buffers double as device-visible residual planes
    // (zero-copy): the kernels write through host_dev_residuals[i].
    for (size_t i = 0; i < KERNELS_COUNT; i++)
    {
        cudaStatus = cudaHostAlloc((void**)&host_src[i], MAX_COLS * MAX_ROWS * sizeof(float), cudaHostAllocMapped | cudaHostAllocWriteCombined);
        if (cudaStatus != cudaSuccess) {
            std::fprintf(stderr, "\nline %d: %s", __LINE__, cudaGetErrorString(cudaStatus));
            return 1;
        }
        cudaStatus = cudaHostGetDevicePointer(&host_dev_residuals[i], host_src[i], 0);
        if (cudaStatus != cudaSuccess) {
            std::fprintf(stderr, "\nline %d: %s", __LINE__, cudaGetErrorString(cudaStatus));
            return 1;
        }
    }
    for (int i = 0; i < HIST_COUNT; i++)
    {
        cudaStatus = cudaMalloc((void**)&residuals[i], 5 * sizeof(int));
        if (cudaStatus != cudaSuccess) {
            std::fprintf(stderr, "\nline %d: cudaMalloc for failed!", __LINE__);
        }
    }
    cudaStatus = cudaMalloc((void**)&dev_residuals, KERNELS_COUNT * sizeof(float*));
    if (cudaStatus != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: cudaMalloc failed!", __LINE__);
    }
    cudaStatus = cudaMemcpy(dev_residuals, host_dev_residuals, KERNELS_COUNT * sizeof(float*), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: cudaMemcpy failed!", __LINE__);
    }
    while (!feof(fp_list))
    {
        host_features.last_index = 0;
        host_features.submodel_index = 0;
        if (!fscanf(fp_list, "%d\t%s\n", &class_id, str_file_path))
        {
            printf("\nProcessing is finished");
            break;
        }
        printf("\nclassID = %d, path = %s", class_id, str_file_path);
        // Skip detection: a "<path>_Features.fea" file marks a processed image.
        fp_existance = fopen((string(str_file_path) + string("_Features.fea")).c_str(), "rb");
        if (fp_existance)
        {
            fclose(fp_existance);
            printf("\tProcessed in the Past");
            //continue;
        }
        Mat img = imread(str_file_path, CV_LOAD_IMAGE_GRAYSCALE);
        if (!img.data || !img.cols || !img.rows)
        {
            printf("\nNULL image, %s", str_file_path);
            continue;
        }
        // Clamp oversized inputs so padded residual planes fit MAX_COLS/MAX_ROWS.
        if (img.cols > 1018 && img.rows > 1018)
            img = img(Rect(0, 0, 1018, 1018));
        else if (img.cols > 1018)
            img = img(Rect(0, 0, 1018, img.rows));
        else if (img.rows > 1018)
            img = img(Rect(0, 0, img.cols, 1018));
        // Choose a roughly-square launch grid covering ceil(cols/8) x ceil(rows/8) tiles.
        dim1 = (int)ceil(sqrt(img.cols / 8.0f));
        blocks_res.x = dim1;
        threads_res.x = (unsigned int)ceil(img.cols / 8.0f / dim1);
        dim2 = (int)ceil(sqrt(img.rows / 8.0f));
        blocks_res.y = dim2;
        threads_res.y = (unsigned int)ceil(img.rows / 8.0f / dim2);
        blocks = blocks_res;
        threads = threads_res;
        img.convertTo(img, CV_32FC1);
        cudaStatus = cudaMemcpy(dev_src, img.data, img.rows * img.cols * sizeof(float), cudaMemcpyHostToDevice);
        if (cudaStatus != cudaSuccess) {
            std::fprintf(stderr, "\nline %d: %s", __LINE__, cudaGetErrorString(cudaStatus));
            continue;
        }
        cudaStatus = cudaDeviceSynchronize();
        float t1 = (float)clock();
        int i = 0;
        int kernel_index = 1;
        // Stage 1: first-order residual kernels 0..7.
        cudaEventRecord(start[i], streams[STREAM_COUNT + 0]);
        for (; i < 8; i++)
        {
            make_res_1st<<<blocks_res, threads_res, 0, streams[STREAM_COUNT + 0]>>>(dev_src, host_dev_residuals[i], img.cols, img.rows, kernel_index++, TILE_WEIGHT, TILE_HEIGHT);
        }
        cudaStatus = cudaStreamSynchronize(streams[STREAM_COUNT + 0]);
        if (cudaStatus != cudaSuccess) {
            std::fprintf(stderr, "\nline %d: %s", __LINE__, cudaGetErrorString(cudaStatus));
        }
        cudaEventRecord(stop[0], streams[STREAM_COUNT + 0]); cudaEventSynchronize(stop[0]);
        make_models_1st(dev_residuals, residuals, host_dev_residuals, dev_MINMAXsymmCoord, dev_SPAMsymmCoord, streams, host_features, img.cols, img.rows);
        printf("\n make_models_1st is done\n");
        // Stage 2: second-order residual kernels 8..11.
        kernel_index = 1; cudaEventRecord(start[1], streams[STREAM_COUNT + 1]);
        for (; i < 12; i++)
        {
            make_res_2st<<<blocks_res, threads_res, 0, streams[STREAM_COUNT + 1]>>>(dev_src, host_dev_residuals[i], img.cols, img.rows, kernel_index++, TILE_WEIGHT, TILE_HEIGHT);
        }
        cudaStatus = cudaStreamSynchronize(streams[STREAM_COUNT + 1]);
        if (cudaStatus != cudaSuccess) {
            std::fprintf(stderr, "\nline %d: %s", __LINE__, cudaGetErrorString(cudaStatus));
        }
        cudaEventRecord(stop[1], streams[STREAM_COUNT + 1]); cudaEventSynchronize(stop[1]);
        make_models_2st(dev_residuals, residuals, host_dev_residuals, dev_MINMAXsymmCoord, dev_SPAMsymmCoord, streams, host_features, img.cols, img.rows);
        printf("\n make_models_2st is done\n");
        // Stage 3: third-order residual kernels 12..19.
        kernel_index = 1; cudaEventRecord(start[2], streams[STREAM_COUNT + 2]);
        for (; i < 20; i++)
        {
            make_res_3st<<<blocks_res, threads_res, 0, streams[STREAM_COUNT + 2]>>>(dev_src, host_dev_residuals[i], img.cols, img.rows, kernel_index++, TILE_WEIGHT, TILE_HEIGHT);
        }
        cudaStatus = cudaStreamSynchronize(streams[STREAM_COUNT + 2]);
        if (cudaStatus != cudaSuccess) {
            std::fprintf(stderr, "\nline %d: %s", __LINE__, cudaGetErrorString(cudaStatus));
        }
        cudaEventRecord(stop[2], streams[STREAM_COUNT + 2]); cudaEventSynchronize(stop[2]);
        make_models_3st(dev_residuals, residuals, host_dev_residuals, dev_MINMAXsymmCoord, dev_SPAMsymmCoord, streams, host_features, img.cols, img.rows);
        printf("\n make_models_3st is done\n");
        // Stage 4: 3x3 residual kernels 20..24.
        kernel_index = 1; cudaEventRecord(start[3], streams[STREAM_COUNT + 3]);
        for (; i < 25; i++)
        {
            make_res_3x3<<<blocks_res, threads_res, 0, streams[STREAM_COUNT + 3]>>>(dev_src, host_dev_residuals[i], img.cols, img.rows, kernel_index++, TILE_WEIGHT, TILE_HEIGHT);
        }
        cudaStatus = cudaStreamSynchronize(streams[STREAM_COUNT + 3]);
        if (cudaStatus != cudaSuccess) {
            std::fprintf(stderr, "\nline %d: %s", __LINE__, cudaGetErrorString(cudaStatus));
        }
        cudaEventRecord(stop[3], streams[STREAM_COUNT + 3]); cudaEventSynchronize(stop[3]);
        make_models_3x3(dev_residuals, residuals, host_dev_residuals, dev_MINMAXsymmCoord, dev_SPAMsymmCoord, streams, host_features, img.cols, img.rows);
        printf("\n make_models_3x3 is done\n");
        // Stage 5: 5x5 residual kernels 25..KERNELS_COUNT-1.
        kernel_index = 1; cudaEventRecord(start[4], streams[STREAM_COUNT + 4]);
        for (; i < KERNELS_COUNT; i++)
        {
            make_res_5x5<<<blocks_res, threads_res, 0, streams[STREAM_COUNT + 4]>>>(dev_src, host_dev_residuals[i], img.cols, img.rows, kernel_index++, TILE_WEIGHT, TILE_HEIGHT);
        }
        cudaStatus = cudaStreamSynchronize(streams[STREAM_COUNT + 4]);
        if (cudaStatus != cudaSuccess) {
            std::fprintf(stderr, "\nline %d: %s", __LINE__, cudaGetErrorString(cudaStatus));
        }
        cudaEventRecord(stop[4], streams[STREAM_COUNT + 4]); cudaEventSynchronize(stop[4]);
        make_models_5x5(dev_residuals, residuals, host_dev_residuals, dev_MINMAXsymmCoord, dev_SPAMsymmCoord, streams, host_features, img.cols, img.rows);
        printf("\n make_models_5x5 is done\n");
        // Drain every worker stream before combining submodels.
        for (size_t i = 0; i < STREAM_COUNT; i++)
        {
            cudaStreamSynchronize(streams[i]);
        }
        compute_submodels(host_features);
        cudaDeviceSynchronize();
        float t2 = (clock() - t1) / CLOCKS_PER_SEC;
        printf("\n %d\n", host_features.submodel_index);
        // Pull every submodel histogram back to the host.
        for (size_t i = 0; i < COUNT_OF_SUBMODELS; i++)
        {
            printf("\n%d", host_features.sub_model_index[i]);
            cudaStatus = cudaMemcpy(hists[i], host_features.hists[host_features.sub_model_index[i]], SPAM_SYM_COUNT * 2 * sizeof(int), cudaMemcpyDeviceToHost);
            if (cudaStatus != cudaSuccess) {
                std::fprintf(stderr, "\nline %d: %s\n", __LINE__, cudaGetErrorString(cudaStatus));
                break;
            }
        }
        if (cudaStatus != cudaSuccess)
        {
            printf("\nfeature extracting is not successfully");
            continue;
        }
        // Debug dump of the first histogram, raw then normalized by sum/4.
        float sum0 = 0;
        for (size_t i = 0; i < 1; i++)
        {
            printf("\n\n\n\n");
            sum0 = 0;
            for (size_t j = 0; j < SPAM_SYM_COUNT * 2; j++)
            {
                sum0 += hists[i][j];
                printf(" %d", hists[i][j]);
            }
            printf("\n\n\n\n");
            sum0 /= 4.0f;
            for (size_t j = 0; j < SPAM_SYM_COUNT * 2; j++)
            {
                if (sum0 > 0)
                    printf(" %f", hists[i][j] / sum0);
            }
        }
        std::fprintf(stderr, "\nCPU Elapsed Time is %f Seconds\n", t2);
        if (save_features(str_file_path, class_id, hists))
        {
            printf("Saving in file is not successfully \n");
        }
    }
    // Cleanup: device buffers, zero-copy host buffers, streams, host histograms.
    cudaFree(dev_src);
    cudaFree(dev_MINMAXsymmCoord);
    cudaFree(dev_SPAMsymmCoord);
    for (int i = 0; i < HIST_COUNT; i++)
    {
        cudaStatus = cudaFree(residuals[i]);
    }
    cudaStatus = cudaFree(dev_residuals);
    for (size_t i = 0; i < HIST_COUNT; i++)
    {
        cudaFree(host_features.hists[i]);
    }
    for (size_t i = 0; i < COUNT_OF_SUBMODELS; i++)
    {
        // BUGFIX: arrays allocated with new[] must be released with delete[]
        // (plain delete on a new[] pointer is undefined behavior).
        delete[] hists[i];
    }
    for (size_t i = 0; i < KERNELS_COUNT; i++)
    {
        cudaFreeHost(host_src[i]);
    }
    for (size_t i = 0; i < STREAM_COUNT + 5; i++)
    {
        cudaStreamDestroy(streams[i]);
    }
    fclose(fp_list);
    return 0;
}
// Helper that adds two int vectors on the GPU into an unsigned-int result.
// Allocates device buffers, uploads a and b, (the kernel launch itself is
// currently commented out), synchronizes, downloads c, and always releases
// the device buffers before returning the last CUDA status.
cudaError_t addWithCuda(unsigned int *c, const int *a, const int *b, unsigned int size)
{
    int *d_a = 0;
    int *d_b = 0;
    unsigned int *d_c = 0;
    cudaError_t status;

    // Select GPU 0; adjust on multi-GPU machines.
    status = cudaSetDevice(0);
    if (status != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: cudaSetDevice failed! Do you have a CUDA-capable GPU installed?", __LINE__);
        goto Cleanup;
    }

    // Device buffers: one output plus two inputs.
    status = cudaMalloc((void**)&d_c, size * sizeof(int));
    if (status != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: cudaMalloc failed!", __LINE__);
        goto Cleanup;
    }
    status = cudaMalloc((void**)&d_a, size * sizeof(int));
    if (status != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: cudaMalloc failed!", __LINE__);
        goto Cleanup;
    }
    status = cudaMalloc((void**)&d_b, size * sizeof(int));
    if (status != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: cudaMalloc failed!", __LINE__);
        goto Cleanup;
    }

    // Upload both input vectors.
    status = cudaMemcpy(d_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (status != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: cudaMemcpy failed!", __LINE__);
        goto Cleanup;
    }
    status = cudaMemcpy(d_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (status != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: cudaMemcpy failed!", __LINE__);
        goto Cleanup;
    }

    // Kernel launch placeholder (one thread per element).
    //PreFeature<<<1, 2>>>(dev_c, dev_a, dev_b);

    // Launch-configuration errors surface via cudaGetLastError().
    status = cudaGetLastError();
    if (status != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: addKernel launch failed: %s\n", __LINE__, cudaGetErrorString(status));
        goto Cleanup;
    }

    // Execution errors surface at synchronization.
    status = cudaDeviceSynchronize();
    if (status != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: cudaDeviceSynchronize returned error code %d after launching addKernel!\n", __LINE__, status);
        goto Cleanup;
    }

    // Download the result vector.
    status = cudaMemcpy(c, d_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (status != cudaSuccess) {
        std::fprintf(stderr, "\nline %d: cudaMemcpy failed!", __LINE__);
        goto Cleanup;
    }

Cleanup:
    // cudaFree(NULL) is a no-op, so unconditional frees are safe.
    cudaFree(d_c);
    cudaFree(d_a);
    cudaFree(d_b);
    return status;
}
|
dfffc4ec6a1a0082f84079c98d968deed74c3e5c.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// int8 convolution with 4-channel-interleaved NCxHWx tensors (filter CxRSKx).
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
// Implicit-GEMM tiling: threadblock / warp / instruction shapes (M, N, K).
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
// Epilogue: bias add + linear combination, clamped/quantized to int8.
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
// Forward-prop convolution for SM61 using the SIMT (dp4a) opclass;
// int32 accumulation, 2-stage pipeline.
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
// Explicit instantiation so this configuration is compiled into the library.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| dfffc4ec6a1a0082f84079c98d968deed74c3e5c.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// int8 convolution with 4-channel-interleaved NCxHWx tensors (filter CxRSKx).
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
// Implicit-GEMM tiling: threadblock / warp / instruction shapes (M, N, K).
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
// Epilogue: bias add + linear combination, clamped/quantized to int8.
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
// Forward-prop convolution for SM61 using the SIMT (dp4a) opclass;
// int32 accumulation, 2-stage pipeline.
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
// Explicit instantiation so this configuration is compiled into the library.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
745e7091756314d3db51e896e301e25e4f79cf6d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#define N 600
#define BLOCK_SIZE 15
#define ITERS 100
texture<float> texture_a;
texture<float> texture_b;
GLuint buffer_obj;
cudaGraphicsResource* resource;
float *dev_a, *dev_b;
// One Jacobi-style relaxation step: each thread averages the up-to-four
// in-bounds neighbours read via the texture bound to dev_a and writes the
// result into b. Expects a launch covering the full N x N grid.
__global__ void stencil(float* b) {
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = col + row * N;

    float acc = 0.0;
    if (col > 0) {
        acc += tex1Dfetch(texture_a, idx - 1);   // left neighbour
    }
    if (col < N - 1) {
        acc += tex1Dfetch(texture_a, idx + 1);   // right neighbour
    }
    if (row > 0) {
        acc += tex1Dfetch(texture_a, idx - N);   // neighbour above
    }
    if (row < N - 1) {
        acc += tex1Dfetch(texture_a, idx + N);   // neighbour below
    }
    b[idx] = acc / 4.0;
}
// Write-back pass: copies the grid read through texture_b (bound to dev_b)
// back into a, so the next stencil step sees the updated state.
__global__ void copy(float* a) {
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = col + row * N;
    a[idx] = tex1Dfetch(texture_b, idx);
}
// Maps each float cell value into an opaque RGBA pixel: red tracks the
// value, blue its complement, green stays zero.
__global__ void data_to_color(uchar4* display, float* b) {
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = col + row * N;
    const float v = b[idx];
    display[idx].x = v;        // hot -> red
    display[idx].y = 0;
    display[idx].z = 255 - v;  // cold -> blue
    display[idx].w = 255;      // fully opaque
}
// GLUT keyboard callback: on ESC, tear down the CUDA/GL interop state
// (resource, PBO, textures, device buffers) and exit the process.
static void key_func(unsigned char key, int x, int y) {
    if (key == 27) {  // ESC
        hipGraphicsUnregisterResource(resource);
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
        glDeleteBuffers(1, &buffer_obj);
        hipUnbindTexture(texture_a);
        hipUnbindTexture(texture_b);
        hipFree(dev_a);
        hipFree(dev_b);
        exit(0);
    }
}
// GLUT display callback: clears the window and blits the currently bound
// pixel-unpack buffer (the PBO CUDA renders into) as an NxN RGBA image.
static void draw_func() {
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
// data pointer 0 => pixels come from the bound GL_PIXEL_UNPACK_BUFFER
glDrawPixels(N, N, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glutSwapBuffers();
}
// Advances the simulation one frame: maps the GL pixel buffer for CUDA,
// runs one stencil step, copies the result back for the next iteration,
// colorizes into the mapped pixels, then unmaps and requests a redraw.
static void update_data() {
    dim3 grid(N / BLOCK_SIZE, N / BLOCK_SIZE);
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);

    // Map the shared GL buffer into the CUDA address space for this frame.
    uchar4* pixels;
    size_t num_bytes;
    hipGraphicsMapResources(1, &resource, NULL);
    hipGraphicsResourceGetMappedPointer((void**)&pixels, &num_bytes, resource);

    hipLaunchKernelGGL(stencil, grid, block, 0, 0, dev_b);
    hipLaunchKernelGGL(copy, grid, block, 0, 0, dev_a);
    hipLaunchKernelGGL(data_to_color, grid, block, 0, 0, pixels, dev_b);

    hipGraphicsUnmapResources(1, &resource, NULL);
    glutPostRedisplay();
}
// GLUT idle callback: advances the simulation one step between redraws.
static void idle_func() {
update_data();
}
// Sets up the CUDA/OpenGL interop heat simulation: selects a device,
// creates the GLUT window and shared pixel buffer, seeds a hot square in
// the middle of the grid, and hands control to the GLUT main loop.
int main(int argc, char** argv) {
    // BUGFIX: the two N*N float grids are ~1.4 MB each -- previously
    // stack-allocated (overflow risk) and `b` was copied to the device
    // without ever being initialized. Static storage zero-initializes both
    // and keeps them off the stack.
    static float a[N * N];
    static float b[N * N];
    hipDeviceProp_t prop;
    int dev;
    // find the right cuda device to use
    memset(&prop, 0, sizeof(hipDeviceProp_t));
    prop.major = 1;
    prop.minor = 0;
    hipChooseDevice(&dev, &prop);
    hipGLSetGLDevice(dev);
    // initialize the window
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
    glutInitWindowSize(N, N);
    glutCreateWindow("cuda_interop/heat");
    // create a pixel buffer object to be used in OpenGL
    glewInit();
    glGenBuffers(1, &buffer_obj);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, buffer_obj);
    // allocate the NxN 32-bit values on the GPU
    glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, N * N * sizeof(float), NULL,
                 GL_DYNAMIC_DRAW_ARB);
    // notify the runtime that we're gonna share the buffer with cuda
    hipGraphicsGLRegisterBuffer(&resource, buffer_obj, hipGraphicsMapFlagsNone);
    hipMalloc((void**)&dev_a, N * N * sizeof(float));
    hipMalloc((void**)&dev_b, N * N * sizeof(float));
    hipBindTexture(NULL, texture_a, dev_a, N * N * sizeof(float));
    hipBindTexture(NULL, texture_b, dev_b, N * N * sizeof(float));
    // seed: a hot ~100x100 square centered in the grid
    int mid = N / 2;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            if (i > mid - 50 && i < mid + 50 &&
                j > mid - 50 && j < mid + 50) {
                a[j + i * N] = 255;
            } else {
                a[j + i * N] = 0;
            }
        }
    }
    hipMemcpy(dev_a, a, N * N * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, N * N * sizeof(float), hipMemcpyHostToDevice);
    glutKeyboardFunc(key_func);
    glutDisplayFunc(draw_func);
    glutIdleFunc(idle_func);
    glutMainLoop();
    // NOTE: glutMainLoop() does not return; real cleanup happens in key_func.
    hipFree(dev_a);
    hipFree(dev_b);
}
| 745e7091756314d3db51e896e301e25e4f79cf6d.cu | #include <iostream>
#include <stdio.h>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <cuda.h>
#include <cuda_gl_interop.h>
#define N 600
#define BLOCK_SIZE 15
#define ITERS 100
texture<float> texture_a;
texture<float> texture_b;
GLuint buffer_obj;
cudaGraphicsResource* resource;
float *dev_a, *dev_b;
// One Jacobi-style relaxation step: each thread averages the up-to-four
// in-bounds neighbours read via the texture bound to dev_a and writes the
// result into b. Expects a launch covering the full N x N grid.
__global__ void stencil(float* b) {
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = col + row * N;

    float acc = 0.0;
    if (col > 0) {
        acc += tex1Dfetch(texture_a, idx - 1);   // left neighbour
    }
    if (col < N - 1) {
        acc += tex1Dfetch(texture_a, idx + 1);   // right neighbour
    }
    if (row > 0) {
        acc += tex1Dfetch(texture_a, idx - N);   // neighbour above
    }
    if (row < N - 1) {
        acc += tex1Dfetch(texture_a, idx + N);   // neighbour below
    }
    b[idx] = acc / 4.0;
}
// Write-back pass: copies the grid read through texture_b (bound to dev_b)
// back into a, so the next stencil step sees the updated state.
__global__ void copy(float* a) {
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = col + row * N;
    a[idx] = tex1Dfetch(texture_b, idx);
}
// Maps each float cell value into an opaque RGBA pixel: red tracks the
// value, blue its complement, green stays zero.
__global__ void data_to_color(uchar4* display, float* b) {
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = col + row * N;
    const float v = b[idx];
    display[idx].x = v;        // hot -> red
    display[idx].y = 0;
    display[idx].z = 255 - v;  // cold -> blue
    display[idx].w = 255;      // fully opaque
}
// GLUT keyboard callback: on ESC, tear down the CUDA/GL interop state
// (resource, PBO, textures, device buffers) and exit the process.
static void key_func(unsigned char key, int x, int y) {
    if (key == 27) {  // ESC
        cudaGraphicsUnregisterResource(resource);
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
        glDeleteBuffers(1, &buffer_obj);
        cudaUnbindTexture(texture_a);
        cudaUnbindTexture(texture_b);
        cudaFree(dev_a);
        cudaFree(dev_b);
        exit(0);
    }
}
// GLUT display callback: clears the window and blits the currently bound
// pixel-unpack buffer (the PBO CUDA renders into) as an NxN RGBA image.
static void draw_func() {
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
// data pointer 0 => pixels come from the bound GL_PIXEL_UNPACK_BUFFER
glDrawPixels(N, N, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glutSwapBuffers();
}
// Advances the simulation one frame: maps the GL pixel buffer for CUDA,
// runs one stencil step, copies the result back for the next iteration,
// colorizes into the mapped pixels, then unmaps and requests a redraw.
static void update_data() {
    dim3 grid(N / BLOCK_SIZE, N / BLOCK_SIZE);
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);

    // Map the shared GL buffer into the CUDA address space for this frame.
    uchar4* pixels;
    size_t num_bytes;
    cudaGraphicsMapResources(1, &resource, NULL);
    cudaGraphicsResourceGetMappedPointer((void**)&pixels, &num_bytes, resource);

    stencil<<<grid, block>>>(dev_b);
    copy<<<grid, block>>>(dev_a);
    data_to_color<<<grid, block>>>(pixels, dev_b);

    cudaGraphicsUnmapResources(1, &resource, NULL);
    glutPostRedisplay();
}
// GLUT idle callback: advances the simulation one step between redraws.
static void idle_func() {
update_data();
}
// Sets up the CUDA/OpenGL interop heat simulation: selects a device,
// creates the GLUT window and shared pixel buffer, seeds a hot square in
// the middle of the grid, and hands control to the GLUT main loop.
int main(int argc, char** argv) {
    // BUGFIX: the two N*N float grids are ~1.4 MB each -- previously
    // stack-allocated (overflow risk) and `b` was copied to the device
    // without ever being initialized. Static storage zero-initializes both
    // and keeps them off the stack.
    static float a[N * N];
    static float b[N * N];
    cudaDeviceProp prop;
    int dev;
    // find the right cuda device to use
    memset(&prop, 0, sizeof(cudaDeviceProp));
    prop.major = 1;
    prop.minor = 0;
    cudaChooseDevice(&dev, &prop);
    cudaGLSetGLDevice(dev);
    // initialize the window
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
    glutInitWindowSize(N, N);
    glutCreateWindow("cuda_interop/heat");
    // create a pixel buffer object to be used in OpenGL
    glewInit();
    glGenBuffers(1, &buffer_obj);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, buffer_obj);
    // allocate the NxN 32-bit values on the GPU
    glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, N * N * sizeof(float), NULL,
                 GL_DYNAMIC_DRAW_ARB);
    // notify the runtime that we're gonna share the buffer with cuda
    cudaGraphicsGLRegisterBuffer(&resource, buffer_obj, cudaGraphicsMapFlagsNone);
    cudaMalloc((void**)&dev_a, N * N * sizeof(float));
    cudaMalloc((void**)&dev_b, N * N * sizeof(float));
    cudaBindTexture(NULL, texture_a, dev_a, N * N * sizeof(float));
    cudaBindTexture(NULL, texture_b, dev_b, N * N * sizeof(float));
    // seed: a hot ~100x100 square centered in the grid
    int mid = N / 2;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            if (i > mid - 50 && i < mid + 50 &&
                j > mid - 50 && j < mid + 50) {
                a[j + i * N] = 255;
            } else {
                a[j + i * N] = 0;
            }
        }
    }
    cudaMemcpy(dev_a, a, N * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * N * sizeof(float), cudaMemcpyHostToDevice);
    glutKeyboardFunc(key_func);
    glutDisplayFunc(draw_func);
    glutIdleFunc(idle_func);
    glutMainLoop();
    // NOTE: glutMainLoop() does not return; real cleanup happens in key_func.
    cudaFree(dev_a);
    cudaFree(dev_b);
}
|
itof vs cpu.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <cstdlib>
#include <stdlib.h>
#include <sys/time.h>
#include <iostream>
#define arraySize 31 //35 max
#define threads_per_block 1024
#define max_blocks 32768
using namespace std;
__constant__ float coefs[arraySize*2];
// Kernel: brute-force knapsack enumeration. Each thread evaluates one
// candidate bit string (item subset) numbered
//   blockIdx.x * blockDim.x + threadIdx.x + max_blocks * rep,
// accumulating its weight/value from the __constant__ coefs table
// (coefs[0..arraySize) = weights, coefs[arraySize..2*arraySize) = values),
// zeroing the value when the weight exceeds capacity W, then block-reduces
// to the best value (and its subset number) in shared memory.
// NOTE(review): the tree reduction assumes blockDim.x is a power of two
// (launched with threads_per_block = 1024); the num_of_blocks parameter is
// currently unused.
__global__ void single_thread(float *sh_sum_dev,float W, float *str_num_dev, float num_of_blocks,int rep)
{
register float th_w_sum = 0; // accumulated weight of this thread's subset
register float th_v_sum = 0; // accumulated value of this thread's subset
register float th_bin[arraySize]; // decoded 0/1 selection bits
register int n_of_it=rep; // which max_blocks-sized batch this launch covers
__shared__ float sh_maxs[threads_per_block]; // per-thread candidate values
__shared__ float indices[threads_per_block]; // originating thread index per value
indices[threadIdx.x] = threadIdx.x;
__syncthreads();
long int num_to_bin = blockIdx.x * blockDim.x + threadIdx.x;
num_to_bin += max_blocks * n_of_it;
#pragma unroll
for (uint i = 0; i < arraySize; i++)
{
// bit i of the subset number selects item i
th_bin[i] = ((num_to_bin) >> i) % 2;
th_w_sum += th_bin[i] * coefs[i];
th_v_sum += th_bin[i] * coefs[i+arraySize];
}
// infeasible subsets (over capacity) contribute value 0
sh_maxs[threadIdx.x] = (th_w_sum > W) ? 0:th_v_sum;
__syncthreads ();
// power-of-two tree reduction for the block maximum, tracking the winner
for (uint offset = blockDim.x / 2; offset >= 1; offset >>= 1)
{
if (threadIdx.x < offset)
{
if (sh_maxs[threadIdx.x] < sh_maxs[threadIdx.x + offset])
{
sh_maxs[threadIdx.x] = sh_maxs[threadIdx.x + offset];
indices[threadIdx.x] = indices[threadIdx.x + offset];
}
}
__syncthreads ();
}
// write result for this block to global mem
// (index is converted back into an absolute subset number here)
if(threadIdx.x == 0){
sh_sum_dev[blockIdx.x+max_blocks*rep] = sh_maxs[0];
str_num_dev[blockIdx.x+max_blocks*rep] = indices[0] + blockIdx.x * blockDim.x +max_blocks*rep;
}}
// Kernel: second-stage max-reduction over the per-block results produced by
// single_thread. Each block loads blockDim.x (value, subset-number) pairs
// into shared memory -- values at [0, threads_per_block), subset numbers at
// [threads_per_block, 2*threads_per_block) -- reduces to the maximum, and
// writes the winner back to s[blockIdx.x] / str_num_dev[blockIdx.x] so the
// kernel can be applied repeatedly until one result remains.
// NOTE(review): the loop variable `s` shadows the parameter `s`; the
// reduction also assumes blockDim.x is a power of two.
__global__ void
reduction_max (float *s, float *str_num_dev)
{
int ID = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float sdata[threads_per_block*2]; // [values | subset numbers]
sdata[threadIdx.x] = s[ID];
sdata[threadIdx.x + threads_per_block] = str_num_dev[ID];
__syncthreads ();
// do reduction in shared mem
for (uint s = blockDim.x / 2; s > 0; s >>= 1)
{
if (threadIdx.x < s)
{
if (sdata[threadIdx.x] < sdata[threadIdx.x + s])
{
sdata[threadIdx.x] = sdata[threadIdx.x + s];
//sdata[tid+64] = sdata[tid + s+64];
sdata[threadIdx.x + threads_per_block] =
sdata[threadIdx.x + threads_per_block + s];
}
}
__syncthreads ();
}
// write result for this block to global mem
if (threadIdx.x == 0)
{
//if(sdata[0]>s[0]){//}&&(blockIdx.x>0)){
s[blockIdx.x] = sdata[0];
str_num_dev[blockIdx.x] = sdata[threads_per_block];
}
//}
}
// Kernel: expands the winning subset number `a` into individual 0/1 item
// flags, one thread per bit (launched with arraySize threads).
__global__ void
which_string (int a, int *view_dev)
{
    const int bit = threadIdx.x;
    view_dev[bit] = (a >> bit) % 2;
}
// Brute-force 0/1 knapsack benchmark: enumerates all 2^arraySize item
// subsets on the GPU (single_thread evaluation + reduction_max passes),
// decodes and validates the winner, then re-runs the same exhaustive scan
// on the CPU for comparison.
int main(){
    float W = 500; // knapsack capacity
    struct timeval t0, t1;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    float milliseconds1 = 0;
    float milliseconds2 = 0;
    long int strSize_b = pow (2, arraySize);           // number of candidate bit strings
    int num_of_blocks = strSize_b / threads_per_block; // one result slot per block
    float *Sum = new float[1];
    float *sh_sum_dev;
    float weight[31] ={ 5, 10, 17, 19, 20, 23, 26, 30, 32, 38, 40, 44, 47, 50, 55, 56, 56, 60, 62, 66, 70, 75, 77, 80, 81, 90,93,96,101,107,115 };
    float values[31] ={ 10, 13, 16, 22, 30, 25, 55, 90, 110, 115, 130, 120, 150, 170, 194, 199, 194, 199, 217, 230, 248, 250, 264, 271, 279, 286,293,299,305,313,321 };
    // weights followed by values, copied as one table into __constant__ coefs
    float dev_coefs[62] = {5, 10, 17, 19, 20, 23, 26, 30, 32, 38, 40, 44, 47, 50, 55, 56, 56, 60, 62, 66, 70, 75, 77, 80, 81, 90,93,96,101,107,115, 10, 13, 16, 22, 30, 25, 55, 90, 110, 115, 130, 120, 150, 170, 194, 199, 194, 199, 217, 230, 248, 250, 264, 271, 279, 286,293,299,305,313,321 };
    long sec, usec;
    float *str_num_dev;
    float *str_num = new float[1];
    // the full grid is processed in batches of max_blocks blocks
    float N_of_rep;
    N_of_rep = num_of_blocks/max_blocks;
    cout <<"N of items "<<arraySize<<"\n";
    cout<<"N of blocks "<<num_of_blocks<<"\n";
    cout<<"strSize_b = "<<strSize_b<<"\n";
    cout<<"num_of_blocks / threads_per_block = "<<num_of_blocks / threads_per_block<<"\n";
    cout<<"N of repeats = "<<N_of_rep<<"\n";
    cout<<"sing param = "<<num_of_blocks/N_of_rep<<" _ "<< threads_per_block<<"\n";
    cout<<"red param "<<num_of_blocks / threads_per_block<<" , "<<strSize_b/num_of_blocks<<"\n";
    gettimeofday (&t0, NULL);
    hipMalloc ((void **) &sh_sum_dev, num_of_blocks * sizeof (float));
    hipMalloc ((void **) &str_num_dev, num_of_blocks * sizeof (float));
    hipMemcpyToSymbol (coefs, dev_coefs, 2*arraySize * sizeof (float));
    // Phase 1: evaluate every subset, batch by batch.
    hipEventRecord(start);
    int sing_blocks = num_of_blocks/N_of_rep;
    for(int i = 0;i<N_of_rep;i++){
        hipLaunchKernelGGL(single_thread, dim3(sing_blocks), dim3(threads_per_block), 0, 0, sh_sum_dev, W, str_num_dev, num_of_blocks, i);
    }
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&milliseconds1, start, stop);
    // Phase 2: reduce the per-block maxima down to a single winner.
    hipEventRecord(start);
    int k = num_of_blocks/threads_per_block;
    while(k>1){
        hipLaunchKernelGGL(reduction_max, dim3(k), dim3(threads_per_block), 0, 0, sh_sum_dev, str_num_dev);
        if(k>=threads_per_block){k/=threads_per_block;}
        else break;
    }
    hipLaunchKernelGGL(reduction_max, dim3(1), dim3(k), 0, 0, sh_sum_dev, str_num_dev);
    // BUGFIX: `stop` was never re-recorded for this phase, so the elapsed
    // time below was previously measured against the stale event from
    // phase one (garbage/negative result).
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&milliseconds2, start, stop);
    hipMemcpy (Sum, sh_sum_dev, sizeof (float), hipMemcpyDeviceToHost);
    hipMemcpy (str_num, str_num_dev, sizeof (float), hipMemcpyDeviceToHost);
    gettimeofday (&t1, 0);
    sec = (t1.tv_sec - t0.tv_sec);
    usec = t1.tv_usec - t0.tv_usec;
    cout << "GPU time = " << sec << " sec, " << usec << " microsec\n";
    cout<<"single_thread time = "<<milliseconds1/1000.0<<"sec\n";
    cout<<"reduction time = "<<milliseconds2/1000.0<<"sec\n";
    cout << "Acheived maximal sum = " << Sum[0] << "\n";
    cout << "String number " << str_num[0] << "\n";
    // Decode the winning subset number into per-item 0/1 flags on the GPU.
    int *view = new int[arraySize];
    int *view_dev;
    hipMalloc ((void **) &view_dev, arraySize * sizeof (int));
    hipLaunchKernelGGL(which_string, dim3(1), dim3(arraySize), 0, 0, str_num[0], view_dev);
    hipMemcpy (view, view_dev, arraySize * sizeof (int),
               hipMemcpyDeviceToHost);
    for (int i = 0; i < arraySize; i++)
    {
        cout << view[i] << " ";
    } cout << "\n";
    //check: recompute value and weight of the winner on the host
    float checksum = 0;
    for (int i = 0; i < arraySize; i++)
    {
        checksum += values[i] * view[i];
    } cout << "Validation sum = " << checksum << "\n";
    checksum = 0;
    for (int i = 0; i < arraySize; i++)
    {
        checksum += weight[i] * view[i];
    } cout << "Weight = " << checksum << "\n";
    hipFree (sh_sum_dev);
    hipFree (str_num_dev);
    // BUGFIX: the previous hipFree(coefs) was removed -- `coefs` is a
    // __constant__ symbol, not a device allocation, so freeing it is invalid.
    hipFree (view_dev);
    // CPU reference: same exhaustive scan, sequentially.
    cout<<"CPU version:\n";
    float *cpu_bin = new float[arraySize];
    int max = 0;
    int tmp = 0;
    int cpu_str = 0;
    int cap = 0; // BUGFIX: was uninitialized and read (+=) on the first subset
    gettimeofday (&t0, NULL);
    for(long int i = 0;i<num_of_blocks;i++){
        for(int j = 0; j<threads_per_block;j++){
            int tobin = i*threads_per_block+j;
            for(int k = 0; k<arraySize;k++){
                cpu_bin[k] = tobin%2;
                tobin>>=1;
                tmp += cpu_bin[k]*values[k];
                cap += cpu_bin[k]*weight[k];
            }
            if((cap<=W)&&(tmp>max)){max = tmp;cpu_str = i*threads_per_block+j;}
            tmp = 0; cap = 0;
        }
    }
    gettimeofday (&t1, 0);
    sec = (t1.tv_sec - t0.tv_sec);
    usec = t1.tv_usec - t0.tv_usec;
    cout<<"Max = "<<max<<"\n"<<"STR = "<<cpu_str<<"\n";
    cout << "CPU time = " << sec << " sec, " << usec << " microsec\n";
    // Release host allocations (previously leaked) and timing events.
    delete[] Sum;
    delete[] str_num;
    delete[] view;
    delete[] cpu_bin;
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return 0;
}
| itof vs cpu.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <cstdlib>
#include <stdlib.h>
#include <sys/time.h>
#include <iostream>
#define arraySize 31 //35 max
#define threads_per_block 1024
#define max_blocks 32768
using namespace std;
__constant__ float coefs[arraySize*2];
// Knapsack brute-force kernel: each thread scores one candidate item subset.
// The subset is the binary expansion of the thread's global candidate number;
// bit i selects item i. __constant__ coefs holds weights in [0, arraySize)
// and values in [arraySize, 2*arraySize).
// Launched N_of_rep times with rep = 0..N_of_rep-1; per-block winners go to
// sh_sum_dev / str_num_dev at index blockIdx.x + max_blocks*rep.
__global__ void single_thread(float *sh_sum_dev,float W, float *str_num_dev, float num_of_blocks,int rep)
{
// Per-thread running weight / value totals for this thread's subset.
register float th_w_sum = 0;
register float th_v_sum = 0;
register float th_bin[arraySize];
register int n_of_it=rep;
// Block-wide reduction scratch: candidate values plus the thread index that
// produced each candidate (kept so the winning subset can be reconstructed).
__shared__ float sh_maxs[threads_per_block];
__shared__ float indices[threads_per_block];
indices[threadIdx.x] = threadIdx.x;
__syncthreads();
// Global candidate number, shifted by the per-repeat offset.
// NOTE(review): each launch covers sing_blocks * blockDim.x candidates, yet
// the repeat offset advances by only max_blocks — presumably it was meant to
// be max_blocks * blockDim.x; as written the repeats overlap. TODO confirm.
long int num_to_bin = blockIdx.x * blockDim.x + threadIdx.x;
num_to_bin += max_blocks * n_of_it;
#pragma unroll
for (uint i = 0; i < arraySize; i++)
{
// Bit i of the candidate number decides whether item i is taken.
th_bin[i] = ((num_to_bin) >> i) % 2;
th_w_sum += th_bin[i] * coefs[i];
th_v_sum += th_bin[i] * coefs[i+arraySize];
}
// Overweight subsets are disqualified by scoring them 0.
sh_maxs[threadIdx.x] = (th_w_sum > W) ? 0:th_v_sum;
__syncthreads ();
// Tree max-reduction over the block, carrying the winning thread index along.
// Assumes blockDim.x is a power of two (threads_per_block == 1024).
for (uint offset = blockDim.x / 2; offset >= 1; offset >>= 1)
{
if (threadIdx.x < offset)
{
if (sh_maxs[threadIdx.x] < sh_maxs[threadIdx.x + offset])
{
sh_maxs[threadIdx.x] = sh_maxs[threadIdx.x + offset];
indices[threadIdx.x] = indices[threadIdx.x + offset];
}
}
__syncthreads ();
}
// write result for this block to global mem
// NOTE(review): subset numbers are stored in float; integers above 2^24 lose
// precision, which matters here since arraySize can exceed 24 — verify.
if(threadIdx.x == 0){
sh_sum_dev[blockIdx.x+max_blocks*rep] = sh_maxs[0];
str_num_dev[blockIdx.x+max_blocks*rep] = indices[0] + blockIdx.x * blockDim.x +max_blocks*rep;
}}
// Pairwise max-reduction over (value, subset-number) pairs in global memory.
// Each block reduces blockDim.x consecutive entries of s / str_num_dev down
// to one pair written back at index blockIdx.x, so repeated launches with a
// shrinking grid funnel the global maximum into s[0] / str_num_dev[0].
// Values live in sdata[0..threads_per_block); their paired subset numbers sit
// at a fixed +threads_per_block offset in the same shared array.
__global__ void
reduction_max (float *s, float *str_num_dev)
{
int ID = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float sdata[threads_per_block*2];
sdata[threadIdx.x] = s[ID];
sdata[threadIdx.x + threads_per_block] = str_num_dev[ID];
__syncthreads ();
// do reduction in shared mem
// NOTE(review): the loop variable `s` shadows the pointer parameter `s`;
// legal C++, but error-prone — consider renaming one of them.
// Assumes blockDim.x is a power of two; an odd block size would skip the
// top element.
for (uint s = blockDim.x / 2; s > 0; s >>= 1)
{
if (threadIdx.x < s)
{
if (sdata[threadIdx.x] < sdata[threadIdx.x + s])
{
sdata[threadIdx.x] = sdata[threadIdx.x + s];
//sdata[tid+64] = sdata[tid + s+64];
sdata[threadIdx.x + threads_per_block] =
sdata[threadIdx.x + threads_per_block + s];
}
}
__syncthreads ();
}
// write result for this block to global mem
if (threadIdx.x == 0)
{
//if(sdata[0]>s[0]){//}&&(blockIdx.x>0)){
s[blockIdx.x] = sdata[0];
str_num_dev[blockIdx.x] = sdata[threads_per_block];
}
//}
}
// Decode subset number `a` into per-item 0/1 flags: lane t extracts bit t.
// Launched as a single block with one thread per item.
__global__ void
which_string (int a, int *view_dev)
{
const int item = threadIdx.x;
const int taken = (a >> item) % 2;
view_dev[item] = taken;
}
// Host driver: brute-forces the 0/1 knapsack (capacity W, arraySize items)
// over all 2^arraySize subsets on the GPU, reduces to the best
// (value, subset) pair, expands and validates the winning selection, then
// repeats the whole search on the CPU for timing comparison.
int main(){
float W = 500; // knapsack capacity
struct timeval t0, t1;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds1 = 0;
float milliseconds2 = 0;
// 2^arraySize candidate subsets; exact integer shift instead of pow() so no
// floating-point rounding can creep into the count.
long int strSize_b = 1L << arraySize;
int num_of_blocks = strSize_b / threads_per_block;
float *Sum = new float[1];
float *sh_sum_dev;
float weight[31] ={ 5, 10, 17, 19, 20, 23, 26, 30, 32, 38, 40, 44, 47, 50, 55, 56, 56, 60, 62, 66, 70, 75, 77, 80, 81, 90,93,96,101,107,115 };
float values[31] ={ 10, 13, 16, 22, 30, 25, 55, 90, 110, 115, 130, 120, 150, 170, 194, 199, 194, 199, 217, 230, 248, 250, 264, 271, 279, 286,293,299,305,313,321 };
// First half: weights; second half: values (copied to __constant__ coefs).
float dev_coefs[62] = {5, 10, 17, 19, 20, 23, 26, 30, 32, 38, 40, 44, 47, 50, 55, 56, 56, 60, 62, 66, 70, 75, 77, 80, 81, 90,93,96,101,107,115, 10, 13, 16, 22, 30, 25, 55, 90, 110, 115, 130, 120, 150, 170, 194, 199, 194, 199, 217, 230, 248, 250, 264, 271, 279, 286,293,299,305,313,321 };
long sec, usec;
float *str_num_dev;
float *str_num = new float[1];
// Number of launch chunks needed to keep each grid at <= max_blocks blocks.
float N_of_rep;
N_of_rep = num_of_blocks/max_blocks;
cout <<"N of items "<<arraySize<<"\n";
cout<<"N of blocks "<<num_of_blocks<<"\n";
cout<<"strSize_b = "<<strSize_b<<"\n";
cout<<"num_of_blocks / threads_per_block = "<<num_of_blocks / threads_per_block<<"\n";
cout<<"N of repeats = "<<N_of_rep<<"\n";
cout<<"sing param = "<<num_of_blocks/N_of_rep<<" _ "<< threads_per_block<<"\n";
cout<<"red param "<<num_of_blocks / threads_per_block<<" , "<<strSize_b/num_of_blocks<<"\n";
gettimeofday (&t0, NULL);
cudaMalloc ((void **) &sh_sum_dev, num_of_blocks * sizeof (float));
cudaMalloc ((void **) &str_num_dev, num_of_blocks * sizeof (float));
cudaMemcpyToSymbol (coefs, dev_coefs, 2*arraySize * sizeof (float));
// Phase 1: score every subset, chunked so each grid stays at max_blocks.
cudaEventRecord(start);
int sing_blocks = num_of_blocks/N_of_rep;
for(int i = 0;i<N_of_rep;i++){
single_thread <<< sing_blocks, threads_per_block >>> (sh_sum_dev, W, str_num_dev, num_of_blocks,i);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds1, start, stop);
// Phase 2: reduce the per-block winners down to a single pair at index 0.
cudaEventRecord(start);
int k = num_of_blocks/threads_per_block;
while(k>1){
reduction_max <<<k, threads_per_block>>> (sh_sum_dev, str_num_dev);
if(k>=threads_per_block){k/=threads_per_block;}
else break;
}
reduction_max <<<1,k>>> (sh_sum_dev, str_num_dev);
// BUG FIX: re-record `stop` after the reduction launches. Previously the
// event still held its timestamp from before `start` was re-recorded, so
// the second cudaEventElapsedTime measured an invalid interval.
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds2, start, stop);
cudaMemcpy (Sum, sh_sum_dev, sizeof (float), cudaMemcpyDeviceToHost);
cudaMemcpy (str_num, str_num_dev, sizeof (float), cudaMemcpyDeviceToHost);
gettimeofday (&t1, 0);
sec = (t1.tv_sec - t0.tv_sec);
usec = t1.tv_usec - t0.tv_usec;
cout << "GPU time = " << sec << " sec, " << usec << " microsec\n";
cout<<"single_thread time = "<<milliseconds1/1000.0<<"sec\n";
cout<<"reduction time = "<<milliseconds2/1000.0<<"sec\n";
cout << "Acheived maximal sum = " << Sum[0] << "\n";
cout << "String number " << str_num[0] << "\n";
// Expand the winning subset number into per-item 0/1 flags on the device.
int *view = new int[arraySize];
int *view_dev;
cudaMalloc ((void **) &view_dev, arraySize * sizeof (int));
which_string <<< 1, arraySize >>> (str_num[0], view_dev);
cudaMemcpy (view, view_dev, arraySize * sizeof (int),
cudaMemcpyDeviceToHost);
for (int i = 0; i < arraySize; i++)
{
cout << view[i] << " ";
} cout << "\n";
// check: recompute value and weight of the reported subset on the host
float checksum = 0;
for (int i = 0; i < arraySize; i++)
{
checksum += values[i] * view[i];
} cout << "Validation sum = " << checksum << "\n";
checksum = 0;
for (int i = 0; i < arraySize; i++)
{
checksum += weight[i] * view[i];
} cout << "Weight = " << checksum << "\n";
cudaFree (sh_sum_dev);
cudaFree (str_num_dev);
// BUG FIX: removed cudaFree(coefs) — `coefs` is a __constant__ symbol, not
// memory from cudaMalloc, so freeing it is invalid.
cudaFree (view_dev);
cout<<"CPU version:\n";
// CPU reference: exhaustive scan of every subset number.
float *cpu_bin = new float[arraySize];
int max = 0;
int tmp = 0;
int cpu_str = 0;
int cap = 0; // BUG FIX: was uninitialized and read on the first iteration (UB)
gettimeofday (&t0, NULL);
for(long int i = 0;i<num_of_blocks;i++){
for(int j = 0; j<threads_per_block;j++){
int tobin = i*threads_per_block+j;
for(int k = 0; k<arraySize;k++){
cpu_bin[k] = tobin%2;
tobin>>=1;
tmp += cpu_bin[k]*values[k];
cap += cpu_bin[k]*weight[k];
}
if((cap<=W)&&(tmp>max)){max = tmp;cpu_str = i*threads_per_block+j;}
tmp = 0; cap = 0;
}
}
gettimeofday (&t1, 0);
sec = (t1.tv_sec - t0.tv_sec);
usec = t1.tv_usec - t0.tv_usec;
cout<<"Max = "<<max<<"\n"<<"STR = "<<cpu_str<<"\n";
cout << "CPU time = " << sec << " sec, " << usec << " microsec\n";
// Release host/device resources (previously leaked).
delete[] cpu_bin;
delete[] view;
delete[] str_num;
delete[] Sum;
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
cf36ae2e9d1ca3b39e5d112a334cb7b8d54b39fe.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <helper_cuda.h>
/* Example code taken from "Cheng J. et al. Professional CUDA C Programming"
* An example of using shared memory to transpose square thread coordinates
* of a CUDA grid into a global memory array. Different kernels below
* demonstrate performing reads and writes with different ordering, as well as
* optimizing using memory padding.
*/
#define BDIMX 4
#define BDIMY 4
#define IPAD 1
// Print `size` ints from host array `in` on one line, prefixed by `msg`.
// Improvements: const-correct parameters (the function only reads its
// inputs; string literals now bind legally to `msg` under C++11+) and a
// single flush after the full line instead of one per element.
void printData(const char *msg, const int *in, const int size)
{
printf("%s: ", msg);
for (int i = 0; i < size; i++)
{
printf("%5d", in[i]);
}
printf("\n");
fflush(stdout);
}
// Demo kernel: write the shared tile by row, then read it back by row.
// Launched as a single BDIMY x BDIMX block; out receives 0..BDIMX*BDIMY-1.
// Row-wise access in both phases puts consecutive lanes in consecutive
// banks, so neither phase has shared-memory bank conflicts.
__global__ void setRowReadRow (int *out)
{
// static shared memory
__shared__ int tile[BDIMY][BDIMX];
// mapping from thread index to global memory index
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
// shared memory store operation
tile[threadIdx.y][threadIdx.x] = idx;
// wait for all threads to complete
__syncthreads();
// shared memory load operation
out[idx] = tile[threadIdx.y][threadIdx.x] ;
}
// Demo kernel: write the shared tile by column, then read it back by column
// (note the swapped [BDIMX][BDIMY] tile shape). Column-wise indexing makes
// consecutive lanes stride through shared memory — the bank-conflict pattern
// this example exists to demonstrate.
__global__ void setColReadCol (int *out)
{
// static shared memory
__shared__ int tile[BDIMX][BDIMY];
// mapping from thread index to global memory index
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
// shared memory store operation
tile[threadIdx.x][threadIdx.y] = idx;
// wait for all threads to complete
__syncthreads();
// shared memory load operation
out[idx] = tile[threadIdx.x][threadIdx.y];
}
// Demo kernel: write the shared tile by row but read it back transposed
// (by column), so `out` holds the transposed thread ids. The column-wise
// read phase strides across banks — the conflict case the padded variant
// below (setRowReadColPad) fixes.
__global__ void setRowReadCol(int *out)
{
// static shared memory
__shared__ int tile[BDIMY][BDIMX];
// mapping from thread index to global memory index
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
// shared memory store operation
tile[threadIdx.y][threadIdx.x] = idx;
// wait for all threads to complete
__syncthreads();
// shared memory load operation
out[idx] = tile[threadIdx.x][threadIdx.y];
}
// Same store-row / load-column transpose as setRowReadCol, but with the
// inner tile dimension padded by IPAD so column accesses land in different
// banks, removing the read-phase bank conflicts.
__global__ void setRowReadColPad(int *out)
{
// static shared memory
__shared__ int tile[BDIMY][BDIMX + IPAD];
// mapping from thread index to global memory offset
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
// shared memory store operation
tile[threadIdx.y][threadIdx.x] = idx;
// wait for all threads to complete
__syncthreads();
// shared memory load operation
out[idx] = tile[threadIdx.x][threadIdx.y];
}
// Driver: runs each shared-memory access-pattern kernel on a single
// BDIMX x BDIMY block and prints the resulting global array so the
// row / column / padded orderings can be compared by eye.
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
printf("%s at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
checkCudaErrors(hipSetDevice(dev));
hipSharedMemConfig pConfig;
checkCudaErrors(hipDeviceGetSharedMemConfig ( &pConfig ));
// NOTE(review): compares the enum against the magic literal 1 — presumably
// the four-byte bank-size enumerator; the named constant would be clearer.
printf("with Bank Mode:%s ", pConfig == 1 ? "4-Byte" : "8-Byte");
// set up array size 2048
int nx = BDIMX;
int ny = BDIMY;
// Print the result arrays unless argv[1] is supplied and evaluates to 0.
bool iprintf = 1;
if (argc > 1) iprintf = atoi(argv[1]);
size_t nBytes = nx * ny * sizeof(int);
// execution configuration
dim3 block (BDIMX, BDIMY);
dim3 grid (1, 1);
printf("<<< grid (%d,%d) block (%d,%d)>>>\n", grid.x, grid.y, block.x,
block.y);
// allocate device memory
int *d_C;
checkCudaErrors(hipMalloc((int**)&d_C, nBytes));
int *gpuRef = (int *)malloc(nBytes);
// Run each kernel on a zeroed buffer and copy the result back for display.
checkCudaErrors(hipMemset(d_C, 0, nBytes));
hipLaunchKernelGGL(( setColReadCol), dim3(grid), dim3(block), 0, 0, d_C);
checkCudaErrors(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprintf) printData("set col read col ", gpuRef, nx * ny);
checkCudaErrors(hipMemset(d_C, 0, nBytes));
hipLaunchKernelGGL(( setRowReadRow), dim3(grid), dim3(block), 0, 0, d_C);
checkCudaErrors(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprintf) printData("set row read row ", gpuRef, nx * ny);
checkCudaErrors(hipMemset(d_C, 0, nBytes));
hipLaunchKernelGGL(( setRowReadCol), dim3(grid), dim3(block), 0, 0, d_C);
checkCudaErrors(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprintf) printData("set row read col ", gpuRef, nx * ny);
checkCudaErrors(hipMemset(d_C, 0, nBytes));
hipLaunchKernelGGL(( setRowReadColPad), dim3(grid), dim3(block), 0, 0, d_C);
checkCudaErrors(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprintf) printData("set row read col pad", gpuRef, nx * ny);
// free host and device memory
checkCudaErrors(hipFree(d_C));
free(gpuRef);
// reset device
checkCudaErrors(hipDeviceReset());
return EXIT_SUCCESS;
} | cf36ae2e9d1ca3b39e5d112a334cb7b8d54b39fe.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <helper_cuda.h>
/* Example code taken from "Cheng J. et al. Professional CUDA C Programming"
* An example of using shared memory to transpose square thread coordinates
* of a CUDA grid into a global memory array. Different kernels below
* demonstrate performing reads and writes with different ordering, as well as
* optimizing using memory padding.
*/
#define BDIMX 4
#define BDIMY 4
#define IPAD 1
// Print `size` ints from host array `in` on one line, prefixed by `msg`.
// Improvements: const-correct parameters (the function only reads its
// inputs; string literals now bind legally to `msg` under C++11+) and a
// single flush after the full line instead of one per element.
void printData(const char *msg, const int *in, const int size)
{
printf("%s: ", msg);
for (int i = 0; i < size; i++)
{
printf("%5d", in[i]);
}
printf("\n");
fflush(stdout);
}
// Store the shared tile by row and read it back by row; out ends up holding
// 0..BDIMX*BDIMY-1. Conflict-free in both phases.
__global__ void setRowReadRow (int *out)
{
// Block-local scratch tile in static shared memory, one int per thread.
__shared__ int tile[BDIMY][BDIMX];
const unsigned int tx = threadIdx.x;
const unsigned int ty = threadIdx.y;
// Flatten the 2D thread coordinate into the global output offset.
const unsigned int idx = ty * blockDim.x + tx;
// Row-wise store, block barrier, row-wise load.
tile[ty][tx] = idx;
__syncthreads();
out[idx] = tile[ty][tx];
}
// Store the shared tile by column and read it back by column (tile shape is
// the transposed [BDIMX][BDIMY]); out still holds 0..BDIMX*BDIMY-1.
__global__ void setColReadCol (int *out)
{
// Scratch tile with swapped dimensions relative to the row-major variant.
__shared__ int tile[BDIMX][BDIMY];
const unsigned int tx = threadIdx.x;
const unsigned int ty = threadIdx.y;
const unsigned int idx = ty * blockDim.x + tx;
// Column-wise store, block barrier, column-wise load of the same element.
tile[tx][ty] = idx;
__syncthreads();
out[idx] = tile[tx][ty];
}
// Store the shared tile by row but read it back transposed (by column), so
// out receives the transposed thread ids.
__global__ void setRowReadCol(int *out)
{
__shared__ int tile[BDIMY][BDIMX];
const unsigned int tx = threadIdx.x;
const unsigned int ty = threadIdx.y;
const unsigned int idx = ty * blockDim.x + tx;
// Row-wise store...
tile[ty][tx] = idx;
__syncthreads();
// ...column-wise (transposed) load.
out[idx] = tile[tx][ty];
}
// Same store-row / load-column transpose as setRowReadCol, but the inner
// tile dimension is padded by IPAD so column reads are spread across banks.
__global__ void setRowReadColPad(int *out)
{
// Padded scratch tile: the extra IPAD column(s) shift each row's start bank.
__shared__ int tile[BDIMY][BDIMX + IPAD];
const unsigned int tx = threadIdx.x;
const unsigned int ty = threadIdx.y;
const unsigned int idx = ty * blockDim.x + tx;
// Row-wise store, block barrier, transposed (column-wise) load.
tile[ty][tx] = idx;
__syncthreads();
out[idx] = tile[tx][ty];
}
// Driver: runs each shared-memory access-pattern kernel on a single
// BDIMX x BDIMY block and prints the resulting global array so the
// row / column / padded orderings can be compared by eye.
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
checkCudaErrors(cudaSetDevice(dev));
cudaSharedMemConfig pConfig;
checkCudaErrors(cudaDeviceGetSharedMemConfig ( &pConfig ));
// NOTE(review): compares the enum against the magic literal 1 — presumably
// cudaSharedMemBankSizeFourByte; the named constant would be clearer.
printf("with Bank Mode:%s ", pConfig == 1 ? "4-Byte" : "8-Byte");
// set up array size 2048
int nx = BDIMX;
int ny = BDIMY;
// Print the result arrays unless argv[1] is supplied and evaluates to 0.
bool iprintf = 1;
if (argc > 1) iprintf = atoi(argv[1]);
size_t nBytes = nx * ny * sizeof(int);
// execution configuration
dim3 block (BDIMX, BDIMY);
dim3 grid (1, 1);
printf("<<< grid (%d,%d) block (%d,%d)>>>\n", grid.x, grid.y, block.x,
block.y);
// allocate device memory
int *d_C;
checkCudaErrors(cudaMalloc((int**)&d_C, nBytes));
int *gpuRef = (int *)malloc(nBytes);
// Run each kernel on a zeroed buffer and copy the result back for display.
checkCudaErrors(cudaMemset(d_C, 0, nBytes));
setColReadCol<<<grid, block>>>(d_C);
checkCudaErrors(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprintf) printData("set col read col ", gpuRef, nx * ny);
checkCudaErrors(cudaMemset(d_C, 0, nBytes));
setRowReadRow<<<grid, block>>>(d_C);
checkCudaErrors(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprintf) printData("set row read row ", gpuRef, nx * ny);
checkCudaErrors(cudaMemset(d_C, 0, nBytes));
setRowReadCol<<<grid, block>>>(d_C);
checkCudaErrors(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprintf) printData("set row read col ", gpuRef, nx * ny);
checkCudaErrors(cudaMemset(d_C, 0, nBytes));
setRowReadColPad<<<grid, block>>>(d_C);
checkCudaErrors(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprintf) printData("set row read col pad", gpuRef, nx * ny);
// free host and device memory
checkCudaErrors(cudaFree(d_C));
free(gpuRef);
// reset device
checkCudaErrors(cudaDeviceReset());
return EXIT_SUCCESS;
} |
2217d1bbb11269754c1d036a7b62ca2efaefacab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Per-element error: err[i] = aFourth[i] - onehot(expectedOutput)[i].
// The bool (i + 1 == expectedOutput) converts to 0/1, so the target is a
// one-hot vector whose classes are numbered from 1.
// Launched with one thread per output element — presumably blockDim.x equals
// the output size; confirm at the call site.
__global__ void calculateError(float *aFourth, float *err, int expectedOutput)
{
int i = threadIdx.x;
err[i] = aFourth[i] - (i + 1 == expectedOutput);
} | 2217d1bbb11269754c1d036a7b62ca2efaefacab.cu | #include "includes.h"
// Per-element error: err[i] = aFourth[i] - onehot(expectedOutput)[i].
// The bool (i + 1 == expectedOutput) converts to 0/1, so the target is a
// one-hot vector whose classes are numbered from 1.
// Launched with one thread per output element — presumably blockDim.x equals
// the output size; confirm at the call site.
__global__ void calculateError(float *aFourth, float *err, int expectedOutput)
{
int i = threadIdx.x;
err[i] = aFourth[i] - (i + 1 == expectedOutput);
} |
ced3930b29ff0f2b486b9c96ddad3b77d39eb59a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
The implementation of this file is based on skipLayerNorm plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Modifications: Add SkipLayerNormKernelVec to
// leverage vectorized load/write.
// and templatize ComputeSkipLayerNorm for different
// data types.
// Copyright (c) Advanced Micro Devices, Inc. All rights reserved.
// Licensed under the MIT License.
#include "contrib_ops/cuda/bert/layer_norm.cuh"
#include "contrib_ops/cuda/bert/skip_layer_norm_impl.h"
#include <hip/hip_fp16.h>
namespace onnxruntime {
namespace contrib {
namespace cuda {
namespace {
// Convert a host-side float parameter to the kernel compute type T:
// identity for float, round-to-nearest-even conversion for half.
template <typename T>
T maybe2half(float x);
template <>
float maybe2half(float x) {
return x;
}
template <>
half maybe2half(float x) {
return __float2half_rn(x);
}
// Using only power of 2 numbers will lead to waste of compute for same size such as 768, which is a very common case
// in BERT. Ideally we can step by wrap_size * num_unroll, but listing too many steps will cause long compile time.
constexpr int kSizes[] = {32, 64, 128, 384, 768, 1024, 2048};
constexpr int kMinBlockSize = 32;
constexpr int kMaxBlockSize = 256;
// Round x up to the nearest bucket in kSizes, clamping at the largest entry.
int NextSize(int x) {
  for (int candidate : kSizes) {
    if (x <= candidate) {
      return candidate;
    }
  }
  // x exceeds every bucket: saturate at the biggest one.
  return kSizes[sizeof(kSizes) / sizeof(kSizes[0]) - 1];
}
// True when the vectorized SkipLayerNormKernelSmall path is safe for a
// NumUnroll-wide aligned_vector of T: ld divisible by NumUnroll, every
// tensor pointer aligned to the vector type, and the resulting block size
// (next_size / NumUnroll) within [kMinBlockSize, kMaxBlockSize].
// NOTE(review): bias may be nullptr at call sites; nullptr % alignment == 0
// so the check still passes — looks intentional, but worth confirming.
template <typename T, int NumUnroll>
bool CanVectorized(T* output, T* skip_input_bias_add_output, const T* input, const T* skip, const T* gamma,
const T* beta, const T* bias, const int ld, const int next_size) {
constexpr int alignment = std::alignment_of<aligned_vector<T, NumUnroll>>::value;
return ld % NumUnroll == 0 && reinterpret_cast<uint64_t>(output) % alignment == 0 &&
reinterpret_cast<uint64_t>(skip_input_bias_add_output) % alignment == 0 &&
reinterpret_cast<uint64_t>(input) % alignment == 0 && reinterpret_cast<uint64_t>(skip) % alignment == 0 &&
reinterpret_cast<uint64_t>(gamma) % alignment == 0 && reinterpret_cast<uint64_t>(beta) % alignment == 0 &&
reinterpret_cast<uint64_t>(bias) % alignment == 0 && next_size / NumUnroll >= kMinBlockSize &&
next_size / NumUnroll <= kMaxBlockSize;
}
} // namespace
// Fused skip + bias + LayerNorm, one row of ld elements per block.
// Launch layout: grid.x = row count, TPB threads per block; each thread
// strides over its row. Computes val = input + skip (+ bias), optionally
// stores it to skip_input_bias_add_output, accumulates (mean, mean-of-
// squares) pairs via hipcub, then normalizes in LayerNorm /
// SimplifiedLayerNorm (defined in layer_norm.cuh).
// When skip_broadcasted is set, skip is a smaller buffer of skip_size
// elements reused cyclically via idx % skip_size.
template <typename T, unsigned TPB, bool Simplified>
__global__ void SkipLayerNormKernel(
const int ld, const T* input, const T* skip,
const T* beta, const T* gamma, const T* bias,
const T epsilon, T* output, T* skip_input_bias_add_output, const bool skip_broadcasted, int skip_size) {
const T reverse_ld = T(1.f / ld);
const int offset = blockIdx.x * ld;
KeyValuePairSum pair_sum;
// reduce x and x^2
hipcub::KeyValuePair<T, T> thread_data(0, 0);
for (int i = threadIdx.x; i < ld; i += TPB) {
const int idx = offset + i;
const T skip_data = skip_broadcasted ? skip[idx % skip_size] : skip[idx];
const T val = (bias == nullptr) ? input[idx] + skip_data : input[idx] + skip_data + bias[i];
const T rldval = reverse_ld * val;
thread_data = pair_sum(thread_data, hipcub::KeyValuePair<T, T>(rldval, rldval * val));
if (skip_input_bias_add_output != nullptr) {
skip_input_bias_add_output[idx] = val;
}
// output temporarily holds the pre-norm sum; presumably the LayerNorm
// helpers below overwrite it with the normalized value — see layer_norm.cuh.
output[idx] = val;
}
if (Simplified) {
SimplifiedLayerNorm<T, TPB>(thread_data.value, ld, offset, gamma, epsilon, output);
return;
}
LayerNorm<T, TPB>(thread_data, ld, offset, beta, gamma, epsilon, output);
}
// Vectorized kernel
// Vectorized variant for small ld: each thread handles ILP consecutive
// elements of its row via aligned_vector loads/stores. Launch layout:
// grid.x = row count, TPB = next_size / ILP threads per block; alignment and
// divisibility preconditions are enforced by CanVectorized above.
// NOTE(review): the vector loads below run before the `ILP * threadIdx.x <
// ld` guard, so threads past the row end read beyond it when ld < next_size
// — on the final row that can touch out-of-bounds memory. TODO confirm.
template <typename T, unsigned TPB, int ILP, bool Simplified>
__global__ void SkipLayerNormKernelSmall(
const int ld, const T* input, const T* skip, const T* beta, const T* gamma,
const T* bias, const T epsilon, T* output, T* skip_input_bias_add_output,
bool hasBias, bool hasSkipInputBiasAdditionOutput, const bool skip_broadcasted, const int skip_size) {
const T rld = T(1.f / ld);
const int idx = blockIdx.x * ld + threadIdx.x * ILP; // grid_size = n / ld
using VecT = aligned_vector<T, ILP>;
T input_v[ILP], skip_v[ILP], bias_v[ILP], skip_input_bias_add_output_v[ILP];
VecT* input_val = reinterpret_cast<VecT*>(&input_v);
*input_val = *reinterpret_cast<const VecT*>(&input[idx]);
VecT* skip_val = reinterpret_cast<VecT*>(&skip_v);
if (skip_broadcasted){
*skip_val = *reinterpret_cast<const VecT*>(&skip[idx % skip_size]);
}else{
*skip_val = *reinterpret_cast<const VecT*>(&skip[idx]);
}
if (hasBias) {
VecT* bias_val = reinterpret_cast<VecT*>(&bias_v);
*bias_val = *reinterpret_cast<const VecT*>(&bias[threadIdx.x * ILP]);
}
hipcub::KeyValuePair<T, T> thread_data(T(0.f), T(0.f));
if (ILP * threadIdx.x < ld) {
// Accumulate the (mean, mean-of-squares) contribution of this thread's
// ILP elements; input_v is updated in place to the skip(+bias) sum.
T rldval_sum = T(0.f);
T rldvalsq_sum = T(0.f);
#pragma unroll
for (int i = 0; i < ILP; i++) {
input_v[i] += hasBias ? skip_v[i] + bias_v[i] : skip_v[i];
if (hasSkipInputBiasAdditionOutput) {
skip_input_bias_add_output_v[i] = input_v[i];
}
const T rldval = rld * input_v[i];
rldval_sum += rldval;
rldvalsq_sum += rldval * input_v[i];
}
if (hasSkipInputBiasAdditionOutput) {
*(reinterpret_cast<VecT*>(&skip_input_bias_add_output[idx])) = *reinterpret_cast<VecT*>(&skip_input_bias_add_output_v);
}
thread_data = hipcub::KeyValuePair<T, T>(rldval_sum, rldvalsq_sum);
}
if (Simplified) {
SimplifiedLayerNormSmall<T, TPB, ILP>(input_v, thread_data.value, ld, idx, gamma, epsilon, output);
return;
}
LayerNormSmall<T, TPB, ILP>(input_v, thread_data, ld, idx, beta, gamma, epsilon, output);
}
// Host dispatcher: picks the widest vectorized SkipLayerNormKernelSmall
// variant (ILP 4, then 2, then 1) whose alignment/size preconditions hold
// for this hidden size ld, falling back to the strided SkipLayerNormKernel
// when ld exceeds kMaxBlockSize. One block per row (grid_size = row_count).
template <typename T, bool Simplified>
void LaunchSkipLayerNormKernel(
hipStream_t stream, T* output, T* skip_input_bias_add_output, const T* input, const T* skip, const T* gamma,
const T* beta, const T* bias, float epsilon, int ld, int row_count, bool skip_broadcasted, int skip_size) {
if (row_count == 0) {
return;
}
bool hasBias = (bias == nullptr) ? false : true;
bool hasSkipInputBiasAdditionOutput = (skip_input_bias_add_output == nullptr) ? false : true;
// Bucket ld into one of the supported kernel sizes.
const int next_size = NextSize(ld);
const int grid_size = row_count;
bool flag_vec2 =
CanVectorized<T, 2>(output, skip_input_bias_add_output, input, skip, gamma, beta, bias, ld, next_size);
bool flag_vec4 =
CanVectorized<T, 4>(output, skip_input_bias_add_output, input, skip, gamma, beta, bias, ld, next_size);
switch (next_size) {
// The LAUNCH macros reference `block_size`, which each CASE arm declares
// as a constexpr in its own scope before invoking them.
#define LAUNCH_SKIP_LAYER_NORM_KERNEL_SMALL(num_unroll) \
hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size, num_unroll, Simplified>) \
, dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias, maybe2half<T>(epsilon), output, \
skip_input_bias_add_output, hasBias, hasSkipInputBiasAdditionOutput, skip_broadcasted, skip_size)
#define LAUNCH_SKIP_LAYER_NORM_KERNEL() \
hipLaunchKernelGGL(( SkipLayerNormKernel<T, kMaxBlockSize, Simplified>), dim3(grid_size), dim3(kMaxBlockSize), 0, stream, \
ld, input, skip, beta, gamma, bias, maybe2half<T>(epsilon), output, skip_input_bias_add_output, skip_broadcasted, skip_size)
#define CASE_NEXT_SIZE(next_size_value) \
case next_size_value: { \
if (flag_vec4) { \
constexpr int block_size = next_size_value / 4; \
LAUNCH_SKIP_LAYER_NORM_KERNEL_SMALL(4); \
} else if (flag_vec2) { \
constexpr int block_size = next_size_value / 2; \
LAUNCH_SKIP_LAYER_NORM_KERNEL_SMALL(2); \
} else { \
if (next_size_value <= kMaxBlockSize) { \
constexpr int block_size = next_size_value; \
LAUNCH_SKIP_LAYER_NORM_KERNEL_SMALL(1); \
} else { \
LAUNCH_SKIP_LAYER_NORM_KERNEL(); \
} \
} \
} break
// One case per entry of kSizes; NextSize guarantees next_size matches one.
CASE_NEXT_SIZE(kSizes[0]);
CASE_NEXT_SIZE(kSizes[1]);
CASE_NEXT_SIZE(kSizes[2]);
CASE_NEXT_SIZE(kSizes[3]);
CASE_NEXT_SIZE(kSizes[4]);
CASE_NEXT_SIZE(kSizes[5]);
CASE_NEXT_SIZE(kSizes[6]);
#undef CASE_NEXT_SIZE
#undef LAUNCH_SKIP_LAYER_NORM_KERNEL
#undef LAUNCH_SKIP_LAYER_NORM_KERNEL_SMALL
}
}
// Explicit template instantiations exported from this translation unit, one
// per (element type, simplified-flag) combination.
#define SKIPLAYERNORM_IMPL(T, Simplified) \
template void LaunchSkipLayerNormKernel<T, Simplified>(hipStream_t stream, T * output, \
T * skip_input_bias_add_output, \
const T* input, const T* skip, const T* gamma, \
const T* beta, const T* bias, float epsilon, \
int ld, int row_count, bool skip_broadcasted, int skip_size);
SKIPLAYERNORM_IMPL(float, true);
SKIPLAYERNORM_IMPL(float, false);
SKIPLAYERNORM_IMPL(half, true);
SKIPLAYERNORM_IMPL(half, false);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| ced3930b29ff0f2b486b9c96ddad3b77d39eb59a.cu | /*
The implementation of this file is based on skipLayerNorm plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Modifications: Add SkipLayerNormKernelVec to
// leverage vectorized load/write.
// and templatize ComputeSkipLayerNorm for different
// data types.
// Copyright (c) Advanced Micro Devices, Inc. All rights reserved.
// Licensed under the MIT License.
#include "contrib_ops/cuda/bert/layer_norm.cuh"
#include "contrib_ops/cuda/bert/skip_layer_norm_impl.h"
#include <cuda_fp16.h>
namespace onnxruntime {
namespace contrib {
namespace cuda {
namespace {
// Convert a host-side float parameter to the kernel compute type T:
// identity for float, round-to-nearest-even conversion for half.
template <typename T>
T maybe2half(float x);
template <>
float maybe2half(float x) {
return x;
}
template <>
half maybe2half(float x) {
return __float2half_rn(x);
}
// Using only power of 2 numbers will lead to waste of compute for same size such as 768, which is a very common case
// in BERT. Ideally we can step by wrap_size * num_unroll, but listing too many steps will cause long compile time.
constexpr int kSizes[] = {32, 64, 128, 384, 768, 1024, 2048};
constexpr int kMinBlockSize = 32;
constexpr int kMaxBlockSize = 256;
// Round x up to the nearest bucket in kSizes, clamping at the largest entry.
int NextSize(int x) {
  constexpr size_t bucket_count = sizeof(kSizes) / sizeof(kSizes[0]);
  for (size_t b = 0; b < bucket_count; ++b) {
    if (kSizes[b] >= x) return kSizes[b];
  }
  // x exceeds every bucket: saturate at the biggest one.
  return kSizes[bucket_count - 1];
}
// True when the vectorized SkipLayerNormKernelSmall path is safe for a
// NumUnroll-wide aligned_vector of T: ld divisible by NumUnroll, every
// tensor pointer aligned to the vector type, and the resulting block size
// (next_size / NumUnroll) within [kMinBlockSize, kMaxBlockSize].
// NOTE(review): bias may be nullptr at call sites; nullptr % alignment == 0
// so the check still passes — looks intentional, but worth confirming.
template <typename T, int NumUnroll>
bool CanVectorized(T* output, T* skip_input_bias_add_output, const T* input, const T* skip, const T* gamma,
const T* beta, const T* bias, const int ld, const int next_size) {
constexpr int alignment = std::alignment_of<aligned_vector<T, NumUnroll>>::value;
return ld % NumUnroll == 0 && reinterpret_cast<uint64_t>(output) % alignment == 0 &&
reinterpret_cast<uint64_t>(skip_input_bias_add_output) % alignment == 0 &&
reinterpret_cast<uint64_t>(input) % alignment == 0 && reinterpret_cast<uint64_t>(skip) % alignment == 0 &&
reinterpret_cast<uint64_t>(gamma) % alignment == 0 && reinterpret_cast<uint64_t>(beta) % alignment == 0 &&
reinterpret_cast<uint64_t>(bias) % alignment == 0 && next_size / NumUnroll >= kMinBlockSize &&
next_size / NumUnroll <= kMaxBlockSize;
}
} // namespace
// Fused skip + bias + LayerNorm, one row of ld elements per block.
// Launch layout: grid.x = row count, TPB threads per block; each thread
// strides over its row. Computes val = input + skip (+ bias), optionally
// stores it to skip_input_bias_add_output, accumulates (mean, mean-of-
// squares) pairs via cub, then normalizes in LayerNorm /
// SimplifiedLayerNorm (defined in layer_norm.cuh).
// When skip_broadcasted is set, skip is a smaller buffer of skip_size
// elements reused cyclically via idx % skip_size.
template <typename T, unsigned TPB, bool Simplified>
__global__ void SkipLayerNormKernel(
const int ld, const T* input, const T* skip,
const T* beta, const T* gamma, const T* bias,
const T epsilon, T* output, T* skip_input_bias_add_output, const bool skip_broadcasted, int skip_size) {
const T reverse_ld = T(1.f / ld);
const int offset = blockIdx.x * ld;
KeyValuePairSum pair_sum;
// reduce x and x^2
cub::KeyValuePair<T, T> thread_data(0, 0);
for (int i = threadIdx.x; i < ld; i += TPB) {
const int idx = offset + i;
const T skip_data = skip_broadcasted ? skip[idx % skip_size] : skip[idx];
const T val = (bias == nullptr) ? input[idx] + skip_data : input[idx] + skip_data + bias[i];
const T rldval = reverse_ld * val;
thread_data = pair_sum(thread_data, cub::KeyValuePair<T, T>(rldval, rldval * val));
if (skip_input_bias_add_output != nullptr) {
skip_input_bias_add_output[idx] = val;
}
// output temporarily holds the pre-norm sum; presumably the LayerNorm
// helpers below overwrite it with the normalized value — see layer_norm.cuh.
output[idx] = val;
}
if (Simplified) {
SimplifiedLayerNorm<T, TPB>(thread_data.value, ld, offset, gamma, epsilon, output);
return;
}
LayerNorm<T, TPB>(thread_data, ld, offset, beta, gamma, epsilon, output);
}
// Vectorized kernel
// Vectorized variant for small ld: each thread handles ILP consecutive
// elements of its row via aligned_vector loads/stores. Launch layout:
// grid.x = row count, TPB = next_size / ILP threads per block; alignment and
// divisibility preconditions are enforced by CanVectorized above.
// NOTE(review): the vector loads below run before the `ILP * threadIdx.x <
// ld` guard, so threads past the row end read beyond it when ld < next_size
// — on the final row that can touch out-of-bounds memory. TODO confirm.
template <typename T, unsigned TPB, int ILP, bool Simplified>
__global__ void SkipLayerNormKernelSmall(
const int ld, const T* input, const T* skip, const T* beta, const T* gamma,
const T* bias, const T epsilon, T* output, T* skip_input_bias_add_output,
bool hasBias, bool hasSkipInputBiasAdditionOutput, const bool skip_broadcasted, const int skip_size) {
const T rld = T(1.f / ld);
const int idx = blockIdx.x * ld + threadIdx.x * ILP; // grid_size = n / ld
using VecT = aligned_vector<T, ILP>;
T input_v[ILP], skip_v[ILP], bias_v[ILP], skip_input_bias_add_output_v[ILP];
VecT* input_val = reinterpret_cast<VecT*>(&input_v);
*input_val = *reinterpret_cast<const VecT*>(&input[idx]);
VecT* skip_val = reinterpret_cast<VecT*>(&skip_v);
if (skip_broadcasted){
*skip_val = *reinterpret_cast<const VecT*>(&skip[idx % skip_size]);
}else{
*skip_val = *reinterpret_cast<const VecT*>(&skip[idx]);
}
if (hasBias) {
VecT* bias_val = reinterpret_cast<VecT*>(&bias_v);
*bias_val = *reinterpret_cast<const VecT*>(&bias[threadIdx.x * ILP]);
}
cub::KeyValuePair<T, T> thread_data(T(0.f), T(0.f));
if (ILP * threadIdx.x < ld) {
// Accumulate the (mean, mean-of-squares) contribution of this thread's
// ILP elements; input_v is updated in place to the skip(+bias) sum.
T rldval_sum = T(0.f);
T rldvalsq_sum = T(0.f);
#pragma unroll
for (int i = 0; i < ILP; i++) {
input_v[i] += hasBias ? skip_v[i] + bias_v[i] : skip_v[i];
if (hasSkipInputBiasAdditionOutput) {
skip_input_bias_add_output_v[i] = input_v[i];
}
const T rldval = rld * input_v[i];
rldval_sum += rldval;
rldvalsq_sum += rldval * input_v[i];
}
if (hasSkipInputBiasAdditionOutput) {
*(reinterpret_cast<VecT*>(&skip_input_bias_add_output[idx])) = *reinterpret_cast<VecT*>(&skip_input_bias_add_output_v);
}
thread_data = cub::KeyValuePair<T, T>(rldval_sum, rldvalsq_sum);
}
if (Simplified) {
SimplifiedLayerNormSmall<T, TPB, ILP>(input_v, thread_data.value, ld, idx, gamma, epsilon, output);
return;
}
LayerNormSmall<T, TPB, ILP>(input_v, thread_data, ld, idx, beta, gamma, epsilon, output);
}
// Host dispatcher: picks the widest vectorized SkipLayerNormKernelSmall
// variant (ILP 4, then 2, then 1) whose alignment/size preconditions hold
// for this hidden size ld, falling back to the strided SkipLayerNormKernel
// when ld exceeds kMaxBlockSize. One block per row (grid_size = row_count).
template <typename T, bool Simplified>
void LaunchSkipLayerNormKernel(
cudaStream_t stream, T* output, T* skip_input_bias_add_output, const T* input, const T* skip, const T* gamma,
const T* beta, const T* bias, float epsilon, int ld, int row_count, bool skip_broadcasted, int skip_size) {
if (row_count == 0) {
return;
}
bool hasBias = (bias == nullptr) ? false : true;
bool hasSkipInputBiasAdditionOutput = (skip_input_bias_add_output == nullptr) ? false : true;
// Bucket ld into one of the supported kernel sizes.
const int next_size = NextSize(ld);
const int grid_size = row_count;
bool flag_vec2 =
CanVectorized<T, 2>(output, skip_input_bias_add_output, input, skip, gamma, beta, bias, ld, next_size);
bool flag_vec4 =
CanVectorized<T, 4>(output, skip_input_bias_add_output, input, skip, gamma, beta, bias, ld, next_size);
switch (next_size) {
// The LAUNCH macros reference `block_size`, which each CASE arm declares
// as a constexpr in its own scope before invoking them.
#define LAUNCH_SKIP_LAYER_NORM_KERNEL_SMALL(num_unroll) \
SkipLayerNormKernelSmall<T, block_size, num_unroll, Simplified> \
<<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias, maybe2half<T>(epsilon), output, \
skip_input_bias_add_output, hasBias, hasSkipInputBiasAdditionOutput, skip_broadcasted, skip_size)
#define LAUNCH_SKIP_LAYER_NORM_KERNEL() \
SkipLayerNormKernel<T, kMaxBlockSize, Simplified><<<grid_size, kMaxBlockSize, 0, stream>>>( \
ld, input, skip, beta, gamma, bias, maybe2half<T>(epsilon), output, skip_input_bias_add_output, skip_broadcasted, skip_size)
#define CASE_NEXT_SIZE(next_size_value) \
case next_size_value: { \
if (flag_vec4) { \
constexpr int block_size = next_size_value / 4; \
LAUNCH_SKIP_LAYER_NORM_KERNEL_SMALL(4); \
} else if (flag_vec2) { \
constexpr int block_size = next_size_value / 2; \
LAUNCH_SKIP_LAYER_NORM_KERNEL_SMALL(2); \
} else { \
if (next_size_value <= kMaxBlockSize) { \
constexpr int block_size = next_size_value; \
LAUNCH_SKIP_LAYER_NORM_KERNEL_SMALL(1); \
} else { \
LAUNCH_SKIP_LAYER_NORM_KERNEL(); \
} \
} \
} break
// One case per entry of kSizes; NextSize guarantees next_size matches one.
CASE_NEXT_SIZE(kSizes[0]);
CASE_NEXT_SIZE(kSizes[1]);
CASE_NEXT_SIZE(kSizes[2]);
CASE_NEXT_SIZE(kSizes[3]);
CASE_NEXT_SIZE(kSizes[4]);
CASE_NEXT_SIZE(kSizes[5]);
CASE_NEXT_SIZE(kSizes[6]);
#undef CASE_NEXT_SIZE
#undef LAUNCH_SKIP_LAYER_NORM_KERNEL
#undef LAUNCH_SKIP_LAYER_NORM_KERNEL_SMALL
}
}
#define SKIPLAYERNORM_IMPL(T, Simplified) \
template void LaunchSkipLayerNormKernel<T, Simplified>(cudaStream_t stream, T * output, \
T * skip_input_bias_add_output, \
const T* input, const T* skip, const T* gamma, \
const T* beta, const T* bias, float epsilon, \
int ld, int row_count, bool skip_broadcasted, int skip_size);
SKIPLAYERNORM_IMPL(float, true);
SKIPLAYERNORM_IMPL(float, false);
SKIPLAYERNORM_IMPL(half, true);
SKIPLAYERNORM_IMPL(half, false);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
d5b54d9435a654df0a2757ba5400173c78a44e05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "cNetwork.h"
#include "math.h"
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
cNetwork* Network;
cLoadFile ConFiles;
#define TEST_COUNT 1
#define KERNEL_WIDTH 5
#define KERNEL_HEIGHT 5
#define POOLING_WIDTH 2
#define POOLING_HEIGHT 2
/*
thrust::device_vector<Npp8u*> testDevSrc = thrust::device_vector<Npp8u*>();
thrust::device_vector<Npp8u*> testDevDst = thrust::device_vector<Npp8u*>();
thrust::host_vector<Npp8u*> testHostSrc = thrust::host_vector<Npp8u*>();
thrust::host_vector<Npp8u*> testHostDst = thrust::host_vector<Npp8u*>();
*/
thrust::host_vector<cImagesGPU*> testDevSrc = thrust::host_vector<cImagesGPU*>();
thrust::host_vector<cImagesGPU*> testDevDst = thrust::host_vector<cImagesGPU*>();
thrust::host_vector<cImages*> testHostSrc = thrust::host_vector<cImages*>();
thrust::host_vector<cImages*> testHostDst = thrust::host_vector<cImages*>();
/*inline __global__ void maxpooling(Npp8u* DevSrc, Npp8u* DevDst, NppiSize SrcSize, NppiSize DstSize){
// if(blockIdx.x >= testDevSrc.size()) return;
// Work here...Fix pooling kernel problem
int max = 0;
int Idx_1 = 0;
for(int i = 0; i < POOLING_WIDTH; i++){
if(threadIdx.x * POOLING_WIDTH + i >= SrcSize.width) return;
for(int j = 0; j < POOLING_HEIGHT; j++){
if(threadIdx.y * POOLING_HEIGHT + j >= SrcSize.height) return;
Idx_1 = (threadIdx.y * POOLING_HEIGHT + j) * SrcSize.width + (threadIdx.x * POOLING_WIDTH + i);
if(max < DevSrc[Idx_1]) max = DevSrc[Idx_1];
}
}
DevDst[threadIdx.y * DstSize.width + threadIdx.x] = max;
}*/
inline void maxpoolingCPU(Npp8u* HostSrc, Npp8u* HostDst, NppiSize SrcSize, NppiSize DstSize){
int xSrc, ySrc, max, Idx_1;
for(int i = 0; i < DstSize.width * DstSize.height; i++){
xSrc = (i%DstSize.width) * POOLING_WIDTH;
ySrc = (i/DstSize.width) * POOLING_HEIGHT;
max = 0;
for(int j = 0; j < POOLING_WIDTH; j++){
if(xSrc + j >= SrcSize.width) break;
for(int k = 0; k < POOLING_HEIGHT; k++){
if(ySrc + k >= SrcSize.height) break;
Idx_1 = (ySrc + k) * SrcSize.width + xSrc + j;
if(max < HostSrc[Idx_1]) max = HostSrc[Idx_1];
}
}
HostDst[i] = max;
}
}
inline void ConvolutionCPU(Npp8u* Src, Npp8u* Dst, float* fKernel, NppiSize ROISize, NppiSize kerSize){
for(int i = 0; i < ROISize.height - KERNEL_HEIGHT + 1; i ++){
for(int j = 0; j < ROISize.width - KERNEL_WIDTH + 1; j++){
int res = 0;
for(int k = 0; k < KERNEL_HEIGHT; k++){
if(i + k >= ROISize.height) break;
for(int l = 0; l < KERNEL_WIDTH; l++){
if(j + l >= ROISize.width) break;
res += Src[(i + k) * ROISize.width + j + l] * fKernel[k * kerSize.width + l];
}
}
Dst[i * ROISize.width + j] = res;
}
}
}
inline int cudaDeviceInit(int argc, const char **argv)
{
int deviceCount;
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0)
{
std::cerr << "CUDA error: no devices supporting CUDA." << std::endl;
exit(EXIT_FAILURE);
}
int dev = findCudaDevice(argc, argv);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
std::cerr << "hipSetDevice GPU" << dev << " = " << deviceProp.name << std::endl;
checkCudaErrors(hipSetDevice(dev));
return dev;
}
int main(int argc, char *argv[])
{
int label = 0;
char temp = 0;
int err = 0;
int readCount =0;
NppiSize kerSize = {0, 0};
NppiSize SrcSize = {0, 0};
dim3 threadsPerBlock(0, 0);
hipStream_t Stream_1;
hipEvent_t Event_1;
cImages* oHostSrc = NULL;
cImages* oHostDst = NULL;
cImages* oDstCPU = NULL;
cImagesGPU* DevTemp = NULL;
cImagesGPU* DevDst = NULL;
Npp8u* HostTemp = NULL;
cImagesGPU* DevSrc = NULL;
StopWatchInterface* hTimer = NULL;
float Time;
void* tmp;
try{
hipDeviceReset();
cudaDeviceInit(argc, (const char **)argv);
checkCudaErrors(hipDeviceReset());
ConFiles.Initialize();
fprintf( logFile,"Loading file successed! Starting initialize network...\n");
GlobalValues.Initialize();
Network = new cNetwork;
Network->Initialize();
oHostSrc = new cImages(imgWidth, imgHeight);
//system("pause");
//tmp = malloc(sizeof(Npp8u) * imgWidth * imgHeight);
//----------------------------------------------------------------------------------------
//---------------------Main Working Part Below--------------------------------------------
//----------------------------------------------------------------------------------------
std::string filePath = std::string();
char* cpath;
cpath = (char*)malloc(20 * sizeof(char));
itoa(1, cpath, 10);
filePath = OUTPUT_SAVE_PATH + std::string("_Level") + std::string(cpath);
int r = 0, w = 0;
float rate = 0.0;
//Network->LoadWeights();
hipDeviceSynchronize();
for(int i = 0; i < trainCount; i++){
readCount = fread(oHostSrc->data(), sizeof(cPixel), imgWidth * imgHeight, imgSet);
fread(&temp, sizeof(char), 1, imgLabel);
label = temp;
if(Network->Train(oHostSrc, label, i)){
r++;
}else{
w++;
}
if(i % 500 == 499){
rate = r * 1.0 / (r + w);
fprintf(logFile, "O:%f ", rate);
printf("O:%f ", rate);
}
if(i % 2500 == 2499) {
fprintf(logFile, "\n");printf("\n");
}
if(i%GlobalValues.sBatchSize == (GlobalValues.sBatchSize - 1)) GlobalValues.fLearnMomentum *= 0.92f;
}
r = 0;
w = 0;
rate = 0.0;
//Network->SaveWeights();
/*for(int i = 0; i < testCount; i++){
readCount = fread(oHostSrc->data(), sizeof(cPixel), imgWidth * imgHeight, tstSet);
fread(&temp, sizeof(char), 1, tstLabel);
label = temp;
if(label == Network->Compute(oHostSrc)){
r++;
}else{
w++;
}
}
rate = r * 1.0 / (r + w);
printf("\n\nTest correct rate: %f.\n", rate);
fprintf(logFile, "\n\nTest correct rate: %f.\n", rate);*/
//----------------------------------------------------------------------------------------
//-------------------------Main Working Part Above----------------------------------------
//----------------------------------------------------------------------------------------
//memcpy( tmp, tmp, sizeof(Npp8u) * imgWidth * imgHeight);
//Network->TraceOutput();
//Network->Compute(oHostSrc);
//Network->Trace();
/*
hipStreamCreate(&Stream_1);
hipEventCreate(&Event_1);
nppSetStream( Stream_1);
tmp = malloc(sizeof(Npp8u) * imgWidth * imgHeight);
std::string filePath;
char* cpath;
cpath = (char*)malloc(sizeof(char) * 10);
testDevSrc.clear();
testDevDst.clear();
testHostSrc.clear();
testHostDst.clear();
for(int i = 0; i < TEST_COUNT ; i++){
readCount = fread(tmp, sizeof(Npp8u) , imgWidth * imgHeight, imgSet);
if(readCount != imgHeight * imgWidth) {
err = ferror(imgSet);
throw;
}
oHostSrc = new cImages(imgWidth, imgHeight);
memcpy( oHostSrc->data(), tmp, sizeof(Npp8u) * imgWidth * imgHeight);
testHostSrc.push_back(oHostSrc);
DevSrc = new cImagesGPU(imgWidth, imgHeight);
checkCudaErrors(hipMemcpy( DevSrc->data(), tmp, imgWidth * imgHeight * sizeof(Npp8u), hipMemcpyHostToDevice));
testDevSrc.push_back(DevSrc);
oHostDst = new cImages(imgWidth - KERNEL_HEIGHT + 1, imgHeight - KERNEL_HEIGHT + 1);
testHostDst.push_back(oHostDst);
DevDst = new cImagesGPU(imgWidth - KERNEL_HEIGHT + 1, imgHeight - KERNEL_HEIGHT + 1);
testDevDst.push_back(DevDst);
}
float* fKernel = NULL;
fKernel = (float*)malloc(KERNEL_WIDTH * KERNEL_HEIGHT * sizeof(float));
printf("\n");
for(int i = 0; i < KERNEL_HEIGHT; i++){
for(int j =0; j < KERNEL_WIDTH; j++){
fKernel[i * KERNEL_WIDTH + j] = fRandom(-0.05, 0.05);
printf("%f ", fKernel[i * KERNEL_WIDTH + j]);
}
printf("\n");
}
printf("\n");
SrcSize.height = imgHeight;
SrcSize.width = imgWidth;
kerSize.height = KERNEL_HEIGHT;
kerSize.width = KERNEL_WIDTH;
NppiPoint Anchor;
Anchor.x = 1;
Anchor.x = 1;
sdkCreateTimer(&hTimer);
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for(int i = 0; i < TEST_COUNT; i++){
nppiFilter32f_8u_C1R (testDevSrc[i]->data(), testDevSrc[i]->pitch(), testDevDst[i]->data(),
testDevDst[i]->pitch(), SrcSize, fKernel, kerSize, Anchor);
}
//checkCudaErrors(hipEventRecord(Event_1, Stream_1));
hipDeviceSynchronize();
sdkStopTimer(&hTimer);
Time = 0.0;
Time = sdkGetTimerValue(&hTimer);
printf("Complete max pooling using GPU for %d maps.\n", TEST_COUNT);
printf("Finished in %f msecs.\n", Time);
system("pause");
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for(int i = 0; i < TEST_COUNT; i++){
ConvolutionCPU(testHostSrc[i]->data(), testHostDst[i]->data(), fKernel, SrcSize, kerSize);
}
sdkStopTimer(&hTimer);
Time = 0.0;
Time = sdkGetTimerValue(&hTimer);
printf("Complete max pooling using CPU for %d maps.\n", TEST_COUNT);
printf("Finished in %f msecs.\n", Time);
system("pause");
float Err = 0.0f;
oHostDst = new cImages(imgWidth, imgHeight);
DevDst[0].copyTo(oHostDst->data(), oHostDst->pitch());
for(int i = 0; i < imgHeight - KERNEL_HEIGHT + 1; i++){
for(int j = 0; j < imgWidth - KERNEL_WIDTH + 1; j++){
Err += sqrt((float)(oHostDst->data()[i * (imgHeight - KERNEL_HEIGHT + 1) + j] - testHostDst[0]->data()[(imgHeight - KERNEL_HEIGHT + 1) + j]));
}
}
printf("\nError rate: %f\n", Err);*/
/*for(int i = 0; i < TEST_COUNT; i++){
checkCudaErrors(hipMemcpy(oHostDst->data(), testDevDst[i],
DstSize.width * DstSize.height * sizeof(Npp8u), hipMemcpyDeviceToHost));
itoa(i, cpath, 10);
filePath = IMAGE_SAVE_PATH + std::string(cpath) + "GPU.pgm";
npp::saveImage(filePath, *oHostDst);*/
/*memcpy(oDstCPU->data(), testHostDst[i], DstSize.width * DstSize.height * sizeof(Npp8u));
filePath = IMAGE_SAVE_PATH + std::string(cpath) + "CPU.pgm";
npp::saveImage(filePath, *oDstCPU);
}
/*printf("Reading image success.\n");
printf("Saving image...\n\n");
printf("good\n");*/
for(int i = 0; i < testDevSrc.size(); i++){
if(testDevSrc[i] != NULL){
hipFree(testDevSrc[i]);
testDevSrc[i] = NULL;
}
}
testDevSrc.clear();
for(int i = 0; i < testDevDst.size(); i++){
if(testDevDst[i] != NULL){
hipFree(testDevDst[i]);
testDevDst[i] = NULL;
}
}
testDevDst.clear();
for(int i = 0; i < testHostSrc.size(); i++){
if(testHostSrc[i] != NULL){
hipFree(testHostSrc[i]);
testHostSrc[i] = NULL;
}
}
testHostSrc.clear();
for(int i = 0; i < testHostDst.size(); i++){
if(testHostDst[i] != NULL){
hipFree(testHostDst[i]);
testHostDst[i] = NULL;
}
}
testHostDst.clear();
printf("Success!\n");
//system("pause");
hipDeviceReset();
exit(EXIT_SUCCESS);
}
catch(char* err){
/*if(logFile != NULL){
fprintf(logFile, err);
fprintf(logFile, "\n");
fclose(logFile);
logFile = NULL;
}
if(imgSet != NULL){
fclose(imgSet);
imgSet = NULL;
}
printf("Some error occured.\n");
system("pause");*/
}
} | d5b54d9435a654df0a2757ba5400173c78a44e05.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cNetwork.h"
#include "math.h"
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
cNetwork* Network;
cLoadFile ConFiles;
#define TEST_COUNT 1
#define KERNEL_WIDTH 5
#define KERNEL_HEIGHT 5
#define POOLING_WIDTH 2
#define POOLING_HEIGHT 2
/*
thrust::device_vector<Npp8u*> testDevSrc = thrust::device_vector<Npp8u*>();
thrust::device_vector<Npp8u*> testDevDst = thrust::device_vector<Npp8u*>();
thrust::host_vector<Npp8u*> testHostSrc = thrust::host_vector<Npp8u*>();
thrust::host_vector<Npp8u*> testHostDst = thrust::host_vector<Npp8u*>();
*/
thrust::host_vector<cImagesGPU*> testDevSrc = thrust::host_vector<cImagesGPU*>();
thrust::host_vector<cImagesGPU*> testDevDst = thrust::host_vector<cImagesGPU*>();
thrust::host_vector<cImages*> testHostSrc = thrust::host_vector<cImages*>();
thrust::host_vector<cImages*> testHostDst = thrust::host_vector<cImages*>();
/*inline __global__ void maxpooling(Npp8u* DevSrc, Npp8u* DevDst, NppiSize SrcSize, NppiSize DstSize){
// if(blockIdx.x >= testDevSrc.size()) return;
// Work here...Fix pooling kernel problem
int max = 0;
int Idx_1 = 0;
for(int i = 0; i < POOLING_WIDTH; i++){
if(threadIdx.x * POOLING_WIDTH + i >= SrcSize.width) return;
for(int j = 0; j < POOLING_HEIGHT; j++){
if(threadIdx.y * POOLING_HEIGHT + j >= SrcSize.height) return;
Idx_1 = (threadIdx.y * POOLING_HEIGHT + j) * SrcSize.width + (threadIdx.x * POOLING_WIDTH + i);
if(max < DevSrc[Idx_1]) max = DevSrc[Idx_1];
}
}
DevDst[threadIdx.y * DstSize.width + threadIdx.x] = max;
}*/
inline void maxpoolingCPU(Npp8u* HostSrc, Npp8u* HostDst, NppiSize SrcSize, NppiSize DstSize){
int xSrc, ySrc, max, Idx_1;
for(int i = 0; i < DstSize.width * DstSize.height; i++){
xSrc = (i%DstSize.width) * POOLING_WIDTH;
ySrc = (i/DstSize.width) * POOLING_HEIGHT;
max = 0;
for(int j = 0; j < POOLING_WIDTH; j++){
if(xSrc + j >= SrcSize.width) break;
for(int k = 0; k < POOLING_HEIGHT; k++){
if(ySrc + k >= SrcSize.height) break;
Idx_1 = (ySrc + k) * SrcSize.width + xSrc + j;
if(max < HostSrc[Idx_1]) max = HostSrc[Idx_1];
}
}
HostDst[i] = max;
}
}
inline void ConvolutionCPU(Npp8u* Src, Npp8u* Dst, float* fKernel, NppiSize ROISize, NppiSize kerSize){
for(int i = 0; i < ROISize.height - KERNEL_HEIGHT + 1; i ++){
for(int j = 0; j < ROISize.width - KERNEL_WIDTH + 1; j++){
int res = 0;
for(int k = 0; k < KERNEL_HEIGHT; k++){
if(i + k >= ROISize.height) break;
for(int l = 0; l < KERNEL_WIDTH; l++){
if(j + l >= ROISize.width) break;
res += Src[(i + k) * ROISize.width + j + l] * fKernel[k * kerSize.width + l];
}
}
Dst[i * ROISize.width + j] = res;
}
}
}
inline int cudaDeviceInit(int argc, const char **argv)
{
int deviceCount;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0)
{
std::cerr << "CUDA error: no devices supporting CUDA." << std::endl;
exit(EXIT_FAILURE);
}
int dev = findCudaDevice(argc, argv);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
std::cerr << "cudaSetDevice GPU" << dev << " = " << deviceProp.name << std::endl;
checkCudaErrors(cudaSetDevice(dev));
return dev;
}
int main(int argc, char *argv[])
{
int label = 0;
char temp = 0;
int err = 0;
int readCount =0;
NppiSize kerSize = {0, 0};
NppiSize SrcSize = {0, 0};
dim3 threadsPerBlock(0, 0);
cudaStream_t Stream_1;
cudaEvent_t Event_1;
cImages* oHostSrc = NULL;
cImages* oHostDst = NULL;
cImages* oDstCPU = NULL;
cImagesGPU* DevTemp = NULL;
cImagesGPU* DevDst = NULL;
Npp8u* HostTemp = NULL;
cImagesGPU* DevSrc = NULL;
StopWatchInterface* hTimer = NULL;
float Time;
void* tmp;
try{
cudaDeviceReset();
cudaDeviceInit(argc, (const char **)argv);
checkCudaErrors(cudaDeviceReset());
ConFiles.Initialize();
fprintf( logFile,"Loading file successed! Starting initialize network...\n");
GlobalValues.Initialize();
Network = new cNetwork;
Network->Initialize();
oHostSrc = new cImages(imgWidth, imgHeight);
//system("pause");
//tmp = malloc(sizeof(Npp8u) * imgWidth * imgHeight);
//----------------------------------------------------------------------------------------
//---------------------Main Working Part Below--------------------------------------------
//----------------------------------------------------------------------------------------
std::string filePath = std::string();
char* cpath;
cpath = (char*)malloc(20 * sizeof(char));
itoa(1, cpath, 10);
filePath = OUTPUT_SAVE_PATH + std::string("_Level") + std::string(cpath);
int r = 0, w = 0;
float rate = 0.0;
//Network->LoadWeights();
cudaDeviceSynchronize();
for(int i = 0; i < trainCount; i++){
readCount = fread(oHostSrc->data(), sizeof(cPixel), imgWidth * imgHeight, imgSet);
fread(&temp, sizeof(char), 1, imgLabel);
label = temp;
if(Network->Train(oHostSrc, label, i)){
r++;
}else{
w++;
}
if(i % 500 == 499){
rate = r * 1.0 / (r + w);
fprintf(logFile, "O:%f ", rate);
printf("O:%f ", rate);
}
if(i % 2500 == 2499) {
fprintf(logFile, "\n");printf("\n");
}
if(i%GlobalValues.sBatchSize == (GlobalValues.sBatchSize - 1)) GlobalValues.fLearnMomentum *= 0.92f;
}
r = 0;
w = 0;
rate = 0.0;
//Network->SaveWeights();
/*for(int i = 0; i < testCount; i++){
readCount = fread(oHostSrc->data(), sizeof(cPixel), imgWidth * imgHeight, tstSet);
fread(&temp, sizeof(char), 1, tstLabel);
label = temp;
if(label == Network->Compute(oHostSrc)){
r++;
}else{
w++;
}
}
rate = r * 1.0 / (r + w);
printf("\n\nTest correct rate: %f.\n", rate);
fprintf(logFile, "\n\nTest correct rate: %f.\n", rate);*/
//----------------------------------------------------------------------------------------
//-------------------------Main Working Part Above----------------------------------------
//----------------------------------------------------------------------------------------
//memcpy( tmp, tmp, sizeof(Npp8u) * imgWidth * imgHeight);
//Network->TraceOutput();
//Network->Compute(oHostSrc);
//Network->Trace();
/*
cudaStreamCreate(&Stream_1);
cudaEventCreate(&Event_1);
nppSetStream( Stream_1);
tmp = malloc(sizeof(Npp8u) * imgWidth * imgHeight);
std::string filePath;
char* cpath;
cpath = (char*)malloc(sizeof(char) * 10);
testDevSrc.clear();
testDevDst.clear();
testHostSrc.clear();
testHostDst.clear();
for(int i = 0; i < TEST_COUNT ; i++){
readCount = fread(tmp, sizeof(Npp8u) , imgWidth * imgHeight, imgSet);
if(readCount != imgHeight * imgWidth) {
err = ferror(imgSet);
throw;
}
oHostSrc = new cImages(imgWidth, imgHeight);
memcpy( oHostSrc->data(), tmp, sizeof(Npp8u) * imgWidth * imgHeight);
testHostSrc.push_back(oHostSrc);
DevSrc = new cImagesGPU(imgWidth, imgHeight);
checkCudaErrors(cudaMemcpy( DevSrc->data(), tmp, imgWidth * imgHeight * sizeof(Npp8u), cudaMemcpyHostToDevice));
testDevSrc.push_back(DevSrc);
oHostDst = new cImages(imgWidth - KERNEL_HEIGHT + 1, imgHeight - KERNEL_HEIGHT + 1);
testHostDst.push_back(oHostDst);
DevDst = new cImagesGPU(imgWidth - KERNEL_HEIGHT + 1, imgHeight - KERNEL_HEIGHT + 1);
testDevDst.push_back(DevDst);
}
float* fKernel = NULL;
fKernel = (float*)malloc(KERNEL_WIDTH * KERNEL_HEIGHT * sizeof(float));
printf("\n");
for(int i = 0; i < KERNEL_HEIGHT; i++){
for(int j =0; j < KERNEL_WIDTH; j++){
fKernel[i * KERNEL_WIDTH + j] = fRandom(-0.05, 0.05);
printf("%f ", fKernel[i * KERNEL_WIDTH + j]);
}
printf("\n");
}
printf("\n");
SrcSize.height = imgHeight;
SrcSize.width = imgWidth;
kerSize.height = KERNEL_HEIGHT;
kerSize.width = KERNEL_WIDTH;
NppiPoint Anchor;
Anchor.x = 1;
Anchor.x = 1;
sdkCreateTimer(&hTimer);
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for(int i = 0; i < TEST_COUNT; i++){
nppiFilter32f_8u_C1R (testDevSrc[i]->data(), testDevSrc[i]->pitch(), testDevDst[i]->data(),
testDevDst[i]->pitch(), SrcSize, fKernel, kerSize, Anchor);
}
//checkCudaErrors(cudaEventRecord(Event_1, Stream_1));
cudaDeviceSynchronize();
sdkStopTimer(&hTimer);
Time = 0.0;
Time = sdkGetTimerValue(&hTimer);
printf("Complete max pooling using GPU for %d maps.\n", TEST_COUNT);
printf("Finished in %f msecs.\n", Time);
system("pause");
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for(int i = 0; i < TEST_COUNT; i++){
ConvolutionCPU(testHostSrc[i]->data(), testHostDst[i]->data(), fKernel, SrcSize, kerSize);
}
sdkStopTimer(&hTimer);
Time = 0.0;
Time = sdkGetTimerValue(&hTimer);
printf("Complete max pooling using CPU for %d maps.\n", TEST_COUNT);
printf("Finished in %f msecs.\n", Time);
system("pause");
float Err = 0.0f;
oHostDst = new cImages(imgWidth, imgHeight);
DevDst[0].copyTo(oHostDst->data(), oHostDst->pitch());
for(int i = 0; i < imgHeight - KERNEL_HEIGHT + 1; i++){
for(int j = 0; j < imgWidth - KERNEL_WIDTH + 1; j++){
Err += sqrt((float)(oHostDst->data()[i * (imgHeight - KERNEL_HEIGHT + 1) + j] - testHostDst[0]->data()[(imgHeight - KERNEL_HEIGHT + 1) + j]));
}
}
printf("\nError rate: %f\n", Err);*/
/*for(int i = 0; i < TEST_COUNT; i++){
checkCudaErrors(cudaMemcpy(oHostDst->data(), testDevDst[i],
DstSize.width * DstSize.height * sizeof(Npp8u), cudaMemcpyDeviceToHost));
itoa(i, cpath, 10);
filePath = IMAGE_SAVE_PATH + std::string(cpath) + "GPU.pgm";
npp::saveImage(filePath, *oHostDst);*/
/*memcpy(oDstCPU->data(), testHostDst[i], DstSize.width * DstSize.height * sizeof(Npp8u));
filePath = IMAGE_SAVE_PATH + std::string(cpath) + "CPU.pgm";
npp::saveImage(filePath, *oDstCPU);
}
/*printf("Reading image success.\n");
printf("Saving image...\n\n");
printf("good\n");*/
for(int i = 0; i < testDevSrc.size(); i++){
if(testDevSrc[i] != NULL){
cudaFree(testDevSrc[i]);
testDevSrc[i] = NULL;
}
}
testDevSrc.clear();
for(int i = 0; i < testDevDst.size(); i++){
if(testDevDst[i] != NULL){
cudaFree(testDevDst[i]);
testDevDst[i] = NULL;
}
}
testDevDst.clear();
for(int i = 0; i < testHostSrc.size(); i++){
if(testHostSrc[i] != NULL){
cudaFree(testHostSrc[i]);
testHostSrc[i] = NULL;
}
}
testHostSrc.clear();
for(int i = 0; i < testHostDst.size(); i++){
if(testHostDst[i] != NULL){
cudaFree(testHostDst[i]);
testHostDst[i] = NULL;
}
}
testHostDst.clear();
printf("Success!\n");
//system("pause");
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
catch(char* err){
/*if(logFile != NULL){
fprintf(logFile, err);
fprintf(logFile, "\n");
fclose(logFile);
logFile = NULL;
}
if(imgSet != NULL){
fclose(imgSet);
imgSet = NULL;
}
printf("Some error occured.\n");
system("pause");*/
}
} |
290ac9788470717f64bf710ea1dc0f73eea19c68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define BOOST_TEST_MODULE "Test_Perfomance"
#define EVENTS_NUMBER 1000000
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include <boost/filesystem.hpp>
#include <iostream>
#include <random>
#include "Catboost.h"
#include "Evaluator_hip.cuh"
#include "GenerateBinFeatures.cuh"
#define cudaCheck(stmt) { \
hipError_t err = stmt; \
if (err != hipSuccess){ \
std::cerr << "Failed to run " << #stmt << std::endl; \
std::cerr << hipGetErrorString(err) << std::endl; \
throw std::invalid_argument("cudaCheck failed"); \
} \
}
BOOST_AUTO_TEST_CASE(Test_Perfomance)
{
hipEvent_t start;
hipEvent_t stop;
cudaCheck(hipEventCreate(&start));
cudaCheck(hipEventCreate(&stop));
const std::string model_path = "/home/popov/Documents/Practice/data/MuID-Run2-MC-570-v1.cb";
const std::string signal_data_path = "/home/popov/Documents/Practice/data/signal.csv";
if ( !boost::filesystem::exists(model_path) ){
std::cout << "Can't find model file: " << model_path << std::endl;
}
const int number_of_events = EVENTS_NUMBER;
int model_bin_feature_num = 0;
int *host_tree_sizes, *dev_tree_sizes;
int *host_border_nums, *dev_border_nums;
int **host_tree_splits, **dev_tree_splits;
float *host_catboost_output, *dev_catboost_output;
float **host_borders, **dev_borders;
float **host_features, **dev_features;
double **host_leaf_values, **dev_leaf_values;
unsigned char *dev_bin_features;
CatboostEvaluator evaluator(model_path);
int model_float_feature_num = (int)evaluator.GetFloatFeatureCount();
BOOST_CHECK_EQUAL(model_float_feature_num, 20);
const NCatBoostFbs::TObliviousTrees* ObliviousTrees = evaluator.GetObliviousTrees();
int tree_num = ObliviousTrees->TreeSizes()->size();
const int* treeSplitsPtr_flat = ObliviousTrees->TreeSplits()->data();
const double* leafValuesPtr_flat = ObliviousTrees->LeafValues()->data();
std::vector<std::vector<float>> features;
std::vector<float> event(model_float_feature_num);
std::random_device rd;
std::mt19937 mt(rd());
std::uniform_real_distribution<> dis(-5.0, 5.0);
for (size_t j = 0; j < model_float_feature_num; ++j) {
event[j] = dis(mt);
}
for (size_t i = 0; i < number_of_events; ++i) {
features.push_back(event);
}
cudaCheck(hipHostMalloc((void***)&host_features, number_of_events * sizeof(float*)));
cudaCheck(hipHostMalloc((void***)&host_borders, model_float_feature_num * sizeof(float*)));
cudaCheck(hipHostMalloc((void**)&host_border_nums, model_float_feature_num * sizeof(int)));
cudaCheck(hipHostMalloc((void***)&host_leaf_values, tree_num * sizeof(double*)));
cudaCheck(hipHostMalloc((void***)&host_tree_splits, tree_num * sizeof(int*)));
cudaCheck(hipHostMalloc((void**)&host_catboost_output, number_of_events * sizeof(float)));
cudaCheck(hipHostMalloc((void**)&host_tree_sizes, tree_num * sizeof(int)));
int index = 0;
for (const auto& ff : *ObliviousTrees->FloatFeatures()) {
int border_num = ff->Borders()->size();
host_border_nums[index] = border_num;
model_bin_feature_num += border_num;
cudaCheck(hipMalloc((void**)&host_borders[index], border_num*sizeof(float)));
cudaCheck(hipMemcpy(host_borders[index], ff->Borders()+1, border_num*sizeof(float),hipMemcpyHostToDevice));
index++;
}
for (int i = 0; i < tree_num; i++) {
host_tree_sizes[i] = ObliviousTrees->TreeSizes()->Get(i);
}
for (int i = 0; i < tree_num; i++) {
int depth = host_tree_sizes[i];
cudaCheck(hipMalloc((void**)&host_leaf_values[i], (1 << depth)*sizeof(double)));
cudaCheck(hipMemcpy(host_leaf_values[i], leafValuesPtr_flat, (1 << depth)*sizeof(double), hipMemcpyHostToDevice));
cudaCheck(hipMalloc((void**)&host_tree_splits[i], depth*sizeof(int)));
cudaCheck(hipMemcpy(host_tree_splits[i], treeSplitsPtr_flat, depth*sizeof(int), hipMemcpyHostToDevice));
leafValuesPtr_flat += (1 << depth);
treeSplitsPtr_flat += depth;
}
for (int i = 0; i < number_of_events; ++i) {
cudaCheck(hipMalloc((void**)&host_features[i], model_float_feature_num*sizeof(float)));
cudaCheck(hipMemcpy(host_features[i], features[i].data(), model_float_feature_num*sizeof(float),hipMemcpyHostToDevice));
}
cudaCheck(hipMalloc((void***)&dev_features, number_of_events * sizeof(float*)));
cudaCheck(hipMalloc((void***)&dev_borders, model_float_feature_num * sizeof(float*)));
cudaCheck(hipMalloc((void**)&dev_border_nums, model_float_feature_num * sizeof(int)));
cudaCheck(hipMalloc((void***)&dev_leaf_values, tree_num * sizeof(double*)));
cudaCheck(hipMalloc((void***)&dev_tree_splits, tree_num * sizeof(int*)));
cudaCheck(hipMalloc((void**)&dev_catboost_output, number_of_events * sizeof(float)));
cudaCheck(hipMalloc((void**)&dev_tree_sizes, tree_num * sizeof(int)));
cudaCheck(hipMalloc((void**)&dev_bin_features, number_of_events * model_bin_feature_num * sizeof(char)));
cudaCheck(hipMemcpyAsync(dev_borders, host_borders, model_float_feature_num * sizeof(float*), hipMemcpyHostToDevice));
cudaCheck(hipMemcpyAsync(dev_features, host_features, number_of_events * sizeof(float*), hipMemcpyHostToDevice));
cudaCheck(hipMemcpyAsync(dev_border_nums, host_border_nums, model_float_feature_num * sizeof(int), hipMemcpyHostToDevice));
cudaCheck(hipMemcpyAsync(dev_tree_splits, host_tree_splits, tree_num * sizeof(int*), hipMemcpyHostToDevice));
cudaCheck(hipMemcpyAsync(dev_leaf_values, host_leaf_values, tree_num * sizeof(double*), hipMemcpyHostToDevice));
cudaCheck(hipMemcpyAsync(dev_tree_sizes, host_tree_sizes, tree_num * sizeof(int), hipMemcpyHostToDevice));
hipEventRecord(start, 0);
hipLaunchKernelGGL(( gen_bin_features), dim3(dim3(number_of_events)), dim3(dim3(model_float_feature_num)), 0, 0,
dev_borders,
dev_features,
dev_border_nums,
dev_bin_features,
number_of_events,
model_bin_feature_num
);
hipLaunchKernelGGL(( catboost_evaluator), dim3(dim3(number_of_events)), dim3(dim3(32)), 32*sizeof(float), 0,
dev_tree_splits,
dev_leaf_values,
dev_tree_sizes,
dev_catboost_output,
dev_bin_features,
tree_num,
number_of_events,
model_bin_feature_num
);
hipEventRecord(stop, 0);
float time = 0;
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
BOOST_TEST_MESSAGE("GPU compute time: " << time);
cudaCheck(hipMemcpyAsync(host_catboost_output, dev_catboost_output, number_of_events*sizeof(float), hipMemcpyDeviceToHost));
} | 290ac9788470717f64bf710ea1dc0f73eea19c68.cu | #define BOOST_TEST_MODULE "Test_Perfomance"
#define EVENTS_NUMBER 1000000
#include <boost/test/unit_test.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include <boost/filesystem.hpp>
#include <iostream>
#include <random>
#include "Catboost.h"
#include "Evaluator.cuh"
#include "GenerateBinFeatures.cuh"
#define cudaCheck(stmt) { \
cudaError_t err = stmt; \
if (err != cudaSuccess){ \
std::cerr << "Failed to run " << #stmt << std::endl; \
std::cerr << cudaGetErrorString(err) << std::endl; \
throw std::invalid_argument("cudaCheck failed"); \
} \
}
BOOST_AUTO_TEST_CASE(Test_Perfomance)
{
cudaEvent_t start;
cudaEvent_t stop;
cudaCheck(cudaEventCreate(&start));
cudaCheck(cudaEventCreate(&stop));
const std::string model_path = "/home/popov/Documents/Practice/data/MuID-Run2-MC-570-v1.cb";
const std::string signal_data_path = "/home/popov/Documents/Practice/data/signal.csv";
if ( !boost::filesystem::exists(model_path) ){
std::cout << "Can't find model file: " << model_path << std::endl;
}
const int number_of_events = EVENTS_NUMBER;
int model_bin_feature_num = 0;
int *host_tree_sizes, *dev_tree_sizes;
int *host_border_nums, *dev_border_nums;
int **host_tree_splits, **dev_tree_splits;
float *host_catboost_output, *dev_catboost_output;
float **host_borders, **dev_borders;
float **host_features, **dev_features;
double **host_leaf_values, **dev_leaf_values;
unsigned char *dev_bin_features;
CatboostEvaluator evaluator(model_path);
int model_float_feature_num = (int)evaluator.GetFloatFeatureCount();
BOOST_CHECK_EQUAL(model_float_feature_num, 20);
const NCatBoostFbs::TObliviousTrees* ObliviousTrees = evaluator.GetObliviousTrees();
int tree_num = ObliviousTrees->TreeSizes()->size();
const int* treeSplitsPtr_flat = ObliviousTrees->TreeSplits()->data();
const double* leafValuesPtr_flat = ObliviousTrees->LeafValues()->data();
std::vector<std::vector<float>> features;
std::vector<float> event(model_float_feature_num);
std::random_device rd;
std::mt19937 mt(rd());
std::uniform_real_distribution<> dis(-5.0, 5.0);
for (size_t j = 0; j < model_float_feature_num; ++j) {
event[j] = dis(mt);
}
for (size_t i = 0; i < number_of_events; ++i) {
features.push_back(event);
}
cudaCheck(cudaMallocHost((void***)&host_features, number_of_events * sizeof(float*)));
cudaCheck(cudaMallocHost((void***)&host_borders, model_float_feature_num * sizeof(float*)));
cudaCheck(cudaMallocHost((void**)&host_border_nums, model_float_feature_num * sizeof(int)));
cudaCheck(cudaMallocHost((void***)&host_leaf_values, tree_num * sizeof(double*)));
cudaCheck(cudaMallocHost((void***)&host_tree_splits, tree_num * sizeof(int*)));
cudaCheck(cudaMallocHost((void**)&host_catboost_output, number_of_events * sizeof(float)));
cudaCheck(cudaMallocHost((void**)&host_tree_sizes, tree_num * sizeof(int)));
int index = 0;
for (const auto& ff : *ObliviousTrees->FloatFeatures()) {
int border_num = ff->Borders()->size();
host_border_nums[index] = border_num;
model_bin_feature_num += border_num;
cudaCheck(cudaMalloc((void**)&host_borders[index], border_num*sizeof(float)));
cudaCheck(cudaMemcpy(host_borders[index], ff->Borders()+1, border_num*sizeof(float),cudaMemcpyHostToDevice));
index++;
}
for (int i = 0; i < tree_num; i++) {
host_tree_sizes[i] = ObliviousTrees->TreeSizes()->Get(i);
}
for (int i = 0; i < tree_num; i++) {
int depth = host_tree_sizes[i];
cudaCheck(cudaMalloc((void**)&host_leaf_values[i], (1 << depth)*sizeof(double)));
cudaCheck(cudaMemcpy(host_leaf_values[i], leafValuesPtr_flat, (1 << depth)*sizeof(double), cudaMemcpyHostToDevice));
cudaCheck(cudaMalloc((void**)&host_tree_splits[i], depth*sizeof(int)));
cudaCheck(cudaMemcpy(host_tree_splits[i], treeSplitsPtr_flat, depth*sizeof(int), cudaMemcpyHostToDevice));
leafValuesPtr_flat += (1 << depth);
treeSplitsPtr_flat += depth;
}
for (int i = 0; i < number_of_events; ++i) {
cudaCheck(cudaMalloc((void**)&host_features[i], model_float_feature_num*sizeof(float)));
cudaCheck(cudaMemcpy(host_features[i], features[i].data(), model_float_feature_num*sizeof(float),cudaMemcpyHostToDevice));
}
cudaCheck(cudaMalloc((void***)&dev_features, number_of_events * sizeof(float*)));
cudaCheck(cudaMalloc((void***)&dev_borders, model_float_feature_num * sizeof(float*)));
cudaCheck(cudaMalloc((void**)&dev_border_nums, model_float_feature_num * sizeof(int)));
cudaCheck(cudaMalloc((void***)&dev_leaf_values, tree_num * sizeof(double*)));
cudaCheck(cudaMalloc((void***)&dev_tree_splits, tree_num * sizeof(int*)));
cudaCheck(cudaMalloc((void**)&dev_catboost_output, number_of_events * sizeof(float)));
cudaCheck(cudaMalloc((void**)&dev_tree_sizes, tree_num * sizeof(int)));
cudaCheck(cudaMalloc((void**)&dev_bin_features, number_of_events * model_bin_feature_num * sizeof(char)));
cudaCheck(cudaMemcpyAsync(dev_borders, host_borders, model_float_feature_num * sizeof(float*), cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpyAsync(dev_features, host_features, number_of_events * sizeof(float*), cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpyAsync(dev_border_nums, host_border_nums, model_float_feature_num * sizeof(int), cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpyAsync(dev_tree_splits, host_tree_splits, tree_num * sizeof(int*), cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpyAsync(dev_leaf_values, host_leaf_values, tree_num * sizeof(double*), cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpyAsync(dev_tree_sizes, host_tree_sizes, tree_num * sizeof(int), cudaMemcpyHostToDevice));
cudaEventRecord(start, 0);
gen_bin_features<<<dim3(number_of_events), dim3(model_float_feature_num)>>>(
dev_borders,
dev_features,
dev_border_nums,
dev_bin_features,
number_of_events,
model_bin_feature_num
);
catboost_evaluator<<<dim3(number_of_events), dim3(32), 32*sizeof(float)>>>(
dev_tree_splits,
dev_leaf_values,
dev_tree_sizes,
dev_catboost_output,
dev_bin_features,
tree_num,
number_of_events,
model_bin_feature_num
);
cudaEventRecord(stop, 0);
float time = 0;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
BOOST_TEST_MESSAGE("GPU compute time: " << time);
cudaCheck(cudaMemcpyAsync(host_catboost_output, dev_catboost_output, number_of_events*sizeof(float), cudaMemcpyDeviceToHost));
} |
86e0ae8724a0dd6691fbc1e6800fadc1b725abdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cuda_help.h"
#include "fill.cuh"
#include "norm.h"
#include "proj.h"
#include "sum.h"
#include <type_traits>
using namespace Legion;
namespace legate {
namespace numpy {
// Accumulates sum(|in[x][y]|^order) along 'axis' of the 2-D rectangle
// 'bounds' into 'inout'.  Each thread fixes its coordinate on the
// non-collapsed dimension and walks the collapsed dimension with a
// grid-stride step, building a private partial sum that is folded into
// inout[p] once at the end (non-exclusive fold: several threads may target
// the same output point).  Launch geometry is chosen by raster_2d_reduction
// in gpu_variant below.
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_vec_norm_2d(const AccessorRW<T, 2> inout, const AccessorRO<T, 2> in, const Rect<2> bounds, const T identity,
                   const int axis, const int order) {
  // y varies along blockDim.x; x is rasterized over the y/z grid dimensions
  coord_t y = bounds.lo[1] + blockIdx.x * blockDim.x + threadIdx.x;
  coord_t x = bounds.lo[0] + (blockIdx.z * gridDim.y + blockIdx.y) * blockDim.y + threadIdx.y;
  const Point<2> p(x, y);
  if (!bounds.contains(p)) return;
  T value = identity;
  if (axis == 0) {
    // Collapsing rows: walk x with a grid-stride step, y fixed per thread
    while (x <= bounds.hi[0]) {
      T val = in[x][y];
      // |val| for signed types (compile-time no-op for unsigned)
      if (std::is_signed<T>::value && val < T(0)) val = -val;
      if (order == 1)
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      else if (order == 2) {
        ProdReduction<T>::template fold<true /*exclusive*/>(val, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      } else {
        // General order: |val|^order by repeated multiplication
        T prod = val;
        for (int i = 0; i < (order - 1); i++)
          ProdReduction<T>::template fold<true /*exclusive*/>(prod, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, prod);
      }
      x += gridDim.z * gridDim.y * blockDim.y;
    }
  } else {
    // Collapsing columns: walk y with a grid-stride step, x fixed per thread
    while (y <= bounds.hi[1]) {
      T val = in[x][y];
      if (std::is_signed<T>::value && val < T(0)) val = -val;
      if (order == 1)
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      else if (order == 2) {
        ProdReduction<T>::template fold<true /*exclusive*/>(val, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      } else {
        T prod = val;
        for (int i = 0; i < (order - 1); i++)
          ProdReduction<T>::template fold<true /*exclusive*/>(prod, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, prod);
      }
      y += gridDim.x * blockDim.x;
    }
    // NOTE(review): this guard survived hipify unchanged; __CUDA_ARCH__ is
    // not defined when compiling for AMD devices, so the warp-combine path
    // below is compiled out on HIP/AMD builds -- confirm that is intended.
#if __CUDA_ARCH__ >= 700
    __shared__ T trampoline[THREADS_PER_BLOCK];
    // Check for the case where all the threads in the same warp have
    // the same x value in which case they're all going to conflict
    // so instead we do a warp-level reduction so just one thread ends
    // up doing the full atomic
    const int same_mask = __match_any_sync(0xffffffff, threadIdx.y);
    int laneid;
    asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid));
    const int active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid));
    if (active_mask) {
      // Store our data into shared
      const int tid = threadIdx.y * blockDim.x + threadIdx.x;
      trampoline[tid] = value;
      // Make sure all the threads in the warp are done writing
      __syncwarp(active_mask);
      // Have the lowest thread in each mask pull in the values
      int lowest_index = -1;
      for (int i = 0; i < warpSize; i++)
        if (same_mask & (1 << i)) {
          if (lowest_index == -1) {
            if (i != laneid) {
              // We're not the lowest thread in the warp for
              // this value so we're done, set the value back
              // to identity to ensure that we don't try to
              // perform the reduction out to memory
              value = identity;
              break;
            } else // Make sure we don't do this test again
              lowest_index = i;
            // It was already our value, so just keep going
          } else {
            // Pull in the value from shared memory
            const int index = tid + i - laneid;
            SumReduction<T>::template fold<true /*exclusive*/>(value, trampoline[index]);
          }
        }
    }
#endif
  }
  // Skip the global fold entirely when this thread contributed nothing
  if (value != identity) SumReduction<T>::template fold<false /*exclusive*/>(inout[p], value);
}
// 3-D counterpart of legate_vec_norm_2d: accumulates sum(|in[x][y][z]|^order)
// along 'axis' of 'bounds' into 'inout'.  Each thread walks the collapsed
// dimension with a grid-stride step and folds its partial sum into inout[p]
// once at the end.  Launch geometry comes from raster_3d_reduction below.
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_vec_norm_3d(const AccessorRW<T, 3> inout, const AccessorRO<T, 3> in, const Rect<3> bounds, const T identity,
                   const int axis, const int order) {
  // z varies fastest (blockDim.x), then y, then x
  coord_t z = bounds.lo[2] + blockIdx.x * blockDim.x + threadIdx.x;
  coord_t y = bounds.lo[1] + blockIdx.y * blockDim.y + threadIdx.y;
  coord_t x = bounds.lo[0] + blockIdx.z * blockDim.z + threadIdx.z;
  const Point<3> p(x, y, z);
  if (!bounds.contains(p)) return;
  T value = identity;
  if (axis == 0) {
    // Collapse along x
    while (x <= bounds.hi[0]) {
      T val = in[x][y][z];
      // |val| for signed types (compile-time no-op for unsigned)
      if (std::is_signed<T>::value && val < T(0)) val = -val;
      if (order == 1)
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      else if (order == 2) {
        ProdReduction<T>::template fold<true /*exclusive*/>(val, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      } else {
        // General order: |val|^order by repeated multiplication
        T prod = val;
        for (int i = 0; i < (order - 1); i++)
          ProdReduction<T>::template fold<true /*exclusive*/>(prod, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, prod);
      }
      x += gridDim.z * blockDim.z;
    }
  } else if (axis == 1) {
    // Collapse along y
    while (y <= bounds.hi[1]) {
      T val = in[x][y][z];
      if (std::is_signed<T>::value && val < T(0)) val = -val;
      if (order == 1)
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      else if (order == 2) {
        ProdReduction<T>::template fold<true /*exclusive*/>(val, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      } else {
        T prod = val;
        for (int i = 0; i < (order - 1); i++)
          ProdReduction<T>::template fold<true /*exclusive*/>(prod, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, prod);
      }
      y += gridDim.y * blockDim.y;
    }
  } else {
    // Collapse along z
    while (z <= bounds.hi[2]) {
      T val = in[x][y][z];
      if (std::is_signed<T>::value && val < T(0)) val = -val;
      if (order == 1)
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      else if (order == 2) {
        ProdReduction<T>::template fold<true /*exclusive*/>(val, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      } else {
        T prod = val;
        for (int i = 0; i < (order - 1); i++)
          ProdReduction<T>::template fold<true /*exclusive*/>(prod, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, prod);
      }
      z += gridDim.x * blockDim.x;
    }
    // NOTE(review): hipify left this guard intact; __CUDA_ARCH__ is not
    // defined for AMD device compilation, so this warp-combine path is
    // compiled out on HIP/AMD builds -- confirm that is intended.
#if __CUDA_ARCH__ >= 700
    __shared__ T trampoline[THREADS_PER_BLOCK];
    // Check for the case where all the threads in the same warp have
    // the same x value in which case they're all going to conflict
    // so instead we do a warp-level reduction so just one thread ends
    // up doing the full atomic
    const int same_mask = __match_any_sync(0xffffffff, threadIdx.z * blockDim.y + threadIdx.y);
    int laneid;
    asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid));
    const int active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid));
    if (active_mask) {
      // Store our data into shared
      const int tid = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
      trampoline[tid] = value;
      // Make sure all the threads in the warp are done writing
      __syncwarp(active_mask);
      // Have the lowest thread in each mask pull in the values
      int lowest_index = -1;
      for (int i = 0; i < warpSize; i++)
        if (same_mask & (1 << i)) {
          if (lowest_index == -1) {
            if (i != laneid) {
              // We're not the lowest thread in the warp for
              // this value so we're done, set the value back
              // to identity to ensure that we don't try to
              // perform the reduction out to memory
              value = identity;
              break;
            } else // Make sure we don't do this test again
              lowest_index = i;
            // It was already our value, so just keep going
          } else {
            // Pull in the value from shared memory
            const int index = tid + i - laneid;
            SumReduction<T>::template fold<true /*exclusive*/>(value, trampoline[index]);
          }
        }
    }
#endif
  }
  // Skip the global fold entirely when this thread contributed nothing
  if (value != identity) SumReduction<T>::template fold<false /*exclusive*/>(inout[p], value);
}
// GPU variant of NormTask: first fills the collapsed output region with the
// sum identity, then launches legate_vec_norm_{2,3}d to accumulate
// sum(|in|^order) along 'axis'.  The deserializer stream supplies, in order:
// axis, collapse_dim, init_dim, the init shape, the work dim, and the work
// shape; the norm order arrives through futures[0].
template<typename T>
/*static*/ void NormTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
                                         Runtime* runtime) {
  LegateDeserializer derez(task->args, task->arglen);
  const int axis = derez.unpack_dimension();
  const int collapse_dim = derez.unpack_dimension();
  const int init_dim = derez.unpack_dimension();
  // Phase 1: initialize the output region to the reduction identity
  switch (init_dim) {
    case 1: {
      const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
      if (rect.empty()) return;
      const AccessorWO<T, 1> out =
        (collapse_dim >= 0) ? derez.unpack_accessor_WO<T, 1>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
                            : derez.unpack_accessor_WO<T, 1>(regions[0], rect);
      const size_t volume = rect.volume();
      const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
      hipLaunchKernelGGL(( legate_fill_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, SumReduction<T>::identity, rect.lo, volume);
      break;
    }
    case 2: {
      const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
      if (rect.empty()) return;
      const AccessorWO<T, 2> out =
        (collapse_dim >= 0) ? derez.unpack_accessor_WO<T, 2>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
                            : derez.unpack_accessor_WO<T, 2>(regions[0], rect);
      const size_t volume = rect.volume();
      const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
      const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
      hipLaunchKernelGGL(( legate_fill_2d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, SumReduction<T>::identity, rect.lo, Point<1>(pitch), volume);
      break;
    }
    default:
      assert(false); // shouldn't see any other cases
  }
  // Phase 2: launch the norm-accumulation kernel over the input region
  const int dim = derez.unpack_dimension();
  const int order = task->futures[0].get_result<int>();
  assert(order > 0);
  switch (dim) {
    // Should never get the case of 1 as this would just be a copy since
    // reducing our only dimension should have called SumReducTask
    case 2: {
      const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
      if (rect.empty()) return;
      const AccessorRW<T, 2> inout =
        (collapse_dim >= 0) ? derez.unpack_accessor_RW<T, 2, 1>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
                            : derez.unpack_accessor_RW<T, 2>(regions[0], rect);
      const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
      // Figure out how many blocks and threads we need
      dim3 threads(1, 1, 1);
      dim3 blocks(1, 1, 1);
      raster_2d_reduction(blocks, threads, rect, axis, (const void*)legate_vec_norm_2d<T>);
      hipLaunchKernelGGL(( legate_vec_norm_2d<T>), dim3(blocks), dim3(threads), 0, 0, inout, in, rect, SumReduction<T>::identity, axis, order);
      break;
    }
    case 3: {
      const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
      if (rect.empty()) return;
      const AccessorRW<T, 3> inout =
        (collapse_dim >= 0) ? derez.unpack_accessor_RW<T, 3, 2>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
                            : derez.unpack_accessor_RW<T, 3>(regions[0], rect);
      const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
      // Figure out how many blocks and threads we need
      dim3 threads(1, 1, 1);
      dim3 blocks(1, 1, 1);
      raster_3d_reduction(blocks, threads, rect, axis, (const void*)legate_vec_norm_3d<T>);
      hipLaunchKernelGGL(( legate_vec_norm_3d<T>), dim3(blocks), dim3(threads), 0, 0, inout, in, rect, SumReduction<T>::identity, axis, order);
      break;
    }
    default:
      assert(false);
  }
}
INSTANTIATE_TASK_VARIANT(NormTask, gpu_variant)
// Deferred-reduction kernel for a full 1-D norm: every thread folds
// |in[x]|^order for its strided subset of the volume into a private
// accumulator, then reduce_output combines the per-thread partials into
// 'result'.  order == 2 skips the absolute value because squaring already
// produces a non-negative term.
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_vec_norm_reduce_1d(DeferredReduction<SumReduction<T>> result, const AccessorRO<T, 1> in, const size_t iters,
                          const Point<1> origin, const size_t max, const int order, const T identity) {
  T acc = identity;
  for (unsigned it = 0; it < iters; it++) {
    const size_t offset = (it * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (offset >= max) continue; // guard the ragged tail of the launch
    const coord_t x = origin[0] + offset;
    T val = in[x];
    if (order == 2) {
      // val <- val * val; already non-negative, no abs needed
      ProdReduction<T>::template fold<true>(val, val);
      SumReduction<T>::template fold<true>(acc, val);
    } else {
      // |val| for signed types (compile-time no-op for unsigned)
      if (std::is_signed<T>::value && val < T(0)) val = -val;
      // |val|^order via (order - 1) extra multiplies; order == 1 multiplies zero times
      T prod = val;
      for (int i = 1; i < order; i++)
        ProdReduction<T>::template fold<true>(prod, val);
      SumReduction<T>::template fold<true>(acc, prod);
    }
  }
  reduce_output(result, acc);
}
// 2-D flavour of the deferred norm reduction: identical in behaviour to the
// 1-D version above except that the flat offset is decoded into (x, y) with
// the row pitch before loading.
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_vec_norm_reduce_2d(DeferredReduction<SumReduction<T>> result, const AccessorRO<T, 2> in, const size_t iters,
                          const Point<2> origin, const Point<1> pitch, const size_t max, const int order, const T identity) {
  T acc = identity;
  for (unsigned it = 0; it < iters; it++) {
    const size_t offset = (it * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (offset >= max) continue; // guard the ragged tail of the launch
    // Decode the flat offset into 2-D coordinates using the row pitch
    const coord_t x = origin[0] + offset / pitch[0];
    const coord_t y = origin[1] + offset % pitch[0];
    T val = in[x][y];
    if (order == 2) {
      // Squaring is sign-free, so no absolute value is required
      ProdReduction<T>::template fold<true>(val, val);
      SumReduction<T>::template fold<true>(acc, val);
    } else {
      if (std::is_signed<T>::value && val < T(0)) val = -val;
      T prod = val; // |val|^order; order == 1 leaves prod == |val|
      for (int i = 1; i < order; i++)
        ProdReduction<T>::template fold<true>(prod, val);
      SumReduction<T>::template fold<true>(acc, prod);
    }
  }
  reduce_output(result, acc);
}
// 3-D flavour of the deferred norm reduction: the flat offset is decoded into
// (x, y, z) with the plane pitch (pitch[0]) and row pitch (pitch[1]);
// otherwise identical in behaviour to the 1-D version above.
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_vec_norm_reduce_3d(DeferredReduction<SumReduction<T>> result, const AccessorRO<T, 3> in, const size_t iters,
                          const Point<3> origin, const Point<2> pitch, const size_t max, const int order, const T identity) {
  T acc = identity;
  for (unsigned it = 0; it < iters; it++) {
    const size_t offset = (it * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (offset >= max) continue; // guard the ragged tail of the launch
    // Decode the flat offset into 3-D coordinates
    const coord_t x = origin[0] + offset / pitch[0];
    const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
    const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
    T val = in[x][y][z];
    if (order == 2) {
      // Squaring is sign-free, so no absolute value is required
      ProdReduction<T>::template fold<true>(val, val);
      SumReduction<T>::template fold<true>(acc, val);
    } else {
      if (std::is_signed<T>::value && val < T(0)) val = -val;
      T prod = val; // |val|^order; order == 1 leaves prod == |val|
      for (int i = 1; i < order; i++)
        ProdReduction<T>::template fold<true>(prod, val);
      SumReduction<T>::template fold<true>(acc, prod);
    }
  }
  reduce_output(result, acc);
}
// GPU variant of NormReducTask: reduces sum(|in|^order) over the entire
// rectangle into a DeferredReduction value (the path taken when the whole
// array collapses to a scalar).  Large volumes are handled by capping the
// grid at MAX_REDUCTION_CTAS blocks and having each thread loop 'iters'
// times inside the kernel.
template<typename T>
/*static*/ DeferredReduction<SumReduction<T>>
NormReducTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) {
  LegateDeserializer derez(task->args, task->arglen);
  const int dim = derez.unpack_dimension();
  // The norm order is provided dynamically through the first future
  const int order = task->futures[0].get_result<int>();
  assert(order > 0);
  DeferredReduction<SumReduction<T>> result;
  switch (dim) {
    case 1: {
      const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
      if (rect.empty()) break;
      const AccessorRO<T, 1> in = derez.unpack_accessor_RO<T, 1>(regions[0], rect);
      const size_t volume = rect.volume();
      const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
      if (blocks >= MAX_REDUCTION_CTAS) {
        // Cap the grid and let each thread iterate over multiple chunks
        const size_t iters = (blocks + MAX_REDUCTION_CTAS - 1) / MAX_REDUCTION_CTAS;
        hipLaunchKernelGGL(( legate_vec_norm_reduce_1d<T>)
          , dim3(MAX_REDUCTION_CTAS), dim3(THREADS_PER_BLOCK), 0, 0, result, in, iters, rect.lo, volume, order, SumReduction<T>::identity);
      } else {
        hipLaunchKernelGGL(( legate_vec_norm_reduce_1d<T>)
          , dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, result, in, 1 /*iters*/, rect.lo, volume, order, SumReduction<T>::identity);
      }
      break;
    }
    case 2: {
      const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
      if (rect.empty()) break;
      const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[0], rect);
      const size_t volume = rect.volume();
      const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
      // Row pitch used by the kernel to decode flat offsets into (x, y)
      const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
      if (blocks >= MAX_REDUCTION_CTAS) {
        const size_t iters = (blocks + MAX_REDUCTION_CTAS - 1) / MAX_REDUCTION_CTAS;
        hipLaunchKernelGGL(( legate_vec_norm_reduce_2d<T>), dim3(MAX_REDUCTION_CTAS), dim3(THREADS_PER_BLOCK), 0, 0, result, in, iters, rect.lo, Point<1>(pitch), volume,
                           order, SumReduction<T>::identity);
      } else {
        hipLaunchKernelGGL(( legate_vec_norm_reduce_2d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, result, in, 1 /*iter*/, rect.lo, Point<1>(pitch), volume, order,
                           SumReduction<T>::identity);
      }
      break;
    }
    case 3: {
      const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
      if (rect.empty()) break;
      const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[0], rect);
      const size_t volume = rect.volume();
      const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
      // Plane pitch (y*z extent) and row pitch (z extent) for offset decoding
      const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
      const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
      const coord_t pitch[2] = {diffy * diffz, diffz};
      if (blocks >= MAX_REDUCTION_CTAS) {
        const size_t iters = (blocks + MAX_REDUCTION_CTAS - 1) / MAX_REDUCTION_CTAS;
        hipLaunchKernelGGL(( legate_vec_norm_reduce_3d<T>), dim3(MAX_REDUCTION_CTAS), dim3(THREADS_PER_BLOCK), 0, 0, result, in, iters, rect.lo, Point<2>(pitch), volume,
                           order, SumReduction<T>::identity);
      } else {
        hipLaunchKernelGGL(( legate_vec_norm_reduce_3d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, result, in, 1 /*iter*/, rect.lo, Point<2>(pitch), volume, order,
                           SumReduction<T>::identity);
      }
      break;
    }
    default:
      assert(false); // shouldn't see any other dimensions
  }
  return result;
}
INSTANTIATE_DEFERRED_REDUCTION_TASK_VARIANT(NormReducTask, SumReduction, gpu_variant)
} // namespace numpy
} // namespace legate
| 86e0ae8724a0dd6691fbc1e6800fadc1b725abdf.cu | /* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cuda_help.h"
#include "fill.cuh"
#include "norm.h"
#include "proj.h"
#include "sum.h"
#include <type_traits>
using namespace Legion;
namespace legate {
namespace numpy {
// Accumulates sum(|in[x][y]|^order) along 'axis' of the 2-D rectangle
// 'bounds' into 'inout'.  Each thread fixes its coordinate on the
// non-collapsed dimension and walks the collapsed dimension with a
// grid-stride step, building a private partial sum that is folded into
// inout[p] once at the end (non-exclusive fold: several threads may target
// the same output point).  Launch geometry is chosen by raster_2d_reduction
// in gpu_variant below.
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_vec_norm_2d(const AccessorRW<T, 2> inout, const AccessorRO<T, 2> in, const Rect<2> bounds, const T identity,
                   const int axis, const int order) {
  // y varies along blockDim.x; x is rasterized over the y/z grid dimensions
  coord_t y = bounds.lo[1] + blockIdx.x * blockDim.x + threadIdx.x;
  coord_t x = bounds.lo[0] + (blockIdx.z * gridDim.y + blockIdx.y) * blockDim.y + threadIdx.y;
  const Point<2> p(x, y);
  if (!bounds.contains(p)) return;
  T value = identity;
  if (axis == 0) {
    // Collapsing rows: walk x with a grid-stride step, y fixed per thread
    while (x <= bounds.hi[0]) {
      T val = in[x][y];
      // |val| for signed types (compile-time no-op for unsigned)
      if (std::is_signed<T>::value && val < T(0)) val = -val;
      if (order == 1)
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      else if (order == 2) {
        ProdReduction<T>::template fold<true /*exclusive*/>(val, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      } else {
        // General order: |val|^order by repeated multiplication
        T prod = val;
        for (int i = 0; i < (order - 1); i++)
          ProdReduction<T>::template fold<true /*exclusive*/>(prod, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, prod);
      }
      x += gridDim.z * gridDim.y * blockDim.y;
    }
  } else {
    // Collapsing columns: walk y with a grid-stride step, x fixed per thread
    while (y <= bounds.hi[1]) {
      T val = in[x][y];
      if (std::is_signed<T>::value && val < T(0)) val = -val;
      if (order == 1)
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      else if (order == 2) {
        ProdReduction<T>::template fold<true /*exclusive*/>(val, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      } else {
        T prod = val;
        for (int i = 0; i < (order - 1); i++)
          ProdReduction<T>::template fold<true /*exclusive*/>(prod, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, prod);
      }
      y += gridDim.x * blockDim.x;
    }
    // Warp-level pre-combine requires SM70+ (__match_any_sync)
#if __CUDA_ARCH__ >= 700
    __shared__ T trampoline[THREADS_PER_BLOCK];
    // Check for the case where all the threads in the same warp have
    // the same x value in which case they're all going to conflict
    // so instead we do a warp-level reduction so just one thread ends
    // up doing the full atomic
    const int same_mask = __match_any_sync(0xffffffff, threadIdx.y);
    int laneid;
    asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid));
    const int active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid));
    if (active_mask) {
      // Store our data into shared
      const int tid = threadIdx.y * blockDim.x + threadIdx.x;
      trampoline[tid] = value;
      // Make sure all the threads in the warp are done writing
      __syncwarp(active_mask);
      // Have the lowest thread in each mask pull in the values
      int lowest_index = -1;
      for (int i = 0; i < warpSize; i++)
        if (same_mask & (1 << i)) {
          if (lowest_index == -1) {
            if (i != laneid) {
              // We're not the lowest thread in the warp for
              // this value so we're done, set the value back
              // to identity to ensure that we don't try to
              // perform the reduction out to memory
              value = identity;
              break;
            } else // Make sure we don't do this test again
              lowest_index = i;
            // It was already our value, so just keep going
          } else {
            // Pull in the value from shared memory
            const int index = tid + i - laneid;
            SumReduction<T>::template fold<true /*exclusive*/>(value, trampoline[index]);
          }
        }
    }
#endif
  }
  // Skip the global fold entirely when this thread contributed nothing
  if (value != identity) SumReduction<T>::template fold<false /*exclusive*/>(inout[p], value);
}
// 3-D counterpart of legate_vec_norm_2d: accumulates sum(|in[x][y][z]|^order)
// along 'axis' of 'bounds' into 'inout'.  Each thread walks the collapsed
// dimension with a grid-stride step and folds its partial sum into inout[p]
// once at the end.  Launch geometry comes from raster_3d_reduction below.
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_vec_norm_3d(const AccessorRW<T, 3> inout, const AccessorRO<T, 3> in, const Rect<3> bounds, const T identity,
                   const int axis, const int order) {
  // z varies fastest (blockDim.x), then y, then x
  coord_t z = bounds.lo[2] + blockIdx.x * blockDim.x + threadIdx.x;
  coord_t y = bounds.lo[1] + blockIdx.y * blockDim.y + threadIdx.y;
  coord_t x = bounds.lo[0] + blockIdx.z * blockDim.z + threadIdx.z;
  const Point<3> p(x, y, z);
  if (!bounds.contains(p)) return;
  T value = identity;
  if (axis == 0) {
    // Collapse along x
    while (x <= bounds.hi[0]) {
      T val = in[x][y][z];
      // |val| for signed types (compile-time no-op for unsigned)
      if (std::is_signed<T>::value && val < T(0)) val = -val;
      if (order == 1)
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      else if (order == 2) {
        ProdReduction<T>::template fold<true /*exclusive*/>(val, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      } else {
        // General order: |val|^order by repeated multiplication
        T prod = val;
        for (int i = 0; i < (order - 1); i++)
          ProdReduction<T>::template fold<true /*exclusive*/>(prod, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, prod);
      }
      x += gridDim.z * blockDim.z;
    }
  } else if (axis == 1) {
    // Collapse along y
    while (y <= bounds.hi[1]) {
      T val = in[x][y][z];
      if (std::is_signed<T>::value && val < T(0)) val = -val;
      if (order == 1)
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      else if (order == 2) {
        ProdReduction<T>::template fold<true /*exclusive*/>(val, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      } else {
        T prod = val;
        for (int i = 0; i < (order - 1); i++)
          ProdReduction<T>::template fold<true /*exclusive*/>(prod, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, prod);
      }
      y += gridDim.y * blockDim.y;
    }
  } else {
    // Collapse along z
    while (z <= bounds.hi[2]) {
      T val = in[x][y][z];
      if (std::is_signed<T>::value && val < T(0)) val = -val;
      if (order == 1)
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      else if (order == 2) {
        ProdReduction<T>::template fold<true /*exclusive*/>(val, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, val);
      } else {
        T prod = val;
        for (int i = 0; i < (order - 1); i++)
          ProdReduction<T>::template fold<true /*exclusive*/>(prod, val);
        SumReduction<T>::template fold<true /*exclusive*/>(value, prod);
      }
      z += gridDim.x * blockDim.x;
    }
    // Warp-level pre-combine requires SM70+ (__match_any_sync)
#if __CUDA_ARCH__ >= 700
    __shared__ T trampoline[THREADS_PER_BLOCK];
    // Check for the case where all the threads in the same warp have
    // the same x value in which case they're all going to conflict
    // so instead we do a warp-level reduction so just one thread ends
    // up doing the full atomic
    const int same_mask = __match_any_sync(0xffffffff, threadIdx.z * blockDim.y + threadIdx.y);
    int laneid;
    asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid));
    const int active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid));
    if (active_mask) {
      // Store our data into shared
      const int tid = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
      trampoline[tid] = value;
      // Make sure all the threads in the warp are done writing
      __syncwarp(active_mask);
      // Have the lowest thread in each mask pull in the values
      int lowest_index = -1;
      for (int i = 0; i < warpSize; i++)
        if (same_mask & (1 << i)) {
          if (lowest_index == -1) {
            if (i != laneid) {
              // We're not the lowest thread in the warp for
              // this value so we're done, set the value back
              // to identity to ensure that we don't try to
              // perform the reduction out to memory
              value = identity;
              break;
            } else // Make sure we don't do this test again
              lowest_index = i;
            // It was already our value, so just keep going
          } else {
            // Pull in the value from shared memory
            const int index = tid + i - laneid;
            SumReduction<T>::template fold<true /*exclusive*/>(value, trampoline[index]);
          }
        }
    }
#endif
  }
  // Skip the global fold entirely when this thread contributed nothing
  if (value != identity) SumReduction<T>::template fold<false /*exclusive*/>(inout[p], value);
}
// GPU variant of NormTask: first fills the collapsed output region with the
// sum identity, then launches legate_vec_norm_{2,3}d to accumulate
// sum(|in|^order) along 'axis'.  The deserializer stream supplies, in order:
// axis, collapse_dim, init_dim, the init shape, the work dim, and the work
// shape; the norm order arrives through futures[0].
template<typename T>
/*static*/ void NormTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
                                         Runtime* runtime) {
  LegateDeserializer derez(task->args, task->arglen);
  const int axis = derez.unpack_dimension();
  const int collapse_dim = derez.unpack_dimension();
  const int init_dim = derez.unpack_dimension();
  // Phase 1: initialize the output region to the reduction identity
  switch (init_dim) {
    case 1: {
      const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
      if (rect.empty()) return;
      const AccessorWO<T, 1> out =
        (collapse_dim >= 0) ? derez.unpack_accessor_WO<T, 1>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
                            : derez.unpack_accessor_WO<T, 1>(regions[0], rect);
      const size_t volume = rect.volume();
      const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
      legate_fill_1d<T><<<blocks, THREADS_PER_BLOCK>>>(out, SumReduction<T>::identity, rect.lo, volume);
      break;
    }
    case 2: {
      const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
      if (rect.empty()) return;
      const AccessorWO<T, 2> out =
        (collapse_dim >= 0) ? derez.unpack_accessor_WO<T, 2>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
                            : derez.unpack_accessor_WO<T, 2>(regions[0], rect);
      const size_t volume = rect.volume();
      const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
      const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
      legate_fill_2d<T><<<blocks, THREADS_PER_BLOCK>>>(out, SumReduction<T>::identity, rect.lo, Point<1>(pitch), volume);
      break;
    }
    default:
      assert(false); // shouldn't see any other cases
  }
  // Phase 2: launch the norm-accumulation kernel over the input region
  const int dim = derez.unpack_dimension();
  const int order = task->futures[0].get_result<int>();
  assert(order > 0);
  switch (dim) {
    // Should never get the case of 1 as this would just be a copy since
    // reducing our only dimension should have called SumReducTask
    case 2: {
      const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
      if (rect.empty()) return;
      const AccessorRW<T, 2> inout =
        (collapse_dim >= 0) ? derez.unpack_accessor_RW<T, 2, 1>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
                            : derez.unpack_accessor_RW<T, 2>(regions[0], rect);
      const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
      // Figure out how many blocks and threads we need
      dim3 threads(1, 1, 1);
      dim3 blocks(1, 1, 1);
      raster_2d_reduction(blocks, threads, rect, axis, (const void*)legate_vec_norm_2d<T>);
      legate_vec_norm_2d<T><<<blocks, threads>>>(inout, in, rect, SumReduction<T>::identity, axis, order);
      break;
    }
    case 3: {
      const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
      if (rect.empty()) return;
      const AccessorRW<T, 3> inout =
        (collapse_dim >= 0) ? derez.unpack_accessor_RW<T, 3, 2>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
                            : derez.unpack_accessor_RW<T, 3>(regions[0], rect);
      const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
      // Figure out how many blocks and threads we need
      dim3 threads(1, 1, 1);
      dim3 blocks(1, 1, 1);
      raster_3d_reduction(blocks, threads, rect, axis, (const void*)legate_vec_norm_3d<T>);
      legate_vec_norm_3d<T><<<blocks, threads>>>(inout, in, rect, SumReduction<T>::identity, axis, order);
      break;
    }
    default:
      assert(false);
  }
}
INSTANTIATE_TASK_VARIANT(NormTask, gpu_variant)
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
  legate_vec_norm_reduce_1d(DeferredReduction<SumReduction<T>> result, const AccessorRO<T, 1> in, const size_t iters,
                            const Point<1> origin, const size_t max, const int order, const T identity) {
  // Each thread folds up to 'iters' grid-strided elements into a local
  // partial sum of |x|^order; reduce_output then combines the per-thread
  // partials into 'result'.
  T partial = identity;
  for (unsigned it = 0; it < iters; it++) {
    const size_t offset = (it * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (offset >= max) continue;
    const coord_t x = origin[0] + offset;
    T elem = in[x];
    if (order == 2) {
      // Squaring already yields a non-negative value, so no abs is needed.
      ProdReduction<T>::template fold<true>(elem, elem);
    } else {
      if (std::is_signed<T>::value && elem < T(0)) elem = -elem;
      if (order != 1) {
        // General p-norm term: |x|^order via repeated product folds.
        T pw = elem;
        for (int i = 0; i < (order - 1); i++)
          ProdReduction<T>::template fold<true>(pw, elem);
        elem = pw;
      }
    }
    SumReduction<T>::template fold<true>(partial, elem);
  }
  reduce_output(result, partial);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
  legate_vec_norm_reduce_2d(DeferredReduction<SumReduction<T>> result, const AccessorRO<T, 2> in, const size_t iters,
                            const Point<2> origin, const Point<1> pitch, const size_t max, const int order, const T identity) {
  // Grid-stride accumulation of |in|^order over a flattened 2-D rectangle;
  // pitch[0] is the row length used to decode the flat offset back into
  // (x, y).  Per-thread partials are combined by reduce_output.
  T partial = identity;
  for (unsigned it = 0; it < iters; it++) {
    const size_t offset = (it * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (offset >= max) continue;
    const coord_t x = origin[0] + offset / pitch[0];
    const coord_t y = origin[1] + offset % pitch[0];
    T elem = in[x][y];
    if (order == 2) {
      // Squaring already yields a non-negative value, so no abs is needed.
      ProdReduction<T>::template fold<true>(elem, elem);
    } else {
      if (std::is_signed<T>::value && elem < T(0)) elem = -elem;
      if (order != 1) {
        // General p-norm term: |x|^order via repeated product folds.
        T pw = elem;
        for (int i = 0; i < (order - 1); i++)
          ProdReduction<T>::template fold<true>(pw, elem);
        elem = pw;
      }
    }
    SumReduction<T>::template fold<true>(partial, elem);
  }
  reduce_output(result, partial);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
  legate_vec_norm_reduce_3d(DeferredReduction<SumReduction<T>> result, const AccessorRO<T, 3> in, const size_t iters,
                            const Point<3> origin, const Point<2> pitch, const size_t max, const int order, const T identity) {
  // Grid-stride accumulation of |in|^order over a flattened 3-D rectangle;
  // pitch[0] is the plane size and pitch[1] the row length used to decode
  // the flat offset back into (x, y, z).
  T partial = identity;
  for (unsigned it = 0; it < iters; it++) {
    const size_t offset = (it * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (offset >= max) continue;
    const coord_t x = origin[0] + offset / pitch[0];
    const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
    const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
    T elem = in[x][y][z];
    if (order == 2) {
      // Squaring already yields a non-negative value, so no abs is needed.
      ProdReduction<T>::template fold<true>(elem, elem);
    } else {
      if (std::is_signed<T>::value && elem < T(0)) elem = -elem;
      if (order != 1) {
        // General p-norm term: |x|^order via repeated product folds.
        T pw = elem;
        for (int i = 0; i < (order - 1); i++)
          ProdReduction<T>::template fold<true>(pw, elem);
        elem = pw;
      }
    }
    SumReduction<T>::template fold<true>(partial, elem);
  }
  reduce_output(result, partial);
}
template<typename T>
/*static*/ DeferredReduction<SumReduction<T>>
NormReducTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) {
  // Reduces an entire (sub-)array to one scalar: the sum of |x|^order.
  // The grid is capped at MAX_REDUCTION_CTAS; when more blocks would be
  // needed, each thread instead loops 'iters' times inside the kernel.
  // NOTE(review): the 1/order root is not applied here — confirm it is taken
  // after the deferred reduction completes.
  LegateDeserializer derez(task->args, task->arglen);
  const int dim = derez.unpack_dimension();
  // The norm order (p) arrives as a future computed upstream.
  const int order = task->futures[0].get_result<int>();
  assert(order > 0);
  DeferredReduction<SumReduction<T>> result;
  switch (dim) {
    case 1: {
      const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
      if (rect.empty()) break;
      const AccessorRO<T, 1> in = derez.unpack_accessor_RO<T, 1>(regions[0], rect);
      const size_t volume = rect.volume();
      const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
      if (blocks >= MAX_REDUCTION_CTAS) {
        // Too many blocks: fold the surplus into per-thread iterations.
        const size_t iters = (blocks + MAX_REDUCTION_CTAS - 1) / MAX_REDUCTION_CTAS;
        legate_vec_norm_reduce_1d<T>
          <<<MAX_REDUCTION_CTAS, THREADS_PER_BLOCK>>>(result, in, iters, rect.lo, volume, order, SumReduction<T>::identity);
      } else {
        legate_vec_norm_reduce_1d<T>
          <<<blocks, THREADS_PER_BLOCK>>>(result, in, 1 /*iters*/, rect.lo, volume, order, SumReduction<T>::identity);
      }
      break;
    }
    case 2: {
      const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
      if (rect.empty()) break;
      const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[0], rect);
      const size_t volume = rect.volume();
      const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
      // Row pitch for decoding flat offsets inside the kernel.
      const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
      if (blocks >= MAX_REDUCTION_CTAS) {
        const size_t iters = (blocks + MAX_REDUCTION_CTAS - 1) / MAX_REDUCTION_CTAS;
        legate_vec_norm_reduce_2d<T><<<MAX_REDUCTION_CTAS, THREADS_PER_BLOCK>>>(result, in, iters, rect.lo, Point<1>(pitch), volume,
                                                                                order, SumReduction<T>::identity);
      } else {
        legate_vec_norm_reduce_2d<T><<<blocks, THREADS_PER_BLOCK>>>(result, in, 1 /*iter*/, rect.lo, Point<1>(pitch), volume, order,
                                                                    SumReduction<T>::identity);
      }
      break;
    }
    case 3: {
      const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
      if (rect.empty()) break;
      const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[0], rect);
      const size_t volume = rect.volume();
      const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
      // pitch[0] = plane size, pitch[1] = row length.
      const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
      const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
      const coord_t pitch[2] = {diffy * diffz, diffz};
      if (blocks >= MAX_REDUCTION_CTAS) {
        const size_t iters = (blocks + MAX_REDUCTION_CTAS - 1) / MAX_REDUCTION_CTAS;
        legate_vec_norm_reduce_3d<T><<<MAX_REDUCTION_CTAS, THREADS_PER_BLOCK>>>(result, in, iters, rect.lo, Point<2>(pitch), volume,
                                                                                order, SumReduction<T>::identity);
      } else {
        legate_vec_norm_reduce_3d<T><<<blocks, THREADS_PER_BLOCK>>>(result, in, 1 /*iter*/, rect.lo, Point<2>(pitch), volume, order,
                                                                    SumReduction<T>::identity);
      }
      break;
    }
    default:
      assert(false); // shouldn't see any other dimensions
  }
  return result;
}
INSTANTIATE_DEFERRED_REDUCTION_TASK_VARIANT(NormReducTask, SumReduction, gpu_variant)
} // namespace numpy
} // namespace legate
|
63117d152e4b2d695abf6765d75e3e210e9d2e12.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
namespace{
template <typename scalar_t>
__global__ void plus_scan_cuda_forward_kernel(
}
} // namespace
at::Tensor plus_scan_cuda(
at::Tensor input) {
}
| 63117d152e4b2d695abf6765d75e3e210e9d2e12.cu | #include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
namespace{
template <typename scalar_t>
__global__ void plus_scan_cuda_forward_kernel(
}
} // namespace
at::Tensor plus_scan_cuda(
at::Tensor input) {
}
|
600f12586a0d892f280e6b4d152fbdf7ac7968e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <windows.h> // use the QPC
// Fills an nx-by-ny row-major matrix with the sequence 0, 1, 2, ...
// Fix: the row stride must be ny (elements per row); the original used
// i*nx + j, which is only correct for square matrices.
void GenerateMatrix(float *matrix, int nx, int ny)
{
	int i, j;
	float cnt = 0;
	for (i = 0; i < nx; i++)
	{
		for (j = 0; j < ny; j++)
		{
			matrix[i*ny + j] = cnt++;
		}
	}
	printf("[*] GenerateMatrix has done!\n");
}
// Prints an nx-by-ny row-major matrix, one row per line.
// Fix: row stride changed from nx to ny so non-square matrices print
// correctly (identical behavior for the square matrices main() uses).
void PrintMatrix(float *matrix, int nx, int ny)
{
	int i, j;
	for (i = 0; i < nx; i++)
	{
		for (j = 0; j < ny; j++)
		{
			printf("%.2f\t", matrix[i*ny + j]);
		}
		printf("\n");
	}
	printf("[*] PrintMatrix has done!\n");
}
/************************* matrix summary begin *************************/
// Host reference: element-wise C = A + B over an nx-by-ny row-major matrix.
// Fix: use ny as the row stride (the original used nx, correct only for
// square matrices).
inline void AddMatrixOnCPU(float *A, float *B, float *C, int nx, int ny)
{
	int i, j;
	for (i = 0; i < nx; i++)
	{
		for (j = 0; j < ny; j++)
		{
			int idx = i*ny + j;
			C[idx] = A[idx] + B[idx];
		}
	}
	printf("[*] AddMatrix on CPU has done!\n");
}
// Element-wise matrix addition on the device: C = A + B, one thread per
// element.  Fixes vs. the original:
//  * the guard used "i <= nx && j <= ny", which reads/writes one element
//    past each dimension whenever a thread lands exactly on the boundary
//  * "inline" on a __global__ function is ignored by the compiler and has
//    been dropped
// NOTE(review): the flat index uses nx as the row stride, correct only for
// square matrices (which is how main() calls it).
__global__ void AddMatrixOnGPU(float *A, float *B, float *C, int nx, int ny)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	if (i < nx && j < ny)
	{
		int idx = i*nx + j;
		C[idx] = A[idx] + B[idx];
	}
}
/************************* matrix summary done **************************/
//
//
//
/************************ matrix multiply begin *************************/
// Naive O(nx^3) host matrix product C = A * B.
// NOTE(review): the index arithmetic uses nx as the row stride everywhere,
// so this is only correct for square matrices (nx == ny) — which is how
// main() calls it.
inline void MulMatrixOnCPU(float *A, float *B, float *C, int nx, int ny)
{
	for (int row = 0; row < nx; row++)
	{
		for (int col = 0; col < ny; col++)
		{
			float acc = 0.0;
			for (int t = 0; t < nx; t++)
			{
				acc += A[row*nx + t] * B[t*nx + col];
			}
			C[row*nx + col] = acc;
		}
	}
}
__global__ inline void MulMatrixOnGPU(float *A, float *B, float *C, int nx, int ny)
{
	// Naive device matrix product: one thread computes one element C[i][j].
	// NOTE(review): row stride is nx everywhere, so this is only correct for
	// square matrices (nx == ny), matching how main() calls it.  The "inline"
	// qualifier on a __global__ function is ignored by the compiler.
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	int k;
	if (i < nx && j < ny) // we should to identify the "i" and "j" scope.
	{
		float sum = 0.0;
		for (k = 0; k < nx; k++)
		{
			sum += A[i*nx + k] * B[k*nx + j];
		}
		C[i*nx + j] = sum;
	}
}
/************************ matrix multiply end ***************************/
// compare the result
// Returns 1 iff every element of the two nx-by-ny buffers matches
// bit-exactly, else 0.  Exact float equality is deliberate here: the demo
// only adds/multiplies small integer-valued floats.
// NOTE(review): the row stride nx assumes nx == ny, as used by main().
int Compare(float *cpu_ref, float *gpu_ref, int nx, int ny)
{
	for (int r = 0; r < nx; r++)
	{
		for (int c = 0; c < ny; c++)
		{
			if (cpu_ref[r*nx + c] != gpu_ref[r*nx + c])
				return 0;
		}
	}
	return 1;
}
// Demo driver: generates two square matrices, validates GPU add/mul against
// host references, and times the host multiply with Windows QPC.
// Fixes vs. the original:
//  * freq_cpu is now obtained via QueryPerformanceFrequency (the original
//    called QueryPerformanceCounter, so the printed milliseconds were
//    meaningless)
//  * the never-used begin_gpu/end_gpu/freq_gpu locals were removed
// NOTE(review): HIP API return codes are unchecked throughout; wrap them in
// an error-checking macro for production code.
int main(int argc, char *argv[])
{
	LARGE_INTEGER begin_cpu;
	LARGE_INTEGER end_cpu;
	LARGE_INTEGER freq_cpu;
	// the size of the elements in the matrix can not be much larger....
	// because of my worse GPU: nVIDIA GeForce GT710
	unsigned int N = 1 << 12;
	int nx = (int)sqrt((float)N);
	int ny = (int)sqrt((float)N);
	float *A = NULL;
	float *B = NULL;
	float *C = NULL;
	float *gpu_ref = NULL;
	float *d_A = NULL;
	float *d_B = NULL;
	float *d_C = NULL;
	// allocate the memory on CPU
	A = (float *)malloc(sizeof(float)* N);
	B = (float *)malloc(sizeof(float)* N);
	C = (float *)malloc(sizeof(float)* N);
	gpu_ref = (float *)malloc(sizeof(float)*N);
	// set the memory to zero
	memset(A, 0, sizeof(float)*N);
	memset(B, 0, sizeof(float)*N);
	memset(C, 0, sizeof(float)*N);
	memset(gpu_ref, 0, sizeof(float)*N);
	// allocate the memory on GPU
	hipMalloc((float **)&d_A, sizeof(float)*N);
	hipMalloc((float **)&d_B, sizeof(float)*N);
	hipMalloc((float **)&d_C, sizeof(float)*N);
	// reset the memory to zero
	hipMemset(d_A, 0, sizeof(float)*N);
	hipMemset(d_B, 0, sizeof(float)*N);
	hipMemset(d_C, 0, sizeof(float)*N);
	// generate the matrix on CPU
	GenerateMatrix(A, nx, ny);
	GenerateMatrix(B, nx, ny);
	// transfer the data from CPU to GPU
	hipMemcpy(d_A, A, sizeof(float)*N, hipMemcpyHostToDevice);
	hipMemcpy(d_B, B, sizeof(float)*N, hipMemcpyHostToDevice);
	// set the grid number and the block thread number
	dim3 block(32, 32);
	dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
	// Add the matrix on CPU
	AddMatrixOnCPU(A, B, C, nx, ny);
	// Add the matrix on GPU
	AddMatrixOnGPU << <grid, block >> >(d_A, d_B, d_C, nx, ny);
	hipDeviceSynchronize(); // let the CPU wait the GPU to do its calculation.
	// transform the data from the GPU to CPU
	hipMemcpy(gpu_ref, d_C, sizeof(float)*N, hipMemcpyDeviceToHost);
	if (Compare(C, gpu_ref, nx, ny))
	{
		printf("[*] Compare : Matrix_ADD => the result are the same!\n");
	}
	else
	{
		printf("[*] Compare : Matrix_ADD => the result are NOT the same...\n");
	}
	// begin to calculate the time consumption
	QueryPerformanceFrequency(&freq_cpu); // ticks per second (was QueryPerformanceCounter)
	QueryPerformanceCounter(&begin_cpu);
	// test the matrix multiply
	MulMatrixOnCPU(A, B, C, nx, ny);
	QueryPerformanceCounter(&end_cpu);
	printf("CPU time consumption:%f ms\n", 1000*(float)(end_cpu.QuadPart - begin_cpu.QuadPart) / (float)freq_cpu.QuadPart);
	// test the matrix multiply on GPU
	MulMatrixOnGPU << <grid, block >> >(d_A, d_B, d_C, nx, ny);
	hipDeviceSynchronize();
	hipMemcpy(gpu_ref, d_C, sizeof(float)*N, hipMemcpyDeviceToHost);
	// make the comparison
	if (Compare(C, gpu_ref, nx, ny))
	{
		printf("[*] Compare : Matrix_MUL => the result are the same!\n");
	}
	else
	{
		printf("[*] Compare : Matrix_MUL => the result are NOT the same...\n");
	}
	// Debug Print
	// PrintMatrix(gpu_ref, nx, ny);
	// PrintMatrix(C, nx, ny);
	free(A);
	free(B);
	free(C);
	free(gpu_ref);
	hipFree(d_A);
	hipFree(d_B);
	hipFree(d_C);
	return 0;
}
// [*] GenerateMatrix has done!
// [*] GenerateMatrix has done!
// [*] AddMatrix on CPU has done!
// [*] Compare : Matrix_ADD = > the result are the same!
// [*] Compare : Matrix_MUL = > the result are the same!
// Press any key to continue...
// nvprof check
// C:\Users\HP\Desktop\test\x64\Debug > nvprof test.exe
// == 18712 == NVPROF is profiling process 18712, command: test.exe
// [*] GenerateMatrix has done!
// [*] GenerateMatrix has done!
// [*] AddMatrix on CPU has done!
// [*] Compare : Matrix_ADD = > the result are the same!
// CPU time consumption : 0.000002 ms
// GPU time consumption : 0.000002 ms
// [*] Compare : Matrix_MUL = > the result are the same!
// == 18712 == Profiling application : test.exe
// == 18712 == Profiling result :
// Type Time(%) Time Calls Avg Min Max Name
// GPU activities : 91.91% 718.66us 1 718.66us 718.66us 718.66us MulMatrixOnGPU(float*, float*, float*, int, int)
// 3.62% 28.285us 1 28.285us 28.285us 28.285us AddMatrixOnGPU(float*, float*, float*, int, int)
// 1.93% 15.071us 3 5.0230us 3.8390us 7.3600us[CUDA memset]
// 1.28% 10.047us 2 5.0230us 4.9280us 5.1190us[CUDA memcpy DtoH]
// 1.26% 9.8870us 2 4.9430us 4.5760us 5.3110us[CUDA memcpy HtoD]
// API calls : 90.76% 331.25ms 3 110.42ms 2.6000us 331.25ms hipMalloc
// 8.46% 30.874ms 1 30.874ms 30.874ms 30.874ms hipDevicePrimaryCtxRelease
// 0.24% 871.50us 4 217.88us 55.900us 641.20us hipMemcpy
// 0.24% 870.40us 3 290.13us 12.400us 790.50us hipDeviceSynchronize
// 0.17% 616.90us 1 616.90us 616.90us 616.90us hipModuleUnload
// 0.07% 242.00us 97 2.4940us 100ns 127.40us hipDeviceGetAttribute
// 0.04% 149.10us 3 49.700us 6.6000us 122.20us hipFree
// 0.01% 47.200us 2 23.600us 15.100us 32.100us cudaLaunchKernel
// 0.01% 22.300us 1 22.300us 22.300us 22.300us cuDeviceTotalMem
// 0.00% 14.100us 3 4.7000us 1.4000us 10.600us hipMemset
// 0.00% 6.8000us 1 6.8000us 6.8000us 6.8000us hipDeviceGetPCIBusId
// 0.00% 2.7000us 3 900ns 200ns 2.3000us hipGetDeviceCount
// 0.00% 1.5000us 2 750ns 100ns 1.4000us hipDeviceGet
// 0.00 % 800ns 1 800ns 800ns 800ns hipDeviceGetName
// 0.00 % 400ns 1 400ns 400ns 400ns hipDeviceGetUuid
// 0.00 % 200ns 1 200ns 200ns 200ns cuDeviceGetLuid
//
// C : \Users\HP\Desktop\test\x64\Debug > cd ..
//
// C:\Users\HP\Desktop\test\x64 > cd Release
//
// C : \Users\HP\Desktop\test\x64\Release > nvprof test.exe
// == 18808 == NVPROF is profiling process 18808, command: test.exe
// [*] GenerateMatrix has done!
// [*] GenerateMatrix has done!
// [*] AddMatrix on CPU has done!
// [*] Compare : Matrix_ADD = > the result are the same!
// CPU time consumption : 0.000000 ms
// [*] Compare : Matrix_MUL = > the result are the same!
// == 18808 == Profiling application : test.exe
// == 18808 == Profiling result :
// Type Time(%) Time Calls Avg Min Max Name
// GPU activities : 91.07% 599.83us 1 599.83us 599.83us 599.83us MulMatrixOnGPU(float*, float*, float*, int, int)
// 3.82% 25.150us 1 25.150us 25.150us 25.150us AddMatrixOnGPU(float*, float*, float*, int, int)
// 1.97% 12.991us 3 4.3300us 3.6790us 5.6320us[CUDA memset]
// 1.61% 10.624us 2 5.3120us 5.3120us 5.3120us[CUDA memcpy HtoD]
// 1.53% 10.079us 2 5.0390us 4.8000us 5.2790us[CUDA memcpy DtoH]
// API calls : 73.36% 96.757ms 3 32.252ms 3.1000us 96.746ms hipMalloc
// 25.46% 33.576ms 1 33.576ms 33.576ms 33.576ms hipDevicePrimaryCtxRelease
// 0.52% 691.50us 2 345.75us 59.600us 631.90us hipDeviceSynchronize
// 0.17% 224.60us 4 56.150us 25.500us 81.700us hipMemcpy
// 0.16% 213.70us 1 213.70us 213.70us 213.70us hipModuleUnload
// 0.13% 175.10us 3 58.366us 6.4000us 152.30us hipFree
// 0.12% 157.10us 97 1.6190us 100ns 69.500us hipDeviceGetAttribute
// 0.03% 42.400us 2 21.200us 13.300us 29.100us cudaLaunchKernel
// 0.02% 24.400us 1 24.400us 24.400us 24.400us cuDeviceTotalMem
// 0.01% 15.300us 3 5.1000us 1.5000us 11.900us hipMemset
// 0.00% 6.5000us 1 6.5000us 6.5000us 6.5000us hipDeviceGetPCIBusId
// 0.00% 2.6000us 3 866ns 200ns 2.2000us cuDeviceGetCountt
// 0.00% 1.4000us 2 700ns 100ns 1.3000us hipDeviceGet
// 0.00% 1.4000us 1 1.4000us 1.4000us 1.4000us hipDeviceGetName
// 0.00 % 400ns 1 400ns 400ns 400ns cuDeviceGetLuid
// 0.00 % 300ns 1 300ns 300ns 300ns hipDeviceGetUuid
| 600f12586a0d892f280e6b4d152fbdf7ac7968e5.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <windows.h> // use the QPC
void GenerateMatrix(float *matrix, int nx, int ny)
{
	// Fills the matrix with the sequence 0, 1, 2, ... in row-major order.
	// NOTE(review): the row stride is nx but the row length is ny, so this
	// indexing is only correct when nx == ny (true for main()'s usage).
	int i, j;
	float cnt = 0;
	for (i = 0; i < nx; i++)
	{
		for (j = 0; j < ny; j++)
		{
			matrix[i*nx + j] = cnt++;
		}
	}
	printf("[*] GenerateMatrix has done!\n");
}
void PrintMatrix(float *matrix, int nx, int ny)
{
	// Prints the matrix one row per line (debug helper).
	// NOTE(review): row stride nx is only correct when nx == ny.
	int i, j;
	for (i = 0; i < nx; i++)
	{
		for (j = 0; j < ny; j++)
		{
			printf("%.2f\t", matrix[i*nx + j]);
		}
		printf("\n");
	}
	printf("[*] PrintMatrix has done!\n");
}
/************************* matrix summary begin *************************/
// Host reference for the GPU addition: element-wise C = A + B.
// NOTE(review): row stride nx is only correct for square matrices.
inline void AddMatrixOnCPU(float *A, float *B, float *C, int nx, int ny)
{
	int i, j;
	for (i = 0; i < nx; i++)
	{
		for (j = 0; j < ny; j++)
		{
			C[i*nx + j] = A[i*nx + j] + B[i*nx + j];
		}
	}
	printf("[*] AddMatrix on CPU has done!\n");
}
// Element-wise matrix addition on the device: C = A + B, one thread per
// element.  Fixes vs. the original:
//  * the guard used "i <= nx && j <= ny", which reads/writes one element
//    past each dimension whenever a thread lands exactly on the boundary
//  * "inline" on a __global__ function is ignored by the compiler and has
//    been dropped
// NOTE(review): row stride nx is only correct for square matrices (which is
// how main() calls it).
__global__ void AddMatrixOnGPU(float *A, float *B, float *C, int nx, int ny)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	if (i < nx && j < ny)
	{
		int idx = i*nx + j;
		C[idx] = A[idx] + B[idx];
	}
}
/************************* matrix summary done **************************/
//
//
//
/************************ matrix multiply begin *************************/
// Host reference for the GPU multiply: naive O(nx^3) product C = A * B.
// NOTE(review): row stride nx throughout — square matrices only.
inline void MulMatrixOnCPU(float *A, float *B, float *C, int nx, int ny)
{
	int i, j, k;
	float sum = 0.0;
	for (i = 0; i < nx; i++)
	{
		for (j = 0; j < ny; j++)
		{
			sum = 0.0;
			for (k = 0; k < nx; k++)
			{
				sum = sum + A[i*nx + k] * B[k*nx + j];
			}
			C[i*nx + j] = sum;
		}
	}
}
__global__ inline void MulMatrixOnGPU(float *A, float *B, float *C, int nx, int ny)
{
	// Naive device matrix product: one thread computes one element C[i][j].
	// NOTE(review): row stride nx — square matrices only.  "inline" on a
	// __global__ function is ignored by the compiler.
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	int k;
	if (i < nx && j < ny) // we should to identify the "i" and "j" scope.
	{
		float sum = 0.0;
		for (k = 0; k < nx; k++)
		{
			sum += A[i*nx + k] * B[k*nx + j];
		}
		C[i*nx + j] = sum;
	}
}
/************************ matrix multiply end ***************************/
// compare the result
int Compare(float *cpu_ref, float *gpu_ref, int nx, int ny)
{
	// Returns 1 iff every element matches bit-exactly, else 0.  Exact float
	// equality works here because the demo only adds/multiplies small
	// integer-valued floats; a tolerance compare would be safer in general.
	int i, j;
	for (i = 0; i < nx; i++)
	{
		for (j = 0; j < ny; j++)
		{
			if (cpu_ref[i*nx + j] != gpu_ref[i*nx + j])
			{
				return 0;
			}
		}
	}
	return 1;
}
// Demo driver: generates two square matrices, validates GPU add/mul against
// host references, and times the host multiply with Windows QPC.
// Fixes vs. the original:
//  * freq_cpu is now obtained via QueryPerformanceFrequency (the original
//    called QueryPerformanceCounter, so the printed milliseconds were
//    meaningless)
//  * the never-used begin_gpu/end_gpu/freq_gpu locals were removed
// NOTE(review): CUDA API return codes are unchecked throughout; wrap them in
// an error-checking macro for production code.
int main(int argc, char *argv[])
{
	LARGE_INTEGER begin_cpu;
	LARGE_INTEGER end_cpu;
	LARGE_INTEGER freq_cpu;
	// the size of the elements in the matrix can not be much larger....
	// because of my worse GPU: nVIDIA GeForce GT710
	unsigned int N = 1 << 12;
	int nx = (int)sqrt((float)N);
	int ny = (int)sqrt((float)N);
	float *A = NULL;
	float *B = NULL;
	float *C = NULL;
	float *gpu_ref = NULL;
	float *d_A = NULL;
	float *d_B = NULL;
	float *d_C = NULL;
	// allocate the memory on CPU
	A = (float *)malloc(sizeof(float)* N);
	B = (float *)malloc(sizeof(float)* N);
	C = (float *)malloc(sizeof(float)* N);
	gpu_ref = (float *)malloc(sizeof(float)*N);
	// set the memory to zero
	memset(A, 0, sizeof(float)*N);
	memset(B, 0, sizeof(float)*N);
	memset(C, 0, sizeof(float)*N);
	memset(gpu_ref, 0, sizeof(float)*N);
	// allocate the memory on GPU
	cudaMalloc((float **)&d_A, sizeof(float)*N);
	cudaMalloc((float **)&d_B, sizeof(float)*N);
	cudaMalloc((float **)&d_C, sizeof(float)*N);
	// reset the memory to zero
	cudaMemset(d_A, 0, sizeof(float)*N);
	cudaMemset(d_B, 0, sizeof(float)*N);
	cudaMemset(d_C, 0, sizeof(float)*N);
	// generate the matrix on CPU
	GenerateMatrix(A, nx, ny);
	GenerateMatrix(B, nx, ny);
	// transfer the data from CPU to GPU
	cudaMemcpy(d_A, A, sizeof(float)*N, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, B, sizeof(float)*N, cudaMemcpyHostToDevice);
	// set the grid number and the block thread number
	dim3 block(32, 32);
	dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
	// Add the matrix on CPU
	AddMatrixOnCPU(A, B, C, nx, ny);
	// Add the matrix on GPU
	AddMatrixOnGPU << <grid, block >> >(d_A, d_B, d_C, nx, ny);
	cudaDeviceSynchronize(); // let the CPU wait the GPU to do its calculation.
	// transform the data from the GPU to CPU
	cudaMemcpy(gpu_ref, d_C, sizeof(float)*N, cudaMemcpyDeviceToHost);
	if (Compare(C, gpu_ref, nx, ny))
	{
		printf("[*] Compare : Matrix_ADD => the result are the same!\n");
	}
	else
	{
		printf("[*] Compare : Matrix_ADD => the result are NOT the same...\n");
	}
	// begin to calculate the time consumption
	QueryPerformanceFrequency(&freq_cpu); // ticks per second (was QueryPerformanceCounter)
	QueryPerformanceCounter(&begin_cpu);
	// test the matrix multiply
	MulMatrixOnCPU(A, B, C, nx, ny);
	QueryPerformanceCounter(&end_cpu);
	printf("CPU time consumption:%f ms\n", 1000*(float)(end_cpu.QuadPart - begin_cpu.QuadPart) / (float)freq_cpu.QuadPart);
	// test the matrix multiply on GPU
	MulMatrixOnGPU << <grid, block >> >(d_A, d_B, d_C, nx, ny);
	cudaDeviceSynchronize();
	cudaMemcpy(gpu_ref, d_C, sizeof(float)*N, cudaMemcpyDeviceToHost);
	// make the comparison
	if (Compare(C, gpu_ref, nx, ny))
	{
		printf("[*] Compare : Matrix_MUL => the result are the same!\n");
	}
	else
	{
		printf("[*] Compare : Matrix_MUL => the result are NOT the same...\n");
	}
	// Debug Print
	// PrintMatrix(gpu_ref, nx, ny);
	// PrintMatrix(C, nx, ny);
	free(A);
	free(B);
	free(C);
	free(gpu_ref);
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
	return 0;
}
// [*] GenerateMatrix has done!
// [*] GenerateMatrix has done!
// [*] AddMatrix on CPU has done!
// [*] Compare : Matrix_ADD = > the result are the same!
// [*] Compare : Matrix_MUL = > the result are the same!
// Press any key to continue...
// nvprof check
// C:\Users\HP\Desktop\test\x64\Debug > nvprof test.exe
// == 18712 == NVPROF is profiling process 18712, command: test.exe
// [*] GenerateMatrix has done!
// [*] GenerateMatrix has done!
// [*] AddMatrix on CPU has done!
// [*] Compare : Matrix_ADD = > the result are the same!
// CPU time consumption : 0.000002 ms
// GPU time consumption : 0.000002 ms
// [*] Compare : Matrix_MUL = > the result are the same!
// == 18712 == Profiling application : test.exe
// == 18712 == Profiling result :
// Type Time(%) Time Calls Avg Min Max Name
// GPU activities : 91.91% 718.66us 1 718.66us 718.66us 718.66us MulMatrixOnGPU(float*, float*, float*, int, int)
// 3.62% 28.285us 1 28.285us 28.285us 28.285us AddMatrixOnGPU(float*, float*, float*, int, int)
// 1.93% 15.071us 3 5.0230us 3.8390us 7.3600us[CUDA memset]
// 1.28% 10.047us 2 5.0230us 4.9280us 5.1190us[CUDA memcpy DtoH]
// 1.26% 9.8870us 2 4.9430us 4.5760us 5.3110us[CUDA memcpy HtoD]
// API calls : 90.76% 331.25ms 3 110.42ms 2.6000us 331.25ms cudaMalloc
// 8.46% 30.874ms 1 30.874ms 30.874ms 30.874ms cuDevicePrimaryCtxRelease
// 0.24% 871.50us 4 217.88us 55.900us 641.20us cudaMemcpy
// 0.24% 870.40us 3 290.13us 12.400us 790.50us cudaDeviceSynchronize
// 0.17% 616.90us 1 616.90us 616.90us 616.90us cuModuleUnload
// 0.07% 242.00us 97 2.4940us 100ns 127.40us cuDeviceGetAttribute
// 0.04% 149.10us 3 49.700us 6.6000us 122.20us cudaFree
// 0.01% 47.200us 2 23.600us 15.100us 32.100us cudaLaunchKernel
// 0.01% 22.300us 1 22.300us 22.300us 22.300us cuDeviceTotalMem
// 0.00% 14.100us 3 4.7000us 1.4000us 10.600us cudaMemset
// 0.00% 6.8000us 1 6.8000us 6.8000us 6.8000us cuDeviceGetPCIBusId
// 0.00% 2.7000us 3 900ns 200ns 2.3000us cuDeviceGetCount
// 0.00% 1.5000us 2 750ns 100ns 1.4000us cuDeviceGet
// 0.00 % 800ns 1 800ns 800ns 800ns cuDeviceGetName
// 0.00 % 400ns 1 400ns 400ns 400ns cuDeviceGetUuid
// 0.00 % 200ns 1 200ns 200ns 200ns cuDeviceGetLuid
//
// C : \Users\HP\Desktop\test\x64\Debug > cd ..
//
// C:\Users\HP\Desktop\test\x64 > cd Release
//
// C : \Users\HP\Desktop\test\x64\Release > nvprof test.exe
// == 18808 == NVPROF is profiling process 18808, command: test.exe
// [*] GenerateMatrix has done!
// [*] GenerateMatrix has done!
// [*] AddMatrix on CPU has done!
// [*] Compare : Matrix_ADD = > the result are the same!
// CPU time consumption : 0.000000 ms
// [*] Compare : Matrix_MUL = > the result are the same!
// == 18808 == Profiling application : test.exe
// == 18808 == Profiling result :
// Type Time(%) Time Calls Avg Min Max Name
// GPU activities : 91.07% 599.83us 1 599.83us 599.83us 599.83us MulMatrixOnGPU(float*, float*, float*, int, int)
// 3.82% 25.150us 1 25.150us 25.150us 25.150us AddMatrixOnGPU(float*, float*, float*, int, int)
// 1.97% 12.991us 3 4.3300us 3.6790us 5.6320us[CUDA memset]
// 1.61% 10.624us 2 5.3120us 5.3120us 5.3120us[CUDA memcpy HtoD]
// 1.53% 10.079us 2 5.0390us 4.8000us 5.2790us[CUDA memcpy DtoH]
// API calls : 73.36% 96.757ms 3 32.252ms 3.1000us 96.746ms cudaMalloc
// 25.46% 33.576ms 1 33.576ms 33.576ms 33.576ms cuDevicePrimaryCtxRelease
// 0.52% 691.50us 2 345.75us 59.600us 631.90us cudaDeviceSynchronize
// 0.17% 224.60us 4 56.150us 25.500us 81.700us cudaMemcpy
// 0.16% 213.70us 1 213.70us 213.70us 213.70us cuModuleUnload
// 0.13% 175.10us 3 58.366us 6.4000us 152.30us cudaFree
// 0.12% 157.10us 97 1.6190us 100ns 69.500us cuDeviceGetAttribute
// 0.03% 42.400us 2 21.200us 13.300us 29.100us cudaLaunchKernel
// 0.02% 24.400us 1 24.400us 24.400us 24.400us cuDeviceTotalMem
// 0.01% 15.300us 3 5.1000us 1.5000us 11.900us cudaMemset
// 0.00% 6.5000us 1 6.5000us 6.5000us 6.5000us cuDeviceGetPCIBusId
// 0.00% 2.6000us 3 866ns 200ns 2.2000us cuDeviceGetCountt
// 0.00% 1.4000us 2 700ns 100ns 1.3000us cuDeviceGet
// 0.00% 1.4000us 1 1.4000us 1.4000us 1.4000us cuDeviceGetName
// 0.00 % 400ns 1 400ns 400ns 400ns cuDeviceGetLuid
// 0.00 % 300ns 1 300ns 300ns 300ns cuDeviceGetUuid
|
95d7880307820f695fba1f4339f78e872f36a555.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <stdlib.h>
#include <algorithm>
#include <cmath>
#include <limits>
#include <raft/random/rng.cuh>
#include <selection/kselection.cuh>
namespace MLCommon {
namespace Selection {
template <typename TypeV, typename TypeK, int N, int TPB, bool Greater>
__global__ void sortTestKernel(TypeK* key)
{
  // Fills a per-thread KVArray with this thread's lane id (a known,
  // deterministic value), sorts it in registers, and stores the resulting
  // keys to global memory for host-side checking.  Expected to be launched
  // as a single block of TPB threads (see sortTest below).
  KVArray<TypeV, TypeK, N, Greater> arr;
#pragma unroll
  for (int i = 0; i < N; ++i) {
    arr.arr[i].val = (TypeV)raft::laneId();
    arr.arr[i].key = (TypeK)raft::laneId();
  }
  raft::warpFence();
  arr.sort();
  raft::warpFence();
  // Store keys so element i of thread t lands at key[t + i * TPB].
#pragma unroll
  for (int i = 0; i < N; ++i)
    arr.arr[i].store(nullptr, key + threadIdx.x + i * TPB);
}
template <typename TypeV, typename TypeK, int N, int TPB, bool Greater>
void sortTest(TypeK* key)
{
  // Launches sortTestKernel with one block of TPB threads and copies the
  // TPB*N sorted keys back into 'key' (host memory).
  TypeK* dkey;
  CUDA_CHECK(hipMalloc((void**)&dkey, sizeof(TypeK) * TPB * N));
  hipLaunchKernelGGL(( sortTestKernel<TypeV, TypeK, N, TPB, Greater>), dim3(1), dim3(TPB), 0, 0, dkey);
  CUDA_CHECK(hipPeekAtLastError());
  // NOTE(review): update_host is issued on stream 0 and hipFree synchronizes
  // the device, so 'key' should be complete on return — confirm that
  // raft::update_host is synchronous or stream-ordered here.
  raft::update_host<TypeK>(key, dkey, TPB * N, 0);
  CUDA_CHECK(hipFree(dkey));
}
/************************************************************************/
/********************** Add the function for CPU test *******************/
/************************************************************************/
// Comparator for std::sort / std::partial_sort over KVPairs, used by the
// host reference implementation.  Greater == true orders ascending by value
// and Greater == false descending, mirroring the device-side ordering under
// test.
template <typename TypeV, typename TypeK, bool Greater>
int cmp(KVPair<TypeV, TypeK> a, KVPair<TypeV, TypeK> b)
{
  return Greater ? (a.val < b.val) : (a.val > b.val);
}
template <typename TypeV, typename TypeK, bool Greater>
void partSortKVPair(KVPair<TypeV, TypeK>* arr, int N, int k)
{
  // Partially sorts 'arr' so its first k elements are the top-k under
  // cmp<..., Greater>; the remaining N-k elements are in unspecified order.
  std::partial_sort(arr, arr + k, arr + N, cmp<TypeV, TypeK, Greater>);
}
template <typename TypeV, typename TypeK, int N, bool Greater>
void sortKVArray(KVArray<TypeV, TypeK, N, Greater>& arr)
{
  // Fully sorts the fixed-size KVArray on the host with the same comparator
  // used by the device-side test.
  std::sort(arr.arr, arr.arr + N, cmp<TypeV, TypeK, Greater>);
}
// Compares the device top-k output (d_outv/d_outk) of each of 'rows' rows
// against a host-side partial sort of that row's N input values, within
// 'tolerance'.  Keys are not checked (matching the original behavior).
// Fixes vs. the original:
//  * the four host buffers were reallocated every row and all leaked on an
//    early (failing) return; they are now allocated once and always freed
//  * std::abs is used so the floating-point overload is selected (a bare
//    'abs' can resolve to the int overload and truncate the difference)
template <typename TypeV, typename TypeK, bool Greater>
::testing::AssertionResult checkResult(
  TypeV* d_arr, TypeV* d_outv, TypeK* d_outk, int rows, int N, int k, TypeV tolerance)
{
  TypeV* h_arr               = new TypeV[N];
  KVPair<TypeV, TypeK>* topk = new KVPair<TypeV, TypeK>[N];
  TypeV* h_outv              = new TypeV[k];
  TypeK* h_outk              = new TypeK[k];
  ::testing::AssertionResult result = ::testing::AssertionSuccess();
  for (int rIndex = 0; rIndex < rows && result; rIndex++) {
    // input data for this row
    raft::update_host(h_arr, d_arr + rIndex * N, N, 0);
    for (int j = 0; j < N; j++) {
      topk[j].val = h_arr[j];
      topk[j].key = j;
    }
    // device results for this row
    raft::update_host(h_outv, d_outv + rIndex * k, k, 0);
    raft::update_host(h_outk, d_outk + rIndex * k, k, 0);
    // host reference
    partSortKVPair<TypeV, TypeK, Greater>(topk, N, k);
    // compare values within tolerance
    for (int j = 0; j < k; j++) {
      if (std::abs(h_outv[j] - topk[j].val) > tolerance) {
        result = ::testing::AssertionFailure()
                 << "actual=" << topk[j].val << " != expected=" << h_outv[j];
        break;
      }
    }
  }
  delete[] h_arr;
  delete[] topk;
  delete[] h_outv;
  delete[] h_outk;
  return result;
}
// Structure WarpTopKInputs
template <typename T>
struct WarpTopKInputs {
T tolerance;
int rows; // batch size
int cols; // N the length of variables
int k; // the top-k value
unsigned long long int seed; // seed to generate data
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const WarpTopKInputs<T>& dims)
{
return os;
}
// Define functions WarpTopKTest
template <typename T>
class WarpTopKTest : public ::testing::TestWithParam<WarpTopKInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<WarpTopKInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
raft::allocate(arr, params.rows * params.cols);
raft::allocate(outk, params.rows * params.k);
raft::allocate(outv, params.rows * params.k);
r.uniform(arr, params.rows * params.cols, T(-1.0), T(1.0), stream);
static const bool Sort = false;
static const bool Greater = true;
warpTopK<T, int, Greater, Sort>(outv, outk, arr, params.k, params.rows, params.cols, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override
{
CUDA_CHECK(hipFree(outv));
CUDA_CHECK(hipFree(outk));
CUDA_CHECK(hipFree(arr));
}
protected:
WarpTopKInputs<T> params;
T *arr, *outv;
int* outk;
};
// Parameters
// Milestone 1: Verify the result of current implementation
// Milestone 2: Support all the values of k between 1 and 1024; both inclusive
// Milestone 2.1: Using the POC code to Support all the values
const std::vector<WarpTopKInputs<float>> inputs2_0 = {{0.00000001, 2, 1024, 256, 1234ULL}};
const std::vector<WarpTopKInputs<float>> inputs2_1 = {{0.00000001, 4, 2048, 1024, 1234ULL}};
const std::vector<WarpTopKInputs<float>> inputs2_2 = {{0.00000001, 4, 2048, 1, 1234ULL}};
// Milestone 2.2: Using the full thread queue and warp queue code to support
// all the values
// @TODO: Milestone 3: Support not sorted
// @TODO: Milestone 4: Support multi-gpu
// Define the function TEST_P
typedef WarpTopKTest<float> TestD2_0;
typedef WarpTopKTest<float> TestD2_1;
typedef WarpTopKTest<float> TestD2_2;
TEST_P(TestD2_0, Result)
{
const static bool Greater = true;
ASSERT_TRUE((checkResult<float, int, Greater>(
arr, outv, outk, params.rows, params.cols, params.k, params.tolerance)));
}
TEST_P(TestD2_1, Result)
{
const static bool Greater = true;
ASSERT_TRUE((checkResult<float, int, Greater>(
arr, outv, outk, params.rows, params.cols, params.k, params.tolerance)));
}
TEST_P(TestD2_2, Result)
{
const static bool Greater = true;
ASSERT_TRUE((checkResult<float, int, Greater>(
arr, outv, outk, params.rows, params.cols, params.k, params.tolerance)));
}
// Instantiate
INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_0, ::testing::ValuesIn(inputs2_0));
INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_1, ::testing::ValuesIn(inputs2_1));
INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_2, ::testing::ValuesIn(inputs2_2));
} // end namespace Selection
} // end namespace MLCommon
| 95d7880307820f695fba1f4339f78e872f36a555.cu | /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <stdlib.h>
#include <algorithm>
#include <limits>
#include <raft/random/rng.cuh>
#include <selection/kselection.cuh>
namespace MLCommon {
namespace Selection {
template <typename TypeV, typename TypeK, int N, int TPB, bool Greater>
__global__ void sortTestKernel(TypeK* key)
{
KVArray<TypeV, TypeK, N, Greater> arr;
#pragma unroll
for (int i = 0; i < N; ++i) {
arr.arr[i].val = (TypeV)raft::laneId();
arr.arr[i].key = (TypeK)raft::laneId();
}
raft::warpFence();
arr.sort();
raft::warpFence();
#pragma unroll
for (int i = 0; i < N; ++i)
arr.arr[i].store(nullptr, key + threadIdx.x + i * TPB);
}
template <typename TypeV, typename TypeK, int N, int TPB, bool Greater>
void sortTest(TypeK* key)
{
TypeK* dkey;
CUDA_CHECK(cudaMalloc((void**)&dkey, sizeof(TypeK) * TPB * N));
sortTestKernel<TypeV, TypeK, N, TPB, Greater><<<1, TPB>>>(dkey);
CUDA_CHECK(cudaPeekAtLastError());
raft::update_host<TypeK>(key, dkey, TPB * N, 0);
CUDA_CHECK(cudaFree(dkey));
}
/************************************************************************/
/********************** Add the function for CPU test *******************/
/************************************************************************/
template <typename TypeV, typename TypeK, bool Greater>
int cmp(KVPair<TypeV, TypeK> a, KVPair<TypeV, TypeK> b)
{
if (Greater == 0) {
return a.val > b.val;
} else {
return a.val < b.val;
}
}
template <typename TypeV, typename TypeK, bool Greater>
void partSortKVPair(KVPair<TypeV, TypeK>* arr, int N, int k)
{
std::partial_sort(arr, arr + k, arr + N, cmp<TypeV, TypeK, Greater>);
}
template <typename TypeV, typename TypeK, int N, bool Greater>
void sortKVArray(KVArray<TypeV, TypeK, N, Greater>& arr)
{
std::sort(arr.arr, arr.arr + N, cmp<TypeV, TypeK, Greater>);
}
template <typename TypeV, typename TypeK, bool Greater>
::testing::AssertionResult checkResult(
TypeV* d_arr, TypeV* d_outv, TypeK* d_outk, int rows, int N, int k, TypeV tolerance)
{
for (int rIndex = 0; rIndex < rows; rIndex++) {
// input data
TypeV* h_arr = new TypeV[N];
raft::update_host(h_arr, d_arr + rIndex * N, N, 0);
KVPair<TypeV, TypeK>* topk = new KVPair<TypeV, TypeK>[N];
for (int j = 0; j < N; j++) {
topk[j].val = h_arr[j];
topk[j].key = j;
}
// result reference
TypeV* h_outv = new TypeV[k];
raft::update_host(h_outv, d_outv + rIndex * k, k, 0);
TypeK* h_outk = new TypeK[k];
raft::update_host(h_outk, d_outk + rIndex * k, k, 0);
// calculate the result
partSortKVPair<TypeV, TypeK, Greater>(topk, N, k);
// check result
for (int j = 0; j < k; j++) {
// std::cout<<"Get value at ("<<rIndex<<" "<<j<<") Cpu "
// <<topk[j].val<<" "<<topk[j].key<<" Gpu "<<h_outv[j]<<" "
//<<h_outk[j] <<std::endl<<std::endl;
if (abs(h_outv[j] - topk[j].val) > tolerance) {
return ::testing::AssertionFailure()
<< "actual=" << topk[j].val << " != expected=" << h_outv[j];
}
}
// delete resource
delete[] h_arr;
delete[] h_outv;
delete[] h_outk;
delete[] topk;
}
return ::testing::AssertionSuccess();
}
// Structure WarpTopKInputs
template <typename T>
struct WarpTopKInputs {
T tolerance;
int rows; // batch size
int cols; // N the length of variables
int k; // the top-k value
unsigned long long int seed; // seed to generate data
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const WarpTopKInputs<T>& dims)
{
return os;
}
// Define functions WarpTopKTest
template <typename T>
class WarpTopKTest : public ::testing::TestWithParam<WarpTopKInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<WarpTopKInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
raft::allocate(arr, params.rows * params.cols);
raft::allocate(outk, params.rows * params.k);
raft::allocate(outv, params.rows * params.k);
r.uniform(arr, params.rows * params.cols, T(-1.0), T(1.0), stream);
static const bool Sort = false;
static const bool Greater = true;
warpTopK<T, int, Greater, Sort>(outv, outk, arr, params.k, params.rows, params.cols, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override
{
CUDA_CHECK(cudaFree(outv));
CUDA_CHECK(cudaFree(outk));
CUDA_CHECK(cudaFree(arr));
}
protected:
WarpTopKInputs<T> params;
T *arr, *outv;
int* outk;
};
// Parameters
// Milestone 1: Verify the result of current implementation
// Milestone 2: Support all the values of k between 1 and 1024; both inclusive
// Milestone 2.1: Using the POC code to Support all the values
const std::vector<WarpTopKInputs<float>> inputs2_0 = {{0.00000001, 2, 1024, 256, 1234ULL}};
const std::vector<WarpTopKInputs<float>> inputs2_1 = {{0.00000001, 4, 2048, 1024, 1234ULL}};
const std::vector<WarpTopKInputs<float>> inputs2_2 = {{0.00000001, 4, 2048, 1, 1234ULL}};
// Milestone 2.2: Using the full thread queue and warp queue code to support
// all the values
// @TODO: Milestone 3: Support not sorted
// @TODO: Milestone 4: Support multi-gpu
// Define the function TEST_P
typedef WarpTopKTest<float> TestD2_0;
typedef WarpTopKTest<float> TestD2_1;
typedef WarpTopKTest<float> TestD2_2;
TEST_P(TestD2_0, Result)
{
const static bool Greater = true;
ASSERT_TRUE((checkResult<float, int, Greater>(
arr, outv, outk, params.rows, params.cols, params.k, params.tolerance)));
}
TEST_P(TestD2_1, Result)
{
const static bool Greater = true;
ASSERT_TRUE((checkResult<float, int, Greater>(
arr, outv, outk, params.rows, params.cols, params.k, params.tolerance)));
}
TEST_P(TestD2_2, Result)
{
const static bool Greater = true;
ASSERT_TRUE((checkResult<float, int, Greater>(
arr, outv, outk, params.rows, params.cols, params.k, params.tolerance)));
}
// Instantiate
INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_0, ::testing::ValuesIn(inputs2_0));
INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_1, ::testing::ValuesIn(inputs2_1));
INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_2, ::testing::ValuesIn(inputs2_2));
} // end namespace Selection
} // end namespace MLCommon
|
ed4954f3c7b41d67ddbd97a6c4d7f904847fd1f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
void print_vector(float* vector, size_t size) {
for (size_t i = 0; i < size; ++i) {
std::cout << i << ", ";
}
std::cout << std::endl;
}
void print_matrix(float matrix[256][256]) {
for (size_t r = 0; r < 256; ++r) {
for (size_t c = 0; c < 256; ++c) {
std::cout << matrix[r][c] << ", ";
}
std::cout << '\n';
}
std::cout << '\n';
}
__global__ void vector_add(float* A, float* B, float* C) {
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
__global__ void vector_assign(float *A, float *B, float *C) {
int i = threadIdx.x;
A[i] = 1.0f;
B[i] = 2.0f;
C[i] = 0.0f;
}
__global__ void matrix_add(float* A, float* B, float* C, size_t pitch) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
float *a = reinterpret_cast<float *>(reinterpret_cast<char *>(A) + i * pitch);
float *b = reinterpret_cast<float *>(reinterpret_cast<char *>(B) + i * pitch);
float *c = reinterpret_cast<float *>(reinterpret_cast<char *>(C) + i * pitch);
c[j] = b[j] + a[j];
}
void test_vector_add() {
float host_c[256];
float *A, *B, *C;
hipMalloc(&A, 256 * sizeof(float));
hipMalloc(&B, 256 * sizeof(float));
hipMalloc(&C, 256 * sizeof(float));
hipLaunchKernelGGL(( vector_assign), dim3(1), dim3(256), 0, 0, A, B, C);
hipLaunchKernelGGL(( vector_add), dim3(1), dim3(256), 0, 0, A, B, C);
hipMemcpy(host_c, C, sizeof(float) * 256, hipMemcpyDeviceToHost);
hipFree(A);
hipFree(B);
hipFree(C);
print_vector(host_c, 256);
}
void test_matrix_add() {
float host_a[256][256];
float host_b[256][256];
float host_c[256][256];
for (auto i = 0; i < 256; ++i) {
for (auto j = 0; j < 256; ++j) {
host_a[i][j] = 0.1;
host_b[i][j] = 0.2;
host_c[i][j] = 0.0f;
}
}
float *device_a, *device_b, *device_c;
size_t pitch = 0;
hipMallocPitch(&device_a, &pitch, 256 * sizeof(float), 256);
hipMallocPitch(&device_b, &pitch, 256 * sizeof(float), 256);
hipMallocPitch(&device_c, &pitch, 256 * sizeof(float), 256);
hipMemcpy2D(device_a, pitch, host_a, 256 * sizeof(float), 256 * sizeof(float), 256, hipMemcpyHostToDevice);
hipMemcpy2D(device_b, pitch, host_b, 256 * sizeof(float), 256 * sizeof(float), 256, hipMemcpyHostToDevice);
hipMemcpy2D(device_c, pitch, host_c, 256 * sizeof(float), 256 * sizeof(float), 256, hipMemcpyHostToDevice);
dim3 block_size(16, 16);
dim3 grid_size(256 / 16, 256 / 16);
hipLaunchKernelGGL(( matrix_add), dim3(grid_size), dim3(block_size), 0, 0, device_a, device_b, device_c, pitch);
hipMemcpy2D(host_c, 256 * sizeof(float), device_c, pitch, 256 * sizeof(float), 256, hipMemcpyDeviceToHost);
print_matrix(host_c);
hipFree(device_a);
hipFree(device_b);
hipFree(device_c);
}
int main () {
test_vector_add();
test_matrix_add();
return 0;
}
| ed4954f3c7b41d67ddbd97a6c4d7f904847fd1f3.cu | #include <iostream>
void print_vector(float* vector, size_t size) {
for (size_t i = 0; i < size; ++i) {
std::cout << i << ", ";
}
std::cout << std::endl;
}
void print_matrix(float matrix[256][256]) {
for (size_t r = 0; r < 256; ++r) {
for (size_t c = 0; c < 256; ++c) {
std::cout << matrix[r][c] << ", ";
}
std::cout << '\n';
}
std::cout << '\n';
}
__global__ void vector_add(float* A, float* B, float* C) {
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
__global__ void vector_assign(float *A, float *B, float *C) {
int i = threadIdx.x;
A[i] = 1.0f;
B[i] = 2.0f;
C[i] = 0.0f;
}
__global__ void matrix_add(float* A, float* B, float* C, size_t pitch) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
float *a = reinterpret_cast<float *>(reinterpret_cast<char *>(A) + i * pitch);
float *b = reinterpret_cast<float *>(reinterpret_cast<char *>(B) + i * pitch);
float *c = reinterpret_cast<float *>(reinterpret_cast<char *>(C) + i * pitch);
c[j] = b[j] + a[j];
}
void test_vector_add() {
float host_c[256];
float *A, *B, *C;
cudaMalloc(&A, 256 * sizeof(float));
cudaMalloc(&B, 256 * sizeof(float));
cudaMalloc(&C, 256 * sizeof(float));
vector_assign<<<1, 256>>>(A, B, C);
vector_add<<<1, 256>>>(A, B, C);
cudaMemcpy(host_c, C, sizeof(float) * 256, cudaMemcpyDeviceToHost);
cudaFree(A);
cudaFree(B);
cudaFree(C);
print_vector(host_c, 256);
}
void test_matrix_add() {
float host_a[256][256];
float host_b[256][256];
float host_c[256][256];
for (auto i = 0; i < 256; ++i) {
for (auto j = 0; j < 256; ++j) {
host_a[i][j] = 0.1;
host_b[i][j] = 0.2;
host_c[i][j] = 0.0f;
}
}
float *device_a, *device_b, *device_c;
size_t pitch = 0;
cudaMallocPitch(&device_a, &pitch, 256 * sizeof(float), 256);
cudaMallocPitch(&device_b, &pitch, 256 * sizeof(float), 256);
cudaMallocPitch(&device_c, &pitch, 256 * sizeof(float), 256);
cudaMemcpy2D(device_a, pitch, host_a, 256 * sizeof(float), 256 * sizeof(float), 256, cudaMemcpyHostToDevice);
cudaMemcpy2D(device_b, pitch, host_b, 256 * sizeof(float), 256 * sizeof(float), 256, cudaMemcpyHostToDevice);
cudaMemcpy2D(device_c, pitch, host_c, 256 * sizeof(float), 256 * sizeof(float), 256, cudaMemcpyHostToDevice);
dim3 block_size(16, 16);
dim3 grid_size(256 / 16, 256 / 16);
matrix_add<<<grid_size, block_size>>>(device_a, device_b, device_c, pitch);
cudaMemcpy2D(host_c, 256 * sizeof(float), device_c, pitch, 256 * sizeof(float), 256, cudaMemcpyDeviceToHost);
print_matrix(host_c);
cudaFree(device_a);
cudaFree(device_b);
cudaFree(device_c);
}
int main () {
test_vector_add();
test_matrix_add();
return 0;
}
|
2cc1370ebf92e4edc4ac54803406c8e29aab2e26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
///////////////////////////////////////////////////////////////////////////////
// This is nvidias histogram256 SDK example modded to do a 1024 point
// histogram
///////////////////////////////////////////////////////////////////////////////
#include <gloop/statistics.h>
//Total number of possible data values
#define BIN_COUNT 1024 // Changed from 256
#define HISTOGRAM_SIZE (BIN_COUNT * sizeof(unsigned int))
//Machine warp size
#ifndef __DEVICE_EMULATION__
//G80's warp size is 32 threads
#define WARP_LOG_SIZE 5
#else
//Emulation currently doesn't execute threads in coherent groups of 32 threads,
//which effectively means warp size of 1 thread for emulation modes
#define WARP_LOG_SIZE 0
#endif
//Warps in thread block
#define WARP_N 3
//Threads per block count
#ifdef HISTO_WG_SIZE_0
#define THREAD_N HISTO_WG_SIZE_0
#else
#define THREAD_N (WARP_N << WARP_LOG_SIZE)
#endif
//Per-block number of elements in histograms
#define BLOCK_MEMORY (WARP_N * BIN_COUNT)
#define IMUL(a, b) __mul24(a, b)
__device__ void addData1024(volatile unsigned int *s_WarpHist, unsigned int data, unsigned int threadTag){
unsigned int count;
do{
count = s_WarpHist[data] & 0x07FFFFFFU;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
}while(s_WarpHist[data] != count);
}
__global__ void histogram1024Kernel(unsigned int *d_Result, float *d_Data, float minimum, float maximum, int dataN){
//Current global thread index
const int globalTid = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
//Total number of threads in the compute grid
const int numThreads = IMUL(blockDim.x, gridDim.x);
//WARP_LOG_SIZE higher bits of counter values are tagged
//by lower WARP_LOG_SIZE threadID bits
// Will correctly issue warning when compiling for debug (x<<32-0)
const unsigned int threadTag = threadIdx.x << (32 - WARP_LOG_SIZE);
//Shared memory cache for each warp in current thread block
//Declare as volatile to prevent incorrect compiler optimizations in addPixel()
volatile __shared__ unsigned int s_Hist[BLOCK_MEMORY];
//Current warp shared memory frame
const int warpBase = IMUL(threadIdx.x >> WARP_LOG_SIZE, BIN_COUNT);
//Clear shared memory buffer for current thread block before processing
for(int pos = threadIdx.x; pos < BLOCK_MEMORY; pos += blockDim.x)
s_Hist[pos] = 0;
__syncthreads();
//Cycle through the entire data set, update subhistograms for each warp
//Since threads in warps always execute the same instruction,
//we are safe with the addPixel trick
for(int pos = globalTid; pos < dataN; pos += numThreads){
unsigned int data4 = ((d_Data[pos] - minimum)/(maximum - minimum)) * BIN_COUNT;
addData1024(s_Hist + warpBase, data4 & 0x3FFU, threadTag);
}
__syncthreads();
//Merge per-warp histograms into per-block and write to global memory
for(int pos = threadIdx.x; pos < BIN_COUNT; pos += blockDim.x){
unsigned int sum = 0;
for(int base = 0; base < BLOCK_MEMORY; base += BIN_COUNT)
sum += s_Hist[base + pos] & 0x07FFFFFFU;
atomicAdd(d_Result + pos, sum);
}
}
//Thread block (== subhistogram) count
#define BLOCK_N 64
////////////////////////////////////////////////////////////////////////////////
// Put all kernels together
////////////////////////////////////////////////////////////////////////////////
//histogram1024kernel() results buffer
unsigned int *d_Result1024;
//Internal memory allocation
void initHistogram1024(void){
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors( hipMalloc((void **)&d_Result1024, HISTOGRAM_SIZE ));
}
//Internal memory deallocation
void closeHistogram1024(void){
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors( hipFree(d_Result1024) );
}
//histogram1024 CPU front-end
void histogram1024GPU(
unsigned int *h_Result,
float *d_Data,
float minimum,
float maximum,
int dataN)
{
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors( hipMemset(d_Result1024, 0, HISTOGRAM_SIZE) );
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
hipLaunchKernelGGL(( histogram1024Kernel), dim3(BLOCK_N), dim3(THREAD_N), 0, 0,
d_Result1024,
d_Data,
minimum,
maximum,
dataN
);
hipDeviceSynchronize();
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors( hipMemcpy(h_Result, d_Result1024, HISTOGRAM_SIZE, hipMemcpyDeviceToHost) );
}
}
| 2cc1370ebf92e4edc4ac54803406c8e29aab2e26.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
///////////////////////////////////////////////////////////////////////////////
// This is nvidias histogram256 SDK example modded to do a 1024 point
// histogram
///////////////////////////////////////////////////////////////////////////////
#include <gloop/statistics.h>
//Total number of possible data values
#define BIN_COUNT 1024 // Changed from 256
#define HISTOGRAM_SIZE (BIN_COUNT * sizeof(unsigned int))
//Machine warp size
#ifndef __DEVICE_EMULATION__
//G80's warp size is 32 threads
#define WARP_LOG_SIZE 5
#else
//Emulation currently doesn't execute threads in coherent groups of 32 threads,
//which effectively means warp size of 1 thread for emulation modes
#define WARP_LOG_SIZE 0
#endif
//Warps in thread block
#define WARP_N 3
//Threads per block count
#ifdef HISTO_WG_SIZE_0
#define THREAD_N HISTO_WG_SIZE_0
#else
#define THREAD_N (WARP_N << WARP_LOG_SIZE)
#endif
//Per-block number of elements in histograms
#define BLOCK_MEMORY (WARP_N * BIN_COUNT)
#define IMUL(a, b) __mul24(a, b)
__device__ void addData1024(volatile unsigned int *s_WarpHist, unsigned int data, unsigned int threadTag){
unsigned int count;
do{
count = s_WarpHist[data] & 0x07FFFFFFU;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
}while(s_WarpHist[data] != count);
}
__global__ void histogram1024Kernel(unsigned int *d_Result, float *d_Data, float minimum, float maximum, int dataN){
//Current global thread index
const int globalTid = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
//Total number of threads in the compute grid
const int numThreads = IMUL(blockDim.x, gridDim.x);
//WARP_LOG_SIZE higher bits of counter values are tagged
//by lower WARP_LOG_SIZE threadID bits
// Will correctly issue warning when compiling for debug (x<<32-0)
const unsigned int threadTag = threadIdx.x << (32 - WARP_LOG_SIZE);
//Shared memory cache for each warp in current thread block
//Declare as volatile to prevent incorrect compiler optimizations in addPixel()
volatile __shared__ unsigned int s_Hist[BLOCK_MEMORY];
//Current warp shared memory frame
const int warpBase = IMUL(threadIdx.x >> WARP_LOG_SIZE, BIN_COUNT);
//Clear shared memory buffer for current thread block before processing
for(int pos = threadIdx.x; pos < BLOCK_MEMORY; pos += blockDim.x)
s_Hist[pos] = 0;
__syncthreads();
//Cycle through the entire data set, update subhistograms for each warp
//Since threads in warps always execute the same instruction,
//we are safe with the addPixel trick
for(int pos = globalTid; pos < dataN; pos += numThreads){
unsigned int data4 = ((d_Data[pos] - minimum)/(maximum - minimum)) * BIN_COUNT;
addData1024(s_Hist + warpBase, data4 & 0x3FFU, threadTag);
}
__syncthreads();
//Merge per-warp histograms into per-block and write to global memory
for(int pos = threadIdx.x; pos < BIN_COUNT; pos += blockDim.x){
unsigned int sum = 0;
for(int base = 0; base < BLOCK_MEMORY; base += BIN_COUNT)
sum += s_Hist[base + pos] & 0x07FFFFFFU;
atomicAdd(d_Result + pos, sum);
}
}
//Thread block (== subhistogram) count
#define BLOCK_N 64
////////////////////////////////////////////////////////////////////////////////
// Put all kernels together
////////////////////////////////////////////////////////////////////////////////
//histogram1024kernel() results buffer
unsigned int *d_Result1024;
//Internal memory allocation
void initHistogram1024(void){
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors( cudaMalloc((void **)&d_Result1024, HISTOGRAM_SIZE ));
}
//Internal memory deallocation
void closeHistogram1024(void){
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors( cudaFree(d_Result1024) );
}
//histogram1024 CPU front-end
void histogram1024GPU(
unsigned int *h_Result,
float *d_Data,
float minimum,
float maximum,
int dataN)
{
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors( cudaMemset(d_Result1024, 0, HISTOGRAM_SIZE) );
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
histogram1024Kernel<<<BLOCK_N, THREAD_N>>>(
d_Result1024,
d_Data,
minimum,
maximum,
dataN
);
cudaThreadSynchronize();
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors( cudaMemcpy(h_Result, d_Result1024, HISTOGRAM_SIZE, cudaMemcpyDeviceToHost) );
}
}
|
07addbd8e888277215c7f23d7808d88791e862e0.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "cuda/deform_im2col_cuda.cuh"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// #include <THH/THH.h>
// #include <THH/THHAtomics.cuh>
// #include <THH/THHDeviceUtils.cuh>
// extern THCState *state;
// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
at::Tensor
deform_conv_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int group,
const int deformable_group,
const int im2col_step)
{
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.is_cuda(), "offset must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
const int im2col_step_ = ::min(batch, im2col_step);
AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
"channels(%d) and channels_out(%d) must divide group(%d)", channels, channels_out, group);
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == (channels_kernel * group),
"Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto output = at::empty({batch * height_out * width_out, channels_out}, input.options());
// prepare group weight and bias
auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
auto bias_g = bias.view({group, channels_out/group});
// define alias for easy use
const int batch_n = im2col_step_;
const int per_input_size = channels * height * width;
const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
auto output_n = output.view({batch/im2col_step_, batch_n * height_out * width_out, channels_out});
for (int n = 0; n < batch/im2col_step_; ++n)
{
auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * height_out * width_out}, input.options());
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "deform_conv_forward_cuda", ([&] {
deformable_im2col_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input.data_ptr<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data_ptr<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group,
columns.data_ptr<scalar_t>());
}));
// auto columns_m = columns.t();
// auto weight_m = weight.view({channels_out, channels_kernel * kernel_h * kernel_w}).t();
// output = at::addmm(bias, columns_m, weight_m);
auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});
auto output_g = output_n.select(0, n).view({batch_n * height_out * width_out, group, channels_out/group});
for (int g = 0; g < group; ++g)
{
auto columns_gm = columns_g.select(0, g).t();
auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
auto output_m = at::addmm(bias_g.select(0, g), columns_gm, weight_gm);
output_g.select(1, g) = output_m.view({batch_n * height_out * width_out, channels_out/group});
}
}
output = output.view({batch, height_out, width_out, channels_out}).permute({0, 3, 1, 2}).contiguous();
return output;
}
// Backward pass of group-wise deformable convolution (HIP build).
// Computes gradients w.r.t. input, offset, weight and bias, processing the
// batch in chunks of im2col_step samples to bound the size of the column
// buffer.  Returns {grad_input, grad_offset, grad_weight, grad_bias}.
// NOTE(review): offset and grad_output are consumed via .view(), which
// requires contiguous tensors, but only input and weight are asserted
// contiguous here -- confirm callers always pass contiguous tensors.
std::vector<at::Tensor> deform_conv_cuda_backward(const at::Tensor &input,
                                                  const at::Tensor &weight,
                                                  const at::Tensor &bias,
                                                  const at::Tensor &offset,
                                                  const at::Tensor &grad_output,
                                                  const int kernel_h,
                                                  const int kernel_w,
                                                  const int stride_h,
                                                  const int stride_w,
                                                  const int pad_h,
                                                  const int pad_w,
                                                  const int dilation_h,
                                                  const int dilation_w,
                                                  const int group,
                                                  const int deformable_group,
                                                  const int im2col_step)
{
    AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
    AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
    AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
    AT_ASSERTM(weight.is_cuda(), "weight must be a CUDA tensor");
    AT_ASSERTM(bias.is_cuda(), "bias must be a CUDA tensor");
    AT_ASSERTM(offset.is_cuda(), "offset must be a CUDA tensor");
    const int batch = input.size(0);
    const int channels = input.size(1);
    const int height = input.size(2);
    const int width = input.size(3);
    // Weight layout: (channels_out, channels/group, kernel_h, kernel_w).
    const int channels_out = weight.size(0);
    const int channels_kernel = weight.size(1);
    const int kernel_h_ = weight.size(2);
    const int kernel_w_ = weight.size(3);
    const int batch_ = grad_output.size(0);
    const int channels_out_ = grad_output.size(1);
    const int height_out_ = grad_output.size(2);
    const int width_out_ = grad_output.size(3);
    const int im2col_step_ = ::min(im2col_step, batch);
    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
    AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
               "channels(%d) and channels_out(%d) must divide group(%d)", channels, channels_out, group);
    // BUGFIX: message previously printed (kernel_h_, kernel_w, kernel_h_,
    // kernel_w_); print requested (args) vs actual (weight) dimensions.
    AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
               "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
    AT_ASSERTM(channels == (channels_kernel * group),
               "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group);
    const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
    const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
    AT_ASSERTM(batch == batch_,
               "Input shape and grad_out batch wont match: (%d vs %d).", batch, batch_);
    AT_ASSERTM(channels_out == channels_out_,
               "Input shape and grad_out channels_out wont match: (%d vs %d).", channels_out, channels_out_);
    // BUGFIX: arguments were (height_out, height_out_, width_out, width_out_);
    // the format string expects "H x W vs H x W".
    AT_ASSERTM(height_out == height_out_ && width_out == width_out_,
               "Input shape and grad_out shape wont match: (%d x %d vs %d x %d).", height_out, width_out, height_out_, width_out_);
    auto grad_input = at::zeros_like(input);
    auto grad_offset = at::zeros_like(offset);
    auto grad_weight = at::zeros_like(weight);
    auto grad_bias = at::zeros_like(bias);
    // Group-split views so each group's GEMM can be issued independently.
    auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
    auto grad_weight_g = grad_weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
    auto grad_bias_g = grad_bias.view({group, channels_out/group});
    const int batch_n = im2col_step_;
    const int per_input_size = channels * height * width;
    const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
    auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, channels_out, height_out, width_out});
    for (int n = 0; n < batch/im2col_step_; ++n)
    {
        auto grad_output_g = grad_output_n.select(0, n).view({batch_n, group, channels_out/group, height_out, width_out});
        auto ones = at::ones({batch_n * height_out * width_out}, input.options());
        auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * height_out * width_out}, input.options());
        auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});
        // Back-propagate through the forward GEMM: columns = W^T * dY, per group.
        for (int g = 0; g < group; ++g)
        {
            auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out});
            auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
            columns_g.select(0, g) = at::mm(weight_gm, grad_output_gm);
        }
        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "deform_conv_backward_cuda", ([&] {
            // gradient w.r.t. the sampling offsets
            deformable_col2im_coord_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                                         columns.data_ptr<scalar_t>(),
                                         input.data_ptr<scalar_t>() + n * im2col_step_ * per_input_size,
                                         offset.data_ptr<scalar_t>() + n * im2col_step_ * per_offset_size,
                                         batch_n, channels, height, width,
                                         height_out, width_out, kernel_h, kernel_w,
                                         pad_h, pad_w, stride_h, stride_w,
                                         dilation_h, dilation_w, deformable_group,
                                         grad_offset.data_ptr<scalar_t>() + n * im2col_step_ * per_offset_size);
            // gradient w.r.t. input data
            deformable_col2im_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                                   columns.data_ptr<scalar_t>(),
                                   offset.data_ptr<scalar_t>() + n * im2col_step_ * per_offset_size,
                                   batch_n, channels, height, width,
                                   height_out, width_out, kernel_h, kernel_w,
                                   pad_h, pad_w, stride_h, stride_w,
                                   dilation_h, dilation_w, deformable_group,
                                   grad_input.data_ptr<scalar_t>() + n * im2col_step_ * per_input_size);
            // gradient w.r.t. weight: rebuild the im2col columns; dWeight and
            // dBias accumulate across batch chunks and groups below.
            deformable_im2col_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                                   input.data_ptr<scalar_t>() + n * im2col_step_ * per_input_size,
                                   offset.data_ptr<scalar_t>() + n * im2col_step_ * per_offset_size,
                                   batch_n, channels, height, width,
                                   height_out, width_out, kernel_h, kernel_w,
                                   pad_h, pad_w, stride_h, stride_w,
                                   dilation_h, dilation_w, deformable_group,
                                   columns.data_ptr<scalar_t>());
        }));
        // Accumulate dW += dY * columns^T and dB += dY * 1, per group.
        for (int g = 0; g < group; ++g)
        {
            auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out});
            auto columns_gm = columns_g.select(0, g).t();
            auto grad_weight_gm = grad_weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w});
            auto grad_bias_gm = grad_bias_g.select(0, g);
            grad_weight_g.select(0, g) = at::addmm(grad_weight_gm, grad_output_gm, columns_gm).view_as(grad_weight_g.select(0, g));
            grad_bias_g.select(0, g) = at::addmv(grad_bias_gm, grad_output_gm, ones);
        }
    }
    return {
        grad_input, grad_offset, grad_weight, grad_bias
    };
}
| 07addbd8e888277215c7f23d7808d88791e862e0.cu | #include <vector>
#include "cuda/deform_im2col_cuda.cuh"
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
// #include <THC/THC.h>
// #include <THC/THCAtomics.cuh>
// #include <THC/THCDeviceUtils.cuh>
// extern THCState *state;
// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
// Forward pass of group-wise deformable convolution.
// input:  (batch, channels, height, width), contiguous CUDA tensor
// weight: (channels_out, channels/group, kernel_h, kernel_w), contiguous
// bias:   (channels_out,)
// offset: per-sample deformable sampling offsets, size(1..3) define the
//         per-sample stride used below
// Returns a (batch, channels_out, height_out, width_out) tensor.  The batch
// is processed in chunks of im2col_step samples to bound the size of the
// intermediate im2col column buffer.
at::Tensor
deform_conv_cuda_forward(const at::Tensor &input,
                         const at::Tensor &weight,
                         const at::Tensor &bias,
                         const at::Tensor &offset,
                         const int kernel_h,
                         const int kernel_w,
                         const int stride_h,
                         const int stride_w,
                         const int pad_h,
                         const int pad_w,
                         const int dilation_h,
                         const int dilation_w,
                         const int group,
                         const int deformable_group,
                         const int im2col_step)
{
    AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
    AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
    AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
    AT_ASSERTM(weight.is_cuda(), "weight must be a CUDA tensor");
    AT_ASSERTM(bias.is_cuda(), "bias must be a CUDA tensor");
    AT_ASSERTM(offset.is_cuda(), "offset must be a CUDA tensor");
    const int batch = input.size(0);
    const int channels = input.size(1);
    const int height = input.size(2);
    const int width = input.size(3);
    const int channels_out = weight.size(0);
    const int channels_kernel = weight.size(1);
    const int kernel_h_ = weight.size(2);
    const int kernel_w_ = weight.size(3);
    const int im2col_step_ = std::min(batch, im2col_step);
    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
    AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
               "channels(%d) and channels_out(%d) must divide group(%d)", channels, channels_out, group);
    // BUGFIX: message previously printed (kernel_h_, kernel_w, kernel_h_,
    // kernel_w_); print requested (args) vs actual (weight) dimensions.
    AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
               "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
    AT_ASSERTM(channels == (channels_kernel * group),
               "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group);
    const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
    const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
    // Output is accumulated in (N*H*W, C_out) layout and permuted to NCHW at
    // the very end.
    auto output = at::empty({batch * height_out * width_out, channels_out}, input.options());
    // prepare group weight and bias
    auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
    auto bias_g = bias.view({group, channels_out/group});
    // define alias for easy use
    const int batch_n = im2col_step_;
    const int per_input_size = channels * height * width;
    const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
    auto output_n = output.view({batch/im2col_step_, batch_n * height_out * width_out, channels_out});
    for (int n = 0; n < batch/im2col_step_; ++n)
    {
        auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * height_out * width_out}, input.options());
        // Gather the deformably-sampled patches for this batch chunk.
        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "deform_conv_forward_cuda", ([&] {
            deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
                                   input.data_ptr<scalar_t>() + n * im2col_step_ * per_input_size,
                                   offset.data_ptr<scalar_t>() + n * im2col_step_ * per_offset_size,
                                   batch_n, channels, height, width,
                                   height_out, width_out, kernel_h, kernel_w,
                                   pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
                                   deformable_group,
                                   columns.data_ptr<scalar_t>());
        }));
        // Per-group GEMM: out = bias + columns^T * W^T.
        auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});
        auto output_g = output_n.select(0, n).view({batch_n * height_out * width_out, group, channels_out/group});
        for (int g = 0; g < group; ++g)
        {
            auto columns_gm = columns_g.select(0, g).t();
            auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
            auto output_m = at::addmm(bias_g.select(0, g), columns_gm, weight_gm);
            output_g.select(1, g) = output_m.view({batch_n * height_out * width_out, channels_out/group});
        }
    }
    output = output.view({batch, height_out, width_out, channels_out}).permute({0, 3, 1, 2}).contiguous();
    return output;
}
// Backward pass of group-wise deformable convolution (CUDA build).
// Computes gradients w.r.t. input, offset, weight and bias, processing the
// batch in chunks of im2col_step samples to bound the size of the column
// buffer.  Returns {grad_input, grad_offset, grad_weight, grad_bias}.
// NOTE(review): offset and grad_output are consumed via .view(), which
// requires contiguous tensors, but only input and weight are asserted
// contiguous here -- confirm callers always pass contiguous tensors.
std::vector<at::Tensor> deform_conv_cuda_backward(const at::Tensor &input,
                                                  const at::Tensor &weight,
                                                  const at::Tensor &bias,
                                                  const at::Tensor &offset,
                                                  const at::Tensor &grad_output,
                                                  const int kernel_h,
                                                  const int kernel_w,
                                                  const int stride_h,
                                                  const int stride_w,
                                                  const int pad_h,
                                                  const int pad_w,
                                                  const int dilation_h,
                                                  const int dilation_w,
                                                  const int group,
                                                  const int deformable_group,
                                                  const int im2col_step)
{
    AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
    AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
    AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
    AT_ASSERTM(weight.is_cuda(), "weight must be a CUDA tensor");
    AT_ASSERTM(bias.is_cuda(), "bias must be a CUDA tensor");
    AT_ASSERTM(offset.is_cuda(), "offset must be a CUDA tensor");
    const int batch = input.size(0);
    const int channels = input.size(1);
    const int height = input.size(2);
    const int width = input.size(3);
    // Weight layout: (channels_out, channels/group, kernel_h, kernel_w).
    const int channels_out = weight.size(0);
    const int channels_kernel = weight.size(1);
    const int kernel_h_ = weight.size(2);
    const int kernel_w_ = weight.size(3);
    const int batch_ = grad_output.size(0);
    const int channels_out_ = grad_output.size(1);
    const int height_out_ = grad_output.size(2);
    const int width_out_ = grad_output.size(3);
    const int im2col_step_ = std::min(im2col_step, batch);
    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
    AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
               "channels(%d) and channels_out(%d) must divide group(%d)", channels, channels_out, group);
    // BUGFIX: message previously printed (kernel_h_, kernel_w, kernel_h_,
    // kernel_w_); print requested (args) vs actual (weight) dimensions.
    AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
               "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
    AT_ASSERTM(channels == (channels_kernel * group),
               "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group);
    const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
    const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
    AT_ASSERTM(batch == batch_,
               "Input shape and grad_out batch wont match: (%d vs %d).", batch, batch_);
    AT_ASSERTM(channels_out == channels_out_,
               "Input shape and grad_out channels_out wont match: (%d vs %d).", channels_out, channels_out_);
    // BUGFIX: arguments were (height_out, height_out_, width_out, width_out_);
    // the format string expects "H x W vs H x W".
    AT_ASSERTM(height_out == height_out_ && width_out == width_out_,
               "Input shape and grad_out shape wont match: (%d x %d vs %d x %d).", height_out, width_out, height_out_, width_out_);
    auto grad_input = at::zeros_like(input);
    auto grad_offset = at::zeros_like(offset);
    auto grad_weight = at::zeros_like(weight);
    auto grad_bias = at::zeros_like(bias);
    // Group-split views so each group's GEMM can be issued independently.
    auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
    auto grad_weight_g = grad_weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
    auto grad_bias_g = grad_bias.view({group, channels_out/group});
    const int batch_n = im2col_step_;
    const int per_input_size = channels * height * width;
    const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
    auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, channels_out, height_out, width_out});
    for (int n = 0; n < batch/im2col_step_; ++n)
    {
        auto grad_output_g = grad_output_n.select(0, n).view({batch_n, group, channels_out/group, height_out, width_out});
        auto ones = at::ones({batch_n * height_out * width_out}, input.options());
        auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * height_out * width_out}, input.options());
        auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});
        // Back-propagate through the forward GEMM: columns = W^T * dY, per group.
        for (int g = 0; g < group; ++g)
        {
            auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out});
            auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
            columns_g.select(0, g) = at::mm(weight_gm, grad_output_gm);
        }
        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "deform_conv_backward_cuda", ([&] {
            // gradient w.r.t. the sampling offsets
            deformable_col2im_coord_cuda(at::cuda::getCurrentCUDAStream(),
                                         columns.data_ptr<scalar_t>(),
                                         input.data_ptr<scalar_t>() + n * im2col_step_ * per_input_size,
                                         offset.data_ptr<scalar_t>() + n * im2col_step_ * per_offset_size,
                                         batch_n, channels, height, width,
                                         height_out, width_out, kernel_h, kernel_w,
                                         pad_h, pad_w, stride_h, stride_w,
                                         dilation_h, dilation_w, deformable_group,
                                         grad_offset.data_ptr<scalar_t>() + n * im2col_step_ * per_offset_size);
            // gradient w.r.t. input data
            deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),
                                   columns.data_ptr<scalar_t>(),
                                   offset.data_ptr<scalar_t>() + n * im2col_step_ * per_offset_size,
                                   batch_n, channels, height, width,
                                   height_out, width_out, kernel_h, kernel_w,
                                   pad_h, pad_w, stride_h, stride_w,
                                   dilation_h, dilation_w, deformable_group,
                                   grad_input.data_ptr<scalar_t>() + n * im2col_step_ * per_input_size);
            // gradient w.r.t. weight: rebuild the im2col columns; dWeight and
            // dBias accumulate across batch chunks and groups below.
            deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
                                   input.data_ptr<scalar_t>() + n * im2col_step_ * per_input_size,
                                   offset.data_ptr<scalar_t>() + n * im2col_step_ * per_offset_size,
                                   batch_n, channels, height, width,
                                   height_out, width_out, kernel_h, kernel_w,
                                   pad_h, pad_w, stride_h, stride_w,
                                   dilation_h, dilation_w, deformable_group,
                                   columns.data_ptr<scalar_t>());
        }));
        // Accumulate dW += dY * columns^T and dB += dY * 1, per group.
        for (int g = 0; g < group; ++g)
        {
            auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out});
            auto columns_gm = columns_g.select(0, g).t();
            auto grad_weight_gm = grad_weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w});
            auto grad_bias_gm = grad_bias_g.select(0, g);
            grad_weight_g.select(0, g) = at::addmm(grad_weight_gm, grad_output_gm, columns_gm).view_as(grad_weight_g.select(0, g));
            grad_bias_g.select(0, g) = at::addmv(grad_bias_gm, grad_output_gm, ones);
        }
    }
    return {
        grad_input, grad_offset, grad_weight, grad_bias
    };
}
|
2989944e46a55fd85d0a07fff48f4ef01716e757.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "batch_matmul.cuh"
// Batched single-precision GEMM wrapper: for each of the batchCount
// independent problems computes C = alpha * op(A) * op(B) + beta * C, where
// op(X) = X^T iff the matching Trans flag is true.  m, n, k and lda/ldb/ldc
// follow the column-major BLAS convention.
// checkCudaErrors presumably reports/aborts on a non-success status --
// defined in batch_matmul.cuh; TODO confirm its failure behavior.
void blas_sgemm_batch( hipblasHandle_t handle,
const bool TransA, const bool TransB,
int m, int n, int k,
const float alpha,
const float **Aarray, int lda,
const float **Barray, int ldb,
const float beta,
float **Carray, int ldc,
int batchCount) {
checkCudaErrors(hipblasSgemmBatched(handle,
TransA ? HIPBLAS_OP_T : HIPBLAS_OP_N,
TransB ? HIPBLAS_OP_T : HIPBLAS_OP_N,
m, n, k,
&alpha,
Aarray, lda,
Barray, ldb,
&beta,
Carray, ldc,
batchCount));
}
// Fills the device pointer tables consumed by the batched Q*K^T GEMM: one
// (query, key, out) pointer triple per (batch sample, attention head).
// Launch layout: gridDim.x = num_attention_heads, blockDim.x = batchsize,
// so each thread writes exactly one triple.
// NOTE(review): assumes batchsize fits in a single block (<= max threads
// per block) -- confirm at the call sites.
__global__ void load_pointer_vector_qk(const float* query,
const float* key,
float* out,
const float** query_array,
const float** key_array,
float** out_array,
size_t batchsize,
size_t seq_length,
size_t num_attention_heads,
size_t length_per_heads){
size_t id_batchsize = threadIdx.x;
size_t id_heads = blockIdx.x;
// Start of this (sample, head) slice; query/key are presumably flattened as
// (batch, seq, heads, length_per_heads) -- TODO confirm upstream layout.
size_t idx = id_batchsize * seq_length * length_per_heads * num_attention_heads +
length_per_heads * id_heads;
query_array[id_batchsize * num_attention_heads + id_heads] = query + idx;
key_array[id_batchsize * num_attention_heads + id_heads] = key + idx;
// Score output is head-interleaved: batch stride seq*seq*heads, head stride
// seq_length (paired with ldc = heads*seq in the GEMM).
out_array[id_batchsize * num_attention_heads + id_heads] = out +
id_batchsize * seq_length * seq_length * num_attention_heads + id_heads * seq_length;
// No shared memory is used here, so this barrier is not strictly required.
__syncthreads();
}
// Computes scaled attention scores number * (Q . K^T) for every
// (sample, head) pair with a single batched SGEMM.  `output` is repointed at
// a freshly obtained buffer of batchsize * heads * seq_length^2 floats.
// query_array / key_array / out_array are member device pointer tables --
// presumably allocated elsewhere with >= batchsize * num_attention_heads
// entries; TODO confirm.
void Query_Key::forward(const float* query,
const float* key,
float number,
float* &output){
size_t batchsize = handle->batchsize;
size_t seq_length = handle->seq_length;
size_t num_attention_heads = handle->num_attention_heads;
size_t length_per_heads = handle->hidden_size / num_attention_heads;
// Scratch space for the score matrix from the pooled allocator.
output = handle->global_malloc_manage_float.get_new_head_point(
batchsize * num_attention_heads * seq_length * seq_length);
// One thread per (sample, head) pointer triple, on cal_stream.
dim3 threads(batchsize, 1, 1);
dim3 blocks(num_attention_heads, 1, 1);
hipLaunchKernelGGL(( load_pointer_vector_qk), dim3(blocks), dim3(threads), 0, handle->cal_stream, query,
key,
output,
query_array,
key_array,
out_array,
batchsize,
seq_length,
num_attention_heads,
length_per_heads);
// Batched S = number * K^T * Q (column-major view); leading dimensions step
// over the head-interleaved layout.
// NOTE(review): assumes the blas handle's stream is cal_stream (or already
// synchronized with it); otherwise the GEMM could race the pointer setup.
blas_sgemm_batch(handle->handle,
true, false,
seq_length, seq_length, length_per_heads,
number,
key_array, num_attention_heads * length_per_heads,
query_array, num_attention_heads * length_per_heads,
0.0f,
out_array, num_attention_heads * seq_length,
num_attention_heads * batchsize);
}
// Fills the device pointer tables consumed by the batched P*V GEMM: one
// (prob, value, out) pointer triple per (batch sample, attention head).
// Launch layout: gridDim.x = num_attention_heads, blockDim.x = batchsize.
// NOTE(review): assumes batchsize fits in a single block -- confirm at call
// sites.
__global__ void load_pointer_vector_pv(const float* prob,
const float* value,
float* out,
const float** prob_array,
const float** value_array,
float** out_array,
size_t batchsize,
size_t seq_length,
size_t num_attention_heads,
size_t length_per_heads){
size_t id_batchsize = threadIdx.x;
size_t id_heads = blockIdx.x;
// Slice offset into the head-interleaved value/output tensors.
size_t idx = id_batchsize * seq_length * length_per_heads * num_attention_heads +
length_per_heads * id_heads;
// prob is the head-interleaved attention-score buffer: batch stride
// seq*seq*heads, head stride seq_length.
prob_array[id_batchsize * num_attention_heads + id_heads] = prob +
id_batchsize * seq_length * seq_length * num_attention_heads + id_heads * seq_length;
value_array[id_batchsize * num_attention_heads + id_heads] = value + idx;
out_array[id_batchsize * num_attention_heads + id_heads] = out + idx;
// No shared memory is used here, so this barrier is not strictly required.
__syncthreads();
}
// Computes the attention context prob . value for every (sample, head) pair
// with a single batched SGEMM.  `output` is repointed at a freshly obtained
// buffer of batchsize * heads * seq_length * length_per_heads floats.
// prob_array / value_array / out_array are member device pointer tables --
// presumably sized >= batchsize * num_attention_heads; TODO confirm.
void Prob_Value::forward(const float* prob,
const float* value,
float* &output){
size_t batchsize = handle->batchsize;
size_t seq_length = handle->seq_length;
size_t num_attention_heads = handle->num_attention_heads;
size_t length_per_heads = handle->hidden_size / num_attention_heads;
// Scratch space for the context tensor from the pooled allocator.
output = handle->global_malloc_manage_float.get_new_head_point(
batchsize * num_attention_heads * seq_length * length_per_heads);
// One thread per (sample, head) pointer triple, on cal_stream.
dim3 threads(batchsize, 1, 1);
dim3 blocks(num_attention_heads, 1, 1);
hipLaunchKernelGGL(( load_pointer_vector_pv), dim3(blocks), dim3(threads), 0, handle->cal_stream, prob,
value,
output,
prob_array,
value_array,
out_array,
batchsize,
seq_length,
num_attention_heads,
length_per_heads);
// Batched out = V * P (column-major view); leading dimensions step over the
// head-interleaved layout.
// NOTE(review): assumes the blas handle's stream matches cal_stream (or is
// synchronized with it); otherwise the GEMM could race the pointer setup.
blas_sgemm_batch(handle->handle,
false, false,
length_per_heads, seq_length, seq_length,
1.0,
value_array, num_attention_heads * length_per_heads,
prob_array, num_attention_heads * seq_length,
0.0,
out_array, num_attention_heads * length_per_heads,
num_attention_heads * batchsize);
} | 2989944e46a55fd85d0a07fff48f4ef01716e757.cu | #include "batch_matmul.cuh"
// Batched single-precision GEMM wrapper (CUDA build): for each of the
// batchCount problems computes C = alpha * op(A) * op(B) + beta * C, where
// op(X) = X^T iff the matching Trans flag is true; column-major convention.
void blas_sgemm_batch( cublasHandle_t handle,
const bool TransA, const bool TransB,
int m, int n, int k,
const float alpha,
const float **Aarray, int lda,
const float **Barray, int ldb,
const float beta,
float **Carray, int ldc,
int batchCount) {
// checkCudaErrors presumably reports/aborts on a bad cublas status.
checkCudaErrors(cublasSgemmBatched(handle,
TransA ? CUBLAS_OP_T : CUBLAS_OP_N,
TransB ? CUBLAS_OP_T : CUBLAS_OP_N,
m, n, k,
&alpha,
Aarray, lda,
Barray, ldb,
&beta,
Carray, ldc,
batchCount));
}
// Builds the per-(sample, head) device pointer tables for the batched Q*K^T
// GEMM.  Launch layout: gridDim.x = num_attention_heads,
// blockDim.x = batchsize (one thread per pointer triple; assumes batchsize
// fits in one block -- TODO confirm at call sites).
__global__ void load_pointer_vector_qk(const float* query,
const float* key,
float* out,
const float** query_array,
const float** key_array,
float** out_array,
size_t batchsize,
size_t seq_length,
size_t num_attention_heads,
size_t length_per_heads){
size_t id_batchsize = threadIdx.x;
size_t id_heads = blockIdx.x;
// Slice offset into the head-interleaved query/key tensors.
size_t idx = id_batchsize * seq_length * length_per_heads * num_attention_heads +
length_per_heads * id_heads;
query_array[id_batchsize * num_attention_heads + id_heads] = query + idx;
key_array[id_batchsize * num_attention_heads + id_heads] = key + idx;
// Scores are head-interleaved: batch stride seq*seq*heads, head stride seq.
out_array[id_batchsize * num_attention_heads + id_heads] = out +
id_batchsize * seq_length * seq_length * num_attention_heads + id_heads * seq_length;
// No shared memory is used here; barrier not strictly required.
__syncthreads();
}
// Computes scaled attention scores number * (Q . K^T) per (sample, head)
// with one batched SGEMM; `output` is repointed at a freshly obtained buffer
// of batchsize * heads * seq_length^2 floats.
void Query_Key::forward(const float* query,
const float* key,
float number,
float* &output){
size_t batchsize = handle->batchsize;
size_t seq_length = handle->seq_length;
size_t num_attention_heads = handle->num_attention_heads;
size_t length_per_heads = handle->hidden_size / num_attention_heads;
// Scratch space for the score matrix from the pooled allocator.
output = handle->global_malloc_manage_float.get_new_head_point(
batchsize * num_attention_heads * seq_length * seq_length);
// One thread per (sample, head) pointer triple, on cal_stream.
dim3 threads(batchsize, 1, 1);
dim3 blocks(num_attention_heads, 1, 1);
load_pointer_vector_qk<<<blocks, threads, 0, handle->cal_stream>>>(query,
key,
output,
query_array,
key_array,
out_array,
batchsize,
seq_length,
num_attention_heads,
length_per_heads);
// Batched S = number * K^T * Q (column-major view).
// NOTE(review): assumes the cublas handle's stream is cal_stream (or
// synchronized with it); otherwise the GEMM could race the pointer setup.
blas_sgemm_batch(handle->handle,
true, false,
seq_length, seq_length, length_per_heads,
number,
key_array, num_attention_heads * length_per_heads,
query_array, num_attention_heads * length_per_heads,
0.0f,
out_array, num_attention_heads * seq_length,
num_attention_heads * batchsize);
}
// Builds the per-(sample, head) device pointer tables for the batched P*V
// GEMM.  Launch layout: gridDim.x = num_attention_heads,
// blockDim.x = batchsize (one thread per pointer triple; assumes batchsize
// fits in one block -- TODO confirm at call sites).
__global__ void load_pointer_vector_pv(const float* prob,
const float* value,
float* out,
const float** prob_array,
const float** value_array,
float** out_array,
size_t batchsize,
size_t seq_length,
size_t num_attention_heads,
size_t length_per_heads){
size_t id_batchsize = threadIdx.x;
size_t id_heads = blockIdx.x;
// Slice offset into the head-interleaved value/output tensors.
size_t idx = id_batchsize * seq_length * length_per_heads * num_attention_heads +
length_per_heads * id_heads;
// prob is the head-interleaved score buffer (batch stride seq*seq*heads).
prob_array[id_batchsize * num_attention_heads + id_heads] = prob +
id_batchsize * seq_length * seq_length * num_attention_heads + id_heads * seq_length;
value_array[id_batchsize * num_attention_heads + id_heads] = value + idx;
out_array[id_batchsize * num_attention_heads + id_heads] = out + idx;
// No shared memory is used here; barrier not strictly required.
__syncthreads();
}
// Computes the attention context prob . value per (sample, head) with one
// batched SGEMM; `output` is repointed at a freshly obtained buffer of
// batchsize * heads * seq_length * length_per_heads floats.
void Prob_Value::forward(const float* prob,
const float* value,
float* &output){
size_t batchsize = handle->batchsize;
size_t seq_length = handle->seq_length;
size_t num_attention_heads = handle->num_attention_heads;
size_t length_per_heads = handle->hidden_size / num_attention_heads;
// Scratch space for the context tensor from the pooled allocator.
output = handle->global_malloc_manage_float.get_new_head_point(
batchsize * num_attention_heads * seq_length * length_per_heads);
// One thread per (sample, head) pointer triple, on cal_stream.
dim3 threads(batchsize, 1, 1);
dim3 blocks(num_attention_heads, 1, 1);
load_pointer_vector_pv<<<blocks, threads, 0, handle->cal_stream>>>(prob,
value,
output,
prob_array,
value_array,
out_array,
batchsize,
seq_length,
num_attention_heads,
length_per_heads);
// Batched out = V * P (column-major view).
// NOTE(review): assumes the cublas handle's stream matches cal_stream (or
// is synchronized with it); otherwise the GEMM could race the pointer setup.
blas_sgemm_batch(handle->handle,
false, false,
length_per_heads, seq_length, seq_length,
1.0,
value_array, num_attention_heads * length_per_heads,
prob_array, num_attention_heads * seq_length,
0.0,
out_array, num_attention_heads * length_per_heads,
num_attention_heads * batchsize);
} |
ca33804d08d5f1bf08c26057bae90a84652c344e.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file sssp_app.cu
*
* @brief single-source shortest path (SSSP) application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definitions
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// single-source shortest path includes
#include <gunrock/app/SSSP_Test/SSSP_Test_enactor.cuh>
#include <gunrock/app/SSSP_Test/SSSP_Test_test.cuh>
namespace gunrock {
namespace app {
namespace SSSP_Test {
// Registers the command-line parameters understood by this app on top of the
// shared app / problem / enactor parameter sets.  Returns the first
// non-success status encountered (via GUARD_CU), else hipSuccess.
// BUGFIX: the parameter declaration had been mangled by HTML-entity decoding
// ("&para" -> pilcrow); restored to `&parameters`.
hipError_t UseParameters(util::Parameters &parameters)
{
    hipError_t retval = hipSuccess;
    GUARD_CU(UseParameters_app    (parameters));
    GUARD_CU(UseParameters_problem(parameters));
    GUARD_CU(UseParameters_enactor(parameters));
    // Source vertices; the special values are resolved downstream
    // (presumably by the shared app machinery -- TODO confirm).
    GUARD_CU(parameters.Use<std::string>(
        "src",
        util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
        "0",
        "<Vertex-ID|random|largestdegree> The source vertices\n"
        "\tIf random, randomly select non-zero degree vertices;\n"
        "\tIf largestdegree, select vertices with largest degrees",
        __FILE__, __LINE__));
    // Seed used when the sources are chosen randomly.
    GUARD_CU(parameters.Use<int>(
        "src-seed",
        util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
        util::PreDefinedValues<int>::InvalidValue,
        "seed to generate random sources",
        __FILE__, __LINE__));
    return retval;
}
/**
* @brief Run SSSP tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[in] ref_distances Reference distances
* @param[in] target Whether to perform the SSSP
* \return hipError_t error message(s), if any
*/
// Runs the SSSP_Test enactor num-runs times (cycling through the parsed
// source list), optionally validating each or only the last run against the
// provided reference distances, and collects timing statistics.
// BUGFIX: the `parameters` reference had been mangled by HTML-entity
// decoding ("&para" -> pilcrow); restored to `&parameters`.
// NOTE(review): assumes num-runs >= 1 and at least one source was parsed;
// with an empty source list the modulo below divides by zero.
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
hipError_t RunTests(
    util::Parameters &parameters,
    GraphT &graph,
    ValueT **ref_distances = NULL,
    util::Location target = util::DEVICE)
{
    hipError_t retval = hipSuccess;
    typedef typename GraphT::VertexT VertexT;
    typedef typename GraphT::SizeT SizeT;
    typedef Problem<GraphT > ProblemT;
    typedef Enactor<ProblemT> EnactorT;
    util::CpuTimer cpu_timer, total_timer;
    cpu_timer.Start(); total_timer.Start();
    // parse configurations from parameters
    bool quiet_mode = parameters.Get<bool>("quiet");
    int num_runs = parameters.Get<int >("num-runs");
    std::string validation = parameters.Get<std::string>("validation");
    // NOTE(review): the info label and the Validate_Results calls below still
    // reference the "Template" app rather than SSSP_Test -- confirm intended.
    util::Info info("Template", parameters, graph); // initialize Info structure
    std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
    int num_srcs = srcs.size();
    // Allocate host-side array (for both reference and GPU-computed results)
    ValueT *h_distances = new ValueT[graph.nodes];
    // Allocate problem and enactor on GPU, and initialize them
    ProblemT problem(parameters);
    EnactorT enactor;
    GUARD_CU(problem.Init(graph  , target));
    GUARD_CU(enactor.Init(problem, target));
    cpu_timer.Stop();
    parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
    // perform the algorithm: one timed Enact() per run
    VertexT src;
    for (int run_num = 0; run_num < num_runs; ++run_num)
    {
        src = srcs[run_num % num_srcs];
        GUARD_CU(problem.Reset(src, target));
        GUARD_CU(enactor.Reset(src, target));
        util::PrintMsg("__________________________", !quiet_mode);
        cpu_timer.Start();
        GUARD_CU(enactor.Enact(src));
        cpu_timer.Stop();
        info.CollectSingleRun(cpu_timer.ElapsedMillis());
        util::PrintMsg("--------------------------\nRun "
            + std::to_string(run_num) + " elapsed: "
            + std::to_string(cpu_timer.ElapsedMillis()) +
            " ms, src = " + std::to_string(src) +
            ", #iterations = "
            + std::to_string(enactor.enactor_slices[0]
                .enactor_stats.iteration), !quiet_mode);
        if (validation == "each")
        {
            GUARD_CU(problem.Extract(h_distances));
            SizeT num_errors = app::Template::Validate_Results(
                parameters, graph,
                src, h_distances,
                ref_distances == NULL ? NULL : ref_distances[run_num % num_srcs],
                NULL,
                false);
        }
    }
    cpu_timer.Start();
    // Copy out results
    GUARD_CU(problem.Extract(h_distances));
    if (validation == "last")
    {
        SizeT num_errors = app::Template::Validate_Results(
            parameters, graph,
            src, h_distances,
            ref_distances == NULL ? NULL : ref_distances[(num_runs -1) % num_srcs],
            NULL,
            true);
    }
    // compute running statistics
    info.ComputeTraversalStats(enactor, (ValueT*) h_distances);
    //Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
    //Display_Performance_Profiling(enactor);
#endif
    // Clean up
    GUARD_CU(enactor.Release(target));
    GUARD_CU(problem.Release(target));
    delete[] h_distances; h_distances = NULL;
    cpu_timer.Stop(); total_timer.Stop();
    info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
    return retval;
}
} // namespace SSSP_Test
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_template function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_SSSP_Test(
gunrock::util::Parameters ¶meters,
GraphT &graph,
ValueT **distances
)
{
typedef typename GraphT::VertexT VertexT;
typedef gunrock::app::Template::Problem<GraphT > ProblemT;
typedef gunrock::app::Template::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet"))
parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph , target);
enactor.Init(problem, target);
int num_runs = parameters.Get<int>("num-runs");
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_srcs = srcs.size();
for (int run_num = 0; run_num < num_runs; ++run_num)
{
int src_num = run_num % num_srcs;
VertexT src = srcs[src_num];
problem.Reset(src, target);
enactor.Reset(src, target);
cpu_timer.Start();
enactor.Enact(src);
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
problem.Extract(distances[src_num]);
}
enactor.Release(target);
problem.Release(target);
srcs.clear();
return total_time;
}
/*
* @brief Simple interface take in graph as CSR format
* @param[in] num_nodes Number of veritces in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] edge_values CSR-formatted graph input edge weights
* @param[in] num_runs Number of runs to perform SSSP
* @param[in] sources Sources to begin traverse, one for each run
* @param[in] mark_preds Whether to output predecessor info
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <
typename VertexT = int,
typename SizeT = int,
typename GValueT = unsigned int,
typename TValueT = GValueT>
float SSSSP_Test(
const SizeT num_nodes,
const SizeT num_edges,
const SizeT *row_offsets,
const VertexT *col_indices,
const GValueT *edge_values,
const int num_runs
VertexT *sources,
SSSPValueT **distances
)
{
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_EDGE_VALUES | gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("Template");
gunrock::graphio::UseParameters(parameters);
gunrock::app::SSSP_Test::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
std::vector<VertexT> srcs;
for (int i = 0; i < num_runs; i ++)
srcs.push_back(sources[i]);
parameters.Set("srcs", srcs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets .SetPointer(row_offsets, gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer(col_indices, gunrock::util::HOST);
graph.CsrT::edge_values .SetPointer(edge_values, gunrock::util::HOST);
graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the Template
double elapsed_time = gunrock_SSSP_Test(parameters, graph , distances);
// Cleanup
graph.Release();
srcs.clear();
return elapsed_time;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| ca33804d08d5f1bf08c26057bae90a84652c344e.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file sssp_app.cu
*
* @brief single-source shortest path (SSSP) application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definations
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// single-source shortest path includes
#include <gunrock/app/SSSP_Test/SSSP_Test_enactor.cuh>
#include <gunrock/app/SSSP_Test/SSSP_Test_test.cuh>
namespace gunrock {
namespace app {
namespace SSSP_Test {
cudaError_t UseParameters(util::Parameters ¶meters)
{
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app (parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(parameters.Use<std::string>(
"src",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
"0",
"<Vertex-ID|random|largestdegree> The source vertices\n"
"\tIf random, randomly select non-zero degree vertices;\n"
"\tIf largestdegree, select vertices with largest degrees",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"src-seed",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
util::PreDefinedValues<int>::InvalidValue,
"seed to generate random sources",
__FILE__, __LINE__));
return retval;
}
/**
* @brief Run SSSP tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[in] ref_distances Reference distances
* @param[in] target Whether to perform the SSSP
* \return cudaError_t error message(s), if any
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
cudaError_t RunTests(
util::Parameters ¶meters,
GraphT &graph,
ValueT **ref_distances = NULL,
util::Location target = util::DEVICE)
{
cudaError_t retval = cudaSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT > ProblemT;
typedef Enactor<ProblemT> EnactorT;
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start(); total_timer.Start();
// parse configurations from parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int >("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("Template", parameters, graph); // initialize Info structure
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_srcs = srcs .size();
// Allocate host-side array (for both reference and GPU-computed results)
ValueT *h_distances = new ValueT[graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph , target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
// perform the algorithm
VertexT src;
for (int run_num = 0; run_num < num_runs; ++run_num)
{
src = srcs[run_num % num_srcs];
GUARD_CU(problem.Reset(src, target));
GUARD_CU(enactor.Reset(src, target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact(src));
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg("--------------------------\nRun "
+ std::to_string(run_num) + " elapsed: "
+ std::to_string(cpu_timer.ElapsedMillis()) +
" ms, src = " + std::to_string(src) +
", #iterations = "
+ std::to_string(enactor.enactor_slices[0]
.enactor_stats.iteration), !quiet_mode);
if (validation == "each")
{
GUARD_CU(problem.Extract(h_distances));
SizeT num_errors = app::Template::Validate_Results(
parameters, graph,
src, h_distances,
ref_distances == NULL ? NULL : ref_distances[run_num % num_srcs],
NULL,
false);
}
}
cpu_timer.Start();
// Copy out results
GUARD_CU(problem.Extract(h_distances));
if (validation == "last")
{
SizeT num_errors = app::Template::Validate_Results(
parameters, graph,
src, h_distances,
ref_distances == NULL ? NULL : ref_distances[(num_runs -1) % num_srcs],
NULL,
true);
}
// compute running statistics
info.ComputeTraversalStats(enactor, (ValueT*) h_distances);
//Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
//Display_Performance_Profiling(enactor);
#endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_distances ; h_distances = NULL;
cpu_timer.Stop(); total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace Template
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_template function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_SSSP_Test(
gunrock::util::Parameters ¶meters,
GraphT &graph,
ValueT **distances
)
{
typedef typename GraphT::VertexT VertexT;
typedef gunrock::app::Template::Problem<GraphT > ProblemT;
typedef gunrock::app::Template::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet"))
parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph , target);
enactor.Init(problem, target);
int num_runs = parameters.Get<int>("num-runs");
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_srcs = srcs.size();
for (int run_num = 0; run_num < num_runs; ++run_num)
{
int src_num = run_num % num_srcs;
VertexT src = srcs[src_num];
problem.Reset(src, target);
enactor.Reset(src, target);
cpu_timer.Start();
enactor.Enact(src);
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
problem.Extract(distances[src_num]);
}
enactor.Release(target);
problem.Release(target);
srcs.clear();
return total_time;
}
/*
* @brief Simple interface take in graph as CSR format
* @param[in] num_nodes Number of veritces in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] edge_values CSR-formatted graph input edge weights
* @param[in] num_runs Number of runs to perform SSSP
* @param[in] sources Sources to begin traverse, one for each run
* @param[in] mark_preds Whether to output predecessor info
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <
typename VertexT = int,
typename SizeT = int,
typename GValueT = unsigned int,
typename TValueT = GValueT>
float SSSSP_Test(
const SizeT num_nodes,
const SizeT num_edges,
const SizeT *row_offsets,
const VertexT *col_indices,
const GValueT *edge_values,
const int num_runs
VertexT *sources,
SSSPValueT **distances
)
{
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_EDGE_VALUES | gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("Template");
gunrock::graphio::UseParameters(parameters);
gunrock::app::SSSP_Test::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
std::vector<VertexT> srcs;
for (int i = 0; i < num_runs; i ++)
srcs.push_back(sources[i]);
parameters.Set("srcs", srcs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets .SetPointer(row_offsets, gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer(col_indices, gunrock::util::HOST);
graph.CsrT::edge_values .SetPointer(edge_values, gunrock::util::HOST);
graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the Template
double elapsed_time = gunrock_SSSP_Test(parameters, graph , distances);
// Cleanup
graph.Release();
srcs.clear();
return elapsed_time;
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
69becae57d933f68bdedc07af5f4d7f6b37abe0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// linear algebra subroutines
// Ben Cumming @ CSCS
#include <iostream>
#include <cmath>
#include <cstdio>
#include <mpi.h>
#include "linalg.h"
#include "operators.h"
#include "stats.h"
#include "data.h"
namespace linalg {
namespace kernels {
__global__
void fill(
double *y,
const double value,
int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] = value;
}
}
__global__
void axpy(
double* y,
const double alpha,
const double* x,
int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] += alpha * x[i];
}
}
__global__
void add_scaled_diff(
double *y,
const double* x,
const double alpha,
const double *l,
const double *r,
const int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] = x[i] + alpha * (l[i] - r[i]);
}
}
__global__
void scaled_diff(
double *y,
const double alpha,
const double* l,
const double* r,
int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] = alpha * (l[i] - r[i]);
}
}
__global__
void scale(
double *y,
const double alpha,
const double *x,
int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] = alpha * x[i];
}
}
__global__
void lcomb(
double *y,
const double alpha,
const double *x,
const double beta,
const double *z,
int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] = alpha * x[i] + beta * z[i];
}
}
__global__
void copy(
double *y,
const double* x,
int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] = x[i];
}
}
} // namespace kernels
bool cg_initialized = false;
Field r;
Field Ap;
Field p;
Field Fx;
Field Fxold;
Field v;
Field xold;
// block dimensions for blas 1 calls
const int block_dim = 192;
int calculate_grid_dim(const int block_dim, int n) {
return (n+block_dim-1)/block_dim;
}
using namespace operators;
using namespace stats;
using data::Field;
// initialize temporary storage fields used by the cg solver
// I do this here so that the fields are persistent between calls
// to the CG solver. This is useful if we want to avoid malloc/free calls
// on the device for the OpenACC implementation
void cg_init(int nx, int ny)
{
Ap.init(nx,ny);
r.init(nx,ny);
p.init(nx,ny);
Fx.init(nx,ny);
Fxold.init(nx,ny);
v.init(nx,ny);
xold.init(nx,ny);
cg_initialized = true;
}
////////////////////////////////////////////////////////////////////////////////
// blas level 1 reductions
////////////////////////////////////////////////////////////////////////////////
// computes the inner product of x and y
// x and y are vectors on length N
double ss_dot(Field const& x, Field const& y)
{
double result = 0;
double result_global = 0;
const int N = x.length();
auto status =
hipblasDdot(
cublas_handle(), N,
x.device_data(), 1,
y.device_data(), 1,
&result
);
MPI_Allreduce(&result, &result_global,
1, MPI_DOUBLE, MPI_SUM,
MPI_COMM_WORLD);
return result_global;
}
// computes the 2-norm of x
// x is a vector on length N
double ss_norm2(Field const& x)
{
double result = 0;
double result_global = 0;
const int n = x.length();
auto status =
hipblasDnrm2(
cublas_handle(), n,
x.device_data(), 1,
&result
);
// take the square of the result, because we still have to sum of the local
// partial sums before taking sqrt of the full global sum
result *= result;
MPI_Allreduce(&result, &result_global,
1, MPI_DOUBLE, MPI_SUM,
MPI_COMM_WORLD);
result_global = sqrt(result_global);
return(result_global);
//return sqrt(result_global);
}
// sets entries in a vector to value
// x is a vector on length N
// value is th
void ss_fill(Field& x, const double value)
{
const int n = x.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
hipLaunchKernelGGL(( kernels::fill), dim3(grid_dim), dim3(block_dim), 0, 0, x.device_data(), value, n);
}
////////////////////////////////////////////////////////////////////////////////
// blas level 1 vector-vector operations
////////////////////////////////////////////////////////////////////////////////
// computes y := alpha*x + y
// x and y are vectors on length N
// alpha is a scalar
void ss_axpy(Field& y, const double alpha, Field const& x)
{
const int n = y.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
hipLaunchKernelGGL(( kernels::axpy), dim3(grid_dim), dim3(block_dim), 0, 0,
y.device_data(), alpha, x.device_data(), x.length());
}
// computes y = x + alpha*(l-r)
// y, x, l and r are vectors of length N
// alpha is a scalar
void ss_add_scaled_diff(Field& y, Field const& x, const double alpha,
Field const& l, Field const& r)
{
const int n = y.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
hipLaunchKernelGGL(( kernels::add_scaled_diff), dim3(grid_dim), dim3(block_dim), 0, 0,
y.device_data(), x.device_data(), alpha, l.device_data(), r.device_data(), n);
}
// computes y = alpha*(l-r)
// y, l and r are vectors of length N
// alpha is a scalar
void ss_scaled_diff(Field& y, const double alpha,
Field const& l, Field const& r)
{
const int n = y.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
hipLaunchKernelGGL(( kernels::scaled_diff), dim3(grid_dim), dim3(block_dim), 0, 0,
y.device_data(), alpha, l.device_data(), r.device_data(), n);
}
// computes y := alpha*x
// alpha is scalar
// y and x are vectors on length n
void ss_scale(Field& y, const double alpha, Field& x)
{
const int n = x.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
hipLaunchKernelGGL(( kernels::scale), dim3(grid_dim), dim3(block_dim), 0, 0,
y.device_data(), alpha, x.device_data(), n);
}
// computes linear combination of two vectors y := alpha*x + beta*z
// alpha and beta are scalar
// y, x and z are vectors on length n
void ss_lcomb(Field& y, const double alpha, Field& x, const double beta,
Field const& z)
{
const int n = x.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
hipLaunchKernelGGL(( kernels::lcomb), dim3(grid_dim), dim3(block_dim), 0, 0,
y.device_data(), alpha, x.device_data(), beta, z.device_data(), n);
}
// copy one vector into another y := x
// x and y are vectors of length N
void ss_copy(Field& y, Field const& x)
{
const int n = x.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
hipLaunchKernelGGL(( kernels::copy), dim3(grid_dim), dim3(block_dim), 0, 0,
y.device_data(), x.device_data(), n);
}
// conjugate gradient solver
// solve the linear system A*x = b for x
// the matrix A is implicit in the objective function for the diffusion equation
// the value in x constitute the "first guess" at the solution
// x(N)
// ON ENTRY contains the initial guess for the solution
// ON EXIT contains the solution
void ss_cg(Field& x, Field const& b, const int maxiters, const double tol, bool& success)
{
// this is the dimension of the linear system that we are to solve
int nx = data::domain.nx;
int ny = data::domain.ny;
if(!cg_initialized) {
cg_init(nx,ny);
}
// epsilon value use for matrix-vector approximation
double eps = 1.e-8;
double eps_inv = 1. / eps;
// allocate memory for temporary storage
ss_fill(Fx, 0.0);
ss_fill(Fxold, 0.0);
ss_copy(xold, x);
// matrix vector multiplication is approximated with
// A*v = 1/epsilon * ( F(x+epsilon*v) - F(x) )
// = 1/epsilon * ( F(x+epsilon*v) - Fxold )
// we compute Fxold at startup
// we have to keep x so that we can compute the F(x+exps*v)
diffusion(x, Fxold);
// v = x + epsilon*x
ss_scale(v, 1.0 + eps, x);
// Fx = F(v)
diffusion(v, Fx);
// r = b - A*x
// where A*x = (Fx-Fxold)/eps
ss_add_scaled_diff(r, b, -eps_inv, Fx, Fxold);
// p = r
ss_copy(p, r);
// rold = <r,r>
double rold = ss_dot(r, r);
double rnew = rold;
// check for convergence
success = sqrt(rold) < tol;
if (success) {
return;
}
int iter;
for(iter=0; iter<maxiters; iter++) {
// Ap = A*p
ss_lcomb(v, 1.0, xold, eps, p);
diffusion(v, Fx);
ss_scaled_diff(Ap, eps_inv, Fx, Fxold);
// alpha = rold / p'*Ap
double alpha = rold / ss_dot(p, Ap);
// x += alpha*p
ss_axpy(x, alpha, p);
// r -= alpha*Ap
ss_axpy(r, -alpha, Ap);
// find new norm
rnew = ss_dot(r, r);
// test for convergence
if (sqrt(rnew) < tol) {
success = true;
break;
}
// p = r + (rnew/rold) * p
ss_lcomb(p, 1.0, r, rnew / rold, p);
rold = rnew;
}
stats::iters_cg += iter + 1;
if (!success && !data::domain.rank) {
std::cerr << "ERROR: CG failed to converge after " << iter
<< " iterations, with residual " << sqrt(rnew)
<< std::endl;
}
}
} // namespace linalg
| 69becae57d933f68bdedc07af5f4d7f6b37abe0a.cu | // linear algebra subroutines
// Ben Cumming @ CSCS
#include <iostream>
#include <cmath>
#include <cstdio>
#include <mpi.h>
#include "linalg.h"
#include "operators.h"
#include "stats.h"
#include "data.h"
namespace linalg {
namespace kernels {
__global__
void fill(
double *y,
const double value,
int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] = value;
}
}
__global__
void axpy(
double* y,
const double alpha,
const double* x,
int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] += alpha * x[i];
}
}
__global__
void add_scaled_diff(
double *y,
const double* x,
const double alpha,
const double *l,
const double *r,
const int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] = x[i] + alpha * (l[i] - r[i]);
}
}
__global__
void scaled_diff(
double *y,
const double alpha,
const double* l,
const double* r,
int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] = alpha * (l[i] - r[i]);
}
}
__global__
void scale(
double *y,
const double alpha,
const double *x,
int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] = alpha * x[i];
}
}
__global__
void lcomb(
double *y,
const double alpha,
const double *x,
const double beta,
const double *z,
int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] = alpha * x[i] + beta * z[i];
}
}
__global__
void copy(
double *y,
const double* x,
int n)
{
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if(i < n) {
y[i] = x[i];
}
}
} // namespace kernels
bool cg_initialized = false;
Field r;
Field Ap;
Field p;
Field Fx;
Field Fxold;
Field v;
Field xold;
// block dimensions for blas 1 calls
const int block_dim = 192;
int calculate_grid_dim(const int block_dim, int n) {
return (n+block_dim-1)/block_dim;
}
using namespace operators;
using namespace stats;
using data::Field;
// initialize temporary storage fields used by the cg solver
// I do this here so that the fields are persistent between calls
// to the CG solver. This is useful if we want to avoid malloc/free calls
// on the device for the OpenACC implementation
void cg_init(int nx, int ny)
{
Ap.init(nx,ny);
r.init(nx,ny);
p.init(nx,ny);
Fx.init(nx,ny);
Fxold.init(nx,ny);
v.init(nx,ny);
xold.init(nx,ny);
cg_initialized = true;
}
////////////////////////////////////////////////////////////////////////////////
// blas level 1 reductions
////////////////////////////////////////////////////////////////////////////////
// computes the inner product of x and y
// x and y are vectors on length N
double ss_dot(Field const& x, Field const& y)
{
double result = 0;
double result_global = 0;
const int N = x.length();
auto status =
cublasDdot(
cublas_handle(), N,
x.device_data(), 1,
y.device_data(), 1,
&result
);
MPI_Allreduce(&result, &result_global,
1, MPI_DOUBLE, MPI_SUM,
MPI_COMM_WORLD);
return result_global;
}
// computes the 2-norm of x
// x is a vector on length N
double ss_norm2(Field const& x)
{
double result = 0;
double result_global = 0;
const int n = x.length();
auto status =
cublasDnrm2(
cublas_handle(), n,
x.device_data(), 1,
&result
);
// take the square of the result, because we still have to sum of the local
// partial sums before taking sqrt of the full global sum
result *= result;
MPI_Allreduce(&result, &result_global,
1, MPI_DOUBLE, MPI_SUM,
MPI_COMM_WORLD);
result_global = sqrt(result_global);
return(result_global);
//return sqrt(result_global);
}
// sets entries in a vector to value
// x is a vector on length N
// value is th
void ss_fill(Field& x, const double value)
{
const int n = x.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
kernels::fill<<<grid_dim, block_dim>>>(x.device_data(), value, n);
}
////////////////////////////////////////////////////////////////////////////////
// blas level 1 vector-vector operations
////////////////////////////////////////////////////////////////////////////////
// computes y := alpha*x + y
// x and y are vectors on length N
// alpha is a scalar
void ss_axpy(Field& y, const double alpha, Field const& x)
{
const int n = y.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
kernels::axpy<<<grid_dim, block_dim>>>
(y.device_data(), alpha, x.device_data(), x.length());
}
// computes y = x + alpha*(l-r)
// y, x, l and r are vectors of length N
// alpha is a scalar
void ss_add_scaled_diff(Field& y, Field const& x, const double alpha,
Field const& l, Field const& r)
{
const int n = y.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
kernels::add_scaled_diff<<<grid_dim, block_dim>>>
(y.device_data(), x.device_data(), alpha, l.device_data(), r.device_data(), n);
}
// computes y = alpha*(l-r)
// y, l and r are vectors of length N
// alpha is a scalar
void ss_scaled_diff(Field& y, const double alpha,
Field const& l, Field const& r)
{
const int n = y.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
kernels::scaled_diff<<<grid_dim, block_dim>>>
(y.device_data(), alpha, l.device_data(), r.device_data(), n);
}
// computes y := alpha*x
// alpha is scalar
// y and x are vectors on length n
void ss_scale(Field& y, const double alpha, Field& x)
{
const int n = x.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
kernels::scale<<<grid_dim, block_dim>>>
(y.device_data(), alpha, x.device_data(), n);
}
// computes linear combination of two vectors y := alpha*x + beta*z
// alpha and beta are scalar
// y, x and z are vectors on length n
void ss_lcomb(Field& y, const double alpha, Field& x, const double beta,
Field const& z)
{
const int n = x.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
kernels::lcomb<<<grid_dim, block_dim>>>
(y.device_data(), alpha, x.device_data(), beta, z.device_data(), n);
}
// copy one vector into another y := x
// x and y are vectors of length N
void ss_copy(Field& y, Field const& x)
{
const int n = x.length();
auto grid_dim = calculate_grid_dim(block_dim, n);
kernels::copy<<<grid_dim, block_dim>>>
(y.device_data(), x.device_data(), n);
}
// conjugate gradient solver
// solve the linear system A*x = b for x
// the matrix A is implicit in the objective function for the diffusion equation
// the value in x constitute the "first guess" at the solution
// x(N)
// ON ENTRY contains the initial guess for the solution
// ON EXIT contains the solution
void ss_cg(Field& x, Field const& b, const int maxiters, const double tol, bool& success)
{
// this is the dimension of the linear system that we are to solve
int nx = data::domain.nx;
int ny = data::domain.ny;
if(!cg_initialized) {
cg_init(nx,ny);
}
// epsilon value use for matrix-vector approximation
double eps = 1.e-8;
double eps_inv = 1. / eps;
// allocate memory for temporary storage
ss_fill(Fx, 0.0);
ss_fill(Fxold, 0.0);
ss_copy(xold, x);
// matrix vector multiplication is approximated with
// A*v = 1/epsilon * ( F(x+epsilon*v) - F(x) )
// = 1/epsilon * ( F(x+epsilon*v) - Fxold )
// we compute Fxold at startup
// we have to keep x so that we can compute the F(x+exps*v)
diffusion(x, Fxold);
// v = x + epsilon*x
ss_scale(v, 1.0 + eps, x);
// Fx = F(v)
diffusion(v, Fx);
// r = b - A*x
// where A*x = (Fx-Fxold)/eps
ss_add_scaled_diff(r, b, -eps_inv, Fx, Fxold);
// p = r
ss_copy(p, r);
// rold = <r,r>
double rold = ss_dot(r, r);
double rnew = rold;
// check for convergence
success = sqrt(rold) < tol;
if (success) {
return;
}
int iter;
for(iter=0; iter<maxiters; iter++) {
// Ap = A*p
ss_lcomb(v, 1.0, xold, eps, p);
diffusion(v, Fx);
ss_scaled_diff(Ap, eps_inv, Fx, Fxold);
// alpha = rold / p'*Ap
double alpha = rold / ss_dot(p, Ap);
// x += alpha*p
ss_axpy(x, alpha, p);
// r -= alpha*Ap
ss_axpy(r, -alpha, Ap);
// find new norm
rnew = ss_dot(r, r);
// test for convergence
if (sqrt(rnew) < tol) {
success = true;
break;
}
// p = r + (rnew/rold) * p
ss_lcomb(p, 1.0, r, rnew / rold, p);
rold = rnew;
}
stats::iters_cg += iter + 1;
if (!success && !data::domain.rank) {
std::cerr << "ERROR: CG failed to converge after " << iter
<< " iterations, with residual " << sqrt(rnew)
<< std::endl;
}
}
} // namespace linalg
|
ebb3ef4548f978958fb52f619f5090f52d37fb63.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "commons.cuh"
#include "training/routine.cuh"
__global__
void matmul(uint32_t n, uint32_t m, uint32_t p, float *A, float *B, float *C) {
__shared__ float Ab[MATMUL_TILE_DIM][MATMUL_TILE_DIM + 1];
__shared__ float Bb[MATMUL_TILE_DIM][MATMUL_TILE_DIM + 1];
uint32_t idx_x = (blockIdx.x * MATMUL_TILE_DIM) + threadIdx.x;
uint32_t idx_y = (blockIdx.y * MATMUL_TILE_DIM) + threadIdx.y;
float C_temp[4] = {0.0, 0.0, 0.0, 0.0};
for (uint32_t tile = 0; tile < m; tile += MATMUL_TILE_DIM) {
for(uint32_t j = 0; j < MATMUL_TILE_DIM; j += MATMUL_BLOCK_DIM_Y) {
if(tile + threadIdx.x < m && (idx_y + j) < n)
Ab[threadIdx.y + j][threadIdx.x] = A[(idx_y + j) * m + tile + threadIdx.x];
else
Ab[threadIdx.y + j][threadIdx.x] = 0.0;
if(tile + threadIdx.y + j < m && idx_x < p)
Bb[threadIdx.y + j][threadIdx.x] = B[(tile + threadIdx.y + j) * p + idx_x];
else
Bb[threadIdx.y + j][threadIdx.x] = 0.0;
}
__syncthreads();
for(uint32_t j = 0; j < MATMUL_TILE_DIM; j += MATMUL_BLOCK_DIM_Y) {
for(uint32_t i = 0; i < MATMUL_BLOCK_DIM_X; i += 1) {
C_temp[j / MATMUL_BLOCK_DIM_Y] += Ab[threadIdx.y + j][i] * Bb[i][threadIdx.x];
}
}
__syncthreads();
}
for(uint32_t j = 0; j < MATMUL_TILE_DIM; j += MATMUL_BLOCK_DIM_Y) {
if((idx_y + j) < n && idx_x < p) {
C[(idx_y + j) * p + idx_x] = C_temp[j / MATMUL_BLOCK_DIM_Y];
}
}
}
| ebb3ef4548f978958fb52f619f5090f52d37fb63.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "commons.cuh"
#include "training/routine.cuh"
__global__
void matmul(uint32_t n, uint32_t m, uint32_t p, float *A, float *B, float *C) {
__shared__ float Ab[MATMUL_TILE_DIM][MATMUL_TILE_DIM + 1];
__shared__ float Bb[MATMUL_TILE_DIM][MATMUL_TILE_DIM + 1];
uint32_t idx_x = (blockIdx.x * MATMUL_TILE_DIM) + threadIdx.x;
uint32_t idx_y = (blockIdx.y * MATMUL_TILE_DIM) + threadIdx.y;
float C_temp[4] = {0.0, 0.0, 0.0, 0.0};
for (uint32_t tile = 0; tile < m; tile += MATMUL_TILE_DIM) {
for(uint32_t j = 0; j < MATMUL_TILE_DIM; j += MATMUL_BLOCK_DIM_Y) {
if(tile + threadIdx.x < m && (idx_y + j) < n)
Ab[threadIdx.y + j][threadIdx.x] = A[(idx_y + j) * m + tile + threadIdx.x];
else
Ab[threadIdx.y + j][threadIdx.x] = 0.0;
if(tile + threadIdx.y + j < m && idx_x < p)
Bb[threadIdx.y + j][threadIdx.x] = B[(tile + threadIdx.y + j) * p + idx_x];
else
Bb[threadIdx.y + j][threadIdx.x] = 0.0;
}
__syncthreads();
for(uint32_t j = 0; j < MATMUL_TILE_DIM; j += MATMUL_BLOCK_DIM_Y) {
for(uint32_t i = 0; i < MATMUL_BLOCK_DIM_X; i += 1) {
C_temp[j / MATMUL_BLOCK_DIM_Y] += Ab[threadIdx.y + j][i] * Bb[i][threadIdx.x];
}
}
__syncthreads();
}
for(uint32_t j = 0; j < MATMUL_TILE_DIM; j += MATMUL_BLOCK_DIM_Y) {
if((idx_y + j) < n && idx_x < p) {
C[(idx_y + j) * p + idx_x] = C_temp[j / MATMUL_BLOCK_DIM_Y];
}
}
}
|
ad42d2cc6544b788c00550ad0bf7183e49a7d1ef.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include <time.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
extern "C" {
#include "../bnf-parser/expression.h"
#include "../bnf-parser/Parser.h"
#include "../bnf-parser/Lexer.h"
#include "../bnf-xml-parser/xml-expression.h"
}
int yyparse(struct expression ** expr, yyscan_t scanner);
static void CheckCudaErrorAux (const char *, unsigned, const char *, hipError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
// Seeds the per-node message buffer with each node's current state values.
// Grid layout: x indexes nodes, y indexes states; each node occupies a
// MAX_STATES-wide slice of both arrays, of which only the first
// node_num_vars[node] entries are meaningful.
__global__
void init_message_buffer_kernel(float *message_buffer, float *node_states, unsigned int *node_num_vars,
                                unsigned int num_nodes){
    const unsigned int node = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int state = blockIdx.y * blockDim.y + threadIdx.y;
    if(node >= num_nodes){
        return;
    }
    if(state < node_num_vars[node]){
        const unsigned int slot = node * MAX_STATES + state;
        message_buffer[slot] = node_states[slot];
    }
}
// Multiplies one edge's message into one node's slice of the message buffer:
// dest[node_index + s] *= edge_messages[edge_offset + s] for each state s
// handled by this thread (threadIdx.z indexes the state dimension).
// `node_index` and `edge_offset` arrive pre-scaled by MAX_STATES from both
// call sites; `num_edges` is the unscaled edge count.
// Fixes relative to the previous version:
//  * the result was stored to dest[edge_offset + index] even though the value
//    read and combined came from dest[node_index + index]; since dest is the
//    vertex-sized message buffer and edge_offset is edge-scaled, that store
//    could land out of bounds and the intended node slot was never updated;
//  * the bounds guard compared the MAX_STATES-scaled offset against the
//    unscaled edge count; it now scales the bound to match;
//  * the shared-memory staging (each thread only ever touched its own slot)
//    and its __syncthreads() inside a divergent branch -- undefined behavior
//    when some threads skip the barrier -- are removed.
// n_is_pow_2 / warp_size are unused but kept for signature compatibility.
__device__
void combine_message_cuda(float * dest, float * edge_messages, unsigned int length, unsigned int node_index,
                          unsigned int edge_offset, unsigned int num_edges, char n_is_pow_2, unsigned int warp_size){
    unsigned int index;

    index = threadIdx.z;
    if(index < length && edge_offset + index < num_edges * MAX_STATES){
        dest[node_index + index] *= edge_messages[edge_offset + index];
    }
}
// Folds every node's incoming edge messages (previous iteration) into that
// node's slice of message_buffer.
// Grid layout: x indexes nodes, y indexes a node's incoming-edge slot;
// combine_message_cuda additionally uses threadIdx.z for the state dimension.
// dest_node_to_edges_nodes[v] is the CSR-style offset into
// dest_node_to_edges_edges where node v's incoming-edge list begins; the list
// ends at the next node's offset (or current_num_edges for the last node).
__global__
void read_incoming_messages_kernel(float *message_buffer, float *previous_messages,
                                   unsigned int * dest_node_to_edges_nodes,
                                   unsigned int * dest_node_to_edges_edges,
                                   unsigned int current_num_edges,
                                   unsigned int *node_num_vars, unsigned int num_vertices,
                                   char n_is_pow_2, unsigned int warp_size){
    unsigned int node_index, edge_index, start_index, end_index, diff_index, tmp_index, num_variables;

    node_index = blockIdx.x*blockDim.x + threadIdx.x;
    edge_index = blockIdx.y*blockDim.y + threadIdx.y;

    if(node_index < num_vertices) {
        num_variables = node_num_vars[node_index];

        // CSR range of incoming edges for this node.
        start_index = dest_node_to_edges_nodes[node_index];
        if (node_index + 1 >= num_vertices) {
            end_index = current_num_edges;
        } else {
            end_index = dest_node_to_edges_nodes[node_index + 1];
        }
        diff_index = end_index - start_index;
        if (edge_index < diff_index) {
            // Map the local slot to a global edge id, then combine that
            // edge's message into this node's buffer slice (both offsets are
            // pre-scaled by MAX_STATES for combine_message_cuda).
            tmp_index = dest_node_to_edges_edges[edge_index + start_index];
            combine_message_cuda(message_buffer, previous_messages, num_variables, MAX_STATES * node_index,
                                 MAX_STATES * tmp_index, current_num_edges, n_is_pow_2, warp_size);
        }
    }
}
// Computes the outgoing message for one edge: multiplies the node's buffered
// belief by the edge's joint-probability table, then normalizes so the
// message sums to 1 (a non-positive sum leaves the raw values untouched by
// dividing by 1).  Runs entirely in one thread; no synchronization needed.
// x_dim/y_dim give the per-edge source/destination state counts.
// Fix: float literals ('0.0f'/'1.0f') replace double literals, avoiding
// silent double promotion in float-only device arithmetic.
__device__
void send_message_for_edge_cuda(float * message_buffer, unsigned int edge_index, unsigned int node_index,
                                float * joint_probabilities, float * edge_messages,
                                unsigned int * x_dim, unsigned int * y_dim){
    unsigned int i, j, num_src, num_dest;
    float sum, partial_sum;

    num_src = x_dim[edge_index];
    num_dest = y_dim[edge_index];

    sum = 0.0f;
    for(i = 0; i < num_src; ++i){
        // Row i of the edge's MAX_STATES x MAX_STATES joint table dotted with
        // the node's buffered belief.
        partial_sum = 0.0f;
        for(j = 0; j < num_dest; ++j){
            partial_sum += joint_probabilities[MAX_STATES * MAX_STATES * edge_index + MAX_STATES * i + j] * message_buffer[MAX_STATES * node_index + j];
        }
        sum += partial_sum;
        edge_messages[edge_index * MAX_STATES + i] = partial_sum;
    }
    // Guard against division by zero (or a degenerate all-zero message).
    if(sum <= 0.0f){
        sum = 1.0f;
    }
    // Normalize the message in place.
    for(i = 0; i < num_src; ++i){
        edge_messages[edge_index * MAX_STATES + i] = edge_messages[edge_index * MAX_STATES + i] / sum;
    }
}
// Emits the outgoing message for every edge whose source is this node.
// Grid layout: x indexes nodes, y indexes a node's outgoing-edge slot.
// src_node_to_edges_nodes[v] is the CSR-style offset into
// src_node_to_edges_edges where node v's outgoing-edge list begins; the list
// ends at the next node's offset (or current_num_edges for the last node).
__global__
void send_message_for_node_kernel(float * message_buffer, unsigned int current_num_edges,
                                  float * joint_probabilities, float * current_edge_messages,
                                  unsigned int * src_node_to_edges_nodes,
                                  unsigned int * src_node_to_edges_edges,
                                  unsigned int * edges_x_dim, unsigned int * edges_y_dim,
                                  unsigned int num_vertices){
    const unsigned int node = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int local_slot = blockIdx.y * blockDim.y + threadIdx.y;
    if(node >= num_vertices){
        return;
    }
    // CSR range of outgoing edges for this node.
    const unsigned int first = src_node_to_edges_nodes[node];
    const unsigned int last = (node + 1 >= num_vertices)
                              ? current_num_edges
                              : src_node_to_edges_nodes[node + 1];
    if(local_slot < last - first){
        // Translate the local slot into a global edge id and send its message.
        const unsigned int edge = src_node_to_edges_edges[first + local_slot];
        send_message_for_edge_cuda(message_buffer, edge, node, joint_probabilities,
                                   current_edge_messages, edges_x_dim, edges_y_dim);
    }
}
// First half of marginalization: resets each node's message-buffer slice to
// 1.0 and multiplies in the current messages of all incoming edges.
// Grid layout: x indexes nodes, y doubles as both the state index (for the
// reset) and the incoming-edge slot (for the combine).
// NOTE(review): there is no barrier between the 1.0 reset and the
// combine_message_cuda call, and different y-threads touch overlapping buffer
// slices -- a write/read race looks possible here; verify against the launch
// configuration before relying on the result.
__global__
void marginalize_node_combine_kernel(unsigned int * node_num_vars, float * message_buffer, float * node_states,
                                     float * current_edges_messages,
                                     unsigned int * dest_node_to_edges_nodes,
                                     unsigned int * dest_node_to_edges_edges,
                                     unsigned int num_vertices,
                                     unsigned int num_edges, char n_is_pow_2, unsigned int warp_size){
    unsigned int node_index, edge_index, temp_edge_index, num_variables, start_index, end_index, diff_index;

    node_index = blockIdx.x*blockDim.x + threadIdx.x;
    edge_index = blockIdx.y*blockDim.y + threadIdx.y;

    if(node_index < num_vertices) {
        num_variables = node_num_vars[node_index];

        // Reset this node's buffer slice to the multiplicative identity.
        if(edge_index < num_variables){
            message_buffer[MAX_STATES * node_index + edge_index] = 1.0;
        }

        // CSR range of incoming edges for this node.
        start_index = dest_node_to_edges_nodes[node_index];
        if(node_index + 1 >= num_vertices){
            end_index = num_edges;
        }
        else{
            end_index = dest_node_to_edges_nodes[node_index + 1];
        }
        diff_index = end_index - start_index;
        if(edge_index < diff_index){
            // Multiply this incoming edge's current message into the buffer
            // (offsets pre-scaled by MAX_STATES for combine_message_cuda).
            temp_edge_index = dest_node_to_edges_edges[edge_index + start_index];
            combine_message_cuda(message_buffer, current_edges_messages, num_variables, node_index * MAX_STATES, temp_edge_index * MAX_STATES, num_edges, n_is_pow_2, warp_size);
        }
    }
}
// Second half of marginalization: normalizes each node's combined belief from
// message_buffer into node_states (each state divided by the node's sum).
// Grid layout: x indexes nodes, y indexes states; sum[] holds one accumulator
// per node column of the block.
// Fixes relative to the previous version:
//  * the outer guard read num_variables before it was ever assigned
//    (undefined behavior); it now checks node_index < num_vertices, matching
//    every other kernel in this file;
//  * shared_message_buffer was multiplied ('*=') while still uninitialized;
//    it is now assigned from message_buffer;
//  * all __syncthreads() calls were inside divergent branches (illegal when
//    some threads skip them); barriers are now unconditional, with the
//    per-thread work gated by an 'active' flag;
//  * float literals replace double literals.
// NOTE(review): like the original, the sum accumulator is only zeroed by the
// state-0 thread, so blocks with blockIdx.y > 0 would read an unzeroed sum --
// the launch must keep all states of a node in one block.  current_edges_messages,
// the edge lists, and num_edges are unused but kept for signature compatibility.
__global__
void marginalize_sum_node_kernel(unsigned int * node_num_vars, float * message_buffer, float * node_states,
                                 float * current_edges_messages,
                                 unsigned int * dest_node_to_edges_nodes,
                                 unsigned int * dest_node_to_edges_edges,
                                 unsigned int num_vertices,
                                 unsigned int num_edges, char n_is_pow_2, unsigned int warp_size){
    __shared__ float sum[BLOCK_SIZE_2_D_X];
    __shared__ float shared_message_buffer[BLOCK_SIZE_2_D_X][BLOCK_SIZE_2_D_Y];

    const unsigned int node_index = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int state_index = blockIdx.y*blockDim.y + threadIdx.y;

    char active = 0;
    if(node_index < num_vertices){
        active = state_index < node_num_vars[node_index];
    }

    if(active){
        // Zero this node's accumulator (state-0 thread only), then stage the
        // combined belief value for this (node, state) pair.
        if(state_index == 0){
            sum[threadIdx.x] = 0.0f;
        }
        shared_message_buffer[threadIdx.x][threadIdx.y] = message_buffer[MAX_STATES * node_index + state_index];
    }
    __syncthreads();

    if(active){
        atomicAdd(&sum[threadIdx.x], shared_message_buffer[threadIdx.x][threadIdx.y]);
    }
    __syncthreads();

    // Avoid dividing by a zero (or degenerate) sum.
    if(active && threadIdx.y == 0 && sum[threadIdx.x] <= 0.0f){
        sum[threadIdx.x] = 1.0f;
    }
    __syncthreads();

    if(active){
        node_states[MAX_STATES * node_index + state_index] = shared_message_buffer[threadIdx.x][threadIdx.y] / sum[threadIdx.x];
    }
}
// Returns the total absolute change of one edge's message between iterations.
// NaN differences (e.g. from uninitialized slots) are treated as zero via the
// self-inequality test.
__device__
float calculate_local_delta(unsigned int i, float * previous_messages, float * current_messages, unsigned int * edges_x_dim){
    const unsigned int num_states = edges_x_dim[i];
    float total = 0.0f;

    for(unsigned int k = 0; k < num_states; ++k){
        float diff = previous_messages[MAX_STATES * i + k] - current_messages[MAX_STATES * i + k];
        if(diff != diff){ // NaN check: NaN is the only value unequal to itself
            diff = 0.0f;
        }
        total += fabs(diff);
    }
    return total;
}
// Convergence measure: sums the per-edge absolute message change into *delta.
// Phase 1 writes each edge's delta into delta_array; phase 2 is a
// shared-memory tree reduction (first-add-during-load, warp-unrolled tail).
// NOTE(review): thread 0 of *every* block stores its block total straight to
// *delta with no atomic, so with a multi-block grid only the last writer's
// block sum survives -- confirm the launch configuration tolerates this.
// NOTE(review): phase-1 writes and phase-2 reads of delta_array are only
// separated by a block-local __syncthreads(); cross-block ordering of
// delta_array is not guaranteed.
__global__
void calculate_delta(float * previous_messages, float * current_messages, float * delta, float * delta_array, unsigned int * edges_x_dim, unsigned int num_edges){
    extern __shared__ float shared_delta[];
    unsigned int tid, idx, i, s;

    tid = threadIdx.x;
    idx = blockIdx.x*blockDim.x + threadIdx.x;
    i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;

    // Phase 1: materialize this edge's delta.
    if(idx < num_edges){
        delta_array[idx] = calculate_local_delta(idx, previous_messages, current_messages, edges_x_dim);
    }
    __syncthreads();

    // First reduction level happens during the load: each thread sums two
    // elements BLOCK_SIZE apart.
    float my_delta = (i < num_edges) ? delta_array[i] : 0;
    if(i + BLOCK_SIZE < num_edges){
        my_delta += delta_array[i + BLOCK_SIZE];
    }

    shared_delta[tid] = my_delta;
    __syncthreads();

    // do reduction in shared memory
    for(s= blockDim.x / 2; s > 32; s>>=1){
        if(tid < s){
            shared_delta[tid] = my_delta = my_delta + shared_delta[tid + s];
        }
        __syncthreads();
    }
#if (__CUDA_ARCH__ >= 300)
    if(tid < 32){
        //fetch final intermediate sum from second warp
        if(BLOCK_SIZE >= 64){
            my_delta += shared_delta[tid + 32];
        }
        // shuffle-based reduction across the final warp
        for(s = WARP_SIZE/2; s > 0; s /= 2){
            my_delta += __shfl_down(my_delta, s);
        }
    }
#else
    // fully unrolled warp-level tail for older architectures
    if((BLOCK_SIZE >= 64) && (tid < 32)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 32];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 32) && (tid < 16)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 16];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 16) && (tid < 8)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 8];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 8) && (tid < 4)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 4];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 4) && (tid < 2)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 2];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 2) && (tid < 1)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 1];
    }
    __syncthreads();
#endif
    // Publish this block's result (see multi-block NOTE above).
    if(tid == 0) {
        *delta = my_delta;
    }
}
// Convergence-delta reduction modeled on the classic "reduce6" kernel:
// grid-stride loads with first-add-during-load, a shared-memory tree, and a
// warp-unrolled (or shuffle-based) tail.
// Fixes relative to the previous version:
//  * the grid-stride loop accumulated with '=' instead of '+=', discarding
//    every stride's value but the last;
//  * the paired element is delta_array[i + blockDim.x], not delta_array[i]
//    again (the old code added the same element twice);
//  * the pair load is now bounds-checked unconditionally instead of trusting
//    the caller-supplied n_is_pow_2 flag (kept for signature compatibility).
// NOTE(review): thread 0 of every block still writes its block total straight
// to *delta without an atomic, so a multi-block launch keeps only the last
// writer's sum -- pre-existing behavior, deliberately left unchanged here.
__global__
void calculate_delta_6(float * previous_messages, float * current_messages, float * delta, float * delta_array,
                       unsigned int * edges_x_dim,
                       unsigned int num_edges, char n_is_pow_2, unsigned int warp_size) {
    extern __shared__ float shared_delta[];
    unsigned int offset;

    unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    unsigned int grid_size = blockDim.x * 2 * gridDim.x;

    // Phase 1: materialize each edge's delta into global scratch.
    if(idx < num_edges){
        delta_array[idx] = calculate_local_delta(idx, previous_messages, current_messages, edges_x_dim);
    }
    __syncthreads();

    // Phase 2: grid-stride accumulation, two elements per step.
    float my_delta = 0.0f;
    while (i < num_edges) {
        my_delta += delta_array[i];
        // ensure we don't read out of bounds
        if (i + blockDim.x < num_edges) {
            my_delta += delta_array[i + blockDim.x];
        }
        i += grid_size;
    }

    // each thread puts its local sum into shared memory
    shared_delta[tid] = my_delta;
    __syncthreads();

    // do reduction in shared mem
    if ((blockDim.x >= 512) && (tid < 256)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 256];
    }
    __syncthreads();
    if ((blockDim.x >= 256) && (tid < 128)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 128];
    }
    __syncthreads();
    if ((blockDim.x >= 128) && (tid < 64)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 64];
    }
    __syncthreads();
#if (__CUDA_ARCH__ >= 300)
    if( tid < 32){
        // fetch final intermediate sum from 2nd warp
        if(blockDim.x >= 64){
            my_delta += shared_delta[tid + 32];
        }
        for(offset = warp_size/2; offset > 0; offset /= 2 ){
            my_delta += __shfl_down(my_delta, offset);
        }
    }
#else
    // fully unroll reduction within a single warp
    if ((blockDim.x >= 64) && (tid < 32)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 32];
    }
    __syncthreads();
    if ((blockDim.x >= 32) && (tid < 16)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 16];
    }
    __syncthreads();
    if((blockDim.x >= 16) && (tid < 8)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 8];
    }
    __syncthreads();
    if((blockDim.x >= 8) && (tid < 4)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 4];
    }
    __syncthreads();
    if((blockDim.x >= 4) && (tid < 2)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 2];
    }
    __syncthreads();
    if((blockDim.x >= 2) && (tid < 1)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 1];
    }
    __syncthreads();
#endif
    // write result for this block to global mem
    if(tid == 0){
        *delta = my_delta;
    }
}
// Simplest reduction variant: per-edge deltas into delta_array, then an
// interleaved-addressing tree reduction in shared memory.
// NOTE(review): thread 0 of every block stores its block total straight to
// *delta with no atomic, so a multi-block grid keeps only the last writer's
// sum -- same caveat as the other delta kernels.
// NOTE(review): the tree step indexes shared_delta[i + s] with only
// 'i < blockDim.x' as a guard; this stays in bounds only when blockDim.x is a
// power of two -- confirm the launch uses one.
__global__
void calculate_delta_simple(float * previous_messages, float * current_messages,
                            float * delta, float * delta_array, unsigned int * edges_x_dim,
                            unsigned int num_edges) {
    extern __shared__ float shared_delta[];
    unsigned int tid, idx, i, s;

    tid = threadIdx.x;
    idx = blockIdx.x * blockDim.x + threadIdx.x;

    // Phase 1: materialize this edge's delta.
    if (idx < num_edges) {
        delta_array[idx] = calculate_local_delta(idx, previous_messages, current_messages, edges_x_dim);
    }
    __syncthreads();

    // Load into shared memory; out-of-range threads contribute zero.
    shared_delta[tid] = (idx < num_edges) ? delta_array[idx] : 0;
    __syncthreads();

    // do reduction in shared mem (interleaved addressing)
    for(s = 1; s < blockDim.x; s *= 2){
        i = 2 * s * tid;
        if( i < blockDim.x ) {
            shared_delta[i] += shared_delta[i + s];
        }
        __syncthreads();
    }

    //write result for this block to global mem
    if(tid == 0){
        *delta = shared_delta[0];
    }
}
// Configures a 1D unsigned-int texture reference: wrap addressing on both
// axes, point (nearest) filtering, normalized coordinates.
// NOTE(review): not referenced anywhere in this file -- likely legacy.
static void prepare_unsigned_int_text(texture<unsigned int, hipTextureType1D, hipReadModeElementType> * tex){
    tex->normalized = 1;
    tex->filterMode = hipFilterModePoint;
    tex->addressMode[0] = hipAddressModeWrap;
    tex->addressMode[1] = hipAddressModeWrap;
}
// Aborts the process if the most recent kernel launch (or async error)
// left a sticky error behind; called after every kernel launch below.
static void check_cuda_kernel_return_code(){
    const hipError_t status = hipGetLastError();
    if (status == hipSuccess) {
        return;
    }
    fprintf(stderr, "Error: %s\n", hipGetErrorString(status));
    exit(-1);
}
/*
 * Runs loopy belief propagation on the GPU until the summed message delta
 * drops below `convergence` (checked once every BATCH_SIZE iterations) or
 * `max_iterations` iterations have run.  Copies graph state to the device,
 * iterates the four BP kernels with a current/previous message ping-pong,
 * then copies node states and messages back.  Returns the number of
 * iterations executed.
 *
 * Fixes relative to the previous version:
 *  * graph->last_edges_messages was copied into current_messages (a second
 *    time) instead of previous_messages, leaving previous_messages
 *    uninitialized on the device; it now initializes previous_messages;
 *  * previous_delta was read uninitialized in the first convergence check;
 *    it now starts at INFINITY so the first check depends only on host_delta;
 *  * is_pow_2 tested evenness ("num_vertices % 2 == 0"), not power-of-two;
 *    it now applies the standard bit trick to num_edges (the count the
 *    reduction kernel is guarding);
 *  * a mojibake-corrupted "&current_messages" in its hipMalloc call is
 *    repaired;
 *  * unused locals (reduce2DSmemSize, dimDegreeGrid, blockDegreeCount) are
 *    removed.
 */
unsigned int loopy_propagate_until_cuda(Graph_t graph, float convergence, unsigned int max_iterations){
    unsigned int i, j, num_iter, num_vertices, num_edges;
    float * delta;
    float * delta_array;
    float previous_delta, host_delta;
    char is_pow_2;

    float * edges_joint_probabilities;

    float * message_buffer;

    float * current_messages;
    float * previous_messages;
    float * temp;

    unsigned int * edges_x_dim;
    unsigned int * edges_y_dim;

    unsigned int * src_nodes_to_edges_nodes;
    unsigned int * src_nodes_to_edges_edges;
    unsigned int * dest_nodes_to_edges_nodes;
    unsigned int * dest_nodes_to_edges_edges;

    float * node_states;
    unsigned int * node_num_vars;

    host_delta = 0.0f;
    previous_delta = INFINITY; /* first |host_delta - previous_delta| test is now well-defined */

    num_vertices = graph->current_num_vertices;
    num_edges = graph->current_num_edges;

    /* true power-of-two test on the edge count used by the delta reduction */
    is_pow_2 = (num_edges != 0) && ((num_edges & (num_edges - 1)) == 0);

    /* allocate device buffers */
    CUDA_CHECK_RETURN(hipMalloc((void**)&edges_x_dim, sizeof(unsigned int) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void**)&edges_y_dim, sizeof(unsigned int) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&edges_joint_probabilities, sizeof(float) * MAX_STATES * MAX_STATES * graph->current_num_edges));

    CUDA_CHECK_RETURN(hipMalloc((void **)&dest_nodes_to_edges_nodes, sizeof(unsigned int) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(hipMalloc((void **)&dest_nodes_to_edges_edges, sizeof(unsigned int) * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&src_nodes_to_edges_nodes, sizeof(unsigned int) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(hipMalloc((void **)&src_nodes_to_edges_edges, sizeof(unsigned int) * graph->current_num_edges));

    CUDA_CHECK_RETURN(hipMalloc((void **)&current_messages, sizeof(float) * MAX_STATES * graph->current_num_edges));
    CUDA_CHECK_RETURN(hipMalloc((void **)&previous_messages, sizeof(float) * MAX_STATES * graph->current_num_edges));

    CUDA_CHECK_RETURN(hipMalloc((void **)&node_states, sizeof(float) * MAX_STATES * graph->current_num_vertices));
    CUDA_CHECK_RETURN(hipMalloc((void **)&node_num_vars, sizeof(unsigned int) * graph->current_num_vertices));

    CUDA_CHECK_RETURN(hipMalloc((void **)&delta, sizeof(float)));
    CUDA_CHECK_RETURN(hipMalloc((void **)&delta_array, sizeof(float) * num_edges));

    CUDA_CHECK_RETURN(hipMalloc((void **)&message_buffer, sizeof(float) * num_vertices * MAX_STATES));

    /* copy graph state host -> device */
    CUDA_CHECK_RETURN(hipMemcpy(edges_joint_probabilities, graph->edges_joint_probabilities, sizeof(float) * MAX_STATES * MAX_STATES * graph->current_num_edges, hipMemcpyHostToDevice ));
    CUDA_CHECK_RETURN(hipMemcpy(current_messages, graph->edges_messages, sizeof(float) * MAX_STATES * graph->current_num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(previous_messages, graph->last_edges_messages, sizeof(float) * MAX_STATES * graph->current_num_edges, hipMemcpyHostToDevice));

    CUDA_CHECK_RETURN(hipMemcpy(node_num_vars, graph->node_num_vars, sizeof(unsigned int) * graph->current_num_vertices, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(node_states, graph->node_states, sizeof(float) * MAX_STATES * graph->current_num_vertices, hipMemcpyHostToDevice));

    CUDA_CHECK_RETURN(hipMemcpy(dest_nodes_to_edges_nodes, graph->dest_nodes_to_edges_node_list, sizeof(unsigned int) * graph->current_num_vertices, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(dest_nodes_to_edges_edges, graph->dest_nodes_to_edges_edge_list, sizeof(unsigned int) * graph->current_num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(src_nodes_to_edges_nodes, graph->src_nodes_to_edges_node_list, sizeof(unsigned int) * graph->current_num_vertices, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(src_nodes_to_edges_edges, graph->src_nodes_to_edges_edge_list, sizeof(unsigned int) * graph->current_num_edges, hipMemcpyHostToDevice));

    CUDA_CHECK_RETURN(hipMemcpy(edges_x_dim, graph->edges_x_dim, sizeof(unsigned int) * num_edges, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(edges_y_dim, graph->edges_y_dim, sizeof(unsigned int) * num_edges, hipMemcpyHostToDevice));

    /* launch geometry (ceil-div on every axis) */
    const int blockEdge1dCount = (num_edges + BLOCK_SIZE - 1)/ BLOCK_SIZE;
    const int blockNodeCount = (num_vertices + BLOCK_SIZE_2_D_X - 1)/BLOCK_SIZE_2_D_X;
    const int blockStateCount = (MAX_STATES + BLOCK_SIZE_2_D_Y - 1)/BLOCK_SIZE_2_D_Y;

    const int blockMessageNodeCount = (num_vertices + BLOCK_SIZE_3_D_X - 1)/BLOCK_SIZE_3_D_X;
    const int blockMessageDegreeCount = ( graph->max_degree + BLOCK_SIZE_3_D_Y - 1)/BLOCK_SIZE_3_D_Y;
    const int blockMessageStateCount = ( MAX_STATES + BLOCK_SIZE_3_D_Z - 1)/BLOCK_SIZE_3_D_Z;

    num_iter = 0;

    dim3 dimReduceBlock(BLOCK_SIZE, 1, 1);
    dim3 dimReduceGrid(blockEdge1dCount, 1, 1);
    int reduceSmemSize = (BLOCK_SIZE <= 32) ? 2 * BLOCK_SIZE * sizeof(float) : BLOCK_SIZE * sizeof(float);

    dim3 dimInitMessageBuffer(BLOCK_SIZE_2_D_X, BLOCK_SIZE_2_D_Y, 1);
    dim3 dimInitGrid(blockNodeCount, blockStateCount, 1);

    dim3 dimMessagesBuffer(BLOCK_SIZE_3_D_X, BLOCK_SIZE_3_D_Y, BLOCK_SIZE_3_D_Z);
    dim3 dimMessagesGrid(blockMessageNodeCount, blockMessageDegreeCount, blockMessageStateCount);

    for(i = 0; i < max_iterations; i+= BATCH_SIZE){
        for(j = 0; j < BATCH_SIZE; ++j) {
            /* 1. seed the message buffer from current node states */
            hipLaunchKernelGGL(( init_message_buffer_kernel), dim3(dimInitGrid), dim3(dimInitMessageBuffer), 0, 0, message_buffer, node_states, node_num_vars, num_vertices);
            check_cuda_kernel_return_code();

            /* 2. fold the previous iteration's messages into the buffer */
            hipLaunchKernelGGL(( read_incoming_messages_kernel) , dim3(dimMessagesGrid), dim3(dimMessagesBuffer), 0, 0, message_buffer, previous_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_edges, node_num_vars, num_vertices, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();

            /* 3. emit this iteration's outgoing messages */
            hipLaunchKernelGGL(( send_message_for_node_kernel), dim3(dimInitGrid), dim3(dimInitMessageBuffer), 0, 0, message_buffer, num_edges, edges_joint_probabilities, current_messages, src_nodes_to_edges_nodes, src_nodes_to_edges_edges, edges_x_dim, edges_y_dim, num_vertices);
            check_cuda_kernel_return_code();

            /* 4. marginalize: combine incoming messages, then normalize */
            hipLaunchKernelGGL(( marginalize_node_combine_kernel), dim3(dimMessagesGrid), dim3(dimMessagesBuffer), 0, 0, node_num_vars, message_buffer, node_states, current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            hipLaunchKernelGGL(( marginalize_sum_node_kernel), dim3(dimInitGrid), dim3(dimInitMessageBuffer), 0, 0, node_num_vars, message_buffer, node_states, current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();

            /* ping-pong: this iteration's messages become the next one's "previous" */
            temp = current_messages;
            current_messages = previous_messages;
            previous_messages = temp;

            num_iter++;
        }

        /* convergence check once per batch (hipMemcpy below synchronizes) */
        hipLaunchKernelGGL(( calculate_delta_6), dim3(dimReduceGrid), dim3(dimReduceBlock), reduceSmemSize, 0, previous_messages, current_messages, delta, delta_array, edges_x_dim, num_edges, is_pow_2, WARP_SIZE);
        check_cuda_kernel_return_code();
        CUDA_CHECK_RETURN(hipMemcpy(&host_delta, delta, sizeof(float), hipMemcpyDeviceToHost));

        if(host_delta < convergence || fabs(host_delta - previous_delta) < convergence){
            break;
        }
        previous_delta = host_delta;
    }

    /* copy results device -> host */
    CUDA_CHECK_RETURN(hipMemcpy(graph->node_states, node_states, sizeof(float) * MAX_STATES * num_vertices, hipMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(hipMemcpy(graph->edges_messages, current_messages, sizeof(float) * MAX_STATES * num_edges, hipMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(hipMemcpy(graph->last_edges_messages, previous_messages, sizeof(float) * MAX_STATES * num_edges, hipMemcpyDeviceToHost));

    CUDA_CHECK_RETURN(hipFree(dest_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(hipFree(dest_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(hipFree(src_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(hipFree(src_nodes_to_edges_edges));

    CUDA_CHECK_RETURN(hipFree(edges_x_dim));
    CUDA_CHECK_RETURN(hipFree(edges_y_dim));
    CUDA_CHECK_RETURN(hipFree(edges_joint_probabilities));

    CUDA_CHECK_RETURN(hipFree(current_messages));
    CUDA_CHECK_RETURN(hipFree(previous_messages));

    CUDA_CHECK_RETURN(hipFree(message_buffer));

    CUDA_CHECK_RETURN(hipFree(node_states));
    CUDA_CHECK_RETURN(hipFree(node_num_vars));

    CUDA_CHECK_RETURN(hipFree(delta));
    CUDA_CHECK_RETURN(hipFree(delta_array));

    return num_iter;
}
// Smoke test: lexes and parses an in-memory BIF expression string and
// verifies a non-NULL AST is produced, then frees everything.
void test_ast(const char * expr)
{
    yyscan_t scanner;
    struct expression * parsed;
    YY_BUFFER_STATE buffer;

    assert(strlen(expr) > 0);
    assert(yylex_init(&scanner) == 0);
    assert(scanner != NULL);

    buffer = yy_scan_string(expr, scanner);
    assert(yyparse(&parsed, scanner) == 0);

    yy_delete_buffer(buffer, scanner);
    yylex_destroy(scanner);

    assert(parsed != NULL);
    delete_expression(parsed);
}
/*
 * Smoke test: parses the BIF file at `file_path` and verifies a non-NULL AST
 * is produced, then frees it.
 * Fixes: the fopen() result is now checked before use (a bad path previously
 * crashed inside the lexer); the unused YY_BUFFER_STATE local is removed.
 */
void test_file(const char * file_path)
{
    struct expression * expression;
    yyscan_t scanner;
    FILE * in;

    assert(yylex_init(&scanner) == 0);

    in = fopen(file_path, "r");
    assert(in != NULL); /* fail fast on a missing/unreadable input file */
    yyset_in(in, scanner);

    assert(yyparse(&expression, scanner) == 0);
    yylex_destroy(scanner);
    fclose(in);

    assert(expression != NULL);
    delete_expression(expression);
}
/*
 * Parses a BIF file, builds its factor graph, and runs one full sweep of
 * level-ordered (tree-style) belief propagation -- forward over the levels,
 * then backward -- finishing with marginalization.  Prints a CSV timing row:
 * "<file>,regular,<#nodes>,<#edges>,<seconds>".
 * Fixes: the fopen() result is now checked before use; the graph NULL check
 * moved before the graph is first used (it previously ran after);
 * the unused YY_BUFFER_STATE local is removed.
 */
void test_parse_file(char * file_name){
    unsigned int i;
    struct expression * expression;
    yyscan_t scanner;
    FILE * in;
    Graph_t graph;
    clock_t start, end;
    double time_elapsed;

    assert(yylex_init(&scanner) == 0);

    in = fopen(file_name, "r");
    assert(in != NULL); /* fail fast on a missing/unreadable input file */
    yyset_in(in, scanner);

    assert(yyparse(&expression, scanner) == 0);
    yylex_destroy(scanner);
    fclose(in);

    assert(expression != NULL);

    graph = build_graph(expression);
    assert(graph != NULL); /* was checked only after use */

    set_up_src_nodes_to_edges(graph);
    set_up_dest_nodes_to_edges(graph);

    start = clock();
    init_levels_to_nodes(graph);
    /* forward pass over the levels */
    propagate_using_levels_start(graph);
    for(i = 1; i < graph->num_levels - 1; ++i){
        propagate_using_levels(graph, i);
    }
    reset_visited(graph);
    /* backward pass */
    for(i = graph->num_levels - 1; i > 0; --i){
        propagate_using_levels(graph, i);
    }
    marginalize(graph);
    end = clock();

    time_elapsed = (double)(end - start) / CLOCKS_PER_SEC;
    printf("%s,regular,%d,%d,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, time_elapsed);

    delete_expression(expression);
    graph_destroy(graph);
}
/*
 * Parses a BIF file, builds its factor graph, and runs the CUDA loopy
 * belief-propagation driver.  Prints a CSV timing row:
 * "<file>,loopy,<#nodes>,<#edges>,<seconds>".
 * Fixes: the fopen() result is now checked before use; the unused
 * YY_BUFFER_STATE local is removed.
 */
void test_loopy_belief_propagation(char * file_name){
    struct expression * expression;
    yyscan_t scanner;
    FILE * in;
    Graph_t graph;
    clock_t start, end;
    double time_elapsed;

    assert(yylex_init(&scanner) == 0);

    in = fopen(file_name, "r");
    assert(in != NULL); /* fail fast on a missing/unreadable input file */
    yyset_in(in, scanner);

    assert(yyparse(&expression, scanner) == 0);
    yylex_destroy(scanner);
    fclose(in);

    assert(expression != NULL);

    graph = build_graph(expression);
    assert(graph != NULL);

    set_up_src_nodes_to_edges(graph);
    set_up_dest_nodes_to_edges(graph);

    start = clock();
    init_previous_edge(graph);
    loopy_propagate_until_cuda(graph, PRECISION, NUM_ITERATIONS);
    end = clock();

    time_elapsed = (double)(end - start)/CLOCKS_PER_SEC;
    printf("%s,loopy,%d,%d,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, time_elapsed);

    delete_expression(expression);
    graph_destroy(graph);
}
/*
 * Parses the BIF file at `file_name` into an expression AST.
 * The caller owns the returned expression (free with delete_expression()).
 * Fixes: the fopen() result is now checked before use; the unused
 * YY_BUFFER_STATE local is removed.
 */
struct expression * parse_file(const char * file_name){
    struct expression * expression;
    yyscan_t scanner;
    FILE * in;

    assert(yylex_init(&scanner) == 0);

    in = fopen(file_name, "r");
    assert(in != NULL); /* fail fast on a missing/unreadable input file */
    yyset_in(in, scanner);

    assert(yyparse(&expression, scanner) == 0);
    yylex_destroy(scanner);
    fclose(in);

    assert(expression != NULL);
    return expression;
}
// Builds a graph from an already-parsed expression and times one full sweep
// of level-ordered belief propagation (forward over the levels, then
// backward, then marginalize).  Appends a CSV row to stdout:
// "<file>,regular,<#nodes>,<#edges>,<diameter>,2,<seconds>".
void run_test_belief_propagation(struct expression * expression, const char * file_name){
    Graph_t graph;
    clock_t begin, finish;
    double seconds;
    unsigned int level;

    graph = build_graph(expression);
    assert(graph != NULL);

    set_up_src_nodes_to_edges(graph);
    set_up_dest_nodes_to_edges(graph);
    calculate_diameter(graph);

    begin = clock();
    init_levels_to_nodes(graph);
    // forward pass over the levels
    propagate_using_levels_start(graph);
    for(level = 1; level < graph->num_levels - 1; ++level){
        propagate_using_levels(graph, level);
    }
    reset_visited(graph);
    // backward pass
    for(level = graph->num_levels - 1; level > 0; --level){
        propagate_using_levels(graph, level);
    }
    marginalize(graph);
    finish = clock();

    seconds = (double)(finish - begin) / CLOCKS_PER_SEC;
    printf("%s,regular,%d,%d,%d,2,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, graph->diameter, seconds);

    graph_destroy(graph);
}
// XML variant of run_test_belief_propagation: loads the graph directly from
// an XML file (no expression AST), runs one timed sweep of level-ordered
// belief propagation, and prints the CSV row
// "<file>,regular,<#nodes>,<#edges>,<diameter>,2,<seconds>".
void run_test_belief_propagation_xml_file(const char * file_name){
    Graph_t graph;
    clock_t start, end;
    double time_elapsed;
    unsigned int i;

    graph = parse_xml_file(file_name);
    assert(graph != NULL);
    //print_nodes(graph);
    //print_edges(graph);

    set_up_src_nodes_to_edges(graph);
    set_up_dest_nodes_to_edges(graph);
    calculate_diameter(graph);

    start = clock();
    init_levels_to_nodes(graph);
    //print_levels_to_nodes(graph);

    // forward pass over the levels
    propagate_using_levels_start(graph);
    for(i = 1; i < graph->num_levels - 1; ++i){
        propagate_using_levels(graph, i);
    }
    reset_visited(graph);
    // backward pass
    for(i = graph->num_levels - 1; i > 0; --i){
        propagate_using_levels(graph, i);
    }

    marginalize(graph);
    end = clock();

    time_elapsed = (double)(end - start) / CLOCKS_PER_SEC;
    printf("%s,regular,%d,%d,%d,2,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, graph->diameter, time_elapsed);

    graph_destroy(graph);
}
// Builds a graph from an already-parsed expression, runs the CUDA loopy-BP
// driver, and appends a CSV row to `out`:
// "<file>,loopy,<#nodes>,<#edges>,<diameter>,<#iterations>,<seconds>".
// NOTE(review): calculate_diameter() is commented out, so graph->diameter is
// whatever build_graph() left there -- confirm it is initialized.
void run_test_loopy_belief_propagation(struct expression * expression, const char * file_name, FILE * out){
    Graph_t graph;
    clock_t start, end;
    double time_elapsed;
    unsigned int num_iterations;

    graph = build_graph(expression);
    assert(graph != NULL);
    //print_nodes(graph);
    //print_edges(graph);

    set_up_src_nodes_to_edges(graph);
    set_up_dest_nodes_to_edges(graph);
    //calculate_diameter(graph);

    start = clock();
    init_previous_edge(graph);

    num_iterations = loopy_propagate_until_cuda(graph, PRECISION, NUM_ITERATIONS);
    end = clock();

    time_elapsed = (double)(end - start)/CLOCKS_PER_SEC;
    //print_nodes(graph);
    fprintf(out, "%s,loopy,%d,%d,%d,%d,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, graph->diameter, num_iterations, time_elapsed);
    fflush(out);

    graph_destroy(graph);
}
// XML variant of run_test_loopy_belief_propagation: loads the graph directly
// from an XML file, runs the CUDA loopy-BP driver, and appends a CSV row to
// `out`: "<file>,loopy,<#nodes>,<#edges>,<diameter>,<#iterations>,<seconds>".
void run_test_loopy_belief_propagation_xml_file(const char * file_name, FILE * out){
    Graph_t graph;
    clock_t begin, finish;
    double seconds;
    unsigned int iterations_used;

    graph = parse_xml_file(file_name);
    assert(graph != NULL);

    set_up_src_nodes_to_edges(graph);
    set_up_dest_nodes_to_edges(graph);

    begin = clock();
    init_previous_edge(graph);
    iterations_used = loopy_propagate_until_cuda(graph, PRECISION, NUM_ITERATIONS);
    finish = clock();

    seconds = (double)(finish - begin)/CLOCKS_PER_SEC;
    fprintf(out, "%s,loopy,%d,%d,%d,%d,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, graph->diameter, iterations_used, seconds);
    fflush(out);

    graph_destroy(graph);
}
// Parses the BIF file once, then runs both benchmark variants
// (level-ordered, then loopy) `num_iterations` times each over the same AST.
void run_tests_with_file(const char * file_name, unsigned int num_iterations, FILE * out){
    unsigned int round;
    struct expression * parsed;

    parsed = parse_file(file_name);
    for(round = 0; round < num_iterations; ++round){
        run_test_belief_propagation(parsed, file_name);
    }
    for(round = 0; round < num_iterations; ++round){
        run_test_loopy_belief_propagation(parsed, file_name, out);
    }
    delete_expression(parsed);
}
// Runs the loopy-BP benchmark `num_iterations` times against an XML graph
// file.  The non-loopy variant is intentionally skipped here (it requires a
// parsed expression, which XML inputs do not provide).
void run_tests_with_xml_file(const char * file_name, unsigned int num_iterations, FILE * out){
    unsigned int round;
    for(round = 0; round < num_iterations; ++round){
        run_test_loopy_belief_propagation_xml_file(file_name, out);
    }
}
int main(void)
{
/*
extern int yydebug;
yydebug = 1;
/*
struct expression * expression = NULL;
const char test[] = "// Bayesian Network in the Interchange Format\n// Produced by BayesianNetworks package in JavaBayes\n// Output created Sun Nov 02 17:49:49 GMT+00:00 1997\n// Bayesian network \nnetwork \"Dog-Problem\" { //5 variables and 5 probability distributions\nproperty \"credal-set constant-density-bounded 1.1\" ;\n}variable \"light-on\" { //2 values\ntype discrete[2] { \"true\" \"false\" };\nproperty \"position = (218, 195)\" ;\n}\nvariable \"bowel-problem\" { //2 values\ntype discrete[2] { \"true\" \"false\" };\nproperty \"position = (335, 99)\" ;\n}";
test_ast(test);
test_parse_file("dog.bif");
test_parse_file("alarm.bif");
test_parse_file("very_large/andes.bif");
test_loopy_belief_propagation("very_large/andes.bif");
test_parse_file("Diabetes.bif");
test_loopy_belief_propagation("Diabetes.bif");
*/
//test_loopy_belief_propagation("../benchmark_files/dog.bif");
//test_loopy_belief_propagation("../benchmark_files/alarm.bif");
//test_file("dog.bif");
//test_file("alarm.bif");
/*expression = read_file("alarm.bif");
assert(expression != NULL);
delete_expression(expression);*/
FILE * out = fopen("cuda_kernels_benchmark.csv", "w");
fprintf(out, "File Name,Propagation Type,Number of Nodes,Number of Edges,Diameter,Number of Iterations,BP Run Time(s)\n");
fflush(out);
/*run_tests_with_file("../benchmark_files/small/asia.bif", 1);
run_tests_with_file("../benchmark_files/small/cancer.bif", 1);
run_tests_with_file("../benchmark_files/small/earthquake.bif", 1);
run_tests_with_file("../benchmark_files/small/sachs.bif", 1);
run_tests_with_file("../benchmark_files/small/survey.bif", 1);
/*
run_tests_with_file("../benchmark_files/medium/alarm.bif", 1);
run_tests_with_file("../benchmark_files/medium/barley.bif", 1);
//run_tests_with_file("../benchmark_files/medium/child.bif", 1);
run_tests_with_file("../benchmark_files/medium/hailfinder.bif", 1);
run_tests_with_file("../benchmark_files/medium/insurance.bif", 1);
run_tests_with_file("../benchmark_files/medium/mildew.bif", 1);
run_tests_with_file("../benchmark_files/medium/water.bif", 1);
run_tests_with_file("../benchmark_files/large/hepar2.bif", 1);
run_tests_with_file("../benchmark_files/large/win95pts.bif", 1);
run_tests_with_file("../benchmark_files/very_large/andes.bif", 1);
run_tests_with_file("../benchmark_files/very_large/diabetes.bif", 1);
run_tests_with_file("../benchmark_files/very_large/link.bif", 1);
run_tests_with_file("../benchmark_files/very_large/munin1.bif", 1);
run_tests_with_file("../benchmark_files/very_large/munin2.bif", 1);
run_tests_with_file("../benchmark_files/very_large/munin3.bif", 1);
run_tests_with_file("../benchmark_files/very_large/munin4.bif", 1);
//run_tests_with_file("../benchmark_files/very_large/munin.bif", 1);
run_tests_with_file("../benchmark_files/very_large/pathfinder.bif", 1);
run_tests_with_file("../benchmark_files/very_large/pigs.bif", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_1000_2000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_1000_2000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_1000_2000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_2000_4000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_2000_4000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_2000_4000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_5000_10000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_5000_10000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_5000_10000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_10000_20000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_10000_20000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_10000_20000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_12000_24000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_12000_24000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_12000_24000_3.xml", 1);*/
/*run_tests_with_xml_file("../benchmark_files/xml/bf_15000_30000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_15000_30000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_15000_30000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_20000_40000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_20000_40000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_20000_40000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_25000_50000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_25000_50000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_25000_50000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_30000_60000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_30000_60000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_30000_60000_3.xml", 1);*/
/*run_tests_with_xml_file("../benchmark_files/xml/bf_40000_80000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_40000_80000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_40000_80000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_80000_160000_2.xml", 1);*/
run_tests_with_xml_file("../benchmark_files/xml2/10_20.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/100_200.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/1000_2000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/10000_20000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/100000_200000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/200000_400000.xml", 1, out);
//run_tests_with_xml_file("../benchmark_files/xml2/300000_600000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/400000_800000.xml", 1, out);
//run_tests_with_xml_file("../benchmark_files/xml2/500000_1000000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/600000_1200000.xml", 1, out);
//run_tests_with_xml_file("../benchmark_files/xml2/700000_1400000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/800000_1600000.xml", 1, out);
//run_tests_with_xml_file("../benchmark_files/xml2/900000_1800000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/1000000_2000000.xml", 1, out);
//run_tests_with_xml_file("../benchmark_files/xml2/10000000_20000000.xml", 1, out);
fclose(out);
return 0;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
/**
 * Check the return value of a HIP runtime API call (invoked through the
 * CUDA_CHECK_RETURN macro) and terminate the process if it failed.
 *
 * file/line/statement identify the failing call site for the diagnostic.
 */
static void CheckCudaErrorAux (const char *file, unsigned int line, const char *statement, hipError_t err)
{
    if (err == hipSuccess)
        return;
    // Diagnostics belong on stderr so they survive stdout redirection
    // (the benchmark writes its CSV results to stdout/files).
    fprintf(stderr, "%s returned %s (%d) at %s:%d\n", statement, hipGetErrorString(err), err, file, line);
    exit (1);
}
| ad42d2cc6544b788c00550ad0bf7183e49a7d1ef.cu | #include <stdio.h>
#include <assert.h>
#include <time.h>
#include <math.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
extern "C" {
#include "../bnf-parser/expression.h"
#include "../bnf-parser/Parser.h"
#include "../bnf-parser/Lexer.h"
#include "../bnf-xml-parser/xml-expression.h"
}
int yyparse(struct expression ** expr, yyscan_t scanner);
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
/**
 * Seed the per-node message buffer with each node's current state beliefs.
 *
 * Launch: 2-D grid — x indexes nodes, y indexes states within a node.
 * Buffers are laid out as [node * MAX_STATES + state].
 */
__global__
void init_message_buffer_kernel(float *message_buffer, float *node_states, unsigned int *node_num_vars,
                                unsigned int num_nodes){
    const unsigned int node = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int state = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard the grid tail in both dimensions.
    if(node >= num_nodes){
        return;
    }
    if(state >= node_num_vars[node]){
        return;
    }

    message_buffer[node * MAX_STATES + state] = node_states[node * MAX_STATES + state];
}
/**
 * Multiply one edge's message element-wise into a node's message buffer.
 *
 * dest        - message buffer laid out as [node * MAX_STATES + state]
 * edge_messages - edge message array laid out as [edge * MAX_STATES + state]
 * length      - number of states for this node
 * node_index  - element offset of the node in dest (MAX_STATES * node)
 * edge_offset - element offset of the edge in edge_messages (MAX_STATES * edge)
 *
 * Each thread (indexed by threadIdx.z) handles exactly one state, so no
 * shared-memory staging or barrier is required.
 *
 * NOTE(review): the original staged values through shared memory and called
 * __syncthreads() inside a divergent branch (undefined behavior), wrote the
 * product back at the *edge* offset of dest rather than the node offset, and
 * bounds-checked the element offset against the edge *count*; all three look
 * like defects given the callers in this file — confirm against expected
 * marginals.
 */
__device__
void combine_message_cuda(float * dest, float * edge_messages, unsigned int length, unsigned int node_index,
                          unsigned int edge_offset, unsigned int num_edges, char n_is_pow_2, unsigned int warp_size){
    unsigned int index = threadIdx.z;

    // Guard both the node's state count and the edge-message array extent.
    if(index < length && edge_offset + index < MAX_STATES * num_edges){
        dest[node_index + index] *= edge_messages[edge_offset + index];
    }
}
/**
 * For every node, fold the messages arriving on each incoming edge into the
 * node's message buffer.
 *
 * Launch: 3-D grid — x indexes nodes, y indexes the node's incoming edges
 * (CSR-style dest_node_to_edges_* lists), z is consumed inside
 * combine_message_cuda (one thread per state).
 */
__global__
void read_incoming_messages_kernel(float *message_buffer, float *previous_messages,
                                   unsigned int * dest_node_to_edges_nodes,
                                   unsigned int * dest_node_to_edges_edges,
                                   unsigned int current_num_edges,
                                   unsigned int *node_num_vars, unsigned int num_vertices,
                                   char n_is_pow_2, unsigned int warp_size){
    const unsigned int node = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int local_edge = blockIdx.y * blockDim.y + threadIdx.y;

    if(node >= num_vertices) {
        return;
    }

    const unsigned int num_variables = node_num_vars[node];
    // CSR slice [first, last) of this node's incoming edge list; the final
    // node's slice is closed by the total edge count.
    const unsigned int first = dest_node_to_edges_nodes[node];
    const unsigned int last = (node + 1 >= num_vertices)
                              ? current_num_edges
                              : dest_node_to_edges_nodes[node + 1];

    if (local_edge < last - first) {
        const unsigned int edge = dest_node_to_edges_edges[first + local_edge];
        combine_message_cuda(message_buffer, previous_messages, num_variables, MAX_STATES * node,
                             MAX_STATES * edge, current_num_edges, n_is_pow_2, warp_size);
    }
}
/**
 * Compute the message sent along one edge and store it, normalized, into
 * edge_messages.
 *
 * Each output state i is the inner product of row i of the edge's joint
 * probability table with the sending node's buffered beliefs; the result is
 * then normalized by the total (guarding against a non-positive sum).
 *
 * message_buffer - node buffers, [node * MAX_STATES + state]
 * joint_probabilities - [edge * MAX_STATES * MAX_STATES + i * MAX_STATES + j]
 * x_dim/y_dim - per-edge source/destination state counts
 */
__device__
void send_message_for_edge_cuda(float * message_buffer, unsigned int edge_index, unsigned int node_index,
                                float * joint_probabilities, float * edge_messages,
                                unsigned int * x_dim, unsigned int * y_dim){
    unsigned int i, j, num_src, num_dest;
    float sum, partial_sum;

    num_src = x_dim[edge_index];
    num_dest = y_dim[edge_index];

    // Float literals throughout: the originals (0.0, 1.0) silently promoted
    // the arithmetic to double on the device.
    sum = 0.0f;
    for(i = 0; i < num_src; ++i){
        partial_sum = 0.0f;
        for(j = 0; j < num_dest; ++j){
            partial_sum += joint_probabilities[MAX_STATES * MAX_STATES * edge_index + MAX_STATES * i + j] * message_buffer[MAX_STATES * node_index + j];
        }
        sum += partial_sum;
        edge_messages[edge_index * MAX_STATES + i] = partial_sum;
    }
    // Avoid dividing by zero (or a negative total) during normalization.
    if(sum <= 0.0f){
        sum = 1.0f;
    }
    for(i = 0; i < num_src; ++i){
        edge_messages[edge_index * MAX_STATES + i] = edge_messages[edge_index * MAX_STATES + i] / sum;
    }
}
/**
 * For every node, emit an outgoing message on each of its source edges.
 *
 * Launch: 2-D grid — x indexes nodes, y indexes the node's outgoing edges
 * (CSR-style src_node_to_edges_* lists).
 */
__global__
void send_message_for_node_kernel(float * message_buffer, unsigned int current_num_edges,
                                  float * joint_probabilities, float * current_edge_messages,
                                  unsigned int * src_node_to_edges_nodes,
                                  unsigned int * src_node_to_edges_edges,
                                  unsigned int * edges_x_dim, unsigned int * edges_y_dim,
                                  unsigned int num_vertices){
    const unsigned int node = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int local_edge = blockIdx.y * blockDim.y + threadIdx.y;

    if(node >= num_vertices){
        return;
    }

    // CSR slice [first, last) of this node's outgoing edge list.
    const unsigned int first = src_node_to_edges_nodes[node];
    const unsigned int last = (node + 1 >= num_vertices)
                              ? current_num_edges
                              : src_node_to_edges_nodes[node + 1];

    if (local_edge < last - first) {
        const unsigned int edge = src_node_to_edges_edges[first + local_edge];
        send_message_for_edge_cuda(message_buffer, edge, node, joint_probabilities,
                                   current_edge_messages, edges_x_dim, edges_y_dim);
    }
}
/**
 * First half of marginalization: reset each node's message buffer to 1.0 and
 * multiply in the current message from every incoming edge.
 *
 * Launch: 3-D grid — x indexes nodes, y indexes states / incoming edges,
 * z is consumed inside combine_message_cuda.
 *
 * NOTE(review): the 1.0 reset (guarded by edge_index < num_variables) and the
 * combine step (guarded by edge_index < diff_index) run in the same kernel
 * with no barrier between them; threads with different y indices may combine
 * into buffer slots that other threads are still resetting. Looks like a
 * race — confirm whether a separate reset kernel or a __syncthreads-safe
 * restructuring was intended.
 */
__global__
void marginalize_node_combine_kernel(unsigned int * node_num_vars, float * message_buffer, float * node_states,
                                     float * current_edges_messages,
                                     unsigned int * dest_node_to_edges_nodes,
                                     unsigned int * dest_node_to_edges_edges,
                                     unsigned int num_vertices,
                                     unsigned int num_edges, char n_is_pow_2, unsigned int warp_size){
    unsigned int node_index, edge_index, temp_edge_index, num_variables, start_index, end_index, diff_index;

    node_index = blockIdx.x*blockDim.x + threadIdx.x;
    edge_index = blockIdx.y*blockDim.y + threadIdx.y;

    if(node_index < num_vertices) {
        num_variables = node_num_vars[node_index];
        // reset the node's buffer (multiplicative identity)
        if(edge_index < num_variables){
            message_buffer[MAX_STATES * node_index + edge_index] = 1.0;
        }

        // CSR slice of this node's incoming edges
        start_index = dest_node_to_edges_nodes[node_index];
        if(node_index + 1 >= num_vertices){
            end_index = num_edges;
        }
        else{
            end_index = dest_node_to_edges_nodes[node_index + 1];
        }
        diff_index = end_index - start_index;
        if(edge_index < diff_index){
            temp_edge_index = dest_node_to_edges_edges[edge_index + start_index];

            // multiply this incoming edge's message into the node's buffer
            combine_message_cuda(message_buffer, current_edges_messages, num_variables, node_index * MAX_STATES, temp_edge_index * MAX_STATES, num_edges, n_is_pow_2, warp_size);
        }
    }
}
/**
 * Second half of marginalization: normalize each node's combined message
 * buffer into node_states (each state divided by the per-node sum).
 *
 * Launch: 2-D blocks — threadIdx.x indexes nodes within the block,
 * threadIdx.y indexes states.
 *
 * Fixes relative to the original:
 *  - the outer guard compared node_index against num_variables, which was
 *    read before it was ever assigned (uninitialized); it must bound the
 *    node index with num_vertices;
 *  - shared_message_buffer was combined with '*=' into uninitialized shared
 *    memory; it should be a plain store of the buffered value;
 *  - __syncthreads() sat inside a divergent branch (undefined behavior);
 *    barriers are hoisted so every thread in the block reaches them.
 *
 * NOTE(review): sums are accumulated per block, so if the launch splits one
 * node's states across y-blocks the normalization is partial — confirm the
 * launch configuration keeps all MAX_STATES in one block.
 */
__global__
void marginalize_sum_node_kernel(unsigned int * node_num_vars, float * message_buffer, float * node_states,
                                 float * current_edges_messages,
                                 unsigned int * dest_node_to_edges_nodes,
                                 unsigned int * dest_node_to_edges_edges,
                                 unsigned int num_vertices,
                                 unsigned int num_edges, char n_is_pow_2, unsigned int warp_size){
    unsigned int node_index, edge_index, num_variables;
    char active;

    __shared__ float sum[BLOCK_SIZE_2_D_X];
    __shared__ float shared_message_buffer[BLOCK_SIZE_2_D_X][BLOCK_SIZE_2_D_Y];

    node_index = blockIdx.x*blockDim.x + threadIdx.x;
    edge_index = blockIdx.y*blockDim.y + threadIdx.y;

    // FIX: original tested node_index < num_variables before num_variables
    // was assigned.
    active = node_index < num_vertices;
    if(active){
        num_variables = node_num_vars[node_index];
        active = edge_index < num_variables;
    }

    if(active && edge_index == 0){
        sum[threadIdx.x] = 0.0f;
    }
    if(active){
        // FIX: original used '*=' on uninitialized shared memory.
        shared_message_buffer[threadIdx.x][threadIdx.y] = message_buffer[MAX_STATES * node_index + edge_index];
    }
    __syncthreads();

    // accumulate the per-node total
    if(active){
        atomicAdd(&sum[threadIdx.x], shared_message_buffer[threadIdx.x][threadIdx.y]);
    }
    __syncthreads();

    // guard against a non-positive normalizer
    if(active && threadIdx.y == 0 && sum[threadIdx.x] <= 0.0f){
        sum[threadIdx.x] = 1.0f;
    }
    __syncthreads();

    if(active){
        node_states[MAX_STATES * node_index + edge_index] = shared_message_buffer[threadIdx.x][threadIdx.y] / sum[threadIdx.x];
    }
}
/**
 * Sum of absolute per-state message changes on edge i between the previous
 * and current iteration. NaN differences are treated as "no change".
 */
__device__
float calculate_local_delta(unsigned int i, float * previous_messages, float * current_messages, unsigned int * edges_x_dim){
    float total = 0.0f;
    const unsigned int num_states = edges_x_dim[i];

    for(unsigned int k = 0; k < num_states; ++k){
        float diff = previous_messages[MAX_STATES * i + k] - current_messages[MAX_STATES * i + k];
        // NaN never compares equal to itself; count it as zero change.
        if(diff != diff){
            diff = 0.0f;
        }
        total += fabs(diff);
    }

    return total;
}
/**
 * Reduce the per-edge message deltas into a single total written to *delta.
 * Stage 1 writes each edge's delta into delta_array; stage 2 performs a
 * shared-memory tree reduction, finished with warp shuffles on SM30+.
 *
 * NOTE(review): every block writes *delta unconditionally, so the result is
 * only meaningful for a single-block launch; the first-level load also
 * assumes a grid covering 2*BLOCK_SIZE elements per block, unlike the launch
 * configuration used elsewhere in this file. This kernel is currently
 * commented out at its call site — confirm before re-enabling.
 */
__global__
void calculate_delta(float * previous_messages, float * current_messages, float * delta, float * delta_array, unsigned int * edges_x_dim, unsigned int num_edges){
    extern __shared__ float shared_delta[];
    unsigned int tid, idx, i, s;

    tid = threadIdx.x;
    idx = blockIdx.x*blockDim.x + threadIdx.x;
    // index of the first of the (up to) two elements this thread loads
    i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;

    // stage 1: per-edge deltas into global scratch
    if(idx < num_edges){
        delta_array[idx] = calculate_local_delta(idx, previous_messages, current_messages, edges_x_dim);
    }
    __syncthreads();

    // stage 2: each thread pre-sums two elements into a register
    float my_delta = (i < num_edges) ? delta_array[i] : 0;
    if(i + BLOCK_SIZE < num_edges){
        my_delta += delta_array[i + BLOCK_SIZE];
    }
    shared_delta[tid] = my_delta;
    __syncthreads();

    // do reduction in shared memory down to the final warp
    for(s= blockDim.x / 2; s > 32; s>>=1){
        if(tid < s){
            shared_delta[tid] = my_delta = my_delta + shared_delta[tid + s];
        }
        __syncthreads();
    }

#if (__CUDA_ARCH__ >= 300)
    if(tid < 32){
        //fetch final intermediate sum from second warp
        if(BLOCK_SIZE >= 64){
            my_delta += shared_delta[tid + 32];
        }
        // NOTE(review): __shfl_down without _sync is removed on Volta+ —
        // confirm the targeted architectures.
        for(s = WARP_SIZE/2; s > 0; s /= 2){
            my_delta += __shfl_down(my_delta, s);
        }
    }
#else
    // pre-SM30 fallback: fully unrolled last-warp reduction in shared memory
    if((BLOCK_SIZE >= 64) && (tid < 32)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 32];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 32) && (tid < 16)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 16];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 16) && (tid < 8)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 8];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 8) && (tid < 4)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 4];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 4) && (tid < 2)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 2];
    }
    __syncthreads();
    if((BLOCK_SIZE >= 2) && (tid < 1)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 1];
    }
    __syncthreads();
#endif

    // thread 0 publishes the total
    if(tid == 0) {
        *delta = my_delta;
    }
}
/**
 * Reduce the per-edge message deltas into a single total written to *delta
 * (patterned on the CUDA SDK "reduce6" kernel).
 *
 * Stage 1 writes each edge's delta into delta_array; stage 2 accumulates with
 * a grid-stride loop (two elements per thread per stride), then reduces in
 * shared memory, finishing with warp shuffles on SM30+.
 *
 * n_is_pow_2 elides the second-element bounds check; pass it as false unless
 * the launch guarantees i + blockDim.x stays in range.
 *
 * Fixes relative to the original first pass (cf. the reduce6 reference):
 *  - 'my_delta = delta_array[i]' discarded the accumulation of earlier
 *    strides; it must be '+='.
 *  - the guarded second add re-read delta_array[i] instead of
 *    delta_array[i + blockDim.x], double-counting the first element.
 *
 * NOTE(review): every block writes *delta unconditionally, so the final
 * value is only meaningful for a single-block launch — confirm the call site.
 */
__global__
void calculate_delta_6(float * previous_messages, float * current_messages, float * delta, float * delta_array,
                       unsigned int * edges_x_dim,
                       unsigned int num_edges, char n_is_pow_2, unsigned int warp_size) {
    extern __shared__ float shared_delta[];

    unsigned int offset;
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    unsigned int grid_size = blockDim.x * 2 * gridDim.x;

    // stage 1: per-edge deltas into global scratch
    if(idx < num_edges){
        delta_array[idx] = calculate_local_delta(idx, previous_messages, current_messages, edges_x_dim);
    }
    __syncthreads();

    float my_delta = 0.0f;

    while (i < num_edges) {
        my_delta += delta_array[i];                   // FIX: was '=', losing prior strides
        // ensure we don't read out of bounds
        if (n_is_pow_2 || i + blockDim.x < num_edges) {
            my_delta += delta_array[i + blockDim.x];  // FIX: was delta_array[i] again
        }
        i += grid_size;
    }

    // each thread puts its local sum into shared memory
    shared_delta[tid] = my_delta;
    __syncthreads();

    // do reduction in shared mem
    if ((blockDim.x >= 512) && (tid < 256)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 256];
    }
    __syncthreads();

    if ((blockDim.x >= 256) && (tid < 128)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 128];
    }
    __syncthreads();

    if ((blockDim.x >= 128) && (tid < 64)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 64];
    }
    __syncthreads();

#if (__CUDA_ARCH__ >= 300)
    if( tid < 32){
        // fetch final intermediate sum from 2nd warp
        if(blockDim.x >= 64){
            my_delta += shared_delta[tid + 32];
        }
        // NOTE(review): __shfl_down without _sync is removed on Volta+ —
        // confirm the targeted architectures.
        for(offset = warp_size/2; offset > 0; offset /= 2 ){
            my_delta += __shfl_down(my_delta, offset);
        }
    }
#else
    // fully unroll reduction within a single warp
    if ((blockDim.x >= 64) && (tid < 32)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 32];
    }
    __syncthreads();

    if ((blockDim.x >= 32) && (tid < 16)) {
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 16];
    }
    __syncthreads();

    if((blockDim.x >= 16) && (tid < 8)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 8];
    }
    __syncthreads();

    if((blockDim.x >= 8) && (tid < 4)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 4];
    }
    __syncthreads();

    if((blockDim.x >= 4) && (tid < 2)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 2];
    }
    __syncthreads();

    if((blockDim.x >= 2) && (tid < 1)){
        shared_delta[tid] = my_delta = my_delta + shared_delta[tid + 1];
    }
    __syncthreads();
#endif

    // write result for this block to global mem
    if(tid == 0){
        *delta = my_delta;
    }
}
/**
 * Reduce the per-edge message deltas into *delta using a simple interleaved
 * shared-memory reduction (CUDA SDK "reduce1" pattern).
 *
 * NOTE(review): every block writes *delta unconditionally, so the result is
 * only meaningful for a single-block launch; the interleaved pattern also
 * assumes blockDim.x is a power of two. This kernel is currently commented
 * out at its call site.
 */
__global__
void calculate_delta_simple(float * previous_messages, float * current_messages,
                            float * delta, float * delta_array, unsigned int * edges_x_dim,
                            unsigned int num_edges) {
    extern __shared__ float shared_delta[];
    unsigned int tid, idx, i, s;

    tid = threadIdx.x;
    idx = blockIdx.x * blockDim.x + threadIdx.x;

    // stage 1: per-edge deltas into global scratch
    if (idx < num_edges) {
        delta_array[idx] = calculate_local_delta(idx, previous_messages, current_messages, edges_x_dim);
    }
    __syncthreads();

    // load into shared memory, zero-padding the grid tail
    shared_delta[tid] = (idx < num_edges) ? delta_array[idx] : 0;
    __syncthreads();

    // do reduction in shared mem (interleaved addressing)
    for(s = 1; s < blockDim.x; s *= 2){
        i = 2 * s * tid;
        if( i < blockDim.x ) {
            shared_delta[i] += shared_delta[i + s];
        }
        __syncthreads();
    }

    // write result for this block to global mem
    if(tid == 0){
        *delta = shared_delta[0];
    }
}
/**
 * Configure a 1-D unsigned-int texture reference: wrap addressing and point
 * (nearest-neighbor) filtering.
 *
 * NOTE(review): normalized = 1 makes fetches use [0,1) coordinates; for
 * element-indexed integer lookups 0 would be expected — no fetch site is
 * visible in this file to confirm the intent. Texture references are also
 * deprecated in modern CUDA in favor of texture objects.
 */
static void prepare_unsigned_int_text(texture<unsigned int, cudaTextureType1D, cudaReadModeElementType> * tex){
    tex->addressMode[0] = cudaAddressModeWrap;
    tex->addressMode[1] = cudaAddressModeWrap;
    tex->filterMode = cudaFilterModePoint;
    tex->normalized = 1;
}
/**
 * Abort the process if the most recent kernel launch (or prior async work)
 * left a pending CUDA error.
 */
static void check_cuda_kernel_return_code(){
    const cudaError_t status = cudaGetLastError();

    if (status == cudaSuccess) {
        return;
    }
    fprintf(stderr, "Error: %s\n", cudaGetErrorString(status));
    exit(-1);
}
/**
 * Run loopy belief propagation for `graph` on the GPU until the total message
 * delta (or its change since the previous batch) drops below `convergence`,
 * or `max_iterations` iterations have executed.
 *
 * Copies the graph's CSR edge lists, joint probabilities, messages and node
 * states to the device, iterates the init / read / send / marginalize kernel
 * pipeline in batches of BATCH_SIZE (checking convergence once per batch),
 * then copies node states and both message buffers back to the host.
 *
 * Returns the number of iterations actually executed.
 *
 * Fixes relative to the original:
 *  - the second H2D message copy targeted current_messages twice, leaving
 *    previous_messages uninitialized on the device;
 *  - previous_delta was read uninitialized on the first convergence test;
 *  - is_pow_2 was "num_vertices is even", which is not a power-of-two test
 *    and lets calculate_delta_6 skip a bounds check it actually needs with
 *    this grid sizing — the flag is now always false to keep the guard.
 */
unsigned int loopy_propagate_until_cuda(Graph_t graph, float convergence, unsigned int max_iterations){
    unsigned int i, j, num_iter, num_vertices, num_edges;
    float * delta;
    float * delta_array;
    float previous_delta, host_delta;
    char is_pow_2;

    float * edges_joint_probabilities;

    float * message_buffer;

    float * current_messages;
    float * previous_messages;
    float * temp;

    unsigned int * edges_x_dim;
    unsigned int * edges_y_dim;

    unsigned int * src_nodes_to_edges_nodes;
    unsigned int * src_nodes_to_edges_edges;
    unsigned int * dest_nodes_to_edges_nodes;
    unsigned int * dest_nodes_to_edges_edges;

    float * node_states;
    unsigned int * node_num_vars;

    host_delta = 0.0f;
    // FIX: previously read uninitialized on the first convergence check;
    // INFINITY guarantees the |delta - previous| test cannot fire early.
    previous_delta = INFINITY;

    num_vertices = graph->current_num_vertices;
    num_edges = graph->current_num_edges;

    // FIX: was 'num_vertices % 2 == 0' (evenness, not power-of-two) and is
    // consumed by calculate_delta_6 to elide a bounds check over num_edges;
    // with one block per BLOCK_SIZE edges that elision reads out of bounds,
    // so always keep the guard.
    is_pow_2 = 0;

    // allocate device buffers
    CUDA_CHECK_RETURN(cudaMalloc((void**)&edges_x_dim, sizeof(unsigned int) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&edges_y_dim, sizeof(unsigned int) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&edges_joint_probabilities, sizeof(float) * MAX_STATES * MAX_STATES * graph->current_num_edges));

    CUDA_CHECK_RETURN(cudaMalloc((void **)&dest_nodes_to_edges_nodes, sizeof(unsigned int) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&dest_nodes_to_edges_edges, sizeof(unsigned int) * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&src_nodes_to_edges_nodes, sizeof(unsigned int) * graph->current_num_vertices));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&src_nodes_to_edges_edges, sizeof(unsigned int) * graph->current_num_edges));

    CUDA_CHECK_RETURN(cudaMalloc((void **)&current_messages, sizeof(float) * MAX_STATES * graph->current_num_edges));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&previous_messages, sizeof(float) * MAX_STATES * graph->current_num_edges));

    CUDA_CHECK_RETURN(cudaMalloc((void **)&node_states, sizeof(float) * MAX_STATES * graph->current_num_vertices));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&node_num_vars, sizeof(unsigned int) * graph->current_num_vertices));

    CUDA_CHECK_RETURN(cudaMalloc((void **)&delta, sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&delta_array, sizeof(float) * num_edges));

    CUDA_CHECK_RETURN(cudaMalloc((void **)&message_buffer, sizeof(float) * num_vertices * MAX_STATES));

    // copy graph data to the device
    CUDA_CHECK_RETURN(cudaMemcpy(edges_joint_probabilities, graph->edges_joint_probabilities, sizeof(float) * MAX_STATES * MAX_STATES * graph->current_num_edges, cudaMemcpyHostToDevice ));

    CUDA_CHECK_RETURN(cudaMemcpy(current_messages, graph->edges_messages, sizeof(float) * MAX_STATES * graph->current_num_edges, cudaMemcpyHostToDevice));
    // FIX: this copy previously overwrote current_messages a second time,
    // leaving previous_messages uninitialized device memory.
    CUDA_CHECK_RETURN(cudaMemcpy(previous_messages, graph->last_edges_messages, sizeof(float) * MAX_STATES * graph->current_num_edges, cudaMemcpyHostToDevice));

    CUDA_CHECK_RETURN(cudaMemcpy(node_num_vars, graph->node_num_vars, sizeof(unsigned int) * graph->current_num_vertices, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(node_states, graph->node_states, sizeof(float) * MAX_STATES * graph->current_num_vertices, cudaMemcpyHostToDevice));

    CUDA_CHECK_RETURN(cudaMemcpy(dest_nodes_to_edges_nodes, graph->dest_nodes_to_edges_node_list, sizeof(unsigned int) * graph->current_num_vertices, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(dest_nodes_to_edges_edges, graph->dest_nodes_to_edges_edge_list, sizeof(unsigned int) * graph->current_num_edges, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(src_nodes_to_edges_nodes, graph->src_nodes_to_edges_node_list, sizeof(unsigned int) * graph->current_num_vertices, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(src_nodes_to_edges_edges, graph->src_nodes_to_edges_edge_list, sizeof(unsigned int) * graph->current_num_edges, cudaMemcpyHostToDevice));

    CUDA_CHECK_RETURN(cudaMemcpy(edges_x_dim, graph->edges_x_dim, sizeof(unsigned int) * num_edges, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(edges_y_dim, graph->edges_y_dim, sizeof(unsigned int) * num_edges, cudaMemcpyHostToDevice));

    // launch geometry (ceil-div in every dimension)
    const int blockEdge1dCount = (num_edges + BLOCK_SIZE - 1)/ BLOCK_SIZE;
    const int blockNodeCount = (num_vertices + BLOCK_SIZE_2_D_X - 1)/BLOCK_SIZE_2_D_X;
    const int blockStateCount = (MAX_STATES + BLOCK_SIZE_2_D_Y - 1)/BLOCK_SIZE_2_D_Y;
    const int blockDegreeCount = (graph->max_degree + BLOCK_SIZE_2_D_Y - 1)/BLOCK_SIZE_2_D_Y;

    const int blockMessageNodeCount = (num_vertices + BLOCK_SIZE_3_D_X - 1)/BLOCK_SIZE_3_D_X;
    const int blockMessageDegreeCount = ( graph->max_degree + BLOCK_SIZE_3_D_Y - 1)/BLOCK_SIZE_3_D_Y;
    const int blockMessageStateCount = ( MAX_STATES + BLOCK_SIZE_3_D_Z - 1)/BLOCK_SIZE_3_D_Z;

    num_iter = 0;

    dim3 dimReduceBlock(BLOCK_SIZE, 1, 1);
    dim3 dimReduceGrid(blockEdge1dCount, 1, 1);
    int reduceSmemSize = (BLOCK_SIZE <= 32) ? 2 * BLOCK_SIZE * sizeof(float) : BLOCK_SIZE * sizeof(float);

    dim3 dimInitMessageBuffer(BLOCK_SIZE_2_D_X, BLOCK_SIZE_2_D_Y, 1);
    dim3 dimInitGrid(blockNodeCount, blockStateCount, 1);
    dim3 dimDegreeGrid(blockNodeCount, blockDegreeCount, 1);
    int reduce2DSmemSize = (BLOCK_SIZE_2_D_Y <= 32) ? 2 * BLOCK_SIZE_2_D_Y * sizeof(float) : BLOCK_SIZE_2_D_Y * sizeof(float);

    dim3 dimMessagesBuffer(BLOCK_SIZE_3_D_X, BLOCK_SIZE_3_D_Y, BLOCK_SIZE_3_D_Z);
    dim3 dimMessagesGrid(blockMessageNodeCount, blockMessageDegreeCount, blockMessageStateCount);

    for(i = 0; i < max_iterations; i+= BATCH_SIZE){
        for(j = 0; j < BATCH_SIZE; ++j) {
            // 1) seed per-node buffers with node states
            init_message_buffer_kernel<<<dimInitGrid, dimInitMessageBuffer>>>(message_buffer, node_states, node_num_vars, num_vertices);
            check_cuda_kernel_return_code();

            // 2) fold in last iteration's incoming messages
            read_incoming_messages_kernel <<<dimMessagesGrid, dimMessagesBuffer>>>(message_buffer, previous_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_edges, node_num_vars, num_vertices, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();

            // 3) emit new outgoing messages
            send_message_for_node_kernel<<<dimInitGrid, dimInitMessageBuffer>>>(message_buffer, num_edges, edges_joint_probabilities, current_messages, src_nodes_to_edges_nodes, src_nodes_to_edges_edges, edges_x_dim, edges_y_dim, num_vertices);
            check_cuda_kernel_return_code();

            // 4) marginalize into node states
            marginalize_node_combine_kernel<<<dimMessagesGrid, dimMessagesBuffer>>>(node_num_vars, message_buffer, node_states, current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();
            marginalize_sum_node_kernel<<<dimInitGrid, dimInitMessageBuffer>>>(node_num_vars, message_buffer, node_states, current_messages, dest_nodes_to_edges_nodes, dest_nodes_to_edges_edges, num_vertices, num_edges, is_pow_2, WARP_SIZE);
            check_cuda_kernel_return_code();

            // swap message buffers for the next iteration
            temp = current_messages;
            current_messages = previous_messages;
            previous_messages = temp;

            num_iter++;
        }
        // convergence check once per batch
        calculate_delta_6<<<dimReduceGrid, dimReduceBlock, reduceSmemSize>>>(previous_messages, current_messages, delta, delta_array, edges_x_dim, num_edges, is_pow_2, WARP_SIZE);
        check_cuda_kernel_return_code();
        CUDA_CHECK_RETURN(cudaMemcpy(&host_delta, delta, sizeof(float), cudaMemcpyDeviceToHost));

        if(host_delta < convergence || fabs(host_delta - previous_delta) < convergence){
            break;
        }
        previous_delta = host_delta;
    }

    // copy results back to the host
    CUDA_CHECK_RETURN(cudaMemcpy(graph->node_states, node_states, sizeof(float) * MAX_STATES * num_vertices, cudaMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(cudaMemcpy(graph->edges_messages, current_messages, sizeof(float) * MAX_STATES * num_edges, cudaMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(cudaMemcpy(graph->last_edges_messages, previous_messages, sizeof(float) * MAX_STATES * num_edges, cudaMemcpyDeviceToHost));

    // release device memory
    CUDA_CHECK_RETURN(cudaFree(dest_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(cudaFree(dest_nodes_to_edges_edges));
    CUDA_CHECK_RETURN(cudaFree(src_nodes_to_edges_nodes));
    CUDA_CHECK_RETURN(cudaFree(src_nodes_to_edges_edges));

    CUDA_CHECK_RETURN(cudaFree(edges_x_dim));
    CUDA_CHECK_RETURN(cudaFree(edges_y_dim));
    CUDA_CHECK_RETURN(cudaFree(edges_joint_probabilities));

    CUDA_CHECK_RETURN(cudaFree(current_messages));
    CUDA_CHECK_RETURN(cudaFree(previous_messages));
    CUDA_CHECK_RETURN(cudaFree(message_buffer));

    CUDA_CHECK_RETURN(cudaFree(node_states));
    CUDA_CHECK_RETURN(cudaFree(node_num_vars));

    CUDA_CHECK_RETURN(cudaFree(delta));
    CUDA_CHECK_RETURN(cudaFree(delta_array));

    return num_iter;
}
void test_ast(const char * expr)
{
struct expression * expression;
yyscan_t scanner;
YY_BUFFER_STATE state;
assert(yylex_init(&scanner) == 0);
assert(scanner != NULL);
assert(strlen(expr) > 0);
state = yy_scan_string(expr, scanner);
assert(yyparse(&expression, scanner) == 0);
yy_delete_buffer(state, scanner);
yylex_destroy(scanner);
assert(expression != NULL);
delete_expression(expression);
}
/**
 * Parse a BIF file from disk and assert that a non-NULL expression tree
 * results, then free it. Aborts (via assert) on scanner-init, open, or
 * parse failure.
 */
void test_file(const char * file_path)
{
    struct expression * expression;
    yyscan_t scanner;
    FILE * in;

    assert(yylex_init(&scanner) == 0);

    in = fopen(file_path, "r");
    assert(in != NULL);  // FIX: fail fast instead of handing the lexer a NULL stream
    yyset_in(in, scanner);

    assert(yyparse(&expression, scanner) == 0);
    yylex_destroy(scanner);
    fclose(in);

    assert(expression != NULL);
    delete_expression(expression);
}
/**
 * Parse a BIF file, build its graph, run one full up/down pass of
 * tree belief propagation, and print a CSV timing row to stdout
 * (name, "regular", vertices, edges, seconds).
 */
void test_parse_file(char * file_name){
    unsigned int i;
    struct expression * expression;
    yyscan_t scanner;
    FILE * in;
    Graph_t graph;
    clock_t start, end;
    double time_elapsed;

    assert(yylex_init(&scanner) == 0);

    in = fopen(file_name, "r");
    assert(in != NULL);  // FIX: fail fast on a missing/unreadable file
    yyset_in(in, scanner);

    assert(yyparse(&expression, scanner) == 0);
    yylex_destroy(scanner);
    fclose(in);

    assert(expression != NULL);

    graph = build_graph(expression);
    assert(graph != NULL);  // FIX: originally checked only after the graph was used

    set_up_src_nodes_to_edges(graph);
    set_up_dest_nodes_to_edges(graph);

    start = clock();
    init_levels_to_nodes(graph);
    // forward (leaves -> root) pass
    propagate_using_levels_start(graph);
    for(i = 1; i < graph->num_levels - 1; ++i){
        propagate_using_levels(graph, i);
    }
    reset_visited(graph);
    // backward (root -> leaves) pass
    for(i = graph->num_levels - 1; i > 0; --i){
        propagate_using_levels(graph, i);
    }
    marginalize(graph);
    end = clock();

    time_elapsed = (double)(end - start) / CLOCKS_PER_SEC;
    printf("%s,regular,%d,%d,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, time_elapsed);

    delete_expression(expression);
    graph_destroy(graph);
}
/**
 * Parse a BIF file, build its graph, run loopy belief propagation on the GPU,
 * and print a CSV timing row to stdout (name, "loopy", vertices, edges,
 * seconds).
 */
void test_loopy_belief_propagation(char * file_name){
    struct expression * expression;
    yyscan_t scanner;
    FILE * in;
    Graph_t graph;
    clock_t start, end;
    double time_elapsed;

    assert(yylex_init(&scanner) == 0);

    in = fopen(file_name, "r");
    assert(in != NULL);  // FIX: fail fast on a missing/unreadable file
    yyset_in(in, scanner);

    assert(yyparse(&expression, scanner) == 0);
    yylex_destroy(scanner);
    fclose(in);

    assert(expression != NULL);

    graph = build_graph(expression);
    assert(graph != NULL);

    set_up_src_nodes_to_edges(graph);
    set_up_dest_nodes_to_edges(graph);

    start = clock();
    init_previous_edge(graph);
    loopy_propagate_until_cuda(graph, PRECISION, NUM_ITERATIONS);
    end = clock();

    time_elapsed = (double)(end - start)/CLOCKS_PER_SEC;
    printf("%s,loopy,%d,%d,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, time_elapsed);

    delete_expression(expression);
    graph_destroy(graph);
}
/**
 * Parse a BIF file and return the (never-NULL) expression tree.
 * The caller owns the result and must free it with delete_expression().
 * Aborts (via assert) on scanner-init, open, or parse failure.
 */
struct expression * parse_file(const char * file_name){
    struct expression * expression;
    yyscan_t scanner;
    FILE * in;

    assert(yylex_init(&scanner) == 0);

    in = fopen(file_name, "r");
    assert(in != NULL);  // FIX: fail fast instead of handing the lexer a NULL stream
    yyset_in(in, scanner);

    assert(yyparse(&expression, scanner) == 0);
    yylex_destroy(scanner);
    fclose(in);

    assert(expression != NULL);

    return expression;
}
void run_test_belief_propagation(struct expression * expression, const char * file_name){
Graph_t graph;
clock_t start, end;
double time_elapsed;
unsigned int i;
graph = build_graph(expression);
assert(graph != NULL);
//print_nodes(graph);
//print_edges(graph);
set_up_src_nodes_to_edges(graph);
set_up_dest_nodes_to_edges(graph);
calculate_diameter(graph);
start = clock();
init_levels_to_nodes(graph);
//print_levels_to_nodes(graph);
propagate_using_levels_start(graph);
for(i = 1; i < graph->num_levels - 1; ++i){
propagate_using_levels(graph, i);
}
reset_visited(graph);
for(i = graph->num_levels - 1; i > 0; --i){
propagate_using_levels(graph, i);
}
marginalize(graph);
end = clock();
time_elapsed = (double)(end - start) / CLOCKS_PER_SEC;
printf("%s,regular,%d,%d,%d,2,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, graph->diameter, time_elapsed);
graph_destroy(graph);
}
void run_test_belief_propagation_xml_file(const char * file_name){
Graph_t graph;
clock_t start, end;
double time_elapsed;
unsigned int i;
graph = parse_xml_file(file_name);
assert(graph != NULL);
//print_nodes(graph);
//print_edges(graph);
set_up_src_nodes_to_edges(graph);
set_up_dest_nodes_to_edges(graph);
calculate_diameter(graph);
start = clock();
init_levels_to_nodes(graph);
//print_levels_to_nodes(graph);
propagate_using_levels_start(graph);
for(i = 1; i < graph->num_levels - 1; ++i){
propagate_using_levels(graph, i);
}
reset_visited(graph);
for(i = graph->num_levels - 1; i > 0; --i){
propagate_using_levels(graph, i);
}
marginalize(graph);
end = clock();
time_elapsed = (double)(end - start) / CLOCKS_PER_SEC;
printf("%s,regular,%d,%d,%d,2,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, graph->diameter, time_elapsed);
graph_destroy(graph);
}
void run_test_loopy_belief_propagation(struct expression * expression, const char * file_name, FILE * out){
Graph_t graph;
clock_t start, end;
double time_elapsed;
unsigned int num_iterations;
graph = build_graph(expression);
assert(graph != NULL);
//print_nodes(graph);
//print_edges(graph);
set_up_src_nodes_to_edges(graph);
set_up_dest_nodes_to_edges(graph);
//calculate_diameter(graph);
start = clock();
init_previous_edge(graph);
num_iterations = loopy_propagate_until_cuda(graph, PRECISION, NUM_ITERATIONS);
end = clock();
time_elapsed = (double)(end - start)/CLOCKS_PER_SEC;
//print_nodes(graph);
fprintf(out, "%s,loopy,%d,%d,%d,%d,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, graph->diameter, num_iterations, time_elapsed);
fflush(out);
graph_destroy(graph);
}
/*
 * Same as run_test_loopy_belief_propagation, but builds the graph from an
 * XML file instead of a parsed expression.  Appends one CSV row to `out`:
 *   file,loopy,#vertices,#edges,diameter,#iterations,runtime_seconds
 */
void run_test_loopy_belief_propagation_xml_file(const char * file_name, FILE * out){
Graph_t graph;
clock_t start, end;
double time_elapsed;
unsigned int num_iterations;
graph = parse_xml_file(file_name);
assert(graph != NULL);
//print_nodes(graph);
//print_edges(graph);
set_up_src_nodes_to_edges(graph);
set_up_dest_nodes_to_edges(graph);
/* diameter intentionally skipped; the diameter column below is whatever
 * parse_xml_file initialized it to */
//calculate_diameter(graph);
start = clock();
init_previous_edge(graph);
num_iterations = loopy_propagate_until_cuda(graph, PRECISION, NUM_ITERATIONS);
end = clock();
time_elapsed = (double)(end - start)/CLOCKS_PER_SEC;
//print_nodes(graph);
fprintf(out, "%s,loopy,%d,%d,%d,%d,%lf\n", file_name, graph->current_num_vertices, graph->current_num_edges, graph->diameter, num_iterations, time_elapsed);
fflush(out);
graph_destroy(graph);
}
/*
 * Parses `file_name` once, then benchmarks it `num_iterations` times with
 * exact (level-based) BP and `num_iterations` times with loopy BP; loopy
 * results are appended to `out`, regular results go to stdout.
 */
void run_tests_with_file(const char * file_name, unsigned int num_iterations, FILE * out){
unsigned int i;
struct expression * expr;
expr = parse_file(file_name);
/* parse_file can fail; mirror the non-NULL assertion used by the
 * graph-based helpers instead of dereferencing an invalid expression */
assert(expr != NULL);
for(i = 0; i < num_iterations; ++i){
run_test_belief_propagation(expr, file_name);
}
for(i = 0; i < num_iterations; ++i){
run_test_loopy_belief_propagation(expr, file_name, out);
}
delete_expression(expr);
}
/*
 * Benchmarks loopy BP on an XML-described graph `num_iterations` times,
 * appending one CSV row per run to `out`.  The exact/regular BP pass is
 * intentionally not run for XML inputs (it was disabled in the original).
 */
void run_tests_with_xml_file(const char * file_name, unsigned int num_iterations, FILE * out){
unsigned int iteration;
for(iteration = 0u; iteration < num_iterations; ++iteration){
run_test_loopy_belief_propagation_xml_file(file_name, out);
}
}
/*
 * Benchmark driver entry point: writes a CSV header to
 * cuda_kernels_benchmark.csv, then runs the loopy-BP benchmark once per
 * XML graph under ../benchmark_files/xml2.  The large comment blocks
 * below are earlier benchmark configurations kept for reference.
 */
int main(void)
{
/*
extern int yydebug;
yydebug = 1;
/*
struct expression * expression = NULL;
const char test[] = "// Bayesian Network in the Interchange Format\n// Produced by BayesianNetworks package in JavaBayes\n// Output created Sun Nov 02 17:49:49 GMT+00:00 1997\n// Bayesian network \nnetwork \"Dog-Problem\" { //5 variables and 5 probability distributions\nproperty \"credal-set constant-density-bounded 1.1\" ;\n}variable \"light-on\" { //2 values\ntype discrete[2] { \"true\" \"false\" };\nproperty \"position = (218, 195)\" ;\n}\nvariable \"bowel-problem\" { //2 values\ntype discrete[2] { \"true\" \"false\" };\nproperty \"position = (335, 99)\" ;\n}";
test_ast(test);
test_parse_file("dog.bif");
test_parse_file("alarm.bif");
test_parse_file("very_large/andes.bif");
test_loopy_belief_propagation("very_large/andes.bif");
test_parse_file("Diabetes.bif");
test_loopy_belief_propagation("Diabetes.bif");
*/
//test_loopy_belief_propagation("../benchmark_files/dog.bif");
//test_loopy_belief_propagation("../benchmark_files/alarm.bif");
//test_file("dog.bif");
//test_file("alarm.bif");
/*expression = read_file("alarm.bif");
assert(expression != NULL);
delete_expression(expression);*/
FILE * out = fopen("cuda_kernels_benchmark.csv", "w");
/* fopen can fail (permissions, read-only cwd); the original dereferenced
 * `out` unconditionally */
if (out == NULL) {
perror("cuda_kernels_benchmark.csv");
return 1;
}
fprintf(out, "File Name,Propagation Type,Number of Nodes,Number of Edges,Diameter,Number of Iterations,BP Run Time(s)\n");
fflush(out);
/*run_tests_with_file("../benchmark_files/small/asia.bif", 1);
run_tests_with_file("../benchmark_files/small/cancer.bif", 1);
run_tests_with_file("../benchmark_files/small/earthquake.bif", 1);
run_tests_with_file("../benchmark_files/small/sachs.bif", 1);
run_tests_with_file("../benchmark_files/small/survey.bif", 1);
/*
run_tests_with_file("../benchmark_files/medium/alarm.bif", 1);
run_tests_with_file("../benchmark_files/medium/barley.bif", 1);
//run_tests_with_file("../benchmark_files/medium/child.bif", 1);
run_tests_with_file("../benchmark_files/medium/hailfinder.bif", 1);
run_tests_with_file("../benchmark_files/medium/insurance.bif", 1);
run_tests_with_file("../benchmark_files/medium/mildew.bif", 1);
run_tests_with_file("../benchmark_files/medium/water.bif", 1);
run_tests_with_file("../benchmark_files/large/hepar2.bif", 1);
run_tests_with_file("../benchmark_files/large/win95pts.bif", 1);
run_tests_with_file("../benchmark_files/very_large/andes.bif", 1);
run_tests_with_file("../benchmark_files/very_large/diabetes.bif", 1);
run_tests_with_file("../benchmark_files/very_large/link.bif", 1);
run_tests_with_file("../benchmark_files/very_large/munin1.bif", 1);
run_tests_with_file("../benchmark_files/very_large/munin2.bif", 1);
run_tests_with_file("../benchmark_files/very_large/munin3.bif", 1);
run_tests_with_file("../benchmark_files/very_large/munin4.bif", 1);
//run_tests_with_file("../benchmark_files/very_large/munin.bif", 1);
run_tests_with_file("../benchmark_files/very_large/pathfinder.bif", 1);
run_tests_with_file("../benchmark_files/very_large/pigs.bif", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_1000_2000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_1000_2000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_1000_2000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_2000_4000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_2000_4000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_2000_4000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_5000_10000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_5000_10000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_5000_10000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_10000_20000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_10000_20000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_10000_20000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_12000_24000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_12000_24000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_12000_24000_3.xml", 1);*/
/*run_tests_with_xml_file("../benchmark_files/xml/bf_15000_30000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_15000_30000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_15000_30000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_20000_40000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_20000_40000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_20000_40000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_25000_50000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_25000_50000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_25000_50000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_30000_60000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_30000_60000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_30000_60000_3.xml", 1);*/
/*run_tests_with_xml_file("../benchmark_files/xml/bf_40000_80000_1.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_40000_80000_2.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_40000_80000_3.xml", 1);
run_tests_with_xml_file("../benchmark_files/xml/bf_80000_160000_2.xml", 1);*/
run_tests_with_xml_file("../benchmark_files/xml2/10_20.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/100_200.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/1000_2000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/10000_20000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/100000_200000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/200000_400000.xml", 1, out);
//run_tests_with_xml_file("../benchmark_files/xml2/300000_600000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/400000_800000.xml", 1, out);
//run_tests_with_xml_file("../benchmark_files/xml2/500000_1000000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/600000_1200000.xml", 1, out);
//run_tests_with_xml_file("../benchmark_files/xml2/700000_1400000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/800000_1600000.xml", 1, out);
//run_tests_with_xml_file("../benchmark_files/xml2/900000_1800000.xml", 1, out);
run_tests_with_xml_file("../benchmark_files/xml2/1000000_2000000.xml", 1, out);
//run_tests_with_xml_file("../benchmark_files/xml2/10000000_20000000.xml", 1, out);
fclose(out);
return 0;
}
/**
 * Check the return value of a CUDA runtime API call and exit the
 * application if the call has failed.
 *
 * @param file       source file of the checked call (usually __FILE__)
 * @param line       source line of the checked call (usually __LINE__)
 * @param statement  textual form of the checked expression
 * @param err        status code returned by the CUDA runtime call
 */
static void CheckCudaErrorAux (const char *file, unsigned int line, const char *statement, cudaError_t err)
{
if (err == cudaSuccess)
return;
/* report on stderr so CSV/benchmark output on stdout stays clean
 * (the original printed the diagnostic to stdout) */
fprintf(stderr, "%s returned %s (%d) at %s:%d\n", statement, cudaGetErrorString(err), err, file, line);
exit (EXIT_FAILURE);
}
|
c709e6947f2bece1557b461eb62fb2c6128bfabc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernelCalcSum_ShareMem.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int *dataArray = NULL;
hipMalloc(&dataArray, XSIZE*YSIZE);
int arraySize = XSIZE*YSIZE;
int *sum = NULL;
hipMalloc(&sum, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernelCalcSum_ShareMem), dim3(gridBlock),dim3(threadBlock), 0, 0, dataArray,arraySize,sum);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernelCalcSum_ShareMem), dim3(gridBlock),dim3(threadBlock), 0, 0, dataArray,arraySize,sum);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernelCalcSum_ShareMem), dim3(gridBlock),dim3(threadBlock), 0, 0, dataArray,arraySize,sum);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c709e6947f2bece1557b461eb62fb2c6128bfabc.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernelCalcSum_ShareMem.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Auto-generated benchmark driver for kernelCalcSum_ShareMem.
 * usage: prog <matrix_count>
 * Crosses the first <matrix_count> matrix shapes with 20 block
 * configurations and prints "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" per
 * configuration (mean over 1000 launches after a warm-up).
 */
int main(int argc, char **argv) {
/* guard the argv[1] dereference: the original crashed without an argument */
if (argc < 2) {
fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]);
return 1;
}
cudaSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int *dataArray = NULL;
/* bug fix: the original allocated XSIZE*YSIZE *bytes* while the kernel is
 * handed arraySize = XSIZE*YSIZE *ints*, reading 4x past the allocation;
 * scale both allocations by sizeof(int) */
cudaMalloc(&dataArray, XSIZE*YSIZE*sizeof(int));
int arraySize = XSIZE*YSIZE;
int *sum = NULL;
cudaMalloc(&sum, XSIZE*YSIZE*sizeof(int));
/* round the launch domain up to a multiple of the block dimensions */
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernelCalcSum_ShareMem<<<gridBlock,threadBlock>>>(dataArray,arraySize,sum);
cudaDeviceSynchronize();
/* warm-up launches */
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernelCalcSum_ShareMem<<<gridBlock,threadBlock>>>(dataArray,arraySize,sum);
}
/* kernel launches are asynchronous: synchronize before and after the
 * timed region so it measures execution, not just launch overhead */
cudaDeviceSynchronize();
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernelCalcSum_ShareMem<<<gridBlock,threadBlock>>>(dataArray,arraySize,sum);
}
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
/* release per-configuration buffers (previously leaked every iteration) */
cudaFree((void*)dataArray);
cudaFree(sum);
}
}}
23b315ae4c7dd3c66e3cdb131534d731dd025253.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zswapdblk.cu normal z -> c, Tue Feb 9 16:05:32 2016
*/
#include "magma_internal.h"
/*********************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
/*
 * Device kernel: thread block bx swaps diagonal block bx of dA and dB.
 * Each thread owns one row (tx) of the nb x nb block and exchanges its nb
 * elements column by column.  Launched as <<< nblocks, nb >>> by the host
 * wrapper below, so blockDim.x == nb.
 */
__global__ void
cswapdblk_kernel( int nb,
magmaFloatComplex *dA, int ldda, int inca,
magmaFloatComplex *dB, int lddb, int incb )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
/* advance both pointers to row tx of diagonal block bx */
dA += tx + bx * nb * (ldda + inca);
dB += tx + bx * nb * (lddb + incb);
magmaFloatComplex tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
/**
Purpose
-------
cswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB COMPLEX array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array db.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
/* Host entry point (HIP build): validates arguments per the doxygen block
 * above, then swaps the n/nb diagonal blocks of dA and dB on `queue`. */
extern "C" void
magmablas_cswapdblk_q(
magma_int_t n, magma_int_t nb,
magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaFloatComplex_ptr dB, magma_int_t lddb, magma_int_t incb,
magma_queue_t queue )
{
magma_int_t nblocks = n / nb;
/* argument validation; negative `info` encodes the offending argument
 * position, reported via magma_xerbla */
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
/* one thread block per diagonal block, one thread per row */
if ( nblocks > 0 ) {
hipLaunchKernelGGL(( cswapdblk_kernel), dim3(nblocks), dim3(nb), 0, queue->cuda_stream() ,
nb, dA, ldda, inca,
dB, lddb, incb );
}
}
| 23b315ae4c7dd3c66e3cdb131534d731dd025253.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zswapdblk.cu normal z -> c, Tue Feb 9 16:05:32 2016
*/
#include "magma_internal.h"
/*********************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
/*
 * Swap diagonal block blockIdx.x of dA and dB.  Thread threadIdx.x handles
 * one row of the nb x nb block; the host wrapper launches <<< nblocks, nb >>>
 * so blockDim.x == nb.
 */
__global__ void
cswapdblk_kernel( int nb,
magmaFloatComplex *dA, int ldda, int inca,
magmaFloatComplex *dB, int lddb, int incb )
{
const int row = threadIdx.x;
const int blk = blockIdx.x;
/* first element of this thread's row inside diagonal block `blk` */
magmaFloatComplex *pA = dA + row + blk * nb * (ldda + inca);
magmaFloatComplex *pB = dB + row + blk * nb * (lddb + incb);
#pragma unroll
for( int col = 0; col < nb; col++, pA += ldda, pB += lddb ) {
const magmaFloatComplex held = *pA;
*pA = *pB;
*pB = held;
}
}
/**
Purpose
-------
cswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB COMPLEX array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array db.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
/* Host entry point (CUDA build): validates arguments per the doxygen block
 * above, then swaps the n/nb diagonal blocks of dA and dB on `queue`. */
extern "C" void
magmablas_cswapdblk_q(
magma_int_t n, magma_int_t nb,
magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaFloatComplex_ptr dB, magma_int_t lddb, magma_int_t incb,
magma_queue_t queue )
{
magma_int_t nblocks = n / nb;
/* argument validation; negative `info` encodes the offending argument
 * position, reported via magma_xerbla */
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
/* one thread block per diagonal block, one thread per row */
if ( nblocks > 0 ) {
cswapdblk_kernel<<< nblocks, nb, 0, queue->cuda_stream() >>>
( nb, dA, ldda, inca,
dB, lddb, incb );
}
}
|
fab51d83aebd47678f712f36fa4def96873e4d2e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zmergeidr.cu, normal z -> s, Thu Oct 8 23:05:47 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from sidr into one.
/* -------------------------------------------------------------------------- */
/*
 * Elementwise dt = drs - dr over an m x n multi-vector stored column-major
 * with leading dimension num_rows (indexing i + j*num_rows).  One thread
 * per row; each thread loops over all columns of its row.
 */
__global__ void
magma_sidr_smoothing_1_kernel(
int num_rows,
int num_cols,
float *drs,
float *dr,
float *dt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dt[ i+j*num_rows ] = drs[ i+j*num_rows ] - dr[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
dt = drs - dr
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
drs magmaFloat_ptr
vector
@param[in]
dr magmaFloat_ptr
vector
@param[in,out]
dt magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
/* Host launcher (HIP build) for the dt = drs - dr kernel above:
 * one thread per row, BLOCK_SIZE threads per block. */
extern "C"
magma_int_t
magma_sidr_smoothing_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloat_ptr drs,
magmaFloat_ptr dr,
magmaFloat_ptr dt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_sidr_smoothing_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, drs, dr, dt );
return MAGMA_SUCCESS;
}
/*
 * Elementwise update of dxs (column-major m x n, leading dim num_rows):
 *   dxs = dxs + omega*dxs - omega*dx  ==  dxs + omega*(dxs - dx)
 * NOTE(review): the doxygen comment below documents this as
 * dxs = dxs - gamma*(dxs - dx), i.e. the opposite sign on omega.  Either
 * callers pass a negated omega or one of the two is wrong -- verify
 * against the IDR smoothing step in the solver before changing anything.
 */
__global__ void
magma_sidr_smoothing_2_kernel(
int num_rows,
int num_cols,
float omega,
float *dx,
float *dxs )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ]
- omega * dx[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
dxs = dxs - gamma*(dxs-dx)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
omega float
scalar
@param[in]
dx magmaFloat_ptr
vector
@param[in,out]
dxs magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
/* Host launcher (HIP build) for the dxs smoothing kernel above:
 * one thread per row, BLOCK_SIZE threads per block. */
extern "C"
magma_int_t
magma_sidr_smoothing_2(
magma_int_t num_rows,
magma_int_t num_cols,
float omega,
magmaFloat_ptr dx,
magmaFloat_ptr dxs,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_sidr_smoothing_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, omega, dx, dxs);
return MAGMA_SUCCESS;
}
| fab51d83aebd47678f712f36fa4def96873e4d2e.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zmergeidr.cu, normal z -> s, Thu Oct 8 23:05:47 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from sidr into one.
/* -------------------------------------------------------------------------- */
/*
 * dt = drs - dr, elementwise over a column-major m x n multi-vector with
 * leading dimension num_rows.  One thread per row; each thread walks all
 * columns of its row.
 */
__global__ void
magma_sidr_smoothing_1_kernel(
int num_rows,
int num_cols,
float *drs,
float *dr,
float *dt )
{
const int row = blockIdx.x * blockDim.x + threadIdx.x;
if ( row >= num_rows )
return;
for( int col = 0; col < num_cols; col++ ) {
const int idx = row + col * num_rows;
dt[ idx ] = drs[ idx ] - dr[ idx ];
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
dt = drs - dr
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
drs magmaFloat_ptr
vector
@param[in]
dr magmaFloat_ptr
vector
@param[in,out]
dt magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
/* Host launcher (CUDA build) for the dt = drs - dr kernel above:
 * one thread per row, BLOCK_SIZE threads per block. */
extern "C"
magma_int_t
magma_sidr_smoothing_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloat_ptr drs,
magmaFloat_ptr dr,
magmaFloat_ptr dt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_sidr_smoothing_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, drs, dr, dt );
return MAGMA_SUCCESS;
}
/*
 * Elementwise update of dxs (column-major m x n, leading dim num_rows):
 *   dxs = dxs + omega*dxs - omega*dx  ==  dxs + omega*(dxs - dx)
 * NOTE(review): the doxygen comment below documents this as
 * dxs = dxs - gamma*(dxs - dx), i.e. the opposite sign on omega.  Either
 * callers pass a negated omega or one of the two is wrong -- verify
 * against the IDR smoothing step in the solver before changing anything.
 */
__global__ void
magma_sidr_smoothing_2_kernel(
int num_rows,
int num_cols,
float omega,
float *dx,
float *dxs )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ]
- omega * dx[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
dxs = dxs - gamma*(dxs-dx)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
omega float
scalar
@param[in]
dx magmaFloat_ptr
vector
@param[in,out]
dxs magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
/* Host launcher (CUDA build) for the dxs smoothing kernel above:
 * one thread per row, BLOCK_SIZE threads per block. */
extern "C"
magma_int_t
magma_sidr_smoothing_2(
magma_int_t num_rows,
magma_int_t num_cols,
float omega,
magmaFloat_ptr dx,
magmaFloat_ptr dxs,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_sidr_smoothing_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, omega, dx, dxs);
return MAGMA_SUCCESS;
}
|
8cf08a0b4385af26ed4b7495296c6273a2db2fd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common/book.h"
#define N 10
/* Elementwise vector add c = a + b.  The host launches one single-thread
 * block per element (grid of N blocks, 1 thread each), so blockIdx.x is
 * the element index; the guard protects against oversized grids. */
__global__ void add( int *a, int *b, int *c ) {
int tid = blockIdx.x; // handle the data at this index
if(tid < N)
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the cpu
HANDLE_ERROR( hipMalloc( (void**)&dev_a, N * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_b, N * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_c, N * sizeof(int) ) );
for( int i = 0; i < N; i++ ) {
a[i] = -i;
b[i] = i * i;
}
HANDLE_ERROR( hipMemcpy( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice ) );
hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, dev_a, dev_b, dev_c );
HANDLE_ERROR( hipMemcpy( c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost ) );
for( int i = 0; i < N; i++ ){
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
return 0;
} | 8cf08a0b4385af26ed4b7495296c6273a2db2fd5.cu | #include "common/book.h"
#define N 10
/* Elementwise c[i] = a[i] + b[i].  The host launches <<<N,1>>> -- one
 * single-thread block per element -- so blockIdx.x is the element index. */
__global__ void add( int *a, int *b, int *c ) {
const int idx = blockIdx.x;   // one block per element
if (idx >= N)
return;                   // guard against an oversized grid
c[idx] = a[idx] + b[idx];
}
/* Demo driver: fills a and b on the host, adds them on the GPU with the
 * `add` kernel (one block per element), copies the result back, and
 * prints each sum.  HANDLE_ERROR aborts on any CUDA failure. */
int main( void ) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU (the original comment said "cpu")
HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_c, N * sizeof(int) ) );
// fill the host inputs: a[i] = -i, b[i] = i^2
for( int i = 0; i < N; i++ ) {
a[i] = -i;
b[i] = i * i;
}
HANDLE_ERROR( cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice ) );
add<<<N,1>>>( dev_a, dev_b, dev_c );
// blocking device-to-host copy also synchronizes with the kernel above
HANDLE_ERROR( cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost ) );
for( int i = 0; i < N; i++ ){
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
return 0;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.