| hip_filename (string, lengths 5–84) | hip_content (string, lengths 79–9.69M) | cuda_filename (string, lengths 4–83) | cuda_content (string, lengths 19–9.69M) |
|---|---|---|---|
7de1d26f0fcaecc969c9de438e0a96af1bb5b044.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <algorithm>
#include <hip/hip_runtime.h>
// A schedulable task: its identifier plus the execution time observed on
// some machine. Doubles as a comparator functor ordering by ascending time.
struct Task {
    uint id;     // task identifier (row index in the input matrix)
    float time;  // execution time of this task on a given machine

    Task(uint id, float time) : id(id), time(time) {}
    Task() : id(0), time(0) {}

    // Comparator: true when `a` runs faster than `b`.
    bool operator()(Task a, Task b) { return a.time < b.time; }
};
// A machine: its identifier plus a cost weight. Doubles as a comparator
// functor ordering by ascending cost.
struct Machine {
    int id;      // machine identifier (column index in the input)
    float cost;  // cost weight applied to this machine's completion time

    Machine() : id(0), cost(0) {}

    // Comparator: true when `a` is cheaper than `b`.
    bool operator()(Machine a, Machine b) { return a.cost < b.cost; }
};
/**
 * Greedy scheduler: walks machines in the order of `machines` (sorted by
 * cost by the caller, see machine_sorting) and packs onto each machine the
 * unscheduled task whose resulting completion time is largest while still
 * not exceeding max_time.
 *
 * Outputs: task_scheduled[id] / task_map[id] (machine chosen) and
 * completion_times[j] per machine row j of `tasks`.
 *
 * Fixes vs. original: (1) when every machine has been tried and tasks
 * remain, the loop now terminates after printing the diagnostic instead of
 * reading machines[q] out of bounds forever; (2) max_time is taken as
 * float — the caller reads it with scanf("%f") and the old int parameter
 * silently truncated it.
 */
void min_min(Task* tasks, float* completion_times, int* task_map,
             bool* task_scheduled, Machine* machines,
             int t, int m, float max_time) {
    int count = 0;  // tasks scheduled so far
    int q = 0;      // position in the cost-ordered machine list
    while (count < t) {
        // All machines exhausted but tasks remain: report and stop rather
        // than indexing past the end of `machines`.
        if (q == m) {
            printf("### ERROR ###\n");
            break;
        }
        const int j = machines[q].id;  // row of `tasks` for this machine
        // Keep assigning tasks to machine j (at most one per pass, up to t
        // passes) until nothing more fits under the deadline.
        for (int k = 0; k < t; k++) {
            bool valid_task = false;
            float max_value = 0;
            uint task_id = 0;
            for (int i = 0; i < t; i++) {
                const int id = tasks[j * t + i].id;
                if (task_scheduled[id]) continue;
                const float current_time =
                    completion_times[j] + tasks[j * t + i].time;
                if (current_time > max_time) continue;  // misses the deadline
                if (current_time > max_value) {  // latest-finishing fit wins
                    task_id = id;
                    max_value = current_time;
                    valid_task = true;
                }
            }
            if (valid_task) {
                task_scheduled[task_id] = true;
                task_map[task_id] = j;
                completion_times[j] = max_value;
                count++;
            }
        }
        q++;
    }
}
// Reorders the machine array in place so cheaper machines come first;
// stable_sort preserves the input order of equal-cost machines.
void machine_sorting(Machine* machines, int m) {
    std::stable_sort(machines, machines + m, Machine());
}
// Sorts each machine's slice of the task matrix (t consecutive entries)
// by ascending execution time, independently per machine.
void segmented_sorting(Task* tasks, int m, int t) {
    for (int machine = 0; machine < m; ++machine) {
        Task* first = tasks + machine * t;
        std::stable_sort(first, first + t, Task());
    }
}
// Prints a generic t x m matrix stored row-major (row stride m).
template<typename T>
void print(T* vec, uint t, uint m) {
std::cout << "\n";
for (uint i = 0; i < t; i++) {
for (uint j = 0; j < m; j++) {
std::cout << vec[i * m + j] << " ";
}
std::cout << "\n";
}
}
// Prints a flat vector of t printable values on one line.
template<typename T>
void print(T* vec, uint t) {
std::cout << "\n";
for (uint i = 0; i < t; i++) {
std::cout << vec[i] << " ";
}
std::cout << "\n";
}
// Prints the task matrix machine-by-machine (row j holds machine j's tasks).
void print(Task* vec, uint t, uint m) {
std::cout << "\n";
for (uint j = 0; j < m; j++) {
for (uint i = 0; i < t; i++) {
std::cout << "id=" << vec[j * t + i].id << " time="
<< vec[j * t + i].time << "\t";
}
std::cout << "\n";
}
}
// Prints every machine's id and cost (the cost is labelled "time" in the
// output string -- kept as-is since it is runtime output).
void print(Machine* vec, uint m) {
std::cout << "\n";
for (uint j = 0; j < m; j++) {
std::cout << "id=" << vec[j].id << " time="
<< vec[j].cost << "\t";
}
std::cout << "\n";
}
// Prints cost * completion_time for each machine and the accumulated total
// ("Custo Total" is Portuguese for total cost).
void print(float* completion_times, Machine* vec, uint m) {
float sum = 0;
for (uint j = 0; j < m; j++) {
uint id = vec[j].id;
float cost = vec[j].cost * completion_times[id];
std::cout << vec[j].cost << " * " << completion_times[id] << " = " << cost << "\n";
sum += cost;
}
std::cout << "Custo Total: " << sum << "\n";
}
/**
 * Reads from stdin: t (tasks), m (machines), max_time, a t x m matrix of
 * execution times, then m machine costs. Sorts machines by cost, runs
 * min_min(), and prints either the elapsed time (when built with
 * -DELAPSED_TIME=1) or the per-machine and total cost.
 *
 * Fixes vs. original: scanf results are checked instead of being stored
 * into a float / discarded; `machines` is freed (was leaked); the HIP
 * events are destroyed.
 */
int main(int argc, char **argv) {
    int t, m;
    float max_time, aux;
    // Validate the header; running on garbage sizes would malloc nonsense.
    if (scanf("%d", &t) != 1 || scanf("%d", &m) != 1 ||
        scanf("%f", &max_time) != 1) {
        fprintf(stderr, "invalid input header\n");
        return 1;
    }
    Task *tasks = (Task *) malloc(sizeof(Task) * (t * m));
    bool *task_scheduled = (bool *) malloc(sizeof(bool) * t);
    int *task_map = (int *) malloc(sizeof(int) * t);
    float *completion_times = (float *) malloc(sizeof(float) * m);
    Machine *machines = (Machine *) malloc(sizeof(Machine) * m);
    // Read the task/machine time matrix. Stored machine-major: row j holds
    // machine j's times for all t tasks.
    for (int i = 0; i < t; i++) {
        for (int j = 0; j < m; j++) {
            if (scanf("%f", &aux) != 1) {
                fprintf(stderr, "invalid time matrix\n");
                return 1;
            }
            tasks[j * t + i].id = i;
            tasks[j * t + i].time = aux;
            completion_times[j] = 0;
        }
        task_map[i] = -1;
        task_scheduled[i] = false;
    }
    // Read the per-machine cost vector.
    for (int j = 0; j < m; j++) {
        if (scanf("%f", &aux) != 1) {
            fprintf(stderr, "invalid cost vector\n");
            return 1;
        }
        machines[j].id = j;
        machines[j].cost = aux;
    }
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    machine_sorting(machines, m);
    min_min(tasks, completion_times, task_map, task_scheduled, machines, t, m, max_time);
    hipEventRecord(stop);
    // ELAPSED_TIME is a compile-time flag (e.g. -DELAPSED_TIME=1).
    if (ELAPSED_TIME == 1) {
        hipEventSynchronize(stop);
        float milliseconds = 0;
        hipEventElapsedTime(&milliseconds, start, stop);
        std::cout << milliseconds << "\n";
    } else {
        print(completion_times, machines, m);
    }
    hipEventDestroy(start);
    hipEventDestroy(stop);
    free(task_scheduled);
    free(task_map);
    free(tasks);
    free(completion_times);
    free(machines);  // was leaked in the original
    return 0;
}
| 7de1d26f0fcaecc969c9de438e0a96af1bb5b044.cu | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <algorithm>
#include <cuda.h>
// A schedulable task: its identifier plus the execution time observed on
// some machine. Doubles as a comparator functor ordering by ascending time.
struct Task {
    uint id;     // task identifier (row index in the input matrix)
    float time;  // execution time of this task on a given machine

    Task(uint id, float time) : id(id), time(time) {}
    Task() : id(0), time(0) {}

    // Comparator: true when `a` runs faster than `b`.
    bool operator()(Task a, Task b) { return a.time < b.time; }
};
// A machine: its identifier plus a cost weight. Doubles as a comparator
// functor ordering by ascending cost.
struct Machine {
    int id;      // machine identifier (column index in the input)
    float cost;  // cost weight applied to this machine's completion time

    Machine() : id(0), cost(0) {}

    // Comparator: true when `a` is cheaper than `b`.
    bool operator()(Machine a, Machine b) { return a.cost < b.cost; }
};
/**
 * Greedy scheduler: walks machines in the order of `machines` (sorted by
 * cost by the caller, see machine_sorting) and packs onto each machine the
 * unscheduled task whose resulting completion time is largest while still
 * not exceeding max_time.
 *
 * Outputs: task_scheduled[id] / task_map[id] (machine chosen) and
 * completion_times[j] per machine row j of `tasks`.
 *
 * Fixes vs. original: (1) when every machine has been tried and tasks
 * remain, the loop now terminates after printing the diagnostic instead of
 * reading machines[q] out of bounds forever; (2) max_time is taken as
 * float — the caller reads it with scanf("%f") and the old int parameter
 * silently truncated it.
 */
void min_min(Task* tasks, float* completion_times, int* task_map,
             bool* task_scheduled, Machine* machines,
             int t, int m, float max_time) {
    int count = 0;  // tasks scheduled so far
    int q = 0;      // position in the cost-ordered machine list
    while (count < t) {
        // All machines exhausted but tasks remain: report and stop rather
        // than indexing past the end of `machines`.
        if (q == m) {
            printf("### ERROR ###\n");
            break;
        }
        const int j = machines[q].id;  // row of `tasks` for this machine
        // Keep assigning tasks to machine j (at most one per pass, up to t
        // passes) until nothing more fits under the deadline.
        for (int k = 0; k < t; k++) {
            bool valid_task = false;
            float max_value = 0;
            uint task_id = 0;
            for (int i = 0; i < t; i++) {
                const int id = tasks[j * t + i].id;
                if (task_scheduled[id]) continue;
                const float current_time =
                    completion_times[j] + tasks[j * t + i].time;
                if (current_time > max_time) continue;  // misses the deadline
                if (current_time > max_value) {  // latest-finishing fit wins
                    task_id = id;
                    max_value = current_time;
                    valid_task = true;
                }
            }
            if (valid_task) {
                task_scheduled[task_id] = true;
                task_map[task_id] = j;
                completion_times[j] = max_value;
                count++;
            }
        }
        q++;
    }
}
// Reorders the machine array in place so cheaper machines come first;
// stable_sort preserves the input order of equal-cost machines.
void machine_sorting(Machine* machines, int m) {
    std::stable_sort(machines, machines + m, Machine());
}
// Sorts each machine's slice of the task matrix (t consecutive entries)
// by ascending execution time, independently per machine.
void segmented_sorting(Task* tasks, int m, int t) {
    for (int machine = 0; machine < m; ++machine) {
        Task* first = tasks + machine * t;
        std::stable_sort(first, first + t, Task());
    }
}
// Prints a generic t x m matrix stored row-major (row stride m).
template<typename T>
void print(T* vec, uint t, uint m) {
std::cout << "\n";
for (uint i = 0; i < t; i++) {
for (uint j = 0; j < m; j++) {
std::cout << vec[i * m + j] << " ";
}
std::cout << "\n";
}
}
// Prints a flat vector of t printable values on one line.
template<typename T>
void print(T* vec, uint t) {
std::cout << "\n";
for (uint i = 0; i < t; i++) {
std::cout << vec[i] << " ";
}
std::cout << "\n";
}
// Prints the task matrix machine-by-machine (row j holds machine j's tasks).
void print(Task* vec, uint t, uint m) {
std::cout << "\n";
for (uint j = 0; j < m; j++) {
for (uint i = 0; i < t; i++) {
std::cout << "id=" << vec[j * t + i].id << " time="
<< vec[j * t + i].time << "\t";
}
std::cout << "\n";
}
}
// Prints every machine's id and cost (the cost is labelled "time" in the
// output string -- kept as-is since it is runtime output).
void print(Machine* vec, uint m) {
std::cout << "\n";
for (uint j = 0; j < m; j++) {
std::cout << "id=" << vec[j].id << " time="
<< vec[j].cost << "\t";
}
std::cout << "\n";
}
// Prints cost * completion_time for each machine and the accumulated total
// ("Custo Total" is Portuguese for total cost).
void print(float* completion_times, Machine* vec, uint m) {
float sum = 0;
for (uint j = 0; j < m; j++) {
uint id = vec[j].id;
float cost = vec[j].cost * completion_times[id];
std::cout << vec[j].cost << " * " << completion_times[id] << " = " << cost << "\n";
sum += cost;
}
std::cout << "Custo Total: " << sum << "\n";
}
/**
 * Reads from stdin: t (tasks), m (machines), max_time, a t x m matrix of
 * execution times, then m machine costs. Sorts machines by cost, runs
 * min_min(), and prints either the elapsed time (when built with
 * -DELAPSED_TIME=1) or the per-machine and total cost.
 *
 * Fixes vs. original: scanf results are checked instead of being stored
 * into a float / discarded; `machines` is freed (was leaked); the CUDA
 * events are destroyed.
 */
int main(int argc, char **argv) {
    int t, m;
    float max_time, aux;
    // Validate the header; running on garbage sizes would malloc nonsense.
    if (scanf("%d", &t) != 1 || scanf("%d", &m) != 1 ||
        scanf("%f", &max_time) != 1) {
        fprintf(stderr, "invalid input header\n");
        return 1;
    }
    Task *tasks = (Task *) malloc(sizeof(Task) * (t * m));
    bool *task_scheduled = (bool *) malloc(sizeof(bool) * t);
    int *task_map = (int *) malloc(sizeof(int) * t);
    float *completion_times = (float *) malloc(sizeof(float) * m);
    Machine *machines = (Machine *) malloc(sizeof(Machine) * m);
    // Read the task/machine time matrix. Stored machine-major: row j holds
    // machine j's times for all t tasks.
    for (int i = 0; i < t; i++) {
        for (int j = 0; j < m; j++) {
            if (scanf("%f", &aux) != 1) {
                fprintf(stderr, "invalid time matrix\n");
                return 1;
            }
            tasks[j * t + i].id = i;
            tasks[j * t + i].time = aux;
            completion_times[j] = 0;
        }
        task_map[i] = -1;
        task_scheduled[i] = false;
    }
    // Read the per-machine cost vector.
    for (int j = 0; j < m; j++) {
        if (scanf("%f", &aux) != 1) {
            fprintf(stderr, "invalid cost vector\n");
            return 1;
        }
        machines[j].id = j;
        machines[j].cost = aux;
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    machine_sorting(machines, m);
    min_min(tasks, completion_times, task_map, task_scheduled, machines, t, m, max_time);
    cudaEventRecord(stop);
    // ELAPSED_TIME is a compile-time flag (e.g. -DELAPSED_TIME=1).
    if (ELAPSED_TIME == 1) {
        cudaEventSynchronize(stop);
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        std::cout << milliseconds << "\n";
    } else {
        print(completion_times, machines, m);
    }
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(task_scheduled);
    free(task_map);
    free(tasks);
    free(completion_times);
    free(machines);  // was leaked in the original
    return 0;
}
|
b0bc40ca5a0d424fc7c9b040ee71a24b99857dfd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
// One thread per element of the col buffer's spatial grid:
// n = channels * height_col * width_col. Each thread copies its
// kernel_h x kernel_w input patch into the matching column of data_col,
// writing 0 for locations that fall into the zero-padding.
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
// Decompose the flat index into (channel, output row, output column).
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
// Top-left corner of this output position's receptive field; may be
// negative because of the padding offset.
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i;
int w_im = w_offset + j;
// Zero-fill when the patch element lies outside the image bounds.
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
// Host wrapper: computes the col-buffer geometry and launches
// im2col_gpu_kernel with one thread per output element.
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
double* data_col);
// N-dimensional im2col. num_axes is a template parameter so d_temp/d_iter
// can be fixed-size local arrays. Shape arrays are laid out as
// [channels, spatial_0, ..., spatial_{num_axes-1}]; the i + 1 indexing
// below skips the channel entry.
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
Dtype* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
int i;
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % col_shape[i + 1];
channel_in /= col_shape[i + 1];
channel_out *= kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * stride[i] - pad[i];
channel_in *= im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= col_shape[i + 1];
d_iter[i] = 0;
}
Dtype* data_col_ptr = data_col + channel_out;
const Dtype* data_im_ptr = data_im + channel_in;
bool incremented;
// Odometer-style walk over every kernel-offset combination.
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];
if (!in_range) { break; }
}
if (in_range) {
int data_im_offset = d_iter[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= im_shape[i + 1];
data_im_offset += d_iter[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
// Padding region: write an explicit zero.
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
// Host dispatcher for the N-D im2col kernel. num_axes must be a
// compile-time constant (the kernel uses fixed-size local arrays), so we
// switch over the supported range 1-10 and launch the matching
// instantiation with one thread per col-buffer element (num_kernels).
//
// FIX: the automatic hipify conversion had spliced `hipLaunchKernelGGL((`
// into the NOLINT comments, leaving unparsable launch statements. The
// launches below are restored by hand; HIP_KERNEL_NAME is required because
// the kernel name contains a comma inside its template argument list.
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
    const int num_kernels, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    Dtype* data_col) {
  switch (num_spatial_axes) {
  case 1:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(im2col_nd_gpu_kernel<Dtype, 1>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, data_col);
    break;
  case 2:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(im2col_nd_gpu_kernel<Dtype, 2>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, data_col);
    break;
  case 3:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(im2col_nd_gpu_kernel<Dtype, 3>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, data_col);
    break;
  case 4:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(im2col_nd_gpu_kernel<Dtype, 4>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, data_col);
    break;
  case 5:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(im2col_nd_gpu_kernel<Dtype, 5>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, data_col);
    break;
  case 6:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(im2col_nd_gpu_kernel<Dtype, 6>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, data_col);
    break;
  case 7:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(im2col_nd_gpu_kernel<Dtype, 7>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, data_col);
    break;
  case 8:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(im2col_nd_gpu_kernel<Dtype, 8>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, data_col);
    break;
  case 9:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(im2col_nd_gpu_kernel<Dtype, 9>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, data_col);
    break;
  case 10:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(im2col_nd_gpu_kernel<Dtype, 10>),
        dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im, im_shape, col_shape,
        kernel_shape, pad, stride, data_col);
    break;
  default:
    LOG(FATAL) << "im2col_nd_gpu does not support computation with "
        << num_spatial_axes << " spatial axes";
  }
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
double* data_col);
// Inverse of im2col: each thread accumulates, for one image element, the
// contributions of every col-buffer entry whose receptive field covers it.
// n = channels * height * width; no atomics needed because each thread
// owns exactly one output element.
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_w) ? 0 : (w_im - kernel_w) / stride_w + 1;
const int w_col_end =
min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_h) ? 0 : (h_im - kernel_h) / stride_h + 1;
const int h_col_end =
min(h_im / stride_h + 1, height_col);
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c_im * kernel_h * kernel_w
+ (h_im - h_col * stride_h) * kernel_w + (w_im - w_col * stride_w);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
*/
// equivalent implementation
int offset = (c_im * kernel_h * kernel_w + h_im * kernel_w + w_im)
* height_col * width_col;
int coeff_h_col = (1 - stride_h * kernel_w * height_col) * width_col;
int coeff_w_col = (1 - stride_w * height_col * width_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
// Host wrapper: computes the col-buffer geometry and launches
// col2im_gpu_kernel with one thread per image element.
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, Dtype* data_im) {
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, double* data_im);
// N-dimensional col2im. One thread per image element; each walks (odometer
// style) over all col-buffer entries whose receptive field covers it and
// sums their contributions. Shape arrays are [channels, spatial...]; the
// i + 1 indexing skips the channel entry.
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
Dtype* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % im_shape[i + 1] + pad[i];
c_im /= im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
d_col_start[i] = d_col_iter[i] =
(d_im[i] < kernel_shape[i]) ?
0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;
d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
Dtype val = 0;
bool incremented = true;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
for (int i = num_axes - 1; i >= 0; --i) {
final_offset +=
(d_im[i] - d_col_iter[i] * stride[i]) * kernel_shape_prod;
kernel_shape_prod *= kernel_shape[i];
}
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
// Host dispatcher for the N-D col2im kernel. num_axes must be a
// compile-time constant (the kernel uses fixed-size local arrays), so we
// switch over the supported range 1-10 and launch the matching
// instantiation with one thread per image element (im_size).
//
// FIX: the automatic hipify conversion had spliced `hipLaunchKernelGGL((`
// into the NOLINT comments, leaving unparsable launch statements. The
// launches below are restored by hand; HIP_KERNEL_NAME is required because
// the kernel name contains a comma inside its template argument list.
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
    const int im_size, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    Dtype* data_im) {
  switch (num_spatial_axes) {
  case 1:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(col2im_nd_gpu_kernel<Dtype, 1>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, data_im);
    break;
  case 2:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(col2im_nd_gpu_kernel<Dtype, 2>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, data_im);
    break;
  case 3:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(col2im_nd_gpu_kernel<Dtype, 3>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, data_im);
    break;
  case 4:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(col2im_nd_gpu_kernel<Dtype, 4>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, data_im);
    break;
  case 5:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(col2im_nd_gpu_kernel<Dtype, 5>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, data_im);
    break;
  case 6:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(col2im_nd_gpu_kernel<Dtype, 6>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, data_im);
    break;
  case 7:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(col2im_nd_gpu_kernel<Dtype, 7>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, data_im);
    break;
  case 8:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(col2im_nd_gpu_kernel<Dtype, 8>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, data_im);
    break;
  case 9:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(col2im_nd_gpu_kernel<Dtype, 9>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, data_im);
    break;
  case 10:
    hipLaunchKernelGGL(HIP_KERNEL_NAME(col2im_nd_gpu_kernel<Dtype, 10>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, data_im);
    break;
  default:
    LOG(FATAL) << "col2im_nd_gpu does not support computation with "
        << num_spatial_axes << " spatial axes";
  }
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
double* data_im);
} // namespace caffe
| b0bc40ca5a0d424fc7c9b040ee71a24b99857dfd.cu | #include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
// One thread per element of the col buffer's spatial grid:
// n = channels * height_col * width_col. Each thread copies its
// kernel_h x kernel_w input patch into the matching column of data_col,
// writing 0 for locations that fall into the zero-padding.
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
// Decompose the flat index into (channel, output row, output column).
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
// Top-left corner of this output position's receptive field; may be
// negative because of the padding offset.
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i;
int w_im = w_offset + j;
// Zero-fill when the patch element lies outside the image bounds.
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
// Host wrapper: computes the col-buffer geometry and launches
// im2col_gpu_kernel with one thread per output element.
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
double* data_col);
// N-dimensional im2col. num_axes is a template parameter so d_temp/d_iter
// can be fixed-size local arrays. Shape arrays are laid out as
// [channels, spatial_0, ..., spatial_{num_axes-1}]; the i + 1 indexing
// below skips the channel entry.
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
Dtype* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
int i;
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % col_shape[i + 1];
channel_in /= col_shape[i + 1];
channel_out *= kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * stride[i] - pad[i];
channel_in *= im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= col_shape[i + 1];
d_iter[i] = 0;
}
Dtype* data_col_ptr = data_col + channel_out;
const Dtype* data_im_ptr = data_im + channel_in;
bool incremented;
// Odometer-style walk over every kernel-offset combination.
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1];
if (!in_range) { break; }
}
if (in_range) {
int data_im_offset = d_iter[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= im_shape[i + 1];
data_im_offset += d_iter[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
// Padding region: write an explicit zero.
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
// Host-side launcher for the N-dimensional im2col kernel.
// The kernel takes the axis count as a template parameter so its per-thread
// index arrays have compile-time size; this function dispatches the runtime
// axis count (1..10) to the matching instantiation.  The original ten
// hand-written cases were identical except for that constant, so they are
// generated from a single macro to keep them consistent.
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
    const int num_kernels, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    Dtype* data_col) {
#define IM2COL_ND_GPU_KERNEL_CASE(AXES)                                     \
  case AXES:                                                                \
    im2col_nd_gpu_kernel<Dtype, AXES>                                       \
        <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(        \
        num_kernels, data_im, im_shape, col_shape,                          \
        kernel_shape, pad, stride, data_col);                               \
    break
  switch (num_spatial_axes) {
  IM2COL_ND_GPU_KERNEL_CASE(1);
  IM2COL_ND_GPU_KERNEL_CASE(2);
  IM2COL_ND_GPU_KERNEL_CASE(3);
  IM2COL_ND_GPU_KERNEL_CASE(4);
  IM2COL_ND_GPU_KERNEL_CASE(5);
  IM2COL_ND_GPU_KERNEL_CASE(6);
  IM2COL_ND_GPU_KERNEL_CASE(7);
  IM2COL_ND_GPU_KERNEL_CASE(8);
  IM2COL_ND_GPU_KERNEL_CASE(9);
  IM2COL_ND_GPU_KERNEL_CASE(10);
  default:
    LOG(FATAL) << "im2col_nd_gpu does not support computation with "
               << num_spatial_axes << " spatial axes";
  }
#undef IM2COL_ND_GPU_KERNEL_CASE
  // Surface any launch/execution error from the kernel above.
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation for the two element types used by Caffe.
template void im2col_nd_gpu<float>(const float* data_im,
    const int num_spatial_axes, const int col_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
    const int num_spatial_axes, const int col_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    double* data_col);
// 2-D col2im kernel.  One thread per image element (n = channels * height
// * width); each thread accumulates all column-buffer entries whose kernel
// window covers its pixel, so no atomics are needed.
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
    const int height, const int width, const int channels,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int height_col, const int width_col,
    Dtype* data_im) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype val = 0;
    // Padded image coordinates of this thread's pixel.
    const int w_im = index % width + pad_w;
    const int h_im = (index / width) % height + pad_h;
    const int c_im = index / (width * height);
    // compute the start and end of the output: the range of column
    // positions whose kernel window contains (h_im, w_im).
    const int w_col_start =
        (w_im < kernel_w) ? 0 : (w_im - kernel_w) / stride_w + 1;
    const int w_col_end =
        min(w_im / stride_w + 1, width_col);
    const int h_col_start =
        (h_im < kernel_h) ? 0 : (h_im - kernel_h) / stride_h + 1;
    const int h_col_end =
        min(h_im / stride_h + 1, height_col);
    /*
    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
      for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
        // the col location: [c * width * height + h_out, w_out]
        int c_col = c_im * kernel_h * kernel_w
            + (h_im - h_col * stride_h) * kernel_w + (w_im - w_col * stride_w);
        val += data_col[(c_col * height_col + h_col) * width_col + w_col];
      }
    }
    */
    // equivalent implementation: the commented loop above, rearranged so
    // the per-iteration index is an affine function of (h_col, w_col) --
    // a fixed offset plus precomputed coefficients.
    int offset = (c_im * kernel_h * kernel_w + h_im * kernel_w + w_im)
        * height_col * width_col;
    int coeff_h_col = (1 - stride_h * kernel_w * height_col) * width_col;
    int coeff_w_col = (1 - stride_w * height_col * width_col);
    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
      for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
        val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
      }
    }
    data_im[index] = val;
  }
}
// Host-side launcher for the 2-D col2im kernel.  Launches one thread per
// bottom (image) element; each thread sums the top (column) entries that
// overlap it, which avoids atomic operations entirely.
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, Dtype* data_im) {
  // Spatial extent of the column buffer implied by the conv geometry.
  const int out_h = (height + 2 * pad_h - kernel_h) / stride_h + 1;
  const int out_w = (width + 2 * pad_w - kernel_w) / stride_w + 1;
  const int total_threads = channels * height * width;
  // NOLINT_NEXT_LINE(whitespace/operators)
  col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(total_threads),
                             CAFFE_CUDA_NUM_THREADS>>>(
      total_threads, data_col, height, width, channels, kernel_h, kernel_w,
      pad_h, pad_w, stride_h, stride_w,
      out_h, out_w, data_im);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation for the two element types used by Caffe.
template void col2im_gpu<float>(const float* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, double* data_im);
// N-dimensional col2im kernel.  One thread per image element; each thread
// iterates (odometer-style) over all column positions whose kernel window
// covers its pixel and sums them, so no atomics are needed.  im_shape and
// col_shape are indexed at [i + 1] (leading channel entry, then num_axes
// spatial entries); num_axes is a template parameter so the scratch arrays
// below have compile-time size.
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    Dtype* data_im) {
  int d_im[num_axes];  // NOLINT(runtime/arrays)
  int d_col_iter[num_axes];  // NOLINT(runtime/arrays)
  int d_col_start[num_axes];  // NOLINT(runtime/arrays)
  int d_col_end[num_axes];  // NOLINT(runtime/arrays)
  CUDA_KERNEL_LOOP(index, n) {
    // Initialize channel_in, computed in the loop below, with intermediate
    // computations used to compute the spatial indices.
    int c_im = index;
    // Calculate d_im (image dimensions), padded; whatever remains in c_im
    // afterwards is the channel.
    for (int i = num_axes - 1; i >= 0; --i) {
      d_im[i] = c_im % im_shape[i + 1] + pad[i];
      c_im /= im_shape[i + 1];
    }
    // Calculate col start/end indices: per axis, the half-open range of
    // column positions whose kernel window contains d_im[i].
    bool done = false;
    for (int i = 0; i < num_axes; ++i) {
      d_col_start[i] = d_col_iter[i] =
          (d_im[i] < kernel_shape[i]) ?
          0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1;
      d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]);
      if (d_col_start[i] >= d_col_end[i]) {
        // Skip computation if the dimension is 0 at any spatial axis --
        // final val will be 0.
        data_im[index] = 0;
        done = true;
        break;  // for (int i = 0; i < num_axes; ++i)
      }
    }
    if (done) {
      continue;  // CUDA_KERNEL_LOOP(index, n)
    }
    // Loop over the col to compute the output val.
    Dtype val = 0;
    bool incremented = true;
    do {
      // Compute the final offset: first the kernel-offset part (which tap
      // of the kernel the current column position uses for this pixel)...
      int final_offset = 0;
      int kernel_shape_prod = 1;
      for (int i = num_axes - 1; i >= 0; --i) {
        final_offset +=
            (d_im[i] - d_col_iter[i] * stride[i]) * kernel_shape_prod;
        kernel_shape_prod *= kernel_shape[i];
      }
      final_offset += kernel_shape_prod * c_im;  // ...then the channel part...
      // ...then fold in the spatial column coordinates.
      for (int i = 0; i < num_axes; ++i) {
        final_offset *= col_shape[i + 1];
        final_offset += d_col_iter[i];
      }
      val += data_col[final_offset];
      // Advance the odometer over column positions, last axis fastest.
      incremented = false;
      for (int i = num_axes - 1; i >= 0; --i) {
        const int d_max = d_col_end[i];
        if (d_col_iter[i] == d_max - 1) {
          d_col_iter[i] = d_col_start[i];
        } else {  // d_col_iter[i] < d_max - 1
          ++d_col_iter[i];
          incremented = true;
          break;  // for (int i = num_axes - 1; i >= 0; --i)
        }
      }  // for (int i = num_axes - 1; i >= 0; --i)
    } while (incremented);
    data_im[index] = val;
  }  // CUDA_KERNEL_LOOP(index, n)
}
// Host-side launcher for the N-dimensional col2im kernel.
// The kernel takes the axis count as a template parameter so its per-thread
// index arrays have compile-time size; this function dispatches the runtime
// axis count (1..10) to the matching instantiation.  The original ten
// hand-written cases were identical except for that constant, so they are
// generated from a single macro to keep them consistent.
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
    const int im_size, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    Dtype* data_im) {
#define COL2IM_ND_GPU_KERNEL_CASE(AXES)                                     \
  case AXES:                                                                \
    col2im_nd_gpu_kernel<Dtype, AXES>                                       \
        <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(            \
        im_size, data_col, im_shape, col_shape,                             \
        kernel_shape, pad, stride, data_im);                                \
    break
  switch (num_spatial_axes) {
  COL2IM_ND_GPU_KERNEL_CASE(1);
  COL2IM_ND_GPU_KERNEL_CASE(2);
  COL2IM_ND_GPU_KERNEL_CASE(3);
  COL2IM_ND_GPU_KERNEL_CASE(4);
  COL2IM_ND_GPU_KERNEL_CASE(5);
  COL2IM_ND_GPU_KERNEL_CASE(6);
  COL2IM_ND_GPU_KERNEL_CASE(7);
  COL2IM_ND_GPU_KERNEL_CASE(8);
  COL2IM_ND_GPU_KERNEL_CASE(9);
  COL2IM_ND_GPU_KERNEL_CASE(10);
  default:
    LOG(FATAL) << "col2im_nd_gpu does not support computation with "
               << num_spatial_axes << " spatial axes";
  }
#undef COL2IM_ND_GPU_KERNEL_CASE
  // Surface any launch/execution error from the kernel above.
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation for the two element types used by Caffe.
template void col2im_nd_gpu<float>(const float* data_col,
    const int num_spatial_axes, const int im_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
    const int num_spatial_axes, const int im_size,
    const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    double* data_im);
} // namespace caffe
|
ec53c108e47d503bd40e38e3dfeb563db53d853e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <limits>
#include <typeinfo>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <hipcub/hipcub.hpp>
#include <cub/util_math.cuh>
#include <hipcub/hipcub.hpp>
#include <cub/device/device_segmented_reduce.cuh>
#include <cub/iterator/constant_input_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
// Test-harness globals (presumably initialized in main, outside this chunk
// -- TODO confirm).
int g_ptx_version;                   // target device's PTX version
int g_sm_count;                      // multiprocessor count of the device
double g_device_giga_bandwidth;      // device memory bandwidth (GB/s)
bool g_verbose = false;              // verbose output of results
bool g_verbose_input = false;        // verbose output of generated input
int g_timing_iterations = 0;         // iterations to run for timing
int g_repeat = 0;                    // repetitions of the test body
CachingDeviceAllocator g_allocator(true);  // caching allocator for device temporaries
// Dispatch types: selects which implementation the overloaded Dispatch()
// entry points below route a reduction to.
enum Backend
{
    CUB,            // CUB method
    CUB_SEGMENTED,  // CUB segmented method
    CUB_CDP,        // GPU-based (dynamic parallelism) dispatch to CUB method
    THRUST,         // Thrust method
};
// Custom max functor: a user-defined reduction operator, used to exercise
// the generic (non-built-in) reduction path.
struct CustomMax
{
    /// Boolean max operator, returns <tt>(a > b) ? a : b</tt>
    template <typename OutputT>
    __host__ __device__ __forceinline__ OutputT operator()(const OutputT &a, const OutputT &b)
    {
        // Spelled out directly rather than via the CUB_MAX macro.
        return (a > b) ? a : b;
    }
};
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceReduce entrypoints
//---------------------------------------------------------------------
/**
 * Dispatch to reduce entrypoint (custom-max).  Runs the device-wide CUB
 * reduction timing_iterations times and returns the status of the last call.
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
    Int2Type<CUB> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int num_items,
    int /*max_segments*/,
    OffsetIteratorT /*d_segment_offsets*/,
    ReductionOpT reduction_op,
    hipStream_t stream,
    bool debug_synchronous)
{
    // The input value type
    typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;

    // The output value type
    typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE),  // OutputT = (if output iterator's value type is void) ?
        typename std::iterator_traits<InputIteratorT>::value_type,                                          // ... then the input iterator's value type,
        typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT;                          // ... else the output iterator's value type

    // Max-identity
    OutputT identity = Traits<InputT>::Lowest();    // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent

    // Invoke kernel to device reduction directly
    hipError_t error = hipSuccess;
    for (int i = 0; i < timing_iterations; ++i)
    {
        error = DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes,
            d_in, d_out, num_items, reduction_op, identity,
            stream, debug_synchronous);
    }

    return error;
}
/**
 * Dispatch to sum entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
    Int2Type<CUB> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int num_items,
    int /*max_segments*/,
    OffsetIteratorT /*d_segment_offsets*/,
    hipcub::Sum /*reduction_op*/,
    hipStream_t stream,
    bool debug_synchronous)
{
    // Run the device-wide sum once per timing iteration; report the status
    // of the last invocation.
    hipError_t status = hipSuccess;
    int iteration = 0;
    while (iteration < timing_iterations)
    {
        status = DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in,
            d_out, num_items, stream, debug_synchronous);
        ++iteration;
    }
    return status;
}
/**
 * Dispatch to min entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
    Int2Type<CUB> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int num_items,
    int /*max_segments*/,
    OffsetIteratorT /*d_segment_offsets*/,
    hipcub::Min /*reduction_op*/,
    hipStream_t stream,
    bool debug_synchronous)
{
    // Run the device-wide min once per timing iteration; report the status
    // of the last invocation.
    hipError_t status = hipSuccess;
    int iteration = 0;
    while (iteration < timing_iterations)
    {
        status = DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in,
            d_out, num_items, stream, debug_synchronous);
        ++iteration;
    }
    return status;
}
/**
 * Dispatch to max entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
    Int2Type<CUB> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int num_items,
    int /*max_segments*/,
    OffsetIteratorT /*d_segment_offsets*/,
    hipcub::Max /*reduction_op*/,
    hipStream_t stream,
    bool debug_synchronous)
{
    // Run the device-wide max once per timing iteration; report the status
    // of the last invocation.
    hipError_t status = hipSuccess;
    int iteration = 0;
    while (iteration < timing_iterations)
    {
        status = DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in,
            d_out, num_items, stream, debug_synchronous);
        ++iteration;
    }
    return status;
}
/**
 * Dispatch to argmin entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
    Int2Type<CUB> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int num_items,
    int /*max_segments*/,
    OffsetIteratorT /*d_segment_offsets*/,
    hipcub::ArgMin /*reduction_op*/,
    hipStream_t stream,
    bool debug_synchronous)
{
    // Run the device-wide argmin once per timing iteration; report the
    // status of the last invocation.
    hipError_t status = hipSuccess;
    int iteration = 0;
    while (iteration < timing_iterations)
    {
        status = DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes,
            d_in, d_out, num_items, stream, debug_synchronous);
        ++iteration;
    }
    return status;
}
/**
 * Dispatch to argmax entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
    Int2Type<CUB> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int num_items,
    int /*max_segments*/,
    OffsetIteratorT /*d_segment_offsets*/,
    hipcub::ArgMax /*reduction_op*/,
    hipStream_t stream,
    bool debug_synchronous)
{
    // Run the device-wide argmax once per timing iteration; report the
    // status of the last invocation.
    hipError_t status = hipSuccess;
    int iteration = 0;
    while (iteration < timing_iterations)
    {
        status = DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes,
            d_in, d_out, num_items, stream, debug_synchronous);
        ++iteration;
    }
    return status;
}
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceSegmentedReduce entrypoints
//---------------------------------------------------------------------
/**
 * Dispatch to reduce entrypoint (custom-max).  Segmented variant: reduces
 * max_segments ranges delimited by d_segment_offsets[i] / [i+1].
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
    Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int /*num_items*/,
    int max_segments,
    OffsetIteratorT d_segment_offsets,
    ReductionOpT reduction_op,
    hipStream_t stream,
    bool debug_synchronous)
{
    // The input value type
    typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;

    // The output value type
    typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE),  // OutputT = (if output iterator's value type is void) ?
        typename std::iterator_traits<InputIteratorT>::value_type,                                          // ... then the input iterator's value type,
        typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT;                          // ... else the output iterator's value type

    // Max-identity
    OutputT identity = Traits<InputT>::Lowest();    // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent

    // Invoke kernel to device reduction directly
    hipError_t error = hipSuccess;
    for (int i = 0; i < timing_iterations; ++i)
    {
        error = DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes,
            d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, reduction_op, identity,
            stream, debug_synchronous);
    }

    return error;
}
/**
 * Dispatch to sum entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
    Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int /*num_items*/,
    int max_segments,
    OffsetIteratorT d_segment_offsets,
    hipcub::Sum /*reduction_op*/,
    hipStream_t stream,
    bool debug_synchronous)
{
    // Run the segmented sum once per timing iteration; segment i spans
    // [d_segment_offsets[i], d_segment_offsets[i + 1]).
    hipError_t status = hipSuccess;
    int iteration = 0;
    while (iteration < timing_iterations)
    {
        status = DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
            d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
            stream, debug_synchronous);
        ++iteration;
    }
    return status;
}
/**
 * Dispatch to min entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
    Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int /*num_items*/,
    int max_segments,
    OffsetIteratorT d_segment_offsets,
    hipcub::Min /*reduction_op*/,
    hipStream_t stream,
    bool debug_synchronous)
{
    // Run the segmented min once per timing iteration; segment i spans
    // [d_segment_offsets[i], d_segment_offsets[i + 1]).
    hipError_t status = hipSuccess;
    int iteration = 0;
    while (iteration < timing_iterations)
    {
        status = DeviceSegmentedReduce::Min(d_temp_storage, temp_storage_bytes,
            d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
            stream, debug_synchronous);
        ++iteration;
    }
    return status;
}
/**
 * Dispatch to max entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
    Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int /*num_items*/,
    int max_segments,
    OffsetIteratorT d_segment_offsets,
    hipcub::Max /*reduction_op*/,
    hipStream_t stream,
    bool debug_synchronous)
{
    // Run the segmented max once per timing iteration; segment i spans
    // [d_segment_offsets[i], d_segment_offsets[i + 1]).
    hipError_t status = hipSuccess;
    int iteration = 0;
    while (iteration < timing_iterations)
    {
        status = DeviceSegmentedReduce::Max(d_temp_storage, temp_storage_bytes,
            d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
            stream, debug_synchronous);
        ++iteration;
    }
    return status;
}
/**
 * Dispatch to argmin entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
    Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int /*num_items*/,
    int max_segments,
    OffsetIteratorT d_segment_offsets,
    hipcub::ArgMin /*reduction_op*/,
    hipStream_t stream,
    bool debug_synchronous)
{
    // Run the segmented argmin once per timing iteration; segment i spans
    // [d_segment_offsets[i], d_segment_offsets[i + 1]).
    hipError_t status = hipSuccess;
    int iteration = 0;
    while (iteration < timing_iterations)
    {
        status = DeviceSegmentedReduce::ArgMin(d_temp_storage, temp_storage_bytes,
            d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
            stream, debug_synchronous);
        ++iteration;
    }
    return status;
}
/**
 * Dispatch to argmax entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
    Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int /*num_items*/,
    int max_segments,
    OffsetIteratorT d_segment_offsets,
    hipcub::ArgMax /*reduction_op*/,
    hipStream_t stream,
    bool debug_synchronous)
{
    // Run the segmented argmax once per timing iteration; segment i spans
    // [d_segment_offsets[i], d_segment_offsets[i + 1]).
    hipError_t status = hipSuccess;
    int iteration = 0;
    while (iteration < timing_iterations)
    {
        status = DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes,
            d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
            stream, debug_synchronous);
        ++iteration;
    }
    return status;
}
//---------------------------------------------------------------------
// Dispatch to different Thrust entrypoints
//---------------------------------------------------------------------
/**
 * Dispatch to reduction entrypoint (min or max specialization).
 * Mimics the CUB two-pass protocol: the first call (null workspace) is a
 * size query; the second performs the reduction via thrust::reduce.
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
hipError_t Dispatch(
    Int2Type<THRUST> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int num_items,
    int /*max_segments*/,
    OffsetIteratorT /*d_segment_offsets*/,
    ReductionOpT reduction_op,
    hipStream_t /*stream*/,
    bool /*debug_synchronous*/)
{
    // The output value type
    typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE),  // OutputT = (if output iterator's value type is void) ?
        typename std::iterator_traits<InputIteratorT>::value_type,                                          // ... then the input iterator's value type,
        typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT;                          // ... else the output iterator's value type

    if (d_temp_storage == 0)
    {
        // Size-query pass: report a dummy nonzero workspace size so the
        // caller proceeds to the second (compute) pass.
        temp_storage_bytes = 1;
    }
    else
    {
        // Seed the reduction with the first input element so min/max is
        // well-defined without a type-specific identity value.
        OutputT init;
        CubDebugExit(hipMemcpy(&init, d_in + 0, sizeof(OutputT), hipMemcpyDeviceToHost));

        thrust::device_ptr<OutputT> d_in_wrapper(d_in);
        OutputT retval;
        for (int i = 0; i < timing_iterations; ++i)
        {
            retval = thrust::reduce(d_in_wrapper, d_in_wrapper + num_items, init, reduction_op);
        }

        // Copy the host-side result back out unless output is a discard iterator.
        if (!Equals<OutputIteratorT, DiscardOutputIterator<int> >::VALUE)
            CubDebugExit(hipMemcpy(d_out, &retval, sizeof(OutputT), hipMemcpyHostToDevice));
    }

    return hipSuccess;
}
/**
 * Dispatch to reduction entrypoint (sum specialization).
 * Mimics the CUB two-pass protocol: the first call (null workspace) is a
 * size query; the second performs the reduction via thrust::reduce, which
 * defaults to a zero-initialized plus reduction.
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
hipError_t Dispatch(
    Int2Type<THRUST> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    hipError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int num_items,
    int /*max_segments*/,
    OffsetIteratorT /*d_segment_offsets*/,
    Sum /*reduction_op*/,
    hipStream_t /*stream*/,
    bool /*debug_synchronous*/)
{
    // The output value type
    typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE),  // OutputT = (if output iterator's value type is void) ?
        typename std::iterator_traits<InputIteratorT>::value_type,                                          // ... then the input iterator's value type,
        typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT;                          // ... else the output iterator's value type

    if (d_temp_storage == 0)
    {
        // Size-query pass: report a dummy nonzero workspace size so the
        // caller proceeds to the second (compute) pass.
        temp_storage_bytes = 1;
    }
    else
    {
        thrust::device_ptr<OutputT> d_in_wrapper(d_in);
        OutputT retval;
        for (int i = 0; i < timing_iterations; ++i)
        {
            retval = thrust::reduce(d_in_wrapper, d_in_wrapper + num_items);
        }

        // Copy the host-side result back out unless output is a discard iterator.
        if (!Equals<OutputIteratorT, DiscardOutputIterator<int> >::VALUE)
            CubDebugExit(hipMemcpy(d_out, &retval, sizeof(OutputT), hipMemcpyHostToDevice));
    }

    return hipSuccess;
}
//---------------------------------------------------------------------
// CUDA nested-parallelism test kernel
//---------------------------------------------------------------------
/**
 * Simple wrapper kernel to invoke DeviceReduce from device code
 * (exercises CUDA dynamic parallelism).
 */
template <
    typename InputIteratorT,
    typename OutputIteratorT,
    typename OffsetIteratorT,
    typename ReductionOpT>
__global__ void CnpDispatchKernel(
    int timing_iterations,
    size_t *d_temp_storage_bytes,
    hipError_t *d_cdp_error,
    void* d_temp_storage,
    size_t temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int num_items,
    int max_segments,
    OffsetIteratorT d_segment_offsets,
    ReductionOpT reduction_op,
    bool debug_synchronous)
{
#ifndef CUB_CDP
    // Built without dynamic-parallelism support: silence unused-parameter
    // warnings and report "not supported" through the device-visible slot.
    (void)timing_iterations;
    (void)d_temp_storage_bytes;
    (void)d_cdp_error;
    (void)d_temp_storage;
    (void)temp_storage_bytes;
    (void)d_in;
    (void)d_out;
    (void)num_items;
    (void)max_segments;
    (void)d_segment_offsets;
    (void)reduction_op;
    (void)debug_synchronous;
    *d_cdp_error = hipErrorNotSupported;
#else
    // Re-enter the CUB Dispatch path from device code and publish both the
    // error and the computed temp-storage requirement back for the host.
    *d_cdp_error = Dispatch(Int2Type<CUB>(), timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
        d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, 0, debug_synchronous);

    *d_temp_storage_bytes = temp_storage_bytes;
#endif
}
/**
 * Dispatch to CUB_CDP kernel: launches a single-thread kernel that performs
 * the dispatch on the device, then copies the resulting temp-storage size
 * and error code back to the host.
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
    Int2Type<CUB_CDP> dispatch_to,
    int timing_iterations,
    size_t *d_temp_storage_bytes,
    hipError_t *d_cdp_error,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int num_items,
    int max_segments,
    OffsetIteratorT d_segment_offsets,
    ReductionOpT reduction_op,
    hipStream_t stream,
    bool debug_synchronous)
{
    // Invoke kernel to invoke device-side dispatch (1 block, 1 thread)
    hipLaunchKernelGGL(( CnpDispatchKernel), dim3(1),dim3(1), 0, 0, timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
        d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, debug_synchronous);

    // Copy out temp_storage_bytes (blocking copy also synchronizes with the kernel)
    CubDebugExit(hipMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, hipMemcpyDeviceToHost));

    // Copy out error
    hipError_t retval;
    CubDebugExit(hipMemcpy(&retval, d_cdp_error, sizeof(hipError_t) * 1, hipMemcpyDeviceToHost));

    return retval;
}
//---------------------------------------------------------------------
// Problem generation
//---------------------------------------------------------------------
/// Initialize problem: fill h_in[0..num_items) according to gen_mode,
/// echoing the generated values when --v2 verbose-input logging is on.
template <typename InputT>
void Initialize(
    GenMode gen_mode,
    InputT *h_in,
    int num_items)
{
    for (int idx = 0; idx < num_items; ++idx)
        InitValue(gen_mode, h_in[idx], idx);

    // Optionally echo the generated input
    if (g_verbose_input)
    {
        printf("Input:\n");
        DisplayResults(h_in, num_items);
        printf("\n\n");
    }
}
/// Solve problem (max/custom-max functor)
template <typename ReductionOpT, typename InputT, typename _OutputT>
struct Solution
{
    typedef _OutputT OutputT;

    // Sequential host reference: reduce each segment with reduction_op,
    // seeded with the lowest representable input value.
    template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
    static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
        ReductionOpT reduction_op)
    {
        for (int seg = 0; seg < num_segments; ++seg)
        {
            // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
            OutputT running = Traits<InputT>::Lowest();
            for (int item = h_segment_offsets[seg]; item < h_segment_offsets[seg + 1]; ++item)
                running = reduction_op(running, OutputT(h_in[item]));
            h_reference[seg] = running;
        }
    }
};
/// Solve problem (min functor)
template <typename InputT, typename _OutputT>
struct Solution<hipcub::Min, InputT, _OutputT>
{
    typedef _OutputT OutputT;

    // Sequential host reference: per-segment minimum, seeded with the
    // largest representable input value.
    template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
    static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
        hipcub::Min reduction_op)
    {
        for (int seg = 0; seg < num_segments; ++seg)
        {
            // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent
            OutputT running = Traits<InputT>::Max();
            for (int item = h_segment_offsets[seg]; item < h_segment_offsets[seg + 1]; ++item)
                running = reduction_op(running, OutputT(h_in[item]));
            h_reference[seg] = running;
        }
    }
};
/// Solve problem (sum functor)
template <typename InputT, typename _OutputT>
struct Solution<hipcub::Sum, InputT, _OutputT>
{
    typedef _OutputT OutputT;

    // Sequential host reference: per-segment sum. The accumulator is
    // seeded via InitValue with integer seed 0.
    template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
    static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
        hipcub::Sum reduction_op)
    {
        for (int seg = 0; seg < num_segments; ++seg)
        {
            OutputT running;
            InitValue(INTEGER_SEED, running, 0);
            for (int item = h_segment_offsets[seg]; item < h_segment_offsets[seg + 1]; ++item)
                running = reduction_op(running, OutputT(h_in[item]));
            h_reference[seg] = running;
        }
    }
};
/// Solve problem (argmin functor)
template <typename InputValueT, typename OutputValueT>
struct Solution<hipcub::ArgMin, InputValueT, OutputValueT>
{
// Reference result pairs a segment-relative item index with its value
typedef KeyValuePair<int, OutputValueT> OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
hipcub::ArgMin reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
// Seed aggregate with key 1 paired with the maximum representable value
OutputT aggregate(1, Traits<InputValueT>::Max()); // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent
for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j)
{
// Keys are offsets relative to the start of the segment
OutputT item(j - h_segment_offsets[i], OutputValueT(h_in[j]));
aggregate = reduction_op(aggregate, item);
}
h_reference[i] = aggregate;
}
}
};
/// Solve problem (argmax functor)
template <typename InputValueT, typename OutputValueT>
struct Solution<hipcub::ArgMax, InputValueT, OutputValueT>
{
// Reference result pairs a segment-relative item index with its value
typedef KeyValuePair<int, OutputValueT> OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
hipcub::ArgMax reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
// Seed aggregate with key 1 paired with the lowest representable value
OutputT aggregate(1, Traits<InputValueT>::Lowest()); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j)
{
// Keys are offsets relative to the start of the segment
OutputT item(j - h_segment_offsets[i], OutputValueT(h_in[j]));
aggregate = reduction_op(aggregate, item);
}
h_reference[i] = aggregate;
}
}
};
//---------------------------------------------------------------------
// Problem generation
//---------------------------------------------------------------------
/// Test DeviceReduce for a given problem input
//
// Sizes and allocates temp storage, runs one warmup/correctness pass
// against the precomputed h_reference, optionally times
// g_timing_iterations dispatches, then frees device memory and asserts
// the comparison result.
template <
typename BackendT,
typename DeviceInputIteratorT,
typename DeviceOutputIteratorT,
typename HostReferenceIteratorT,
typename OffsetT,
typename OffsetIteratorT,
typename ReductionOpT>
void Test(
BackendT backend,
DeviceInputIteratorT d_in,
DeviceOutputIteratorT d_out,
OffsetT num_items,
OffsetT num_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
HostReferenceIteratorT h_reference)
{
// Input data types
typedef typename std::iterator_traits<DeviceInputIteratorT>::value_type InputT;
// Allocate CUB_CDP device arrays for temp storage size and error
size_t *d_temp_storage_bytes = NULL;
hipError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(hipError_t) * 1));
// Inquire temp device storage (NULL d_temp_storage => size-only query)
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(Dispatch(backend, 1,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
reduction_op, 0, true));
// Allocate temp device storage
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Run warmup/correctness iteration
CubDebugExit(Dispatch(backend, 1,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
reduction_op, 0, true));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(h_reference, d_out, num_segments, g_verbose, g_verbose);
printf("\t%s", compare ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance timing (skipped when g_timing_iterations == 0)
if (g_timing_iterations > 0)
{
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(backend, g_timing_iterations,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
reduction_op, 0, false));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance (rate in items; bandwidth counts input bytes only)
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = giga_rate * sizeof(InputT);
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak",
avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
}
// Free device allocations before asserting
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, compare);
}
/// Test DeviceReduce
//
// Computes the host reference with the Solution<> specialization for
// ReductionOpT, then runs Test() against a freshly zeroed device output
// buffer.
template <
Backend BACKEND,
typename OutputValueT,
typename HostInputIteratorT,
typename DeviceInputIteratorT,
typename OffsetT,
typename OffsetIteratorT,
typename ReductionOpT>
void SolveAndTest(
HostInputIteratorT h_in,
DeviceInputIteratorT d_in,
OffsetT num_items,
OffsetT num_segments,
OffsetIteratorT h_segment_offsets,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op)
{
typedef typename std::iterator_traits<DeviceInputIteratorT>::value_type InputValueT;
typedef Solution<ReductionOpT, InputValueT, OutputValueT> SolutionT;
typedef typename SolutionT::OutputT OutputT;
printf("\n\n%s hipcub::DeviceReduce<%s> %d items (%s), %d segments\n",
(BACKEND == CUB_CDP) ? "CUB_CDP" : (BACKEND == THRUST) ? "Thrust" : (BACKEND == CUB_SEGMENTED) ? "CUB_SEGMENTED" : "CUB",
typeid(ReductionOpT).name(), num_items, typeid(HostInputIteratorT).name(), num_segments);
fflush(stdout);
// Allocate and solve solution
OutputT *h_reference = new OutputT[num_segments];
SolutionT::Solve(h_in, h_reference, num_segments, h_segment_offsets, reduction_op);
// // Run with discard iterator
// DiscardOutputIterator<OffsetT> discard_itr;
// Test(Int2Type<BACKEND>(), d_in, discard_itr, num_items, num_segments, d_segment_offsets, reduction_op, h_reference);
// Run with output data (zeroed so stale device results cannot pass)
OutputT *d_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(OutputT) * num_segments));
CubDebugExit(hipMemset(d_out, 0, sizeof(OutputT) * num_segments));
Test(Int2Type<BACKEND>(), d_in, d_out, num_items, num_segments, d_segment_offsets, reduction_op, h_reference);
// Cleanup
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (h_reference) delete[] h_reference;
}
/// Test specific problem type
///
/// Generates input of the requested type/size, mirrors the values and
/// segment offsets to the device, runs SolveAndTest for the given
/// backend and reduction operator, then frees all allocations.
template <
    Backend BACKEND,
    typename InputT,
    typename OutputT,
    typename OffsetT,
    typename ReductionOpT>
void TestProblem(
    OffsetT num_items,
    OffsetT num_segments,
    GenMode gen_mode,
    ReductionOpT reduction_op)
{
    printf("\n\nInitializing %d %s->%s (gen mode %d)... ", num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode);
    fflush(stdout);  // was flushed twice in a row; once is sufficient

    // Initialize host value data
    InputT* h_in = new InputT[num_items];
    Initialize(gen_mode, h_in, num_items);

    // Initialize segment data (num_segments + 1 fencepost offsets)
    OffsetT *h_segment_offsets = new OffsetT[num_segments + 1];
    InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input);

    // Initialize device data
    OffsetT *d_segment_offsets = NULL;
    InputT *d_in = NULL;
    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items));
    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (num_segments + 1)));
    CubDebugExit(hipMemcpy(d_in, h_in, sizeof(InputT) * num_items, hipMemcpyHostToDevice));
    CubDebugExit(hipMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), hipMemcpyHostToDevice));

    SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, reduction_op);

    // Cleanup (host and device)
    if (h_segment_offsets) delete[] h_segment_offsets;
    if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets));
    if (h_in) delete[] h_in;
    if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
}
/// Test different operators
//
// Runs the full operator matrix (custom max, Sum, Min, ArgMin, Max,
// ArgMax) over one input/segment configuration. Order is preserved so
// console output stays comparable across runs.
template <
Backend BACKEND,
typename OutputT,
typename HostInputIteratorT,
typename DeviceInputIteratorT,
typename OffsetT,
typename OffsetIteratorT>
void TestByOp(
HostInputIteratorT h_in,
DeviceInputIteratorT d_in,
OffsetT num_items,
OffsetT num_segments,
OffsetIteratorT h_segment_offsets,
OffsetIteratorT d_segment_offsets)
{
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, CustomMax());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Sum());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Min());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMin());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Max());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMax());
}
/// Test different backends
//
// Exercises single-segment reductions (CUB host dispatch, optional CDP
// device dispatch, and a non-page-aligned variant), then sweeps
// segmented problem sizes with both raw-pointer and transform-iterator
// segment offsets.
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestByBackend(
OffsetT num_items,
OffsetT max_segments,
GenMode gen_mode)
{
// Initialize host data
printf("\n\nInitializing %d %s -> %s (gen mode %d)... ",
num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout);
InputT *h_in = new InputT[num_items];
OffsetT *h_segment_offsets = new OffsetT[max_segments + 1];
Initialize(gen_mode, h_in, num_items);
// Initialize device data
InputT *d_in = NULL;
OffsetT *d_segment_offsets = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (max_segments + 1)));
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(InputT) * num_items, hipMemcpyHostToDevice));
//
// Test single-segment implementations
//
InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input);
// Page-aligned-input tests
TestByOp<CUB, OutputT>(h_in, d_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL); // Host-dispatch
#ifdef CUB_CDP
TestByOp<CUB_CDP, OutputT>(h_in, d_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL); // Device-dispatch
#endif
// Non-page-aligned-input tests (input offset by one element)
if (num_items > 1)
{
InitializeSegments(num_items - 1, 1, h_segment_offsets, g_verbose_input);
TestByOp<CUB, OutputT>(h_in + 1, d_in + 1, num_items - 1, 1, h_segment_offsets, (OffsetT*) NULL);
}
//
// Test segmented implementation
//
// Right now we assign a single thread block to each segment, so lets keep it to under 128K items per segment
int max_items_per_segment = 128000;
for (int num_segments = cub::DivideAndRoundUp(num_items, max_items_per_segment);
num_segments < max_segments;
num_segments = (num_segments * 32) + 1)
{
// Test with segment pointer
InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input);
CubDebugExit(hipMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), hipMemcpyHostToDevice));
TestByOp<CUB_SEGMENTED, OutputT>(
h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets);
// Test with segment iterator (identity transform over the same offsets)
typedef CastOp<OffsetT> IdentityOpT;
IdentityOpT identity_op;
TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> h_segment_offsets_itr(
h_segment_offsets,
identity_op);
TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> d_segment_offsets_itr(
d_segment_offsets,
identity_op);
TestByOp<CUB_SEGMENTED, OutputT>(
h_in, d_in, num_items, num_segments, h_segment_offsets_itr, d_segment_offsets_itr);
}
if (h_in) delete[] h_in;
if (h_segment_offsets) delete[] h_segment_offsets;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets));
}
/// Test different input-generation modes
//
// Pointer-based tests across UNIFORM / INTEGER_SEED / RANDOM generation,
// plus an iterator-support check using a ConstantInputIterator with Sum.
// The same constant iterator is passed as both host and device input.
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestByGenMode(
OffsetT num_items,
OffsetT max_segments)
{
//
// Test pointer support using different input-generation modes
//
TestByBackend<InputT, OutputT>(num_items, max_segments, UNIFORM);
TestByBackend<InputT, OutputT>(num_items, max_segments, INTEGER_SEED);
TestByBackend<InputT, OutputT>(num_items, max_segments, RANDOM);
//
// Test iterator support using a constant-iterator and SUM
//
InputT val;
InitValue(UNIFORM, val, 0);
ConstantInputIterator<InputT, OffsetT> h_in(val);
// Single segment => two fencepost offsets
OffsetT *h_segment_offsets = new OffsetT[1 + 1];
InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input);
SolveAndTest<CUB, OutputT>(h_in, h_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL, Sum());
#ifdef CUB_CDP
SolveAndTest<CUB_CDP, OutputT>(h_in, h_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL, Sum());
#endif
if (h_segment_offsets) delete[] h_segment_offsets;
}
/// Test different problem sizes
//
// Functor invoked through MaxPolicy::Invoke so that ActivePolicyT
// reflects the tuning policy selected for the current PTX version; the
// policy's tile geometry drives the white-box boundary tests below.
template <
typename InputT,
typename OutputT,
typename OffsetT>
struct TestBySize
{
OffsetT max_items;    // upper bound for generated problem sizes
OffsetT max_segments; // upper bound for segmented sweeps
TestBySize(OffsetT max_items, OffsetT max_segments) :
max_items(max_items),
max_segments(max_segments)
{}
template <typename ActivePolicyT>
hipError_t Invoke()
{
//
// Black-box testing on all backends
//
// Test 0, 1, many
TestByGenMode<InputT, OutputT>(0, max_segments);
TestByGenMode<InputT, OutputT>(1, max_segments);
TestByGenMode<InputT, OutputT>(max_items, max_segments);
// Test random problem sizes from a log-distribution [8, max_items-ish)
int num_iterations = 8;
double max_exp = log(double(max_items)) / log(double(2.0));
for (int i = 0; i < num_iterations; ++i)
{
OffsetT num_items = (OffsetT) pow(2.0, RandomValue(max_exp - 3.0) + 3.0);
TestByGenMode<InputT, OutputT>(num_items, max_segments);
}
//
// White-box testing of single-segment problems around specific sizes
//
// Tile-boundaries: multiple blocks, one tile per block
OffsetT tile_size = ActivePolicyT::ReducePolicy::BLOCK_THREADS * ActivePolicyT::ReducePolicy::ITEMS_PER_THREAD;
TestProblem<CUB, InputT, OutputT>(tile_size * 4, 1, RANDOM, Sum());
TestProblem<CUB, InputT, OutputT>(tile_size * 4 + 1, 1, RANDOM, Sum());
TestProblem<CUB, InputT, OutputT>(tile_size * 4 - 1, 1, RANDOM, Sum());
// Tile-boundaries: multiple blocks, multiple tiles per block
OffsetT sm_occupancy = 32;
OffsetT occupancy = tile_size * sm_occupancy * g_sm_count;
TestProblem<CUB, InputT, OutputT>(occupancy, 1, RANDOM, Sum());
TestProblem<CUB, InputT, OutputT>(occupancy + 1, 1, RANDOM, Sum());
TestProblem<CUB, InputT, OutputT>(occupancy - 1, 1, RANDOM, Sum());
return hipSuccess;
}
};
/// Test problem type
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestType(
OffsetT max_items,
OffsetT max_segments)
{
typedef typename DeviceReducePolicy<InputT, OutputT, OffsetT, hipcub::Sum>::MaxPolicy MaxPolicyT;
TestBySize<InputT, OutputT, OffsetT> dispatch(max_items, max_segments);
MaxPolicyT::Invoke(g_ptx_version, dispatch);
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
 * Main
 *
 * Parses command-line options, initializes the device, and runs one of
 * three suites selected at compile time: CUB_TEST_MINIMAL (basic),
 * CUB_TEST_BENCHMARK (CUB vs. Thrust comparison), or the thorough
 * type sweep (default).
 */
int main(int argc, char** argv)
{
typedef int OffsetT;
// Default problem bounds; overridable via --n and --s
OffsetT max_items = 27000000;
OffsetT max_segments = 34000;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
g_verbose_input = args.CheckCmdLineFlag("v2");
args.GetCmdLineArgument("n", max_items);
args.GetCmdLineArgument("s", max_segments);
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items> "
"[--s=<num segments> "
"[--i=<timing iterations> "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--v] "
"[--cdp]"
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
g_device_giga_bandwidth = args.device_giga_bandwidth;
// Get ptx version
CubDebugExit(PtxVersion(g_ptx_version));
// Get SM count
g_sm_count = args.deviceProp.multiProcessorCount;
#ifdef CUB_TEST_MINIMAL
// Compile/run basic test
TestProblem<CUB, char, int>( max_items, 1, RANDOM_BIT, Sum());
TestProblem<CUB, short, int>( max_items, 1, RANDOM_BIT, Sum());
printf("\n-------------------------------\n");
TestProblem<CUB, int, int>( max_items, 1, RANDOM_BIT, Sum());
TestProblem<CUB, long long, long long>( max_items, 1, RANDOM_BIT, Sum());
printf("\n-------------------------------\n");
TestProblem<CUB, float, float>( max_items, 1, RANDOM_BIT, Sum());
TestProblem<CUB, double, double>( max_items, 1, RANDOM_BIT, Sum());
printf("\n-------------------------------\n");
TestProblem<CUB_SEGMENTED, int, int>(max_items, max_segments, RANDOM_BIT, Sum());
#elif defined(CUB_TEST_BENCHMARK)
// Compile/run quick comparison tests (item counts scaled by type width)
TestProblem<CUB, char, char>( max_items * 4, 1, UNIFORM, Sum());
TestProblem<THRUST, char, char>( max_items * 4, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, short, short>( max_items * 2, 1, UNIFORM, Sum());
TestProblem<THRUST, short, short>( max_items * 2, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, int, int>( max_items, 1, UNIFORM, Sum());
TestProblem<THRUST, int, int>( max_items, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, long long, long long>( max_items / 2, 1, UNIFORM, Sum());
TestProblem<THRUST, long long, long long>( max_items / 2, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, TestFoo, TestFoo>( max_items / 4, 1, UNIFORM, Max());
TestProblem<THRUST, TestFoo, TestFoo>( max_items / 4, 1, UNIFORM, Max());
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// Test different input types
TestType<char, char>(max_items, max_segments);
TestType<unsigned char, unsigned char>(max_items, max_segments);
TestType<char, int>(max_items, max_segments);
TestType<short, short>(max_items, max_segments);
TestType<int, int>(max_items, max_segments);
TestType<long, long>(max_items, max_segments);
TestType<long long, long long>(max_items, max_segments);
TestType<uchar2, uchar2>(max_items, max_segments);
TestType<uint2, uint2>(max_items, max_segments);
TestType<ulonglong2, ulonglong2>(max_items, max_segments);
TestType<ulonglong4, ulonglong4>(max_items, max_segments);
TestType<TestFoo, TestFoo>(max_items, max_segments);
TestType<TestBar, TestBar>(max_items, max_segments);
}
#endif
printf("\n");
return 0;
}
| ec53c108e47d503bd40e38e3dfeb563db53d853e.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <limits>
#include <typeinfo>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <cub/util_allocator.cuh>
#include <cub/util_math.cuh>
#include <cub/device/device_reduce.cuh>
#include <cub/device/device_segmented_reduce.cuh>
#include <cub/iterator/constant_input_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
int g_ptx_version;                          // PTX version of the target device
int g_sm_count;                             // Multiprocessor count of the target device
double g_device_giga_bandwidth;             // Device bandwidth used for percent-of-peak reporting
bool g_verbose = false;                     // Verbose output flag
bool g_verbose_input = false;               // Also display generated input values when set
int g_timing_iterations = 0;                // Timed iterations per test (0 = correctness only)
int g_repeat = 0;                           // Repetitions of the entire test suite
CachingDeviceAllocator g_allocator(true);   // Caching allocator for device memory
// Dispatch types
//
// Int2Type<Backend> tag values select among the overloaded Dispatch()
// entrypoints below.
enum Backend
{
CUB, // CUB method
CUB_SEGMENTED, // CUB segmented method
CUB_CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
THRUST, // Thrust method
};
// Custom max functor (a user-defined binary reduction operator)
struct CustomMax
{
    /// Boolean max operator, returns <tt>(a > b) ? a : b</tt>
    template <typename OutputT>
    __host__ __device__ __forceinline__ OutputT operator()(const OutputT &a, const OutputT &b)
    {
        return (a > b) ? a : b;
    }
};
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceReduce entrypoints
//---------------------------------------------------------------------
/**
 * Dispatch to reduce entrypoint (custom-max)
 *
 * Runs 'timing_iterations' back-to-back DeviceReduce::Reduce calls and
 * returns the status of the last one (cudaSuccess if zero iterations).
 * Commented-out parameter names mark unused CDP plumbing.
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
OffsetIteratorT /*d_segment_offsets*/,
ReductionOpT reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
// Max-identity
OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, reduction_op, identity,
stream, debug_synchronous);
}
return error;
}
/**
 * Dispatch to sum entrypoint
 *
 * Runs 'timing_iterations' back-to-back DeviceReduce::Sum calls and
 * returns the status of the last one (cudaSuccess if zero iterations).
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
OffsetIteratorT /*d_segment_offsets*/,
cub::Sum /*reduction_op*/,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
 * Dispatch to min entrypoint
 *
 * Runs 'timing_iterations' back-to-back DeviceReduce::Min calls and
 * returns the status of the last one (cudaSuccess if zero iterations).
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
OffsetIteratorT /*d_segment_offsets*/,
cub::Min /*reduction_op*/,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
 * Dispatch to max entrypoint
 *
 * Runs 'timing_iterations' back-to-back DeviceReduce::Max calls and
 * returns the status of the last one (cudaSuccess if zero iterations).
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
OffsetIteratorT /*d_segment_offsets*/,
cub::Max /*reduction_op*/,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
 * Dispatch to argmin entrypoint
 *
 * Runs 'timing_iterations' back-to-back DeviceReduce::ArgMin calls and
 * returns the status of the last one (cudaSuccess if zero iterations).
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
OffsetIteratorT /*d_segment_offsets*/,
cub::ArgMin /*reduction_op*/,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
 * Dispatch to argmax entrypoint
 *
 * Runs 'timing_iterations' back-to-back DeviceReduce::ArgMax calls and
 * returns the status of the last one (cudaSuccess if zero iterations).
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
OffsetIteratorT /*d_segment_offsets*/,
cub::ArgMax /*reduction_op*/,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceSegmentedReduce entrypoints
//---------------------------------------------------------------------
/**
 * Dispatch to reduce entrypoint (custom-max)
 *
 * Segmented variant: reduces 'max_segments' segments whose begin/end
 * offsets are d_segment_offsets and d_segment_offsets + 1. Runs
 * 'timing_iterations' passes and returns the last status.
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
// Max-identity
OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, reduction_op, identity,
stream, debug_synchronous);
}
return error;
}
/**
 * Dispatch to sum entrypoint
 *
 * Segmented variant: sums 'max_segments' segments whose begin/end
 * offsets are d_segment_offsets and d_segment_offsets + 1. Runs
 * 'timing_iterations' passes and returns the last status.
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
OffsetIteratorT d_segment_offsets,
cub::Sum /*reduction_op*/,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
stream, debug_synchronous);
}
return error;
}
/**
 * Dispatch to min entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
    Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    cudaError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int /*num_items*/,
    int max_segments,
    OffsetIteratorT d_segment_offsets,
    cub::Min /*reduction_op*/,
    cudaStream_t stream,
    bool debug_synchronous)
{
    // Invoke the device-wide segmented min timing_iterations times; the
    // status of the final invocation is reported to the caller
    cudaError_t status = cudaSuccess;
    for (int iter = timing_iterations; iter > 0; --iter)
    {
        status = DeviceSegmentedReduce::Min(
            d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments,
            d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous);
    }
    return status;
}
/**
 * Dispatch to max entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
    Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    cudaError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int /*num_items*/,
    int max_segments,
    OffsetIteratorT d_segment_offsets,
    cub::Max /*reduction_op*/,
    cudaStream_t stream,
    bool debug_synchronous)
{
    // Invoke the device-wide segmented max timing_iterations times; the
    // status of the final invocation is reported to the caller
    cudaError_t status = cudaSuccess;
    for (int iter = timing_iterations; iter > 0; --iter)
    {
        status = DeviceSegmentedReduce::Max(
            d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments,
            d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous);
    }
    return status;
}
/**
 * Dispatch to argmin entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
    Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    cudaError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int /*num_items*/,
    int max_segments,
    OffsetIteratorT d_segment_offsets,
    cub::ArgMin /*reduction_op*/,
    cudaStream_t stream,
    bool debug_synchronous)
{
    // Invoke the device-wide segmented argmin timing_iterations times; the
    // status of the final invocation is reported to the caller
    cudaError_t status = cudaSuccess;
    for (int iter = timing_iterations; iter > 0; --iter)
    {
        status = DeviceSegmentedReduce::ArgMin(
            d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments,
            d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous);
    }
    return status;
}
/**
 * Dispatch to argmax entrypoint
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
    Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    cudaError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int /*num_items*/,
    int max_segments,
    OffsetIteratorT d_segment_offsets,
    cub::ArgMax /*reduction_op*/,
    cudaStream_t stream,
    bool debug_synchronous)
{
    // Invoke the device-wide segmented argmax timing_iterations times; the
    // status of the final invocation is reported to the caller
    cudaError_t status = cudaSuccess;
    for (int iter = timing_iterations; iter > 0; --iter)
    {
        status = DeviceSegmentedReduce::ArgMax(
            d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments,
            d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous);
    }
    return status;
}
//---------------------------------------------------------------------
// Dispatch to different Thrust entrypoints
//---------------------------------------------------------------------
/**
 * Dispatch to reduction entrypoint (min or max specialization)
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
cudaError_t Dispatch(
    Int2Type<THRUST> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    cudaError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int num_items,
    int /*max_segments*/,
    OffsetIteratorT /*d_segment_offsets*/,
    ReductionOpT reduction_op,
    cudaStream_t /*stream*/,
    bool /*debug_synchronous*/)
{
    // Output value type: fall back to the input iterator's value type when
    // the output iterator reports void (e.g. a discard iterator)
    typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE),
        typename std::iterator_traits<InputIteratorT>::value_type,
        typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT;
    if (d_temp_storage == 0)
    {
        // Size-query pass: thrust manages its own temporaries, so any
        // nonzero size satisfies the caller's allocation step
        temp_storage_bytes = 1;
        return cudaSuccess;
    }
    // Seed the reduction with the first input element
    OutputT init;
    CubDebugExit(cudaMemcpy(&init, d_in + 0, sizeof(OutputT), cudaMemcpyDeviceToHost));
    thrust::device_ptr<OutputT> d_in_begin(d_in);
    OutputT result;
    for (int iter = 0; iter < timing_iterations; ++iter)
    {
        result = thrust::reduce(d_in_begin, d_in_begin + num_items, init, reduction_op);
    }
    // Publish the result unless the caller is discarding output
    if (!Equals<OutputIteratorT, DiscardOutputIterator<int> >::VALUE)
        CubDebugExit(cudaMemcpy(d_out, &result, sizeof(OutputT), cudaMemcpyHostToDevice));
    return cudaSuccess;
}
/**
 * Dispatch to reduction entrypoint (sum specialization)
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
cudaError_t Dispatch(
    Int2Type<THRUST> /*dispatch_to*/,
    int timing_iterations,
    size_t */*d_temp_storage_bytes*/,
    cudaError_t */*d_cdp_error*/,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int num_items,
    int /*max_segments*/,
    OffsetIteratorT /*d_segment_offsets*/,
    Sum /*reduction_op*/,
    cudaStream_t /*stream*/,
    bool /*debug_synchronous*/)
{
    // Output value type: fall back to the input iterator's value type when
    // the output iterator reports void (e.g. a discard iterator)
    typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE),
        typename std::iterator_traits<InputIteratorT>::value_type,
        typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT;
    if (d_temp_storage == 0)
    {
        // Size-query pass: thrust manages its own temporaries, so any
        // nonzero size satisfies the caller's allocation step
        temp_storage_bytes = 1;
        return cudaSuccess;
    }
    thrust::device_ptr<OutputT> d_in_begin(d_in);
    OutputT result;
    for (int iter = 0; iter < timing_iterations; ++iter)
    {
        result = thrust::reduce(d_in_begin, d_in_begin + num_items);
    }
    // Publish the result unless the caller is discarding output
    if (!Equals<OutputIteratorT, DiscardOutputIterator<int> >::VALUE)
        CubDebugExit(cudaMemcpy(d_out, &result, sizeof(OutputT), cudaMemcpyHostToDevice));
    return cudaSuccess;
}
//---------------------------------------------------------------------
// CUDA nested-parallelism test kernel
//---------------------------------------------------------------------
/**
 * Simple wrapper kernel to invoke DeviceReduce
 *
 * Runs the host-style CUB dispatch from device code (CUDA dynamic
 * parallelism). Launched <<<1,1>>>; results are reported back through the
 * d_temp_storage_bytes / d_cdp_error device pointers. When the build does
 * not enable CUB_CDP, the kernel only records cudaErrorNotSupported.
 */
template <
typename InputIteratorT,
typename OutputIteratorT,
typename OffsetIteratorT,
typename ReductionOpT>
__global__ void CnpDispatchKernel(
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
bool debug_synchronous)
{
#ifndef CUB_CDP
// Device-side dispatch unavailable: silence unused-parameter warnings and
// report the failure through the error cell
(void)timing_iterations;
(void)d_temp_storage_bytes;
(void)d_cdp_error;
(void)d_temp_storage;
(void)temp_storage_bytes;
(void)d_in;
(void)d_out;
(void)num_items;
(void)max_segments;
(void)d_segment_offsets;
(void)reduction_op;
(void)debug_synchronous;
*d_cdp_error = cudaErrorNotSupported;
#else
// Perform the ordinary CUB dispatch on the device (stream 0) and publish
// both the status and the temp-storage requirement it computed
*d_cdp_error = Dispatch(Int2Type<CUB>(), timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, 0, debug_synchronous);
*d_temp_storage_bytes = temp_storage_bytes;
#endif
}
/**
 * Dispatch to CUB_CDP kernel
 */
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
    Int2Type<CUB_CDP> dispatch_to,
    int timing_iterations,
    size_t *d_temp_storage_bytes,
    cudaError_t *d_cdp_error,
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OutputIteratorT d_out,
    int num_items,
    int max_segments,
    OffsetIteratorT d_segment_offsets,
    ReductionOpT reduction_op,
    cudaStream_t stream,
    bool debug_synchronous)
{
    // Launch a single-thread kernel that performs the dispatch on the device
    CnpDispatchKernel<<<1,1>>>(timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
        d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, debug_synchronous);
    // Retrieve the temp-storage requirement computed on the device
    CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t), cudaMemcpyDeviceToHost));
    // Retrieve and return the device-side dispatch status
    cudaError_t cdp_status;
    CubDebugExit(cudaMemcpy(&cdp_status, d_cdp_error, sizeof(cudaError_t), cudaMemcpyDeviceToHost));
    return cdp_status;
}
//---------------------------------------------------------------------
// Problem generation
//---------------------------------------------------------------------
/// Initialize problem: fill h_in[0..num_items) via InitValue under the given
/// generation mode, optionally echoing the data when g_verbose_input is set.
template <typename InputT>
void Initialize(
    GenMode gen_mode,
    InputT *h_in,
    int num_items)
{
    for (int idx = 0; idx < num_items; ++idx)
        InitValue(gen_mode, h_in[idx], idx);
    if (g_verbose_input)
    {
        printf("Input:\n");
        DisplayResults(h_in, num_items);
        printf("\n\n");
    }
}
/// Solve problem (max/custom-max functor)
template <typename ReductionOpT, typename InputT, typename _OutputT>
struct Solution
{
    typedef _OutputT OutputT;
    // Host-side reference reduction: fold each segment with reduction_op,
    // seeded with the lowest representable value (max identity); replace with
    // std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
    template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
    static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
        ReductionOpT reduction_op)
    {
        for (int seg = 0; seg < num_segments; ++seg)
        {
            OutputT acc = Traits<InputT>::Lowest();
            for (int pos = h_segment_offsets[seg]; pos < h_segment_offsets[seg + 1]; ++pos)
                acc = reduction_op(acc, OutputT(h_in[pos]));
            h_reference[seg] = acc;
        }
    }
};
/// Solve problem (min functor)
template <typename InputT, typename _OutputT>
struct Solution<cub::Min, InputT, _OutputT>
{
    typedef _OutputT OutputT;
    // Host-side reference min: fold each segment, seeded with the largest
    // representable value (min identity); replace with
    // std::numeric_limits<OutputT>::max() when C++ support is more prevalent
    template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
    static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
        cub::Min reduction_op)
    {
        for (int seg = 0; seg < num_segments; ++seg)
        {
            OutputT acc = Traits<InputT>::Max();
            for (int pos = h_segment_offsets[seg]; pos < h_segment_offsets[seg + 1]; ++pos)
                acc = reduction_op(acc, OutputT(h_in[pos]));
            h_reference[seg] = acc;
        }
    }
};
/// Solve problem (sum functor)
template <typename InputT, typename _OutputT>
struct Solution<cub::Sum, InputT, _OutputT>
{
    typedef _OutputT OutputT;
    // Host-side reference sum: fold each segment, seeded via
    // InitValue(INTEGER_SEED, ..., 0) so compound types start from a
    // well-defined zero-equivalent
    template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
    static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
        cub::Sum reduction_op)
    {
        for (int seg = 0; seg < num_segments; ++seg)
        {
            OutputT acc;
            InitValue(INTEGER_SEED, acc, 0);
            for (int pos = h_segment_offsets[seg]; pos < h_segment_offsets[seg + 1]; ++pos)
                acc = reduction_op(acc, OutputT(h_in[pos]));
            h_reference[seg] = acc;
        }
    }
};
/// Solve problem (argmin functor)
template <typename InputValueT, typename OutputValueT>
struct Solution<cub::ArgMin, InputValueT, OutputValueT>
{
    typedef KeyValuePair<int, OutputValueT> OutputT;
    // Host-side reference argmin: fold (offset-within-segment, value) pairs,
    // seeded with key 1 and the largest representable value; replace with
    // std::numeric_limits<OutputT>::max() when C++ support is more prevalent
    template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
    static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
        cub::ArgMin reduction_op)
    {
        for (int seg = 0; seg < num_segments; ++seg)
        {
            OutputT acc(1, Traits<InputValueT>::Max());
            for (int pos = h_segment_offsets[seg]; pos < h_segment_offsets[seg + 1]; ++pos)
            {
                // Key is the position relative to the segment start
                OutputT candidate(pos - h_segment_offsets[seg], OutputValueT(h_in[pos]));
                acc = reduction_op(acc, candidate);
            }
            h_reference[seg] = acc;
        }
    }
};
/// Solve problem (argmax functor)
template <typename InputValueT, typename OutputValueT>
struct Solution<cub::ArgMax, InputValueT, OutputValueT>
{
    typedef KeyValuePair<int, OutputValueT> OutputT;
    // Host-side reference argmax: fold (offset-within-segment, value) pairs,
    // seeded with key 1 and the lowest representable value; replace with
    // std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
    template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
    static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
        cub::ArgMax reduction_op)
    {
        for (int seg = 0; seg < num_segments; ++seg)
        {
            OutputT acc(1, Traits<InputValueT>::Lowest());
            for (int pos = h_segment_offsets[seg]; pos < h_segment_offsets[seg + 1]; ++pos)
            {
                // Key is the position relative to the segment start
                OutputT candidate(pos - h_segment_offsets[seg], OutputValueT(h_in[pos]));
                acc = reduction_op(acc, candidate);
            }
            h_reference[seg] = acc;
        }
    }
};
//---------------------------------------------------------------------
// Problem generation
//---------------------------------------------------------------------
/// Test DeviceReduce for a given problem input
///
/// Performs, in order: a temp-storage size query, a warmup/correctness
/// dispatch compared against h_reference, and (when g_timing_iterations > 0)
/// a timed pass whose throughput is printed. Aborts via AssertEquals on
/// mismatch after freeing device resources.
template <
typename BackendT,
typename DeviceInputIteratorT,
typename DeviceOutputIteratorT,
typename HostReferenceIteratorT,
typename OffsetT,
typename OffsetIteratorT,
typename ReductionOpT>
void Test(
BackendT backend,
DeviceInputIteratorT d_in,
DeviceOutputIteratorT d_out,
OffsetT num_items,
OffsetT num_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
HostReferenceIteratorT h_reference)
{
// Input data types
typedef typename std::iterator_traits<DeviceInputIteratorT>::value_type InputT;
// Allocate CUB_CDP device arrays for temp storage size and error
// (used only by the CDP backend; passed through for the others)
size_t *d_temp_storage_bytes = NULL;
cudaError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1));
// Inquire temp device storage (d_temp_storage == NULL selects the size-query pass)
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(Dispatch(backend, 1,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
reduction_op, 0, true));
// Allocate temp device storage
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Run warmup/correctness iteration (debug_synchronous = true)
CubDebugExit(Dispatch(backend, 1,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
reduction_op, 0, true));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(h_reference, d_out, num_segments, g_verbose, g_verbose);
printf("\t%s", compare ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance: timed pass with debug_synchronous = false
if (g_timing_iterations > 0)
{
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(backend, g_timing_iterations,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
reduction_op, 0, false));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance (rate, logical bandwidth, fraction of device peak)
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = giga_rate * sizeof(InputT);
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak",
avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
}
// Release device scratch before asserting so a failure does not leak
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, compare);
}
/// Test DeviceReduce: compute the host reference for this problem, then run
/// the device reduction against a zero-initialized output buffer and compare.
template <
    Backend BACKEND,
    typename OutputValueT,
    typename HostInputIteratorT,
    typename DeviceInputIteratorT,
    typename OffsetT,
    typename OffsetIteratorT,
    typename ReductionOpT>
void SolveAndTest(
    HostInputIteratorT h_in,
    DeviceInputIteratorT d_in,
    OffsetT num_items,
    OffsetT num_segments,
    OffsetIteratorT h_segment_offsets,
    OffsetIteratorT d_segment_offsets,
    ReductionOpT reduction_op)
{
    typedef typename std::iterator_traits<DeviceInputIteratorT>::value_type InputValueT;
    typedef Solution<ReductionOpT, InputValueT, OutputValueT> SolutionT;
    typedef typename SolutionT::OutputT OutputT;
    // Announce the configuration under test
    printf("\n\n%s cub::DeviceReduce<%s> %d items (%s), %d segments\n",
        (BACKEND == CUB_CDP) ? "CUB_CDP" : (BACKEND == THRUST) ? "Thrust" : (BACKEND == CUB_SEGMENTED) ? "CUB_SEGMENTED" : "CUB",
        typeid(ReductionOpT).name(), num_items, typeid(HostInputIteratorT).name(), num_segments);
    fflush(stdout);
    // Compute the host-side reference solution
    OutputT *h_reference = new OutputT[num_segments];
    SolutionT::Solve(h_in, h_reference, num_segments, h_segment_offsets, reduction_op);
    // (A discard-iterator run existed here previously but is disabled.)
    // Run with real output data, zero-initialized so stale values cannot pass
    OutputT *d_out = NULL;
    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(OutputT) * num_segments));
    CubDebugExit(cudaMemset(d_out, 0, sizeof(OutputT) * num_segments));
    Test(Int2Type<BACKEND>(), d_in, d_out, num_items, num_segments, d_segment_offsets, reduction_op, h_reference);
    // Cleanup
    if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
    if (h_reference) delete[] h_reference;
}
/// Test specific problem type: generate input and segment offsets on the
/// host, mirror them onto the device, and run SolveAndTest for the backend.
template <
    Backend BACKEND,
    typename InputT,
    typename OutputT,
    typename OffsetT,
    typename ReductionOpT>
void TestProblem(
    OffsetT num_items,
    OffsetT num_segments,
    GenMode gen_mode,
    ReductionOpT reduction_op)
{
    printf("\n\nInitializing %d %s->%s (gen mode %d)... ", num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout);
    fflush(stdout);
    // Generate host-side input values
    InputT* h_in = new InputT[num_items];
    Initialize(gen_mode, h_in, num_items);
    // Generate host-side segment boundaries (num_segments + 1 offsets)
    OffsetT *h_segment_offsets = new OffsetT[num_segments + 1];
    InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input);
    // Mirror the problem onto the device
    OffsetT *d_segment_offsets = NULL;
    InputT *d_in = NULL;
    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items));
    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (num_segments + 1)));
    CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(InputT) * num_items, cudaMemcpyHostToDevice));
    CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), cudaMemcpyHostToDevice));
    SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, reduction_op);
    // Cleanup (same order as before: host offsets, device offsets, host input, device input)
    if (h_segment_offsets) delete[] h_segment_offsets;
    if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets));
    if (h_in) delete[] h_in;
    if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
}
/// Test different operators
///
/// Exercises every reduction functor the test suite covers — custom max,
/// built-in sum/min/max, and argmin/argmax — on the same input/segment
/// layout. Output-type OutputT applies to the value component; arg variants
/// wrap it in a KeyValuePair inside Solution.
template <
Backend BACKEND,
typename OutputT,
typename HostInputIteratorT,
typename DeviceInputIteratorT,
typename OffsetT,
typename OffsetIteratorT>
void TestByOp(
HostInputIteratorT h_in,
DeviceInputIteratorT d_in,
OffsetT num_items,
OffsetT num_segments,
OffsetIteratorT h_segment_offsets,
OffsetIteratorT d_segment_offsets)
{
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, CustomMax());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Sum());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Min());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMin());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Max());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMax());
}
/// Test different backends
///
/// Runs single-segment tests on the host-dispatch (and, when CUB_CDP is
/// enabled, device-dispatch) backends — including a non-page-aligned input
/// variant — then sweeps the segmented backend over a range of segment
/// counts, once with raw offset pointers and once with transform iterators.
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestByBackend(
OffsetT num_items,
OffsetT max_segments,
GenMode gen_mode)
{
// Initialize host data
printf("\n\nInitializing %d %s -> %s (gen mode %d)... ",
num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout);
InputT *h_in = new InputT[num_items];
OffsetT *h_segment_offsets = new OffsetT[max_segments + 1];
Initialize(gen_mode, h_in, num_items);
// Initialize device data
InputT *d_in = NULL;
OffsetT *d_segment_offsets = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (max_segments + 1)));
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(InputT) * num_items, cudaMemcpyHostToDevice));
//
// Test single-segment implementations
//
InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input);
// Page-aligned-input tests (segment offsets unused by these backends)
TestByOp<CUB, OutputT>(h_in, d_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL);        // Host-dispatch
#ifdef CUB_CDP
TestByOp<CUB_CDP, OutputT>(h_in, d_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL);    // Device-dispatch
#endif
// Non-page-aligned-input tests (offset the base pointer by one element)
if (num_items > 1)
{
InitializeSegments(num_items - 1, 1, h_segment_offsets, g_verbose_input);
TestByOp<CUB, OutputT>(h_in + 1, d_in + 1, num_items - 1, 1, h_segment_offsets, (OffsetT*) NULL);
}
//
// Test segmented implementation
//
// Right now we assign a single thread block to each segment, so lets keep it to under 128K items per segment
int max_items_per_segment = 128000;
// Sweep segment counts geometrically (x32 + 1 per step) up to max_segments
for (int num_segments = cub::DivideAndRoundUp(num_items, max_items_per_segment);
num_segments < max_segments;
num_segments = (num_segments * 32) + 1)
{
// Test with segment pointer
InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input);
CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), cudaMemcpyHostToDevice));
TestByOp<CUB_SEGMENTED, OutputT>(
h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets);
// Test with segment iterator (identity transform over the same offsets)
typedef CastOp<OffsetT> IdentityOpT;
IdentityOpT identity_op;
TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> h_segment_offsets_itr(
h_segment_offsets,
identity_op);
TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> d_segment_offsets_itr(
d_segment_offsets,
identity_op);
TestByOp<CUB_SEGMENTED, OutputT>(
h_in, d_in, num_items, num_segments, h_segment_offsets_itr, d_segment_offsets_itr);
}
if (h_in) delete[] h_in;
if (h_segment_offsets) delete[] h_segment_offsets;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets));
}
/// Test different input-generation modes, then exercise iterator-typed input
/// via a constant iterator + SUM over a single segment.
template <
    typename InputT,
    typename OutputT,
    typename OffsetT>
void TestByGenMode(
    OffsetT num_items,
    OffsetT max_segments)
{
    // Pointer-based input under each generation mode
    TestByBackend<InputT, OutputT>(num_items, max_segments, UNIFORM);
    TestByBackend<InputT, OutputT>(num_items, max_segments, INTEGER_SEED);
    TestByBackend<InputT, OutputT>(num_items, max_segments, RANDOM);
    // Iterator-based input: constant iterator + SUM
    InputT fill_value;
    InitValue(UNIFORM, fill_value, 0);
    ConstantInputIterator<InputT, OffsetT> const_in(fill_value);
    OffsetT *segment_offsets = new OffsetT[1 + 1];
    InitializeSegments(num_items, 1, segment_offsets, g_verbose_input);
    SolveAndTest<CUB, OutputT>(const_in, const_in, num_items, 1, segment_offsets, (OffsetT*) NULL, Sum());
#ifdef CUB_CDP
    SolveAndTest<CUB_CDP, OutputT>(const_in, const_in, num_items, 1, segment_offsets, (OffsetT*) NULL, Sum());
#endif
    if (segment_offsets) delete[] segment_offsets;
}
/// Test different problem sizes
template <
    typename InputT,
    typename OutputT,
    typename OffsetT>
struct TestBySize
{
    OffsetT max_items;
    OffsetT max_segments;
    TestBySize(OffsetT max_items, OffsetT max_segments) :
        max_items(max_items),
        max_segments(max_segments)
    {}
    // Invoked by the policy chain with the tuned policy for this device
    template <typename ActivePolicyT>
    cudaError_t Invoke()
    {
        //
        // Black-box testing on all backends
        //
        // Degenerate and maximal sizes first: 0, 1, many
        TestByGenMode<InputT, OutputT>(0, max_segments);
        TestByGenMode<InputT, OutputT>(1, max_segments);
        TestByGenMode<InputT, OutputT>(max_items, max_segments);
        // Random problem sizes drawn log-uniformly from [8, max_items-ish)
        const int random_trials = 8;
        double exp_ceiling = log(double(max_items)) / log(double(2.0));
        for (int trial = 0; trial < random_trials; ++trial)
        {
            OffsetT trial_items = (OffsetT) pow(2.0, RandomValue(exp_ceiling - 3.0) + 3.0);
            TestByGenMode<InputT, OutputT>(trial_items, max_segments);
        }
        //
        // White-box testing of single-segment problems around specific sizes
        //
        // Tile-boundaries: multiple blocks, one tile per block
        OffsetT tile_items = ActivePolicyT::ReducePolicy::BLOCK_THREADS * ActivePolicyT::ReducePolicy::ITEMS_PER_THREAD;
        TestProblem<CUB, InputT, OutputT>(tile_items * 4,     1, RANDOM, Sum());
        TestProblem<CUB, InputT, OutputT>(tile_items * 4 + 1, 1, RANDOM, Sum());
        TestProblem<CUB, InputT, OutputT>(tile_items * 4 - 1, 1, RANDOM, Sum());
        // Tile-boundaries: multiple blocks, multiple tiles per block
        OffsetT blocks_per_sm = 32;
        OffsetT saturating_items = tile_items * blocks_per_sm * g_sm_count;
        TestProblem<CUB, InputT, OutputT>(saturating_items,     1, RANDOM, Sum());
        TestProblem<CUB, InputT, OutputT>(saturating_items + 1, 1, RANDOM, Sum());
        TestProblem<CUB, InputT, OutputT>(saturating_items - 1, 1, RANDOM, Sum());
        return cudaSuccess;
    }
};
/// Test problem type: drive the size sweep through the tuned policy chain
/// selected for the current PTX version.
template <
    typename InputT,
    typename OutputT,
    typename OffsetT>
void TestType(
    OffsetT max_items,
    OffsetT max_segments)
{
    typedef typename DeviceReducePolicy<InputT, OutputT, OffsetT, cub::Sum>::MaxPolicy MaxPolicyT;
    TestBySize<InputT, OutputT, OffsetT> size_sweep(max_items, max_segments);
    MaxPolicyT::Invoke(g_ptx_version, size_sweep);
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
 * Main
 *
 * Parses command-line options (-n items, -s segments, -i timing iterations,
 * -repeat suite repetitions, -v/-v2 verbosity), initializes the device, and
 * runs the test battery selected by CUB_TEST_MINIMAL / CUB_TEST_BENCHMARK /
 * default (thorough).
 */
int main(int argc, char** argv)
{
typedef int OffsetT;
OffsetT max_items = 27000000;
OffsetT max_segments = 34000;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
g_verbose_input = args.CheckCmdLineFlag("v2");
args.GetCmdLineArgument("n", max_items);
args.GetCmdLineArgument("s", max_segments);
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
// Note: brackets were previously unbalanced ("[--n=<input items> ") and
// the repeat/v options ran together; each option is now closed and
// space-separated.
printf("%s "
"[--n=<input items>] "
"[--s=<num segments>] "
"[--i=<timing iterations>] "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>] "
"[--v] "
"[--cdp]"
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
g_device_giga_bandwidth = args.device_giga_bandwidth;
// Get ptx version
CubDebugExit(PtxVersion(g_ptx_version));
// Get SM count
g_sm_count = args.deviceProp.multiProcessorCount;
#ifdef CUB_TEST_MINIMAL
// Compile/run basic test
TestProblem<CUB, char, int>(        max_items, 1, RANDOM_BIT, Sum());
TestProblem<CUB, short, int>(       max_items, 1, RANDOM_BIT, Sum());
printf("\n-------------------------------\n");
TestProblem<CUB, int, int>(         max_items, 1, RANDOM_BIT, Sum());
TestProblem<CUB, long long, long long>( max_items, 1, RANDOM_BIT, Sum());
printf("\n-------------------------------\n");
TestProblem<CUB, float, float>(     max_items, 1, RANDOM_BIT, Sum());
TestProblem<CUB, double, double>(   max_items, 1, RANDOM_BIT, Sum());
printf("\n-------------------------------\n");
TestProblem<CUB_SEGMENTED, int, int>(max_items, max_segments, RANDOM_BIT, Sum());
#elif defined(CUB_TEST_BENCHMARK)
// Compile/run quick comparison tests against thrust (item counts scaled
// so each pass moves roughly the same number of bytes)
TestProblem<CUB, char, char>(       max_items * 4, 1, UNIFORM, Sum());
TestProblem<THRUST, char, char>(    max_items * 4, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, short, short>(     max_items * 2, 1, UNIFORM, Sum());
TestProblem<THRUST, short, short>(  max_items * 2, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, int, int>(         max_items, 1, UNIFORM, Sum());
TestProblem<THRUST, int, int>(      max_items, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, long long, long long>( max_items / 2, 1, UNIFORM, Sum());
TestProblem<THRUST, long long, long long>( max_items / 2, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, TestFoo, TestFoo>( max_items / 4, 1, UNIFORM, Max());
TestProblem<THRUST, TestFoo, TestFoo>( max_items / 4, 1, UNIFORM, Max());
#else
// Compile/run thorough tests across the type zoo
for (int i = 0; i <= g_repeat; ++i)
{
// Test different input types
TestType<char, char>(max_items, max_segments);
TestType<unsigned char, unsigned char>(max_items, max_segments);
TestType<char, int>(max_items, max_segments);
TestType<short, short>(max_items, max_segments);
TestType<int, int>(max_items, max_segments);
TestType<long, long>(max_items, max_segments);
TestType<long long, long long>(max_items, max_segments);
TestType<uchar2, uchar2>(max_items, max_segments);
TestType<uint2, uint2>(max_items, max_segments);
TestType<ulonglong2, ulonglong2>(max_items, max_segments);
TestType<ulonglong4, ulonglong4>(max_items, max_segments);
TestType<TestFoo, TestFoo>(max_items, max_segments);
TestType<TestBar, TestBar>(max_items, max_segments);
}
#endif
printf("\n");
return 0;
}
|
e36519965a9c40cbeb0188b78f0eb2a6ffaef35a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Saxpy.cu
// A saxpy program to demonstrate single-precision A*X + Y.
#include <heteroflow/heteroflow.hpp>
// Kernel: saxpy
// Computes y[i] = a*x[i] + y[i] elementwise, one thread per element, with a
// tail guard for grids larger than n.
__global__ void saxpy(int n, float a, float *x, float *y) {
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx >= n) {
    return;
  }
  y[idx] = a * x[idx] + y[idx];
}
// Function: create_vector
// Allocate a heap array of N floats, every entry set to `value`.
// Ownership passes to the caller (release with delete_vector).
float* create_vector(size_t N, float value) {
  float* buffer = new float[N];
  for (size_t i = 0; i < N; ++i) {
    buffer[i] = value;
  }
  return buffer;
}
// Procedure: delete_vector
// Release an array previously allocated by create_vector (new[] -> delete[]).
// Passing nullptr is a no-op, as with any delete[].
void delete_vector(float* ptr) {
delete [] ptr;
}
// Procedure: verify_result
// Check the saxpy outcome (x untouched at 1.0f, y updated to 4.0f) and print
// the largest absolute deviation observed.
void verify_result(float* x, float* y, size_t N) {
  float maxError = 0.0f;
  for (size_t i = 0; i < N; i++) {
    // std::fabs avoids accidentally binding to the integer ::abs overload,
    // which would truncate the error toward zero; std::max avoids relying on
    // a toolkit-injected global-namespace max().
    maxError = std::max(maxError, std::fabs(x[i]-1.0f));
    maxError = std::max(maxError, std::fabs(y[i]-4.0f));
  }
  std::cout << "Max error: " << maxError << '\n';
}
// Function: main
// Builds and runs a heteroflow task graph for y = 2*x + y over 2^20 floats:
// host tasks allocate the vectors, pull tasks stage them to the device, the
// saxpy kernel runs over (N+255)/256 blocks of 256 threads, push tasks copy
// the results back, and host tasks verify and free the buffers.
int main(void) {
const size_t N = 1<<20;
const size_t B = N*sizeof(float);   // byte count per vector
float *x {nullptr};
float *y {nullptr};
hf::Executor executor(1, 1);
hf::Heteroflow hf("saxpy");
// x/y are captured by reference: the host tasks fill the pointers before
// the pull tasks (ordered below) dereference them via std::ref
auto host_x = hf.host([&]{ x = create_vector(N, 1.0f); }).name("create_x");
auto host_y = hf.host([&]{ y = create_vector(N, 2.0f); }).name("create_y");
auto pull_x = hf.pull(std::ref(x), B).name("pull_x");
auto pull_y = hf.pull(std::ref(y), B).name("pull_y");
auto kernel = hf.kernel((N+255)/256, 256, 0, saxpy, N, 2.0f, pull_x, pull_y)
.name("saxpy");
auto push_x = hf.push(std::ref(x), pull_x, B).name("push_x");
auto push_y = hf.push(std::ref(y), pull_y, B).name("push_y");
auto verify = hf.host([&]{ verify_result(x, y, N); }).name("verify");
auto kill_x = hf.host([&]{ delete_vector(x); }).name("delete_x");
auto kill_y = hf.host([&]{ delete_vector(y); }).name("delete_y");
// Dependency edges: create -> pull -> kernel -> push -> verify -> delete
host_x.precede(pull_x);
host_y.precede(pull_y);
kernel.precede(push_x, push_y)
.succeed(pull_x, pull_y);
verify.precede(kill_x, kill_y)
.succeed(push_x, push_y);
// dump the graph
hf.dump(std::cout);
// run the graph (blocks until completion)
executor.run(hf).wait();
return 0;
}
| e36519965a9c40cbeb0188b78f0eb2a6ffaef35a.cu | // Saxpy.cu
// A saxpy program to demonstrate single-precision A*X + Y.
#include <heteroflow/heteroflow.hpp>
// Kernel: saxpy
__global__ void saxpy(int n, float a, float *x, float *y) {
// Get the corresponding idx
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) {
y[i] = a*x[i] + y[i];
}
}
// Function: create_vector
float* create_vector(size_t N, float value) {
auto ptr = new float[N];
std::fill_n(ptr, N, value);
return ptr;
}
// Procedure: delete_vector
void delete_vector(float* ptr) {
delete [] ptr;
}
// Procedure: verify_result
void verify_result(float* x, float* y, size_t N) {
// verify the result
float maxError = 0.0f;
for (size_t i = 0; i < N; i++) {
maxError = std::max(maxError, abs(x[i]-1.0f));
maxError = std::max(maxError, abs(y[i]-4.0f));
}
std::cout << "Max error: " << maxError << '\n';
}
// Function: main
int main(void) {
const size_t N = 1<<20;
const size_t B = N*sizeof(float);
float *x {nullptr};
float *y {nullptr};
hf::Executor executor(1, 1);
hf::Heteroflow hf("saxpy");
auto host_x = hf.host([&]{ x = create_vector(N, 1.0f); }).name("create_x");
auto host_y = hf.host([&]{ y = create_vector(N, 2.0f); }).name("create_y");
auto pull_x = hf.pull(std::ref(x), B).name("pull_x");
auto pull_y = hf.pull(std::ref(y), B).name("pull_y");
auto kernel = hf.kernel((N+255)/256, 256, 0, saxpy, N, 2.0f, pull_x, pull_y)
.name("saxpy");
auto push_x = hf.push(std::ref(x), pull_x, B).name("push_x");
auto push_y = hf.push(std::ref(y), pull_y, B).name("push_y");
auto verify = hf.host([&]{ verify_result(x, y, N); }).name("verify");
auto kill_x = hf.host([&]{ delete_vector(x); }).name("delete_x");
auto kill_y = hf.host([&]{ delete_vector(y); }).name("delete_y");
host_x.precede(pull_x);
host_y.precede(pull_y);
kernel.precede(push_x, push_y)
.succeed(pull_x, pull_y);
verify.precede(kill_x, kill_y)
.succeed(push_x, push_y);
// dump the graph
hf.dump(std::cout);
// run the graph
executor.run(hf).wait();
return 0;
}
|
4d623714fc815313411655b23be519f4ee75095f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __CUDNN__
#include "Concatenate.hpp"
// template class ConcatenateChannelWise<int>;
template class ConcatenateChannelWise<float>;
// template class ConcatenateChannelWise<double>;
__global__ void ConcatenateChannelWise_ForwardPropagate_kernel(int sizeOfResultImg, int sizeOfInputImg, int timesize, int batchsize, float *result, float *input, int preSize) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeOfInputImg; idx += blockDim.x * gridDim.x) {
for (int ba = 0; ba < batchsize; ba++) {
result[ba * sizeOfResultImg + idx + preSize] = input[ba * sizeOfInputImg + idx];
}
}
}
template<typename DTYPE> int ConcatenateChannelWise<DTYPE>::ForwardPropagateOnGPU(int pTime) {
Tensor<DTYPE> *result = this->GetResult();
Tensor<DTYPE> *input = NULL;
int timesize = result->GetTimeSize();
int batchsize = result->GetBatchSize();
int channelsize = result->GetChannelSize();
int rowsize = result->GetRowSize();
int colsize = result->GetColSize();
Shape *resultTenShape = result->GetShape();
int sizeOfPlane = rowsize * colsize;
int sizeOfResultImg = channelsize * sizeOfPlane;
int sizeOfInputImg = 0;
DTYPE *result_gpu = result->GetGPUData();
DTYPE *input_gpu = NULL;
int preSize = 0;
int inputChannelSize = 0;
for (int opnum = 0; opnum < m_noOperator; opnum++) {
input = this->GetInput()[opnum]->GetResult();
input_gpu = input->GetGPUData();
inputChannelSize = input->GetChannelSize();
preSize = m_aAccumulate[opnum] * sizeOfPlane;
sizeOfInputImg = inputChannelSize * sizeOfPlane;
// std::cout << "check" << '\n';
ConcatenateChannelWise_ForwardPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, result_gpu, input_gpu, preSize);
}
return TRUE;
}
__global__ void ConcatenateChannelWise_BackPropagate_kernel(int sizeOfResultImg, int sizeOfInputImg, int timesize, int batchsize, float *delta_gpu, float *input_delta_gpu, int preSize) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeOfInputImg; idx += blockDim.x * gridDim.x) {
for (int ba = 0; ba < batchsize; ba++) {
input_delta_gpu[ba * sizeOfInputImg + idx] += delta_gpu[ba * sizeOfResultImg + idx + preSize];
}
}
}
template<typename DTYPE> int ConcatenateChannelWise<DTYPE>::BackPropagateOnGPU(int pTime) {
Tensor<DTYPE> *this_delta = this->GetDelta();
Tensor<DTYPE> *input_delta = NULL;
int timesize = this_delta->GetTimeSize();
int batchsize = this_delta->GetBatchSize();
int channelsize = this_delta->GetChannelSize();
int rowsize = this_delta->GetRowSize();
int colsize = this_delta->GetColSize();
Shape *resultTenShape = this_delta->GetShape();
int sizeOfPlane = rowsize * colsize;
int sizeOfResultImg = channelsize * sizeOfPlane;
int sizeOfInputImg = 0;
DTYPE *delta_gpu = this_delta->GetGPUData();
DTYPE *input_delta_gpu = NULL;
int preSize = 0;
int inputChannelSize = 0;
for (int opnum = 0; opnum < m_noOperator; opnum++) {
input_delta = this->GetInput()[opnum]->GetDelta();
input_delta_gpu = input_delta->GetGPUData();
inputChannelSize = input_delta->GetChannelSize();
preSize = m_aAccumulate[opnum] * sizeOfPlane;;
sizeOfInputImg = inputChannelSize * sizeOfPlane;
ConcatenateChannelWise_BackPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, delta_gpu, input_delta_gpu, preSize);
}
return TRUE;
}
#endif // ifdef __CUDNN__
| 4d623714fc815313411655b23be519f4ee75095f.cu | #ifdef __CUDNN__
#include "Concatenate.hpp"
// template class ConcatenateChannelWise<int>;
template class ConcatenateChannelWise<float>;
// template class ConcatenateChannelWise<double>;
__global__ void ConcatenateChannelWise_ForwardPropagate_kernel(int sizeOfResultImg, int sizeOfInputImg, int timesize, int batchsize, float *result, float *input, int preSize) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeOfInputImg; idx += blockDim.x * gridDim.x) {
for (int ba = 0; ba < batchsize; ba++) {
result[ba * sizeOfResultImg + idx + preSize] = input[ba * sizeOfInputImg + idx];
}
}
}
template<typename DTYPE> int ConcatenateChannelWise<DTYPE>::ForwardPropagateOnGPU(int pTime) {
Tensor<DTYPE> *result = this->GetResult();
Tensor<DTYPE> *input = NULL;
int timesize = result->GetTimeSize();
int batchsize = result->GetBatchSize();
int channelsize = result->GetChannelSize();
int rowsize = result->GetRowSize();
int colsize = result->GetColSize();
Shape *resultTenShape = result->GetShape();
int sizeOfPlane = rowsize * colsize;
int sizeOfResultImg = channelsize * sizeOfPlane;
int sizeOfInputImg = 0;
DTYPE *result_gpu = result->GetGPUData();
DTYPE *input_gpu = NULL;
int preSize = 0;
int inputChannelSize = 0;
for (int opnum = 0; opnum < m_noOperator; opnum++) {
input = this->GetInput()[opnum]->GetResult();
input_gpu = input->GetGPUData();
inputChannelSize = input->GetChannelSize();
preSize = m_aAccumulate[opnum] * sizeOfPlane;
sizeOfInputImg = inputChannelSize * sizeOfPlane;
// std::cout << "check" << '\n';
ConcatenateChannelWise_ForwardPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, result_gpu, input_gpu, preSize);
}
return TRUE;
}
__global__ void ConcatenateChannelWise_BackPropagate_kernel(int sizeOfResultImg, int sizeOfInputImg, int timesize, int batchsize, float *delta_gpu, float *input_delta_gpu, int preSize) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeOfInputImg; idx += blockDim.x * gridDim.x) {
for (int ba = 0; ba < batchsize; ba++) {
input_delta_gpu[ba * sizeOfInputImg + idx] += delta_gpu[ba * sizeOfResultImg + idx + preSize];
}
}
}
template<typename DTYPE> int ConcatenateChannelWise<DTYPE>::BackPropagateOnGPU(int pTime) {
Tensor<DTYPE> *this_delta = this->GetDelta();
Tensor<DTYPE> *input_delta = NULL;
int timesize = this_delta->GetTimeSize();
int batchsize = this_delta->GetBatchSize();
int channelsize = this_delta->GetChannelSize();
int rowsize = this_delta->GetRowSize();
int colsize = this_delta->GetColSize();
Shape *resultTenShape = this_delta->GetShape();
int sizeOfPlane = rowsize * colsize;
int sizeOfResultImg = channelsize * sizeOfPlane;
int sizeOfInputImg = 0;
DTYPE *delta_gpu = this_delta->GetGPUData();
DTYPE *input_delta_gpu = NULL;
int preSize = 0;
int inputChannelSize = 0;
for (int opnum = 0; opnum < m_noOperator; opnum++) {
input_delta = this->GetInput()[opnum]->GetDelta();
input_delta_gpu = input_delta->GetGPUData();
inputChannelSize = input_delta->GetChannelSize();
preSize = m_aAccumulate[opnum] * sizeOfPlane;;
sizeOfInputImg = inputChannelSize * sizeOfPlane;
ConcatenateChannelWise_BackPropagate_kernel << < 64, 128 >> > (sizeOfResultImg, sizeOfInputImg, timesize, batchsize, delta_gpu, input_delta_gpu, preSize);
}
return TRUE;
}
#endif // ifdef __CUDNN__
|
fc0017cc4348a6c8077ee13145b6c1505c6227bb.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "transpose_device.cuh"
/*
* DONE for all kernels (including naive):
* Leave a comment above all non-coalesced memory accesses and bank conflicts.
* Make it clear if the suboptimal access is a read or write. If an access is
* non-coalesced, specify how many cache lines it touches, and if an access
* causes bank conflicts, say if its a 2-way bank conflict, 4-way bank
* conflict, etc.
*
* Comment all of your kernels.
*/
/*
* Each block of the naive transpose handles a 64x64 block of the input matrix,
* with each thread of the block handling a 1x4 section and each warp handling
* a 32x4 section.
*
* If we split the 64x64 matrix into 32 blocks of shape (32, 4), then we have
* a block matrix of shape (2 blocks, 16 blocks).
* Warp 0 handles block (0, 0), warp 1 handles (1, 0), warp 2 handles (0, 1),
* warp n handles (n % 2, n / 2).
*
* This kernel is launched with block shape (64, 16) and grid shape
* (n / 64, n / 64) where n is the size of the square matrix.
*
* You may notice that we suggested in lecture that threads should be able to
* handle an arbitrary number of elements and that this kernel handles exactly
* 4 elements per thread. This is OK here because to overwhelm this kernel
* it would take a 4194304 x 4194304 matrix, which would take ~17.6TB of
* memory (well beyond what I expect GPUs to have in the next few years).
*/
__global__
void naiveTransposeKernel(const float *input, float *output, int n) {
// DONE: do not modify code, just comment on suboptimal accesses
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
const int end_j = j + 4;
/*
* The write here (output[j + n * i]) is non-coalesced, and touches
* 32 cache lines. This is because, within a single warp, it accesses the
* output array in a non-contiguous manner, causing multiple memory
* accesses to occur.
*/
for (; j < end_j; j++)
output[j + n * i] = input[i + n * j];
}
/*
* The main optimization used here is the usage of shared memory. By using
* shared memory, we are able to get rid of the coalesced write, as seen
* above in the naive implemention. We also pad the shared memory (make it
* unnecessarily large) in order to avoid bank conflicts.
*
* We statically allocate a 65 x 64 chunk of shared memory. Only 64 x 64
* of this is used; the remaining row is the padding. In the first for-loop,
* we copy the 64 x 64 global memory chunk that the block is handling
* into our shared memory array. To ensure that we do not proceed any further
* until every thread in the block has copied its elements to shared memory,
* we call __syncthreads(). We then reinitialize/change some variables. Note
* that blockIdx.y and blockIdx.x get switched around for i0 and j0. This
* is explained further in the comments below, but in short it is because
* the transpose of some elements is in a different block than the original
* block. Then, in the last for loop, we write to output from shared memory.
* Note that we access output in a coalesced manner. We accessing
* the shared memory in a non-sequential manner; however, this does not
* affect performance. This is the benefit that shared memory gives us.
* Also note how the indices are switched in the write to output (we access
* different elements of sh_in and output). This, along with the block
* switching, is the transpose logic.
*
* Finally, note that, for threads in a warp, we read from shared memory
* in offsets of 65. This gives us a stride of 1 and 0 bank conflicts.
* More specifically, this ensures that when reading from the shared output,
* each read will access a separate bank. When writing to shared memory,
* we have sequential access within a warp, so bank conflicts are not a problem.
*/
__global__
void shmemTransposeKernel(const float *input, float *output, int n) {
// DONE: Modify transpose kernel to use shared memory. All global memory
// reads and writes should be coalesced. Minimize the number of shared
// memory bank conflicts (0 bank conflicts should be possible using
// padding). Again, comment on all sub-optimal accesses.
// 65 x 64 shared memory matrix.
__shared__ float sh_in[65 * 64];
// Initial variable initialization.
int i0 = threadIdx.x + 64 * blockIdx.x;
int j0 = 4 * threadIdx.y + 64 * blockIdx.y;
const int i1 = threadIdx.x;
int j1 = 4 * threadIdx.y;
int end_j0 = j0 + 4;
// Read in a 64 x 64 chunk from global memory into the 65 x 64 sized
// shared memory array (padded to fix bank conflicts).
for (; j0 < end_j0; j0++, j1++) {
sh_in[i1 + 65 * j1] = input[i0 + n * j0];
}
// Make sure the entire block has writen to shared memory.
__syncthreads();
// Flip the block indices for the global matrix indices, and reassign
// variables. We do this because the transpose of some elements is not
// necessarily in the same block as the element itself.
// No need to switch threadIdx.x, threadIdx.y - this transpose
// happens in the last for-loop.
i0 = threadIdx.x + 64 * blockIdx.y;
j0 = 4 * threadIdx.y + 64 * blockIdx.x;
j1 = 4 * threadIdx.y;
end_j0 = j0 + 4;
// Write the memory in shared memory to the global output array.
// We access the shared memory in non-sequential order - this is not
// a problem for shared memory when trying to achieve optimal performance.
// Note that we access the output array in sequential order.
for (; j0 < end_j0; j0++, j1++) {
output[i0 + n * j0] = sh_in[j1 + 65 * i1];
}
}
/*
* Besides the shared memory and padding as seen in shmemTransposeKernel,
* here are some other performance optimizations (mostly small ones) used
* in this method:
* - unroll the for-loops
* - get rid of unneccessary variables and reassignments (e.g. end_j0)
* - minimize instruction dependencies
*/
__global__
void optimalTransposeKernel(const float *input, float *output, int n) {
// DONE: This should be based off of your shmemTransposeKernel.
// Use any optimization tricks discussed so far to improve performance.
// Consider ILP and loop unrolling.
// 65 x 64 shared memory matrix.
__shared__ float sh_in[65 * 64];
// Initial variable initialization.
int i0 = threadIdx.x + 64 * blockIdx.x;
int j0 = 4 * threadIdx.y + 64 * blockIdx.y;
const int i1 = threadIdx.x;
int j1 = 4 * threadIdx.y;
// Unroll the first for-loop, which writes to shared memory.
sh_in[i1 + 65 * j1] = input[i0 + n * j0];
sh_in[i1 + 65 * (j1 + 1)] = input[i0 + n * (j0 + 1)];
sh_in[i1 + 65 * (j1 + 2)] = input[i0 + n * (j0 + 2)];
sh_in[i1 + 65 * (j1 + 3)] = input[i0 + n * (j0 + 3)];
// Make sure the entire block has writen to shared memory.
__syncthreads();
// Flip the block indices.
i0 = threadIdx.x + 64 * blockIdx.y;
j0 = 4 * threadIdx.y + 64 * blockIdx.x;
// Unroll the second for-loop, which writes to output.
output[i0 + n * j0] = sh_in[j1 + 65 * i1];
output[i0 + n * (j0 + 1)] = sh_in[(j1 + 1) + 65 * i1];
output[i0 + n * (j0 + 2)] = sh_in[(j1 + 2) + 65 * i1];
output[i0 + n * (j0 + 3)] = sh_in[(j1 + 3) + 65 * i1];
}
void cudaTranspose(
const float *d_input,
float *d_output,
int n,
TransposeImplementation type)
{
if (type == NAIVE) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
hipLaunchKernelGGL(( naiveTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n);
}
else if (type == SHMEM) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
hipLaunchKernelGGL(( shmemTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n);
}
else if (type == OPTIMAL) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
hipLaunchKernelGGL(( optimalTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n);
}
// Unknown type
else
assert(false);
}
| fc0017cc4348a6c8077ee13145b6c1505c6227bb.cu | #include <cassert>
#include <stdio.h>
#include <cuda_runtime.h>
#include "transpose_device.cuh"
/*
* DONE for all kernels (including naive):
* Leave a comment above all non-coalesced memory accesses and bank conflicts.
* Make it clear if the suboptimal access is a read or write. If an access is
* non-coalesced, specify how many cache lines it touches, and if an access
* causes bank conflicts, say if its a 2-way bank conflict, 4-way bank
* conflict, etc.
*
* Comment all of your kernels.
*/
/*
* Each block of the naive transpose handles a 64x64 block of the input matrix,
* with each thread of the block handling a 1x4 section and each warp handling
* a 32x4 section.
*
* If we split the 64x64 matrix into 32 blocks of shape (32, 4), then we have
* a block matrix of shape (2 blocks, 16 blocks).
* Warp 0 handles block (0, 0), warp 1 handles (1, 0), warp 2 handles (0, 1),
* warp n handles (n % 2, n / 2).
*
* This kernel is launched with block shape (64, 16) and grid shape
* (n / 64, n / 64) where n is the size of the square matrix.
*
* You may notice that we suggested in lecture that threads should be able to
* handle an arbitrary number of elements and that this kernel handles exactly
* 4 elements per thread. This is OK here because to overwhelm this kernel
* it would take a 4194304 x 4194304 matrix, which would take ~17.6TB of
* memory (well beyond what I expect GPUs to have in the next few years).
*/
__global__
void naiveTransposeKernel(const float *input, float *output, int n) {
// DONE: do not modify code, just comment on suboptimal accesses
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
const int end_j = j + 4;
/*
* The write here (output[j + n * i]) is non-coalesced, and touches
* 32 cache lines. This is because, within a single warp, it accesses the
* output array in a non-contiguous manner, causing multiple memory
* accesses to occur.
*/
for (; j < end_j; j++)
output[j + n * i] = input[i + n * j];
}
/*
* The main optimization used here is the usage of shared memory. By using
* shared memory, we are able to get rid of the coalesced write, as seen
* above in the naive implemention. We also pad the shared memory (make it
* unnecessarily large) in order to avoid bank conflicts.
*
* We statically allocate a 65 x 64 chunk of shared memory. Only 64 x 64
* of this is used; the remaining row is the padding. In the first for-loop,
* we copy the 64 x 64 global memory chunk that the block is handling
* into our shared memory array. To ensure that we do not proceed any further
* until every thread in the block has copied its elements to shared memory,
* we call __syncthreads(). We then reinitialize/change some variables. Note
* that blockIdx.y and blockIdx.x get switched around for i0 and j0. This
* is explained further in the comments below, but in short it is because
* the transpose of some elements is in a different block than the original
* block. Then, in the last for loop, we write to output from shared memory.
* Note that we access output in a coalesced manner. We accessing
* the shared memory in a non-sequential manner; however, this does not
* affect performance. This is the benefit that shared memory gives us.
* Also note how the indices are switched in the write to output (we access
* different elements of sh_in and output). This, along with the block
* switching, is the transpose logic.
*
* Finally, note that, for threads in a warp, we read from shared memory
* in offsets of 65. This gives us a stride of 1 and 0 bank conflicts.
* More specifically, this ensures that when reading from the shared output,
* each read will access a separate bank. When writing to shared memory,
* we have sequential access within a warp, so bank conflicts are not a problem.
*/
__global__
void shmemTransposeKernel(const float *input, float *output, int n) {
// DONE: Modify transpose kernel to use shared memory. All global memory
// reads and writes should be coalesced. Minimize the number of shared
// memory bank conflicts (0 bank conflicts should be possible using
// padding). Again, comment on all sub-optimal accesses.
// 65 x 64 shared memory matrix.
__shared__ float sh_in[65 * 64];
// Initial variable initialization.
int i0 = threadIdx.x + 64 * blockIdx.x;
int j0 = 4 * threadIdx.y + 64 * blockIdx.y;
const int i1 = threadIdx.x;
int j1 = 4 * threadIdx.y;
int end_j0 = j0 + 4;
// Read in a 64 x 64 chunk from global memory into the 65 x 64 sized
// shared memory array (padded to fix bank conflicts).
for (; j0 < end_j0; j0++, j1++) {
sh_in[i1 + 65 * j1] = input[i0 + n * j0];
}
// Make sure the entire block has writen to shared memory.
__syncthreads();
// Flip the block indices for the global matrix indices, and reassign
// variables. We do this because the transpose of some elements is not
// necessarily in the same block as the element itself.
// No need to switch threadIdx.x, threadIdx.y - this transpose
// happens in the last for-loop.
i0 = threadIdx.x + 64 * blockIdx.y;
j0 = 4 * threadIdx.y + 64 * blockIdx.x;
j1 = 4 * threadIdx.y;
end_j0 = j0 + 4;
// Write the memory in shared memory to the global output array.
// We access the shared memory in non-sequential order - this is not
// a problem for shared memory when trying to achieve optimal performance.
// Note that we access the output array in sequential order.
for (; j0 < end_j0; j0++, j1++) {
output[i0 + n * j0] = sh_in[j1 + 65 * i1];
}
}
/*
* Besides the shared memory and padding as seen in shmemTransposeKernel,
* here are some other performance optimizations (mostly small ones) used
* in this method:
* - unroll the for-loops
* - get rid of unneccessary variables and reassignments (e.g. end_j0)
* - minimize instruction dependencies
*/
__global__
void optimalTransposeKernel(const float *input, float *output, int n) {
// DONE: This should be based off of your shmemTransposeKernel.
// Use any optimization tricks discussed so far to improve performance.
// Consider ILP and loop unrolling.
// 65 x 64 shared memory matrix.
__shared__ float sh_in[65 * 64];
// Initial variable initialization.
int i0 = threadIdx.x + 64 * blockIdx.x;
int j0 = 4 * threadIdx.y + 64 * blockIdx.y;
const int i1 = threadIdx.x;
int j1 = 4 * threadIdx.y;
// Unroll the first for-loop, which writes to shared memory.
sh_in[i1 + 65 * j1] = input[i0 + n * j0];
sh_in[i1 + 65 * (j1 + 1)] = input[i0 + n * (j0 + 1)];
sh_in[i1 + 65 * (j1 + 2)] = input[i0 + n * (j0 + 2)];
sh_in[i1 + 65 * (j1 + 3)] = input[i0 + n * (j0 + 3)];
// Make sure the entire block has writen to shared memory.
__syncthreads();
// Flip the block indices.
i0 = threadIdx.x + 64 * blockIdx.y;
j0 = 4 * threadIdx.y + 64 * blockIdx.x;
// Unroll the second for-loop, which writes to output.
output[i0 + n * j0] = sh_in[j1 + 65 * i1];
output[i0 + n * (j0 + 1)] = sh_in[(j1 + 1) + 65 * i1];
output[i0 + n * (j0 + 2)] = sh_in[(j1 + 2) + 65 * i1];
output[i0 + n * (j0 + 3)] = sh_in[(j1 + 3) + 65 * i1];
}
void cudaTranspose(
const float *d_input,
float *d_output,
int n,
TransposeImplementation type)
{
if (type == NAIVE) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
naiveTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n);
}
else if (type == SHMEM) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
shmemTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n);
}
else if (type == OPTIMAL) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
optimalTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n);
}
// Unknown type
else
assert(false);
}
|
f08c65c151a44c035091d721bc12056a824b2b8a.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2018~2020 XGBoost contributors
*/
#include <xgboost/logging.h>
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>
#include "device_helpers_hip.cuh"
#include "hist_util.h"
#include "hist_util_hip.cuh"
#include "math.h" // NOLINT
#include "quantile.h"
#include "categorical.h"
#include "xgboost/host_device_vector.h"
namespace xgboost {
namespace common {
constexpr float SketchContainer::kFactor;
namespace detail {
size_t RequiredSampleCutsPerColumn(int max_bins, size_t num_rows) {
double eps = 1.0 / (WQSketch::kFactor * max_bins);
size_t dummy_nlevel;
size_t num_cuts;
WQuantileSketch<bst_float, bst_float>::LimitSizeLevel(
num_rows, eps, &dummy_nlevel, &num_cuts);
return ::min(num_cuts, num_rows);
}
size_t RequiredSampleCuts(bst_row_t num_rows, bst_feature_t num_columns,
size_t max_bins, size_t nnz) {
auto per_column = RequiredSampleCutsPerColumn(max_bins, num_rows);
auto if_dense = num_columns * per_column;
auto result = ::min(nnz, if_dense);
return result;
}
size_t RequiredMemory(bst_row_t num_rows, bst_feature_t num_columns, size_t nnz,
size_t num_bins, bool with_weights) {
size_t peak = 0;
// 0. Allocate cut pointer in quantile container by increasing: n_columns + 1
size_t total = (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 1. Copy and sort: 2 * bytes_per_element * shape
total += BytesPerElement(with_weights) * num_rows * num_columns;
peak = ::max(peak, total);
// 2. Deallocate bytes_per_element * shape due to reusing memory in sort.
total -= BytesPerElement(with_weights) * num_rows * num_columns / 2;
// 3. Allocate colomn size scan by increasing: n_columns + 1
total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 4. Allocate cut pointer by increasing: n_columns + 1
total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 5. Allocate cuts: assuming rows is greater than bins: n_columns * limit_size
total += RequiredSampleCuts(num_rows, num_bins, num_bins, nnz) * sizeof(SketchEntry);
// 6. Deallocate copied entries by reducing: bytes_per_element * shape.
peak = ::max(peak, total);
total -= (BytesPerElement(with_weights) * num_rows * num_columns) / 2;
// 7. Deallocate column size scan.
peak = ::max(peak, total);
total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 8. Deallocate cut size scan.
total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 9. Allocate final cut values, min values, cut ptrs: ::min(rows, bins + 1) *
// n_columns + n_columns + n_columns + 1
total += ::min(num_rows, num_bins) * num_columns * sizeof(float);
total += num_columns *
sizeof(std::remove_reference_t<decltype(
std::declval<HistogramCuts>().MinValues())>::value_type);
total += (num_columns + 1) *
sizeof(std::remove_reference_t<decltype(
std::declval<HistogramCuts>().Ptrs())>::value_type);
peak = ::max(peak, total);
return peak;
}
size_t SketchBatchNumElements(size_t sketch_batch_num_elements,
bst_row_t num_rows, bst_feature_t columns,
size_t nnz, int device,
size_t num_cuts, bool has_weight) {
if (sketch_batch_num_elements == 0) {
auto required_memory = RequiredMemory(num_rows, columns, nnz, num_cuts, has_weight);
// use up to 80% of available space
auto avail = dh::AvailableMemory(device) * 0.8;
if (required_memory > avail) {
sketch_batch_num_elements = avail / BytesPerElement(has_weight);
} else {
sketch_batch_num_elements = ::min(num_rows * static_cast<size_t>(columns), nnz);
}
}
return sketch_batch_num_elements;
}
void SortByWeight(dh::device_vector<float>* weights,
dh::device_vector<Entry>* sorted_entries) {
// Sort both entries and wegihts.
dh::XGBDeviceAllocator<char> alloc;
thrust::sort_by_key(thrust::hip::par(alloc), sorted_entries->begin(),
sorted_entries->end(), weights->begin(),
detail::EntryCompareOp());
// Scan weights
dh::XGBCachingDeviceAllocator<char> caching;
thrust::inclusive_scan_by_key(thrust::hip::par(caching),
sorted_entries->begin(), sorted_entries->end(),
weights->begin(), weights->begin(),
[=] __device__(const Entry& a, const Entry& b) {
return a.index == b.index;
});
}
struct IsCatOp {
XGBOOST_DEVICE bool operator()(FeatureType ft) { return ft == FeatureType::kCategorical; }
};
void RemoveDuplicatedCategories(
int32_t device, MetaInfo const &info, Span<bst_row_t> d_cuts_ptr,
dh::device_vector<Entry> *p_sorted_entries,
dh::caching_device_vector<size_t>* p_column_sizes_scan) {
auto d_feature_types = info.feature_types.ConstDeviceSpan();
auto& column_sizes_scan = *p_column_sizes_scan;
if (!info.feature_types.Empty() &&
thrust::any_of(dh::tbegin(d_feature_types), dh::tend(d_feature_types),
IsCatOp{})) {
auto& sorted_entries = *p_sorted_entries;
// Removing duplicated entries in categorical features.
dh::caching_device_vector<size_t> new_column_scan(column_sizes_scan.size());
dh::SegmentedUnique(
column_sizes_scan.data().get(),
column_sizes_scan.data().get() + column_sizes_scan.size(),
sorted_entries.begin(), sorted_entries.end(),
new_column_scan.data().get(), sorted_entries.begin(),
[=] __device__(Entry const &l, Entry const &r) {
if (l.index == r.index) {
if (IsCat(d_feature_types, l.index)) {
return l.fvalue == r.fvalue;
}
}
return false;
});
// Renew the column scan and cut scan based on categorical data.
auto d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan);
dh::caching_device_vector<SketchContainer::OffsetT> new_cuts_size(
info.num_col_ + 1);
auto d_new_cuts_size = dh::ToSpan(new_cuts_size);
auto d_new_columns_ptr = dh::ToSpan(new_column_scan);
CHECK_EQ(new_column_scan.size(), new_cuts_size.size());
dh::LaunchN(device, new_column_scan.size(), [=] __device__(size_t idx) {
d_old_column_sizes_scan[idx] = d_new_columns_ptr[idx];
if (idx == d_new_columns_ptr.size() - 1) {
return;
}
if (IsCat(d_feature_types, idx)) {
// Cut size is the same as number of categories in input.
d_new_cuts_size[idx] =
d_new_columns_ptr[idx + 1] - d_new_columns_ptr[idx];
} else {
d_new_cuts_size[idx] = d_cuts_ptr[idx] - d_cuts_ptr[idx];
}
});
// Turn size into ptr.
thrust::exclusive_scan(thrust::device, new_cuts_size.cbegin(),
new_cuts_size.cend(), d_cuts_ptr.data());
}
}
} // namespace detail
// Sketch one unweighted batch of entries [begin, end) from a SparsePage:
// sort entries by column, compute per-column extents, de-duplicate
// categorical values, and push the result into `sketch_container`.
void ProcessBatch(int device, MetaInfo const &info, const SparsePage &page,
                  size_t begin, size_t end, SketchContainer *sketch_container,
                  int num_cuts_per_feature, size_t num_columns) {
  dh::XGBCachingDeviceAllocator<char> alloc;
  const auto& host_data = page.data.ConstHostVector();
  // Copy the requested slice of entries to the device.
  dh::device_vector<Entry> sorted_entries(host_data.begin() + begin,
                                          host_data.begin() + end);
  // Sort so that all entries of one feature become contiguous.
  thrust::sort(thrust::hip::par(alloc), sorted_entries.begin(),
               sorted_entries.end(), detail::EntryCompareOp());
  HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
  dh::caching_device_vector<size_t> column_sizes_scan;
  // NaN marker: every copied entry is treated as valid here.
  data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
  auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
      sorted_entries.data().get(),
      [] __device__(Entry const &e) -> data::COOTuple {
        return {0, e.index, e.fvalue};  // row_idx is not needed for scanning column size.
      });
  detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
                             batch_it, dummy_is_valid,
                             0, sorted_entries.size(),
                             &cuts_ptr, &column_sizes_scan);
  auto d_cuts_ptr = cuts_ptr.DeviceSpan();
  // Categorical features keep only one entry per distinct category.
  detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr, &sorted_entries,
                                     &column_sizes_scan);
  auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
  CHECK_EQ(d_cuts_ptr.size(), column_sizes_scan.size());
  // add cuts into sketches
  sketch_container->Push(dh::ToSpan(sorted_entries), dh::ToSpan(column_sizes_scan),
                         d_cuts_ptr, h_cuts_ptr.back());
  // Release device memory eagerly so the next batch can reuse it.
  sorted_entries.clear();
  sorted_entries.shrink_to_fit();
  CHECK_EQ(sorted_entries.capacity(), 0);
  CHECK_NE(cuts_ptr.Size(), 0);
}
// Weighted variant of ProcessBatch: each entry carries a weight taken either
// from its query group (is_ranking) or from its row, then entries are sorted
// together with their weights before being pushed into the sketch.
void ProcessWeightedBatch(int device, const SparsePage& page,
                          Span<const float> weights, size_t begin, size_t end,
                          SketchContainer* sketch_container, int num_cuts_per_feature,
                          size_t num_columns,
                          bool is_ranking, Span<bst_group_t const> d_group_ptr) {
  dh::XGBCachingDeviceAllocator<char> alloc;
  const auto& host_data = page.data.ConstHostVector();
  dh::device_vector<Entry> sorted_entries(host_data.begin() + begin,
                                          host_data.begin() + end);
  // Binary search to assign weights to each element
  dh::device_vector<float> temp_weights(sorted_entries.size());
  auto d_temp_weights = temp_weights.data().get();
  page.offset.SetDevice(device);
  auto row_ptrs = page.offset.ConstDeviceSpan();
  size_t base_rowid = page.base_rowid;
  if (is_ranking) {
    CHECK_GE(d_group_ptr.size(), 2)
        << "Must have at least 1 group for ranking.";
    CHECK_EQ(weights.size(), d_group_ptr.size() - 1)
        << "Weight size should equal to number of groups.";
    // Map entry -> row -> query group, then fetch the group weight.
    dh::LaunchN(device, temp_weights.size(), [=] __device__(size_t idx) {
      size_t element_idx = idx + begin;
      size_t ridx = dh::SegmentId(row_ptrs, element_idx);
      bst_group_t group_idx = dh::SegmentId(d_group_ptr, ridx + base_rowid);
      d_temp_weights[idx] = weights[group_idx];
    });
  } else {
    // Map entry -> row, then fetch the per-row weight.
    dh::LaunchN(device, temp_weights.size(), [=] __device__(size_t idx) {
      size_t element_idx = idx + begin;
      size_t ridx = dh::SegmentId(row_ptrs, element_idx);
      d_temp_weights[idx] = weights[ridx + base_rowid];
    });
  }
  // Sort entries by column and prefix-sum the weights per column.
  detail::SortByWeight(&temp_weights, &sorted_entries);
  HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
  dh::caching_device_vector<size_t> column_sizes_scan;
  data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
  auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
      sorted_entries.data().get(),
      [] __device__(Entry const &e) -> data::COOTuple {
        return {0, e.index, e.fvalue};  // row_idx is not needed for scanning column size.
      });
  detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
                             batch_it, dummy_is_valid,
                             0, sorted_entries.size(),
                             &cuts_ptr, &column_sizes_scan);
  auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
  auto d_cuts_ptr = cuts_ptr.DeviceSpan();
  // Extract cuts
  sketch_container->Push(dh::ToSpan(sorted_entries),
                         dh::ToSpan(column_sizes_scan), d_cuts_ptr,
                         h_cuts_ptr.back(), dh::ToSpan(temp_weights));
  // Release the batch buffer before the next iteration.
  sorted_entries.clear();
  sorted_entries.shrink_to_fit();
}
// Build histogram cuts for a whole DMatrix on the GPU, batching the sketch
// over entries so the working set fits in device memory.
HistogramCuts DeviceSketch(int device, DMatrix* dmat, int max_bins,
                           size_t sketch_batch_num_elements) {
  dmat->Info().feature_types.SetDevice(device);
  dmat->Info().feature_types.ConstDevicePointer();  // pull to device early
  // Configure batch size based on available memory
  bool has_weights = dmat->Info().weights_.Size() > 0;
  size_t num_cuts_per_feature =
      detail::RequiredSampleCutsPerColumn(max_bins, dmat->Info().num_row_);
  sketch_batch_num_elements = detail::SketchBatchNumElements(
      sketch_batch_num_elements,
      dmat->Info().num_row_,
      dmat->Info().num_col_,
      dmat->Info().num_nonzero_,
      device, num_cuts_per_feature, has_weights);
  HistogramCuts cuts;
  SketchContainer sketch_container(dmat->Info().feature_types, max_bins, dmat->Info().num_col_,
                                   dmat->Info().num_row_, device);
  dmat->Info().weights_.SetDevice(device);
  for (const auto& batch : dmat->GetBatches<SparsePage>()) {
    size_t batch_nnz = batch.data.Size();
    auto const& info = dmat->Info();
    // Walk the page in chunks of sketch_batch_num_elements entries.
    for (auto begin = 0ull; begin < batch_nnz; begin += sketch_batch_num_elements) {
      // NOTE(review): `::min` looks like a hipify artifact of `std::min`
      // (cf. the CUDA original) — confirm it resolves on all toolchains.
      size_t end = ::min(batch_nnz, size_t(begin + sketch_batch_num_elements));
      if (has_weights) {
        bool is_ranking = HostSketchContainer::UseGroup(dmat->Info());
        dh::caching_device_vector<uint32_t> groups(info.group_ptr_.cbegin(),
                                                   info.group_ptr_.cend());
        ProcessWeightedBatch(
            device, batch, dmat->Info().weights_.ConstDeviceSpan(), begin, end,
            &sketch_container,
            num_cuts_per_feature,
            dmat->Info().num_col_,
            is_ranking, dh::ToSpan(groups));
      } else {
        ProcessBatch(device, dmat->Info(), batch, begin, end, &sketch_container,
                     num_cuts_per_feature, dmat->Info().num_col_);
      }
    }
  }
  sketch_container.MakeCuts(&cuts);
  return cuts;
}
} // namespace common
} // namespace xgboost
| f08c65c151a44c035091d721bc12056a824b2b8a.cu | /*!
* Copyright 2018~2020 XGBoost contributors
*/
#include <xgboost/logging.h>
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>
#include "device_helpers.cuh"
#include "hist_util.h"
#include "hist_util.cuh"
#include "math.h" // NOLINT
#include "quantile.h"
#include "categorical.h"
#include "xgboost/host_device_vector.h"
namespace xgboost {
namespace common {
constexpr float SketchContainer::kFactor;
namespace detail {
// Number of sample cuts needed per column to reach the sketch accuracy
// implied by `max_bins`, capped by the number of rows (a column can never
// contribute more cuts than it has values).
size_t RequiredSampleCutsPerColumn(int max_bins, size_t num_rows) {
  const double eps = 1.0 / (WQSketch::kFactor * max_bins);
  size_t level_count = 0;  // required output of LimitSizeLevel, unused here
  size_t cuts_needed = 0;
  WQuantileSketch<bst_float, bst_float>::LimitSizeLevel(num_rows, eps,
                                                        &level_count,
                                                        &cuts_needed);
  return cuts_needed < num_rows ? cuts_needed : num_rows;
}
// Total number of sample cuts for the whole matrix: the dense upper bound
// (columns x per-column cuts) can never exceed the number of non-zeros.
size_t RequiredSampleCuts(bst_row_t num_rows, bst_feature_t num_columns,
                          size_t max_bins, size_t nnz) {
  size_t const dense_bound =
      num_columns * RequiredSampleCutsPerColumn(max_bins, num_rows);
  return std::min(nnz, dense_bound);
}
// Estimate peak device memory (bytes) needed to sketch the whole matrix in
// one pass, by replaying the allocate/free sequence of the sketching code and
// tracking the running maximum.
size_t RequiredMemory(bst_row_t num_rows, bst_feature_t num_columns, size_t nnz,
                      size_t num_bins, bool with_weights) {
  size_t peak = 0;
  // 0. Allocate cut pointer in quantile container by increasing: n_columns + 1
  size_t total = (num_columns + 1) * sizeof(SketchContainer::OffsetT);
  // 1. Copy and sort: 2 * bytes_per_element * shape
  total += BytesPerElement(with_weights) * num_rows * num_columns;
  peak = std::max(peak, total);
  // 2. Deallocate bytes_per_element * shape due to reusing memory in sort.
  total -= BytesPerElement(with_weights) * num_rows * num_columns / 2;
  // 3. Allocate colomn size scan by increasing: n_columns + 1
  total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
  // 4. Allocate cut pointer by increasing: n_columns + 1
  total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
  // 5. Allocate cuts: assuming rows is greater than bins: n_columns * limit_size
  // NOTE(review): num_bins is passed as RequiredSampleCuts' num_columns
  // argument here (signature is num_rows, num_columns, max_bins, nnz) —
  // looks like it should be `num_columns`; confirm against upstream.
  total += RequiredSampleCuts(num_rows, num_bins, num_bins, nnz) * sizeof(SketchEntry);
  // 6. Deallocate copied entries by reducing: bytes_per_element * shape.
  peak = std::max(peak, total);
  total -= (BytesPerElement(with_weights) * num_rows * num_columns) / 2;
  // 7. Deallocate column size scan.
  peak = std::max(peak, total);
  total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
  // 8. Deallocate cut size scan.
  total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
  // 9. Allocate final cut values, min values, cut ptrs: std::min(rows, bins + 1) *
  //    n_columns + n_columns + n_columns + 1
  total += std::min(num_rows, num_bins) * num_columns * sizeof(float);
  total += num_columns *
           sizeof(std::remove_reference_t<decltype(
                      std::declval<HistogramCuts>().MinValues())>::value_type);
  total += (num_columns + 1) *
           sizeof(std::remove_reference_t<decltype(
                      std::declval<HistogramCuts>().Ptrs())>::value_type);
  peak = std::max(peak, total);
  return peak;
}
// Decide how many entries to sketch per batch. A non-zero caller-supplied
// value is honoured as-is; otherwise the batch is sized so the sketch fits
// inside ~80% of the currently free device memory.
size_t SketchBatchNumElements(size_t sketch_batch_num_elements,
                              bst_row_t num_rows, bst_feature_t columns,
                              size_t nnz, int device,
                              size_t num_cuts, bool has_weight) {
  if (sketch_batch_num_elements != 0) {
    return sketch_batch_num_elements;  // explicit user choice wins
  }
  // use up to 80% of available space
  double const budget = dh::AvailableMemory(device) * 0.8;
  auto const needed =
      RequiredMemory(num_rows, columns, nnz, num_cuts, has_weight);
  if (needed > budget) {
    // Whole matrix does not fit: cap the batch by the memory budget.
    return budget / BytesPerElement(has_weight);
  }
  // Everything fits: process all entries in a single batch.
  return std::min(num_rows * static_cast<size_t>(columns), nnz);
}
// Sort `sorted_entries` by column (with weights permuted alongside), then
// replace each weight by the running prefix sum of weights within its column.
void SortByWeight(dh::device_vector<float>* weights,
                  dh::device_vector<Entry>* sorted_entries) {
  // Sort both entries and weights.
  dh::XGBDeviceAllocator<char> alloc;
  thrust::sort_by_key(thrust::cuda::par(alloc), sorted_entries->begin(),
                      sorted_entries->end(), weights->begin(),
                      detail::EntryCompareOp());
  // Scan weights
  dh::XGBCachingDeviceAllocator<char> caching;
  // Key equality on column index restarts the scan at each column boundary.
  thrust::inclusive_scan_by_key(thrust::cuda::par(caching),
                                sorted_entries->begin(), sorted_entries->end(),
                                weights->begin(), weights->begin(),
                                [=] __device__(const Entry& a, const Entry& b) {
                                  return a.index == b.index;
                                });
}
// Predicate functor: true when a feature type marks a categorical feature.
struct IsCatOp {
  XGBOOST_DEVICE bool operator()(FeatureType ft) { return ft == FeatureType::kCategorical; }
};
// For categorical features, collapse repeated (feature, value) entries in the
// sorted entry buffer to one entry per distinct category, then rewrite the
// column-size scan and the cut-size pointers to match the reduced data.
// No-op when the matrix has no categorical features.
void RemoveDuplicatedCategories(
    int32_t device, MetaInfo const &info, Span<bst_row_t> d_cuts_ptr,
    dh::device_vector<Entry> *p_sorted_entries,
    dh::caching_device_vector<size_t>* p_column_sizes_scan) {
  auto d_feature_types = info.feature_types.ConstDeviceSpan();
  auto& column_sizes_scan = *p_column_sizes_scan;
  if (!info.feature_types.Empty() &&
      thrust::any_of(dh::tbegin(d_feature_types), dh::tend(d_feature_types),
                     IsCatOp{})) {
    auto& sorted_entries = *p_sorted_entries;
    // Removing duplicated entries in categorical features.
    dh::caching_device_vector<size_t> new_column_scan(column_sizes_scan.size());
    dh::SegmentedUnique(
        column_sizes_scan.data().get(),
        column_sizes_scan.data().get() + column_sizes_scan.size(),
        sorted_entries.begin(), sorted_entries.end(),
        new_column_scan.data().get(), sorted_entries.begin(),
        [=] __device__(Entry const &l, Entry const &r) {
          // Entries are duplicates only within the same categorical feature.
          if (l.index == r.index) {
            if (IsCat(d_feature_types, l.index)) {
              return l.fvalue == r.fvalue;
            }
          }
          return false;
        });
    // Renew the column scan and cut scan based on categorical data.
    auto d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan);
    dh::caching_device_vector<SketchContainer::OffsetT> new_cuts_size(
        info.num_col_ + 1);
    auto d_new_cuts_size = dh::ToSpan(new_cuts_size);
    auto d_new_columns_ptr = dh::ToSpan(new_column_scan);
    CHECK_EQ(new_column_scan.size(), new_cuts_size.size());
    dh::LaunchN(device, new_column_scan.size(), [=] __device__(size_t idx) {
      d_old_column_sizes_scan[idx] = d_new_columns_ptr[idx];
      if (idx == d_new_columns_ptr.size() - 1) {
        return;
      }
      if (IsCat(d_feature_types, idx)) {
        // Cut size is the same as number of categories in input.
        d_new_cuts_size[idx] =
            d_new_columns_ptr[idx + 1] - d_new_columns_ptr[idx];
      } else {
        // Bug fix: previously `d_cuts_ptr[idx] - d_cuts_ptr[idx]`, which is
        // always 0 and would zero out the cut count of every numerical
        // feature. The cut size of a numerical column is the span between
        // consecutive cut pointers.
        d_new_cuts_size[idx] = d_cuts_ptr[idx + 1] - d_cuts_ptr[idx];
      }
    });
    // Turn size into ptr.
    thrust::exclusive_scan(thrust::device, new_cuts_size.cbegin(),
                           new_cuts_size.cend(), d_cuts_ptr.data());
  }
}
} // namespace detail
// Sketch one unweighted batch of entries [begin, end) from a SparsePage:
// sort entries by column, compute per-column extents, de-duplicate
// categorical values, and push the result into `sketch_container`.
void ProcessBatch(int device, MetaInfo const &info, const SparsePage &page,
                  size_t begin, size_t end, SketchContainer *sketch_container,
                  int num_cuts_per_feature, size_t num_columns) {
  dh::XGBCachingDeviceAllocator<char> alloc;
  const auto& host_data = page.data.ConstHostVector();
  // Copy the requested slice of entries to the device.
  dh::device_vector<Entry> sorted_entries(host_data.begin() + begin,
                                          host_data.begin() + end);
  // Sort so that all entries of one feature become contiguous.
  thrust::sort(thrust::cuda::par(alloc), sorted_entries.begin(),
               sorted_entries.end(), detail::EntryCompareOp());
  HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
  dh::caching_device_vector<size_t> column_sizes_scan;
  // NaN marker: every copied entry is treated as valid here.
  data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
  auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
      sorted_entries.data().get(),
      [] __device__(Entry const &e) -> data::COOTuple {
        return {0, e.index, e.fvalue};  // row_idx is not needed for scanning column size.
      });
  detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
                             batch_it, dummy_is_valid,
                             0, sorted_entries.size(),
                             &cuts_ptr, &column_sizes_scan);
  auto d_cuts_ptr = cuts_ptr.DeviceSpan();
  // Categorical features keep only one entry per distinct category.
  detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr, &sorted_entries,
                                     &column_sizes_scan);
  auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
  CHECK_EQ(d_cuts_ptr.size(), column_sizes_scan.size());
  // add cuts into sketches
  sketch_container->Push(dh::ToSpan(sorted_entries), dh::ToSpan(column_sizes_scan),
                         d_cuts_ptr, h_cuts_ptr.back());
  // Release device memory eagerly so the next batch can reuse it.
  sorted_entries.clear();
  sorted_entries.shrink_to_fit();
  CHECK_EQ(sorted_entries.capacity(), 0);
  CHECK_NE(cuts_ptr.Size(), 0);
}
// Weighted variant of ProcessBatch: each entry carries a weight taken either
// from its query group (is_ranking) or from its row, then entries are sorted
// together with their weights before being pushed into the sketch.
void ProcessWeightedBatch(int device, const SparsePage& page,
                          Span<const float> weights, size_t begin, size_t end,
                          SketchContainer* sketch_container, int num_cuts_per_feature,
                          size_t num_columns,
                          bool is_ranking, Span<bst_group_t const> d_group_ptr) {
  dh::XGBCachingDeviceAllocator<char> alloc;
  const auto& host_data = page.data.ConstHostVector();
  dh::device_vector<Entry> sorted_entries(host_data.begin() + begin,
                                          host_data.begin() + end);
  // Binary search to assign weights to each element
  dh::device_vector<float> temp_weights(sorted_entries.size());
  auto d_temp_weights = temp_weights.data().get();
  page.offset.SetDevice(device);
  auto row_ptrs = page.offset.ConstDeviceSpan();
  size_t base_rowid = page.base_rowid;
  if (is_ranking) {
    CHECK_GE(d_group_ptr.size(), 2)
        << "Must have at least 1 group for ranking.";
    CHECK_EQ(weights.size(), d_group_ptr.size() - 1)
        << "Weight size should equal to number of groups.";
    // Map entry -> row -> query group, then fetch the group weight.
    dh::LaunchN(device, temp_weights.size(), [=] __device__(size_t idx) {
      size_t element_idx = idx + begin;
      size_t ridx = dh::SegmentId(row_ptrs, element_idx);
      bst_group_t group_idx = dh::SegmentId(d_group_ptr, ridx + base_rowid);
      d_temp_weights[idx] = weights[group_idx];
    });
  } else {
    // Map entry -> row, then fetch the per-row weight.
    dh::LaunchN(device, temp_weights.size(), [=] __device__(size_t idx) {
      size_t element_idx = idx + begin;
      size_t ridx = dh::SegmentId(row_ptrs, element_idx);
      d_temp_weights[idx] = weights[ridx + base_rowid];
    });
  }
  // Sort entries by column and prefix-sum the weights per column.
  detail::SortByWeight(&temp_weights, &sorted_entries);
  HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
  dh::caching_device_vector<size_t> column_sizes_scan;
  data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
  auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
      sorted_entries.data().get(),
      [] __device__(Entry const &e) -> data::COOTuple {
        return {0, e.index, e.fvalue};  // row_idx is not needed for scanning column size.
      });
  detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
                             batch_it, dummy_is_valid,
                             0, sorted_entries.size(),
                             &cuts_ptr, &column_sizes_scan);
  auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
  auto d_cuts_ptr = cuts_ptr.DeviceSpan();
  // Extract cuts
  sketch_container->Push(dh::ToSpan(sorted_entries),
                         dh::ToSpan(column_sizes_scan), d_cuts_ptr,
                         h_cuts_ptr.back(), dh::ToSpan(temp_weights));
  // Release the batch buffer before the next iteration.
  sorted_entries.clear();
  sorted_entries.shrink_to_fit();
}
// Build histogram cuts for a whole DMatrix on the GPU, batching the sketch
// over entries so the working set fits in device memory.
HistogramCuts DeviceSketch(int device, DMatrix* dmat, int max_bins,
                           size_t sketch_batch_num_elements) {
  dmat->Info().feature_types.SetDevice(device);
  dmat->Info().feature_types.ConstDevicePointer();  // pull to device early
  // Configure batch size based on available memory
  bool has_weights = dmat->Info().weights_.Size() > 0;
  size_t num_cuts_per_feature =
      detail::RequiredSampleCutsPerColumn(max_bins, dmat->Info().num_row_);
  sketch_batch_num_elements = detail::SketchBatchNumElements(
      sketch_batch_num_elements,
      dmat->Info().num_row_,
      dmat->Info().num_col_,
      dmat->Info().num_nonzero_,
      device, num_cuts_per_feature, has_weights);
  HistogramCuts cuts;
  SketchContainer sketch_container(dmat->Info().feature_types, max_bins, dmat->Info().num_col_,
                                   dmat->Info().num_row_, device);
  dmat->Info().weights_.SetDevice(device);
  for (const auto& batch : dmat->GetBatches<SparsePage>()) {
    size_t batch_nnz = batch.data.Size();
    auto const& info = dmat->Info();
    // Walk the page in chunks of sketch_batch_num_elements entries.
    for (auto begin = 0ull; begin < batch_nnz; begin += sketch_batch_num_elements) {
      size_t end = std::min(batch_nnz, size_t(begin + sketch_batch_num_elements));
      if (has_weights) {
        bool is_ranking = HostSketchContainer::UseGroup(dmat->Info());
        dh::caching_device_vector<uint32_t> groups(info.group_ptr_.cbegin(),
                                                   info.group_ptr_.cend());
        ProcessWeightedBatch(
            device, batch, dmat->Info().weights_.ConstDeviceSpan(), begin, end,
            &sketch_container,
            num_cuts_per_feature,
            dmat->Info().num_col_,
            is_ranking, dh::ToSpan(groups));
      } else {
        ProcessBatch(device, dmat->Info(), batch, begin, end, &sketch_container,
                     num_cuts_per_feature, dmat->Info().num_col_);
      }
    }
  }
  sketch_container.MakeCuts(&cuts);
  return cuts;
}
} // namespace common
} // namespace xgboost
|
7565a0a571ef5d7d431b283164b5a62fa244304e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include "kernel.h"
#include "dev_array.h"
#include <math.h>
using namespace std;
// Driver: multiply two NxN matrices on the GPU (via dev_array +
// matrixMultiplication), time the kernel with HIP events, then verify the
// result against a CPU reference implementation.
int main()
{
    // Perform matrix multiplication C = A*B
    // where A, B and C are NxN matrices
    int N = 3000;
    int SIZE = N*N;
    hipEvent_t start, stop;
    float elapsedTime;
    // Allocate memory on the host
    vector<double> h_A(SIZE);
    vector<double> h_B(SIZE);
    vector<double> h_C(SIZE);
    // Initialize matrices on the host
    for (int i=0; i<N; i++){
        for (int j=0; j<N; j++){
            h_A[i*N+j] = sin(i);
            h_B[i*N+j] = cos(j);
        }
    }
    // Allocate memory on the device
    dev_array<double> d_A(SIZE);
    dev_array<double> d_B(SIZE);
    dev_array<double> d_C(SIZE);
    d_A.set(&h_A[0], SIZE);
    d_B.set(&h_B[0], SIZE);
    // Time the GPU multiplication with events bracketing the launch.
    hipEventCreate( &start );
    hipEventCreate( &stop );
    hipEventRecord( start, 0 );
    matrixMultiplication(d_A.getData(), d_B.getData(), d_C.getData(), N);
    hipDeviceSynchronize();
    hipEventRecord( stop, 0 );
    hipEventSynchronize( stop );
    hipEventElapsedTime( &elapsedTime, start, stop );
    cout << "Time taken: "<< elapsedTime << "ms" << endl;
    d_C.get(&h_C[0], SIZE);
    hipDeviceSynchronize();
    double *cpu_C;
    cpu_C=new double[SIZE];
    // Now do the matrix multiplication on the CPU
    double sum;
    for (int row=0; row<N; row++){
        for (int col=0; col<N; col++){
            sum = 0.f;
            for (int n=0; n<N; n++){
                sum += h_A[row*N+n]*h_B[n*N+col];
            }
            cpu_C[row*N+col] = sum;
        }
    }
    double err = 0;
    // Check the result and make sure it is correct
    // NOTE(review): this sums *signed* differences, so positive and negative
    // deviations can cancel; fabs() would make the check meaningful.
    for (int ROW=0; ROW < N; ROW++){
        for (int COL=0; COL < N; COL++){
            err += cpu_C[ROW * N + COL] - h_C[ROW * N + COL];
        }
    }
    cout << "Error: " << err << endl;
    // NOTE(review): cpu_C is never delete[]'d and the events are never
    // destroyed; harmless at process exit but worth fixing.
    return 0;
}
| 7565a0a571ef5d7d431b283164b5a62fa244304e.cu | #include <iostream>
#include <vector>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
#include "kernel.h"
#include "dev_array.h"
#include <math.h>
using namespace std;
// Driver: multiply two NxN matrices on the GPU (via dev_array +
// matrixMultiplication), time the kernel with CUDA events, then verify the
// result against a CPU reference implementation.
int main()
{
    // Perform matrix multiplication C = A*B
    // where A, B and C are NxN matrices
    int N = 3000;
    int SIZE = N*N;
    cudaEvent_t start, stop;
    float elapsedTime;
    // Allocate memory on the host
    vector<double> h_A(SIZE);
    vector<double> h_B(SIZE);
    vector<double> h_C(SIZE);
    // Initialize matrices on the host
    for (int i=0; i<N; i++){
        for (int j=0; j<N; j++){
            h_A[i*N+j] = sin(i);
            h_B[i*N+j] = cos(j);
        }
    }
    // Allocate memory on the device
    dev_array<double> d_A(SIZE);
    dev_array<double> d_B(SIZE);
    dev_array<double> d_C(SIZE);
    d_A.set(&h_A[0], SIZE);
    d_B.set(&h_B[0], SIZE);
    // Time the GPU multiplication with events bracketing the launch.
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    cudaEventRecord( start, 0 );
    matrixMultiplication(d_A.getData(), d_B.getData(), d_C.getData(), N);
    cudaDeviceSynchronize();
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &elapsedTime, start, stop );
    cout << "Time taken: "<< elapsedTime << "ms" << endl;
    cudaEventDestroy( start );   // bug fix: events were never destroyed
    cudaEventDestroy( stop );
    d_C.get(&h_C[0], SIZE);
    cudaDeviceSynchronize();
    double *cpu_C;
    cpu_C=new double[SIZE];
    // Now do the matrix multiplication on the CPU
    double sum;
    for (int row=0; row<N; row++){
        for (int col=0; col<N; col++){
            sum = 0.f;
            for (int n=0; n<N; n++){
                sum += h_A[row*N+n]*h_B[n*N+col];
            }
            cpu_C[row*N+col] = sum;
        }
    }
    double err = 0;
    // Check the result and make sure it is correct.
    // Bug fix: accumulate ABSOLUTE differences — the previous signed sum let
    // positive and negative deviations cancel, hiding real errors.
    for (int ROW=0; ROW < N; ROW++){
        for (int COL=0; COL < N; COL++){
            err += fabs(cpu_C[ROW * N + COL] - h_C[ROW * N + COL]);
        }
    }
    cout << "Error: " << err << endl;
    delete[] cpu_C;  // bug fix: was leaked
    return 0;
}
e575692ca45f3d2a2ce48d193112ace2e343e8e8.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
// Elementwise atan2(a, b) over the iterator's operands, dispatched for
// float, double, Half and BFloat16; scalar operands are handled by
// gpu_kernel_with_scalars.
void atan2_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half, at::ScalarType::BFloat16,
      iter.common_dtype(), "atan2_cuda",
      [&]() {
        gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
          return ::atan2(a, b);
        });
      });
}
// Elementwise hypot(a, b) over the iterator's operands, dispatched for
// float, double, Half and BFloat16. Uses the symmetric opmath variant since
// hypot(a, b) == hypot(b, a).
void hypot_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half, at::ScalarType::BFloat16,
      iter.common_dtype(), "hypot_cuda",
      [&]() {
        opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(
            iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
              return ::hypot(a, b);
            });
      });
}
REGISTER_DISPATCH(atan2_stub, &atan2_kernel_cuda);
REGISTER_DISPATCH(hypot_stub, &hypot_kernel_cuda);
}} // namespace at::native
| e575692ca45f3d2a2ce48d193112ace2e343e8e8.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
// Elementwise atan2(a, b) over the iterator's operands, dispatched for
// float, double, Half and BFloat16; scalar operands are handled by
// gpu_kernel_with_scalars.
void atan2_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half, at::ScalarType::BFloat16,
      iter.common_dtype(), "atan2_cuda",
      [&]() {
        gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
          return ::atan2(a, b);
        });
      });
}
// Elementwise hypot(a, b) over the iterator's operands, dispatched for
// float, double, Half and BFloat16. Uses the symmetric opmath variant since
// hypot(a, b) == hypot(b, a).
void hypot_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half, at::ScalarType::BFloat16,
      iter.common_dtype(), "hypot_cuda",
      [&]() {
        opmath_symmetric_gpu_kernel_with_scalars<scalar_t>(
            iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
              return ::hypot(a, b);
            });
      });
}
}} // namespace at::native
|
34d5c0b02d96754c8ef7eb55e505c5c814c65cd9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2021 by Contributors
* @file array/cuda/rowwise_sampling.cu
* @brief uniform rowwise sampling
*/
#include <hiprand/hiprand_kernel.h>
#include <dgl/random.h>
#include <dgl/runtime/device_api.h>
#include <dgl/runtime/tensordispatch.h>
#include <numeric>
#include "../../array/cuda/atomic.cuh"
#include "../../runtime/cuda/cuda_common.h"
#include "./dgl_cub.cuh"
#include "./utils.h"
using namespace dgl::cuda;
using namespace dgl::aten::cuda;
using TensorDispatcher = dgl::runtime::TensorDispatcher;
namespace dgl {
namespace aten {
namespace impl {
namespace {
constexpr int BLOCK_SIZE = 128;
/**
* @brief Compute the size of each row in the sampled CSR, without replacement.
*
* @tparam IdType The type of node and edge indexes.
* @param num_picks The number of non-zero entries to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The index where each row's edges start.
* @param out_deg The size of each row in the sampled matrix, as indexed by
* `in_rows` (output).
*/
template <typename IdType>
__global__ void _CSRRowWiseSampleDegreeKernel(
    const int64_t num_picks, const int64_t num_rows,
    const IdType* const in_rows, const IdType* const in_ptr,
    IdType* const out_deg) {
  // One thread per sampled row.
  const int tIdx = threadIdx.x + blockIdx.x * blockDim.x;
  if (tIdx < num_rows) {
    // Fix: use 64-bit locals. `in_rows[tIdx]` is an IdType row id that can
    // exceed INT_MAX on large graphs; the replace-variant kernel already
    // uses int64_t here.
    const int64_t in_row = in_rows[tIdx];
    const int64_t out_row = tIdx;
    // Degree is capped at num_picks (sampling without replacement).
    out_deg[out_row] = min(
        static_cast<IdType>(num_picks), in_ptr[in_row + 1] - in_ptr[in_row]);
    if (out_row == num_rows - 1) {
      // make the prefixsum work
      out_deg[num_rows] = 0;
    }
  }
}
/**
* @brief Compute the size of each row in the sampled CSR, with replacement.
*
* @tparam IdType The type of node and edge indexes.
* @param num_picks The number of non-zero entries to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The index where each row's edges start.
* @param out_deg The size of each row in the sampled matrix, as indexed by
* `in_rows` (output).
*/
template <typename IdType>
__global__ void _CSRRowWiseSampleDegreeReplaceKernel(
    const int64_t num_picks, const int64_t num_rows,
    const IdType* const in_rows, const IdType* const in_ptr,
    IdType* const out_deg) {
  // One thread per sampled row.
  const int tIdx = threadIdx.x + blockIdx.x * blockDim.x;
  if (tIdx < num_rows) {
    const int64_t in_row = in_rows[tIdx];
    const int64_t out_row = tIdx;
    // With replacement: a non-empty row always yields exactly num_picks
    // samples; an empty row yields none.
    if (in_ptr[in_row + 1] - in_ptr[in_row] == 0) {
      out_deg[out_row] = 0;
    } else {
      out_deg[out_row] = static_cast<IdType>(num_picks);
    }
    if (out_row == num_rows - 1) {
      // make the prefixsum work
      out_deg[num_rows] = 0;
    }
  }
}
/**
* @brief Perform row-wise uniform sampling on a CSR matrix,
* and generate a COO matrix, without replacement.
*
* @tparam IdType The ID type used for matrices.
* @tparam TILE_SIZE The number of rows covered by each threadblock.
* @param rand_seed The random seed to use.
* @param num_picks The number of non-zeros to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The indptr array of the input CSR.
* @param in_index The indices array of the input CSR.
* @param data The data array of the input CSR.
* @param out_ptr The offset to write each row to in the output COO.
* @param out_rows The rows of the output COO (output).
* @param out_cols The columns of the output COO (output).
* @param out_idxs The data array of the output COO (output).
*/
template <typename IdType, int TILE_SIZE>
__global__ void _CSRRowWiseSampleUniformKernel(
    const uint64_t rand_seed, const int64_t num_picks, const int64_t num_rows,
    const IdType* const in_rows, const IdType* const in_ptr,
    const IdType* const in_index, const IdType* const data,
    const IdType* const out_ptr, IdType* const out_rows, IdType* const out_cols,
    IdType* const out_idxs) {
  // we assign one warp per row
  // NOTE(review): despite the comment above, the whole thread block
  // (BLOCK_SIZE threads) cooperates on each row — the loops below stride
  // by BLOCK_SIZE and the asserts/barriers are block-wide.
  assert(blockDim.x == BLOCK_SIZE);
  int64_t out_row = blockIdx.x * TILE_SIZE;
  const int64_t last_row =
      min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
  // Per-thread Philox stream, seeded per (block, thread) so blocks draw
  // independent random sequences.
  hiprandStatePhilox4_32_10_t rng;
  hiprand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng);
  while (out_row < last_row) {
    const int64_t row = in_rows[out_row];
    const int64_t in_row_start = in_ptr[row];
    const int64_t deg = in_ptr[row + 1] - in_row_start;
    const int64_t out_row_start = out_ptr[out_row];
    if (deg <= num_picks) {
      // just copy row when there is not enough nodes to sample.
      for (int idx = threadIdx.x; idx < deg; idx += BLOCK_SIZE) {
        const IdType in_idx = in_row_start + idx;
        out_rows[out_row_start + idx] = row;
        out_cols[out_row_start + idx] = in_index[in_idx];
        out_idxs[out_row_start + idx] = data ? data[in_idx] : in_idx;
      }
    } else {
      // generate permutation list via reservoir algorithm
      // Seed the reservoir with the first num_picks positions.
      for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) {
        out_idxs[out_row_start + idx] = idx;
      }
      __syncthreads();  // reservoir must be fully initialized before updates
      for (int idx = num_picks + threadIdx.x; idx < deg; idx += BLOCK_SIZE) {
        const int num = hiprand(&rng) % (idx + 1);
        if (num < num_picks) {
          // use max so as to achieve the replacement order the serial
          // algorithm would have
          AtomicMax(out_idxs + out_row_start + num, idx);
        }
      }
      __syncthreads();  // all reservoir updates must land before readback
      // copy permutation over
      for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) {
        const IdType perm_idx = out_idxs[out_row_start + idx] + in_row_start;
        out_rows[out_row_start + idx] = row;
        out_cols[out_row_start + idx] = in_index[perm_idx];
        out_idxs[out_row_start + idx] = data ? data[perm_idx] : perm_idx;
      }
    }
    out_row += 1;
  }
}
/**
* @brief Perform row-wise uniform sampling on a CSR matrix,
* and generate a COO matrix, with replacement.
*
* @tparam IdType The ID type used for matrices.
* @tparam TILE_SIZE The number of rows covered by each threadblock.
* @param rand_seed The random seed to use.
* @param num_picks The number of non-zeros to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The indptr array of the input CSR.
* @param in_index The indices array of the input CSR.
* @param data The data array of the input CSR.
* @param out_ptr The offset to write each row to in the output COO.
* @param out_rows The rows of the output COO (output).
* @param out_cols The columns of the output COO (output).
* @param out_idxs The data array of the output COO (output).
*/
template <typename IdType, int TILE_SIZE>
__global__ void _CSRRowWiseSampleUniformReplaceKernel(
    const uint64_t rand_seed, const int64_t num_picks, const int64_t num_rows,
    const IdType* const in_rows, const IdType* const in_ptr,
    const IdType* const in_index, const IdType* const data,
    const IdType* const out_ptr, IdType* const out_rows, IdType* const out_cols,
    IdType* const out_idxs) {
  // we assign one warp per row
  // NOTE(review): as in the non-replace kernel, the full thread block
  // (BLOCK_SIZE threads) actually cooperates on each row.
  assert(blockDim.x == BLOCK_SIZE);
  int64_t out_row = blockIdx.x * TILE_SIZE;
  const int64_t last_row =
      min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
  // Per-thread Philox stream, seeded per (block, thread).
  hiprandStatePhilox4_32_10_t rng;
  hiprand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng);
  while (out_row < last_row) {
    const int64_t row = in_rows[out_row];
    const int64_t in_row_start = in_ptr[row];
    const int64_t out_row_start = out_ptr[out_row];
    const int64_t deg = in_ptr[row + 1] - in_row_start;
    if (deg > 0) {
      // each thread then blindly copies in rows only if deg > 0.
      // Sampling with replacement: each pick is an independent uniform draw.
      for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) {
        const int64_t edge = hiprand(&rng) % deg;
        const int64_t out_idx = out_row_start + idx;
        out_rows[out_idx] = row;
        out_cols[out_idx] = in_index[in_row_start + edge];
        out_idxs[out_idx] =
            data ? data[in_row_start + edge] : in_row_start + edge;
      }
    }
    out_row += 1;
  }
}
} // namespace
///////////////////////////// CSR sampling //////////////////////////
// Uniformly sample up to `num_picks` non-zeros from each requested row of a
// CSR matrix on the GPU and return the picks as a COO matrix.
//
// Pipeline: (1) compute per-row output degree, (2) exclusive prefix-sum the
// degrees into row offsets, (3) asynchronously copy the total output length
// to the host, (4) launch the sampling kernel, (5) shrink the worst-case
// output buffers to the true length.
//
// \tparam XPU     Device type tag (GPU specialization).
// \tparam IdType  Integer id type of the matrix (int32_t or int64_t).
// \param mat        Input CSR matrix (indptr/indices plus optional data).
// \param rows       Device array of row ids to sample from.
// \param num_picks  Maximum number of entries to keep per row.
// \param replace    Sample with replacement when true.
// \return COO matrix of the sampled (row, col, edge-id) triples.
template <DGLDeviceType XPU, typename IdType>
COOMatrix _CSRRowWiseSamplingUniform(
    CSRMatrix mat, IdArray rows, const int64_t num_picks, const bool replace) {
  const auto& ctx = rows->ctx;
  auto device = runtime::DeviceAPI::Get(ctx);
  hipStream_t stream = runtime::getCurrentHIPStreamMasqueradingAsCUDA();
  const int64_t num_rows = rows->shape[0];
  const IdType* const slice_rows = static_cast<const IdType*>(rows->data);
  // Worst-case allocation (num_picks entries per row); trimmed by
  // CreateView once the real length is known.
  IdArray picked_row =
      NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
  IdArray picked_col =
      NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
  IdArray picked_idx =
      NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
  IdType* const out_rows = static_cast<IdType*>(picked_row->data);
  IdType* const out_cols = static_cast<IdType*>(picked_col->data);
  IdType* const out_idxs = static_cast<IdType*>(picked_idx->data);
  const IdType* in_ptr = static_cast<IdType*>(GetDevicePointer(mat.indptr));
  const IdType* in_cols = static_cast<IdType*>(GetDevicePointer(mat.indices));
  const IdType* data = CSRHasData(mat)
                           ? static_cast<IdType*>(GetDevicePointer(mat.data))
                           : nullptr;
  // compute degree; out_deg has num_rows+1 slots — the kernels write a
  // trailing 0 so the exclusive scan also yields the total output length
  IdType* out_deg = static_cast<IdType*>(
      device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType)));
  if (replace) {
    const dim3 block(512);
    const dim3 grid((num_rows + block.x - 1) / block.x);
    CUDA_KERNEL_CALL(
        _CSRRowWiseSampleDegreeReplaceKernel, grid, block, 0, stream, num_picks,
        num_rows, slice_rows, in_ptr, out_deg);
  } else {
    const dim3 block(512);
    const dim3 grid((num_rows + block.x - 1) / block.x);
    CUDA_KERNEL_CALL(
        _CSRRowWiseSampleDegreeKernel, grid, block, 0, stream, num_picks,
        num_rows, slice_rows, in_ptr, out_deg);
  }
  // fill out_ptr with exclusive prefix sums of out_deg
  IdType* out_ptr = static_cast<IdType*>(
      device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType)));
  size_t prefix_temp_size = 0;
  // First call (null temp storage) sizes the scratch buffer; the second
  // performs the scan on `stream`.
  CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(
      nullptr, prefix_temp_size, out_deg, out_ptr, num_rows + 1, stream));
  void* prefix_temp = device->AllocWorkspace(ctx, prefix_temp_size);
  CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(
      prefix_temp, prefix_temp_size, out_deg, out_ptr, num_rows + 1, stream));
  device->FreeWorkspace(ctx, prefix_temp);
  device->FreeWorkspace(ctx, out_deg);
  hipEvent_t copyEvent;
  // BUGFIX: the address-of operator had been mangled into the HTML entity
  // "&copy;" (rendered as the copyright sign), which does not compile.
  CUDA_CALL(hipEventCreate(&copyEvent));
  NDArray new_len_tensor;
  if (TensorDispatcher::Global()->IsAvailable()) {
    // Pinned host memory keeps the device-to-host copy below asynchronous.
    new_len_tensor = NDArray::PinnedEmpty(
        {1}, DGLDataTypeTraits<IdType>::dtype, DGLContext{kDGLCPU, 0});
  } else {
    // use pageable memory, it will unecessarily block but be functional
    new_len_tensor = NDArray::Empty(
        {1}, DGLDataTypeTraits<IdType>::dtype, DGLContext{kDGLCPU, 0});
  }
  // copy using the internal current stream; the event lets the sampling
  // kernels overlap the transfer, synchronizing only at the very end
  CUDA_CALL(hipMemcpyAsync(
      new_len_tensor->data, out_ptr + num_rows, sizeof(IdType),
      hipMemcpyDeviceToHost, stream));
  CUDA_CALL(hipEventRecord(copyEvent, stream));
  const uint64_t random_seed = RandomEngine::ThreadLocal()->RandInt(1000000000);
  // select edges
  // the number of rows each thread block will cover
  constexpr int TILE_SIZE = 128 / BLOCK_SIZE;
  if (replace) {  // with replacement
    const dim3 block(BLOCK_SIZE);
    const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE);
    CUDA_KERNEL_CALL(
        (_CSRRowWiseSampleUniformReplaceKernel<IdType, TILE_SIZE>), grid, block,
        0, stream, random_seed, num_picks, num_rows, slice_rows, in_ptr,
        in_cols, data, out_ptr, out_rows, out_cols, out_idxs);
  } else {  // without replacement
    const dim3 block(BLOCK_SIZE);
    const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE);
    CUDA_KERNEL_CALL(
        (_CSRRowWiseSampleUniformKernel<IdType, TILE_SIZE>), grid, block, 0,
        stream, random_seed, num_picks, num_rows, slice_rows, in_ptr, in_cols,
        data, out_ptr, out_rows, out_cols, out_idxs);
  }
  device->FreeWorkspace(ctx, out_ptr);
  // wait for copying `new_len` to finish
  CUDA_CALL(hipEventSynchronize(copyEvent));
  CUDA_CALL(hipEventDestroy(copyEvent));
  const IdType new_len = static_cast<const IdType*>(new_len_tensor->data)[0];
  // Shrink the worst-case buffers to the actual number of sampled edges.
  picked_row = picked_row.CreateView({new_len}, picked_row->dtype);
  picked_col = picked_col.CreateView({new_len}, picked_col->dtype);
  picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype);
  return COOMatrix(
      mat.num_rows, mat.num_cols, picked_row, picked_col, picked_idx);
}
// Entry point for uniform row-wise sampling. A fanout of -1 is the
// "take everything" convention: return every edge incident to the
// requested rows instead of sampling.
template <DGLDeviceType XPU, typename IdType>
COOMatrix CSRRowWiseSamplingUniform(
    CSRMatrix mat, IdArray rows, const int64_t num_picks, const bool replace) {
  if (num_picks != -1) {
    return _CSRRowWiseSamplingUniform<XPU, IdType>(
        mat, rows, num_picks, replace);
  }
  // Basically this is UnitGraph::InEdges().
  COOMatrix sliced = CSRToCOO(CSRSliceRows(mat, rows), false);
  IdArray remapped_rows = IndexSelect(rows, sliced.row);
  return COOMatrix(
      mat.num_rows, mat.num_cols, remapped_rows, sliced.col, sliced.data);
}
template COOMatrix CSRRowWiseSamplingUniform<kDGLCUDA, int32_t>(
CSRMatrix, IdArray, int64_t, bool);
template COOMatrix CSRRowWiseSamplingUniform<kDGLCUDA, int64_t>(
CSRMatrix, IdArray, int64_t, bool);
} // namespace impl
} // namespace aten
} // namespace dgl
| 34d5c0b02d96754c8ef7eb55e505c5c814c65cd9.cu | /**
* Copyright (c) 2021 by Contributors
* @file array/cuda/rowwise_sampling.cu
* @brief uniform rowwise sampling
*/
#include <curand_kernel.h>
#include <dgl/random.h>
#include <dgl/runtime/device_api.h>
#include <dgl/runtime/tensordispatch.h>
#include <numeric>
#include "../../array/cuda/atomic.cuh"
#include "../../runtime/cuda/cuda_common.h"
#include "./dgl_cub.cuh"
#include "./utils.h"
using namespace dgl::cuda;
using namespace dgl::aten::cuda;
using TensorDispatcher = dgl::runtime::TensorDispatcher;
namespace dgl {
namespace aten {
namespace impl {
namespace {
constexpr int BLOCK_SIZE = 128;
/**
* @brief Compute the size of each row in the sampled CSR, without replacement.
*
* @tparam IdType The type of node and edge indexes.
* @param num_picks The number of non-zero entries to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The index where each row's edges start.
* @param out_deg The size of each row in the sampled matrix, as indexed by
* `in_rows` (output).
*/
// Compute the size of each row in the sampled CSR, without replacement:
// out_deg[i] = min(num_picks, degree(in_rows[i])). One thread per row.
// The last thread also zeroes the sentinel slot out_deg[num_rows] so the
// subsequent exclusive prefix sum produces the total output length.
template <typename IdType>
__global__ void _CSRRowWiseSampleDegreeKernel(
    const int64_t num_picks, const int64_t num_rows,
    const IdType* const in_rows, const IdType* const in_ptr,
    IdType* const out_deg) {
  const int tIdx = threadIdx.x + blockIdx.x * blockDim.x;
  if (tIdx < num_rows) {
    // BUGFIX: use 64-bit indices. `in_rows` holds IdType (possibly int64)
    // row ids and `num_rows` is int64_t, so the previous `int` locals could
    // truncate on large graphs. This also matches
    // _CSRRowWiseSampleDegreeReplaceKernel, which already uses int64_t.
    const int64_t in_row = in_rows[tIdx];
    const int64_t out_row = tIdx;
    out_deg[out_row] = min(
        static_cast<IdType>(num_picks), in_ptr[in_row + 1] - in_ptr[in_row]);
    if (out_row == num_rows - 1) {
      // make the prefixsum work
      out_deg[num_rows] = 0;
    }
  }
}
/**
* @brief Compute the size of each row in the sampled CSR, with replacement.
*
* @tparam IdType The type of node and edge indexes.
* @param num_picks The number of non-zero entries to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The index where each row's edges start.
* @param out_deg The size of each row in the sampled matrix, as indexed by
* `in_rows` (output).
*/
// Compute the size of each row in the sampled CSR, with replacement:
// a non-empty row always yields exactly num_picks draws; an empty row
// yields zero. One thread per row; the final thread zeroes the sentinel
// slot out_deg[num_rows] so the exclusive prefix sum yields the total.
template <typename IdType>
__global__ void _CSRRowWiseSampleDegreeReplaceKernel(
    const int64_t num_picks, const int64_t num_rows,
    const IdType* const in_rows, const IdType* const in_ptr,
    IdType* const out_deg) {
  const int tIdx = threadIdx.x + blockIdx.x * blockDim.x;
  if (tIdx < num_rows) {
    const int64_t out_row = tIdx;
    const int64_t in_row = in_rows[tIdx];
    const int64_t deg = in_ptr[in_row + 1] - in_ptr[in_row];
    out_deg[out_row] = deg == 0 ? 0 : static_cast<IdType>(num_picks);
    if (out_row == num_rows - 1) {
      // sentinel entry so the prefix sum also produces the total length
      out_deg[num_rows] = 0;
    }
  }
}
/**
* @brief Perform row-wise uniform sampling on a CSR matrix,
* and generate a COO matrix, without replacement.
*
* @tparam IdType The ID type used for matrices.
* @tparam TILE_SIZE The number of rows covered by each threadblock.
* @param rand_seed The random seed to use.
* @param num_picks The number of non-zeros to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The indptr array of the input CSR.
* @param in_index The indices array of the input CSR.
* @param data The data array of the input CSR.
* @param out_ptr The offset to write each row to in the output COO.
* @param out_rows The rows of the output COO (output).
* @param out_cols The columns of the output COO (output).
* @param out_idxs The data array of the output COO (output).
*/
// One thread block of BLOCK_SIZE threads processes TILE_SIZE consecutive
// output rows. For rows with deg > num_picks, a parallel reservoir-sampling
// scheme builds a random permutation of edge offsets in out_idxs, which is
// then translated into column ids / edge ids.
template <typename IdType, int TILE_SIZE>
__global__ void _CSRRowWiseSampleUniformKernel(
    const uint64_t rand_seed, const int64_t num_picks, const int64_t num_rows,
    const IdType* const in_rows, const IdType* const in_ptr,
    const IdType* const in_index, const IdType* const data,
    const IdType* const out_ptr, IdType* const out_rows, IdType* const out_cols,
    IdType* const out_idxs) {
  assert(blockDim.x == BLOCK_SIZE);
  int64_t out_row = blockIdx.x * TILE_SIZE;
  const int64_t last_row =
      min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
  // Per-thread Philox stream seeded by (block, thread): draws are
  // independent across threads yet reproducible for a fixed rand_seed.
  curandStatePhilox4_32_10_t rng;
  curand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng);
  while (out_row < last_row) {
    const int64_t row = in_rows[out_row];
    const int64_t in_row_start = in_ptr[row];
    const int64_t deg = in_ptr[row + 1] - in_row_start;
    const int64_t out_row_start = out_ptr[out_row];
    if (deg <= num_picks) {
      // just copy row when there is not enough nodes to sample.
      for (int idx = threadIdx.x; idx < deg; idx += BLOCK_SIZE) {
        const IdType in_idx = in_row_start + idx;
        out_rows[out_row_start + idx] = row;
        out_cols[out_row_start + idx] = in_index[in_idx];
        out_idxs[out_row_start + idx] = data ? data[in_idx] : in_idx;
      }
    } else {
      // generate permutation list via reservoir algorithm.
      // Stage 1: seed the reservoir with the first num_picks edge offsets.
      for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) {
        out_idxs[out_row_start + idx] = idx;
      }
      __syncthreads();
      // Stage 2: each later edge idx replaces a random reservoir slot with
      // probability num_picks/(idx+1); AtomicMax arbitrates when several
      // threads target the same slot concurrently.
      for (int idx = num_picks + threadIdx.x; idx < deg; idx += BLOCK_SIZE) {
        const int num = curand(&rng) % (idx + 1);
        if (num < num_picks) {
          // use max so as to achieve the replacement order the serial
          // algorithm would have
          AtomicMax(out_idxs + out_row_start + num, idx);
        }
      }
      __syncthreads();
      // Stage 3: copy permutation over — translate the reservoir's edge
      // offsets into global column ids and edge ids.
      for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) {
        const IdType perm_idx = out_idxs[out_row_start + idx] + in_row_start;
        out_rows[out_row_start + idx] = row;
        out_cols[out_row_start + idx] = in_index[perm_idx];
        out_idxs[out_row_start + idx] = data ? data[perm_idx] : perm_idx;
      }
    }
    out_row += 1;
  }
}
/**
* @brief Perform row-wise uniform sampling on a CSR matrix,
* and generate a COO matrix, with replacement.
*
* @tparam IdType The ID type used for matrices.
* @tparam TILE_SIZE The number of rows covered by each threadblock.
* @param rand_seed The random seed to use.
* @param num_picks The number of non-zeros to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The indptr array of the input CSR.
* @param in_index The indices array of the input CSR.
* @param data The data array of the input CSR.
* @param out_ptr The offset to write each row to in the output COO.
* @param out_rows The rows of the output COO (output).
* @param out_cols The columns of the output COO (output).
* @param out_idxs The data array of the output COO (output).
*/
// One thread block of BLOCK_SIZE threads processes TILE_SIZE consecutive
// output rows; every output slot is an independent uniform draw from the
// row's edges (sampling with replacement).
template <typename IdType, int TILE_SIZE>
__global__ void _CSRRowWiseSampleUniformReplaceKernel(
    const uint64_t rand_seed, const int64_t num_picks, const int64_t num_rows,
    const IdType* const in_rows, const IdType* const in_ptr,
    const IdType* const in_index, const IdType* const data,
    const IdType* const out_ptr, IdType* const out_rows, IdType* const out_cols,
    IdType* const out_idxs) {
  assert(blockDim.x == BLOCK_SIZE);
  int64_t out_row = blockIdx.x * TILE_SIZE;
  const int64_t last_row =
      min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
  // Per-thread Philox stream seeded by (block, thread): reproducible for a
  // fixed rand_seed, independent across threads.
  curandStatePhilox4_32_10_t rng;
  curand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng);
  while (out_row < last_row) {
    const int64_t row = in_rows[out_row];
    const int64_t in_row_start = in_ptr[row];
    const int64_t out_row_start = out_ptr[out_row];
    const int64_t deg = in_ptr[row + 1] - in_row_start;
    if (deg > 0) {
      // each thread then blindly copies in rows only if deg > 0.
      for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) {
        // NOTE: `curand() % deg` carries a slight modulo bias whenever deg
        // does not evenly divide 2^32; negligible for typical degrees.
        const int64_t edge = curand(&rng) % deg;
        const int64_t out_idx = out_row_start + idx;
        out_rows[out_idx] = row;
        out_cols[out_idx] = in_index[in_row_start + edge];
        out_idxs[out_idx] =
            data ? data[in_row_start + edge] : in_row_start + edge;
      }
    }
    out_row += 1;
  }
}
} // namespace
///////////////////////////// CSR sampling //////////////////////////
// Uniformly sample up to `num_picks` non-zeros from each requested row of a
// CSR matrix on the GPU and return the picks as a COO matrix.
//
// Pipeline: (1) compute per-row output degree, (2) exclusive prefix-sum the
// degrees into row offsets, (3) asynchronously copy the total output length
// to the host, (4) launch the sampling kernel, (5) shrink the worst-case
// output buffers to the true length.
//
// \tparam XPU     Device type tag (GPU specialization).
// \tparam IdType  Integer id type of the matrix (int32_t or int64_t).
// \param mat        Input CSR matrix (indptr/indices plus optional data).
// \param rows       Device array of row ids to sample from.
// \param num_picks  Maximum number of entries to keep per row.
// \param replace    Sample with replacement when true.
// \return COO matrix of the sampled (row, col, edge-id) triples.
template <DGLDeviceType XPU, typename IdType>
COOMatrix _CSRRowWiseSamplingUniform(
    CSRMatrix mat, IdArray rows, const int64_t num_picks, const bool replace) {
  const auto& ctx = rows->ctx;
  auto device = runtime::DeviceAPI::Get(ctx);
  cudaStream_t stream = runtime::getCurrentCUDAStream();
  const int64_t num_rows = rows->shape[0];
  const IdType* const slice_rows = static_cast<const IdType*>(rows->data);
  // Worst-case allocation (num_picks entries per row); trimmed by
  // CreateView once the real length is known.
  IdArray picked_row =
      NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
  IdArray picked_col =
      NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
  IdArray picked_idx =
      NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
  IdType* const out_rows = static_cast<IdType*>(picked_row->data);
  IdType* const out_cols = static_cast<IdType*>(picked_col->data);
  IdType* const out_idxs = static_cast<IdType*>(picked_idx->data);
  const IdType* in_ptr = static_cast<IdType*>(GetDevicePointer(mat.indptr));
  const IdType* in_cols = static_cast<IdType*>(GetDevicePointer(mat.indices));
  const IdType* data = CSRHasData(mat)
                           ? static_cast<IdType*>(GetDevicePointer(mat.data))
                           : nullptr;
  // compute degree; out_deg has num_rows+1 slots — the kernels write a
  // trailing 0 so the exclusive scan also yields the total output length
  IdType* out_deg = static_cast<IdType*>(
      device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType)));
  if (replace) {
    const dim3 block(512);
    const dim3 grid((num_rows + block.x - 1) / block.x);
    CUDA_KERNEL_CALL(
        _CSRRowWiseSampleDegreeReplaceKernel, grid, block, 0, stream, num_picks,
        num_rows, slice_rows, in_ptr, out_deg);
  } else {
    const dim3 block(512);
    const dim3 grid((num_rows + block.x - 1) / block.x);
    CUDA_KERNEL_CALL(
        _CSRRowWiseSampleDegreeKernel, grid, block, 0, stream, num_picks,
        num_rows, slice_rows, in_ptr, out_deg);
  }
  // fill out_ptr with exclusive prefix sums of out_deg
  IdType* out_ptr = static_cast<IdType*>(
      device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType)));
  size_t prefix_temp_size = 0;
  // First call (null temp storage) sizes the scratch buffer; the second
  // performs the scan on `stream`.
  CUDA_CALL(cub::DeviceScan::ExclusiveSum(
      nullptr, prefix_temp_size, out_deg, out_ptr, num_rows + 1, stream));
  void* prefix_temp = device->AllocWorkspace(ctx, prefix_temp_size);
  CUDA_CALL(cub::DeviceScan::ExclusiveSum(
      prefix_temp, prefix_temp_size, out_deg, out_ptr, num_rows + 1, stream));
  device->FreeWorkspace(ctx, prefix_temp);
  device->FreeWorkspace(ctx, out_deg);
  cudaEvent_t copyEvent;
  // BUGFIX: the address-of operator had been mangled into the HTML entity
  // "&copy;" (rendered as the copyright sign), which does not compile.
  CUDA_CALL(cudaEventCreate(&copyEvent));
  NDArray new_len_tensor;
  if (TensorDispatcher::Global()->IsAvailable()) {
    // Pinned host memory keeps the device-to-host copy below asynchronous.
    new_len_tensor = NDArray::PinnedEmpty(
        {1}, DGLDataTypeTraits<IdType>::dtype, DGLContext{kDGLCPU, 0});
  } else {
    // use pageable memory, it will unecessarily block but be functional
    new_len_tensor = NDArray::Empty(
        {1}, DGLDataTypeTraits<IdType>::dtype, DGLContext{kDGLCPU, 0});
  }
  // copy using the internal current stream; the event lets the sampling
  // kernels overlap the transfer, synchronizing only at the very end
  CUDA_CALL(cudaMemcpyAsync(
      new_len_tensor->data, out_ptr + num_rows, sizeof(IdType),
      cudaMemcpyDeviceToHost, stream));
  CUDA_CALL(cudaEventRecord(copyEvent, stream));
  const uint64_t random_seed = RandomEngine::ThreadLocal()->RandInt(1000000000);
  // select edges
  // the number of rows each thread block will cover
  constexpr int TILE_SIZE = 128 / BLOCK_SIZE;
  if (replace) {  // with replacement
    const dim3 block(BLOCK_SIZE);
    const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE);
    CUDA_KERNEL_CALL(
        (_CSRRowWiseSampleUniformReplaceKernel<IdType, TILE_SIZE>), grid, block,
        0, stream, random_seed, num_picks, num_rows, slice_rows, in_ptr,
        in_cols, data, out_ptr, out_rows, out_cols, out_idxs);
  } else {  // without replacement
    const dim3 block(BLOCK_SIZE);
    const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE);
    CUDA_KERNEL_CALL(
        (_CSRRowWiseSampleUniformKernel<IdType, TILE_SIZE>), grid, block, 0,
        stream, random_seed, num_picks, num_rows, slice_rows, in_ptr, in_cols,
        data, out_ptr, out_rows, out_cols, out_idxs);
  }
  device->FreeWorkspace(ctx, out_ptr);
  // wait for copying `new_len` to finish
  CUDA_CALL(cudaEventSynchronize(copyEvent));
  CUDA_CALL(cudaEventDestroy(copyEvent));
  const IdType new_len = static_cast<const IdType*>(new_len_tensor->data)[0];
  // Shrink the worst-case buffers to the actual number of sampled edges.
  picked_row = picked_row.CreateView({new_len}, picked_row->dtype);
  picked_col = picked_col.CreateView({new_len}, picked_col->dtype);
  picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype);
  return COOMatrix(
      mat.num_rows, mat.num_cols, picked_row, picked_col, picked_idx);
}
// Entry point for uniform row-wise sampling. A fanout of -1 is the
// "take everything" convention: return every edge incident to the
// requested rows instead of sampling.
template <DGLDeviceType XPU, typename IdType>
COOMatrix CSRRowWiseSamplingUniform(
    CSRMatrix mat, IdArray rows, const int64_t num_picks, const bool replace) {
  if (num_picks != -1) {
    return _CSRRowWiseSamplingUniform<XPU, IdType>(
        mat, rows, num_picks, replace);
  }
  // Basically this is UnitGraph::InEdges().
  COOMatrix full = CSRToCOO(CSRSliceRows(mat, rows), false);
  IdArray mapped_rows = IndexSelect(rows, full.row);
  return COOMatrix(
      mat.num_rows, mat.num_cols, mapped_rows, full.col, full.data);
}
template COOMatrix CSRRowWiseSamplingUniform<kDGLCUDA, int32_t>(
CSRMatrix, IdArray, int64_t, bool);
template COOMatrix CSRRowWiseSamplingUniform<kDGLCUDA, int64_t>(
CSRMatrix, IdArray, int64_t, bool);
} // namespace impl
} // namespace aten
} // namespace dgl
|
53bf9277db7aefd966edde8ab390d060497970d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Count Mandelbrot iterations for c = (xPos, yPos) by iterating
// z <- z^2 + c with cached squares. Returns the remaining iteration
// budget: 0 means `crunch` iterations were exhausted (point presumed in
// the set); a positive value means the orbit escaped (|z|^2 >= 4).
template<class T>
__device__ inline int CalcMandelbrot(const T xPos, const T yPos, const int crunch)
{
    T im = yPos;
    T re = xPos;
    T im2 = im * im;
    T re2 = re * re;
    int remaining = crunch;
    for (;;) {
        if (--remaining == 0) break;          // budget exhausted
        if (!(re2 + im2 < T(4.0))) break;     // orbit escaped
        im = re * im * T(2.0) + yPos;         // Im: 2*Re*Im + Im(c)
        re = re2 - im2 + xPos;                // Re: Re^2 - Im^2 + Re(c)
        im2 = im * im;
        re2 = re * re;
    }
    return remaining;
}
// The Mandelbrot CUDA GPU thread function
// The Mandelbrot CUDA GPU thread function: one thread per pixel. Maps
// (px, py) onto the complex plane via (xOff, yOff, scale), iterates up to
// `crunch` times, and stores the escape count (or `crunch` for interior
// points) into dst.
extern "C" __global__ void Mandelbrot0_sm10(int *dst, const int imageW, const int imageH, const int crunch,
                                            const float xOff, const float yOff, const float scale)
{
    const int px = blockDim.x * blockIdx.x + threadIdx.x;
    const int py = blockDim.y * blockIdx.y + threadIdx.y;
    // Guard against the partial blocks on the right/bottom image edges.
    if (px >= imageW || py >= imageH) return;
    // Map the pixel onto the complex plane.
    const float cx = xOff + (float)px * scale;
    const float cy = yOff - (float)py * scale;
    // CalcMandelbrot counts down from crunch; convert back to the number
    // of iterations performed (escaped) or crunch (interior point).
    int m = CalcMandelbrot<float>(cx, cy, crunch);
    m = (m > 0) ? (crunch - m) : crunch;
    dst[imageW * py + px] = m;
}
| 53bf9277db7aefd966edde8ab390d060497970d8.cu | #include <stdio.h>
// Count Mandelbrot iterations for the point c = (xPos, yPos).
// Iterates z <- z^2 + c with x = Re(z), y = Im(z) and cached squares, and
// returns the remaining iteration budget: 0 means the `crunch` budget was
// exhausted (point presumed inside the set); a positive value means the
// orbit escaped (|z|^2 >= 4) with that many iterations left.
template<class T>
__device__ inline int CalcMandelbrot(const T xPos, const T yPos, const int crunch)
{
    T y = yPos;
    T x = xPos;
    T yy = y * y;   // cached Im(z)^2
    T xx = x * x;   // cached Re(z)^2
    int i = crunch;
    while (--i && (xx + yy < T(4.0))) {
        y = x * y * T(2.0) + yPos;  // Im: 2*Re*Im + Im(c), using old Re
        x = xx - yy + xPos;         // Re: Re^2 - Im^2 + Re(c)
        yy = y * y;
        xx = x * x;
    }
    return i;
}
// The Mandelbrot CUDA GPU thread function
// The Mandelbrot CUDA GPU thread function: one thread computes one pixel.
// Maps (ix, iy) onto the complex plane via (xOff, yOff, scale), iterates
// up to `crunch` times, and stores the escape count into dst.
extern "C" __global__ void Mandelbrot0_sm10(int *dst, const int imageW, const int imageH, const int crunch,
                                            const float xOff, const float yOff, const float scale)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    // Guard against partial blocks on the right/bottom image edges.
    if ((ix < imageW) && (iy < imageH)) {
        // Calculate the location
        const float xPos = xOff + (float)ix * scale;
        const float yPos = yOff - (float)iy * scale;
        // Calculate the Mandelbrot index for the current location
        int m = CalcMandelbrot<float>(xPos, yPos, crunch);
        // CalcMandelbrot counts down from crunch; convert back to the
        // number of iterations performed (escaped) or crunch (interior).
        m = m > 0 ? crunch - m : crunch;
        // Output the pixel
        int pixel = imageW * iy + ix;
        dst[pixel] = m;
    }
}
|
d62a6429bac177dab652cf5dcd0d8dfb83699443.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* Main entry of dense matrix-matrix multiplication kernel
*/
#ifdef _GLIBCXX_USE_INT128
#undef _GLIBCXX_USE_INT128
#endif
#ifdef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_ATOMIC_BUILTINS
#endif
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <malloc.h>
#include <vector>
#include <parboil.h>
#include <iostream>
#include "sgemm_kernel.hip"
// I/O routines
extern bool readColMajorMatrixFile(const char *fn, int &nr_row, int &nr_col, std::vector<float>&v);
extern bool writeColMajorMatrixFile(const char *fn, int, int, std::vector<float>&);
extern "C"
void computeGold(float *, const float*, const float*, unsigned int, unsigned int, unsigned int);
/* Benchmark driver: load A and B^T (column-major) from the Parboil input
 * set, run the tiled SGEMM on the GPU, optionally dump C, and report
 * GFLOP/s for the GPU phase.
 * NOTE(review): inpFiles[1] (B) must be present but is never read — only A
 * (inpFiles[0]) and B^T (inpFiles[2]) are loaded; confirm this is intended.
 * NOTE(review): hipMalloc/hipMemcpy return codes are unchecked. */
int
main (int argc, char *argv[]) {
  struct pb_Parameters *params;
  struct pb_TimerSet timers;
  float *dA, *dB, *dC;       // device buffers for A, B^T and C
  size_t A_sz, B_sz, C_sz;   // buffer sizes in bytes
  int matArow, matAcol;      // dimensions of A
  int matBrow, matBcol;      // dimensions of B
  std::vector<float> matA, matBT;
  pb_InitializeTimerSet(&timers);
  /* Read command line. Expect 3 inputs: A, B and B^T
     in column-major layout*/
  params = pb_ReadParameters(&argc, argv);
  if ((params->inpFiles[0] == NULL)
      || (params->inpFiles[1] == NULL)
      || (params->inpFiles[2] == NULL)
      || (params->inpFiles[3] != NULL))
  {
    fprintf(stderr, "Expecting three input filenames\n");
    exit(-1);
  }
  /* Read in data */
  pb_SwitchToTimer(&timers, pb_TimerID_IO);
  // load A
  readColMajorMatrixFile(params->inpFiles[0],
      matArow, matAcol, matA);
  // copy A to device memory
  A_sz = matArow*matAcol*sizeof(float);
  // load B^T
  readColMajorMatrixFile(params->inpFiles[2],
      matBcol, matBrow, matBT);
  pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE );
  B_sz = matBrow*matBcol*sizeof(float);
  // allocate space for C
  C_sz = matArow*matBcol*sizeof(float);
  // CUDA memory allocation
  std::vector<float> matC(matArow*matBcol);
  hipMalloc((void**)&dA, A_sz);
  hipMalloc((void**)&dB, B_sz);
  hipMalloc((void**)&dC, C_sz);
  // Copy A and B^T into device memory
  pb_SwitchToTimer( &timers, pb_TimerID_COPY );
  hipMemcpy(dA, &matA.front(), A_sz, hipMemcpyHostToDevice);
  hipMemcpy(dB, &matBT.front(), B_sz, hipMemcpyHostToDevice);
  pb_SwitchToTimer( &timers, pb_TimerID_GPU );
  // Use standard sgemm interface: C = 1.0*A*B + 0.0*C, B given transposed.
  regtileSgemm('N', 'T', matArow, matBcol, matAcol, 1.0f, \
      dA, matArow, dB, matBcol, 0.0f, dC, matArow);
  if (params->outFile) {
    pb_SwitchToTimer( &timers, pb_TimerID_COPY );
    hipMemcpy(&matC.front(), dC, C_sz, hipMemcpyDeviceToHost);
    /* Write C to file */
    pb_SwitchToTimer(&timers, pb_TimerID_IO);
    writeColMajorMatrixFile(params->outFile,
        matArow, matBcol, matC);
  }
  pb_SwitchToTimer(&timers, pb_TimerID_NONE);
  double GPUtime = pb_GetElapsedTime(&(timers.timers[pb_TimerID_GPU]));
  // 2*M*N*K floating-point operations for a dense GEMM
  std::cout<< "GFLOPs = " << 2.* matArow * matBcol * matAcol/GPUtime/1e9 << std::endl;
  pb_PrintTimerSet(&timers);
  pb_FreeParameters(params);
  hipFree(dA);
  hipFree(dB);
  hipFree(dC);
  return 0;
}
| d62a6429bac177dab652cf5dcd0d8dfb83699443.cu | /***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* Main entry of dense matrix-matrix multiplication kernel
*/
#ifdef _GLIBCXX_USE_INT128
#undef _GLIBCXX_USE_INT128
#endif
#ifdef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_ATOMIC_BUILTINS
#endif
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <malloc.h>
#include <vector>
#include <parboil.h>
#include <iostream>
#include "sgemm_kernel.cu"
// I/O routines
extern bool readColMajorMatrixFile(const char *fn, int &nr_row, int &nr_col, std::vector<float>&v);
extern bool writeColMajorMatrixFile(const char *fn, int, int, std::vector<float>&);
extern "C"
void computeGold(float *, const float*, const float*, unsigned int, unsigned int, unsigned int);
/* Benchmark driver: load A and B^T (column-major) from the Parboil input
 * set, run the tiled SGEMM on the GPU, optionally dump C, and report
 * GFLOP/s for the GPU phase.
 * NOTE(review): inpFiles[1] (B) must be present but is never read — only A
 * (inpFiles[0]) and B^T (inpFiles[2]) are loaded; confirm this is intended.
 * NOTE(review): cudaMalloc/cudaMemcpy return codes are unchecked. */
int
main (int argc, char *argv[]) {
  struct pb_Parameters *params;
  struct pb_TimerSet timers;
  float *dA, *dB, *dC;       // device buffers for A, B^T and C
  size_t A_sz, B_sz, C_sz;   // buffer sizes in bytes
  int matArow, matAcol;      // dimensions of A
  int matBrow, matBcol;      // dimensions of B
  std::vector<float> matA, matBT;
  pb_InitializeTimerSet(&timers);
  /* Read command line. Expect 3 inputs: A, B and B^T
     in column-major layout*/
  params = pb_ReadParameters(&argc, argv);
  if ((params->inpFiles[0] == NULL)
      || (params->inpFiles[1] == NULL)
      || (params->inpFiles[2] == NULL)
      || (params->inpFiles[3] != NULL))
  {
    fprintf(stderr, "Expecting three input filenames\n");
    exit(-1);
  }
  /* Read in data */
  pb_SwitchToTimer(&timers, pb_TimerID_IO);
  // load A
  readColMajorMatrixFile(params->inpFiles[0],
      matArow, matAcol, matA);
  // copy A to device memory
  A_sz = matArow*matAcol*sizeof(float);
  // load B^T
  readColMajorMatrixFile(params->inpFiles[2],
      matBcol, matBrow, matBT);
  pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE );
  B_sz = matBrow*matBcol*sizeof(float);
  // allocate space for C
  C_sz = matArow*matBcol*sizeof(float);
  // CUDA memory allocation
  std::vector<float> matC(matArow*matBcol);
  cudaMalloc((void**)&dA, A_sz);
  cudaMalloc((void**)&dB, B_sz);
  cudaMalloc((void**)&dC, C_sz);
  // Copy A and B^T into device memory
  pb_SwitchToTimer( &timers, pb_TimerID_COPY );
  cudaMemcpy(dA, &matA.front(), A_sz, cudaMemcpyHostToDevice);
  cudaMemcpy(dB, &matBT.front(), B_sz, cudaMemcpyHostToDevice);
  pb_SwitchToTimer( &timers, pb_TimerID_GPU );
  // Use standard sgemm interface: C = 1.0*A*B + 0.0*C, B given transposed.
  regtileSgemm('N', 'T', matArow, matBcol, matAcol, 1.0f, \
      dA, matArow, dB, matBcol, 0.0f, dC, matArow);
  if (params->outFile) {
    pb_SwitchToTimer( &timers, pb_TimerID_COPY );
    cudaMemcpy(&matC.front(), dC, C_sz, cudaMemcpyDeviceToHost);
    /* Write C to file */
    pb_SwitchToTimer(&timers, pb_TimerID_IO);
    writeColMajorMatrixFile(params->outFile,
        matArow, matBcol, matC);
  }
  pb_SwitchToTimer(&timers, pb_TimerID_NONE);
  double GPUtime = pb_GetElapsedTime(&(timers.timers[pb_TimerID_GPU]));
  // 2*M*N*K floating-point operations for a dense GEMM
  std::cout<< "GFLOPs = " << 2.* matArow * matBcol * matAcol/GPUtime/1e9 << std::endl;
  pb_PrintTimerSet(&timers);
  pb_FreeParameters(params);
  cudaFree(dA);
  cudaFree(dB);
  cudaFree(dC);
  return 0;
}
|
6dbc27529f26cdb56bfe0ae21849aaa173736873.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/plugin/transformer_input_convert_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// Builds the PosId prefix-offset array for the fused transformer input:
// one block per batch entry, one thread per sequence position.
// output0[b+1] receives the token count of batch b; output0 is then turned
// into a running prefix sum.
__global__ void TransformerInputConvertKernel(const int64_t* input,
                                              int32_t* output0) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  __shared__ int32_t shared_data;
  // BUGFIX: shared memory is uninitialized on entry; zero it before any
  // thread accumulates into it, and barrier so the store is visible.
  if (threadIdx.x == 0) {
    shared_data = 0;
  }
  __syncthreads();
  // NOTE(review): each thread compares its lane index with input[tid];
  // presumably input holds per-position sequence lengths — confirm schema.
  if (threadIdx.x == static_cast<int>(input[tid])) {
    atomicAdd(&shared_data, 1);
  }
  // BUGFIX: the original read shared_data (and stored it) from every
  // thread without waiting for the atomics — a data race. Barrier first
  // and let a single thread publish the per-block count.
  __syncthreads();
  if (threadIdx.x == 0) {
    output0[0] = 0;
    output0[blockIdx.x + 1] = shared_data;
  }
  __syncthreads();
  // BUGFIX: the prefix pass must cover the gridDim.x per-batch counts; the
  // original looped up to blockDim.x (the sequence length), over-running
  // output0, and ran the serial scan concurrently from every thread.
  // NOTE(review): thread (0,0) still cannot reliably observe other blocks'
  // stores without a grid-wide sync; upstream should move this scan into a
  // second kernel or a cooperative launch — TODO confirm Paddle fix.
  if (blockIdx.x == 0 && threadIdx.x == 0) {
    for (int i = 0; i < gridDim.x; ++i) {
      output0[i + 1] += output0[i];
    }
  }
}
// Both outputs (the PosId offsets and MaxSeqlen) are int32 tensors,
// regardless of the int64 input dtype.
nvinfer1::DataType TransformerInputConvertPlugin::getOutputDataType(
    int index, const nvinfer1::DataType* input_types,
    int nb_inputs) const TRT_NOEXCEPT {
  return nvinfer1::DataType::kINT32;
}
// Shape inference: both outputs are rank-1. Output 0 (PosId) holds one
// offset per batch entry plus a leading slot, so its length is batch + 1;
// output 1 (MaxSeqlen) mirrors the input's sequence-length dimension.
nvinfer1::DimsExprs TransformerInputConvertPlugin::getOutputDimensions(
    int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs,
    nvinfer1::IExprBuilder& exprBuilder) TRT_NOEXCEPT {
  nvinfer1::DimsExprs dims{};
  dims.nbDims = 1;
  if (outputIndex != 0) {  // MaxSeqlen
    dims.d[0] = inputs[0].d[1];
    return dims;
  }
  // PosId: batch + 1
  const auto* one = exprBuilder.constant(1);
  dims.d[0] = exprBuilder.operation(
      nvinfer1::DimensionOperation::kSUM, *inputs[0].d[0], *one);
  return dims;
}
// Format negotiation: the single input may be any dtype in linear layout;
// both outputs must be int32 in linear layout.
bool TransformerInputConvertPlugin::supportsFormatCombination(
    int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs,
    int nbOutputs) TRT_NOEXCEPT {
  PADDLE_ENFORCE_EQ(nbInputs, 1,
                    platform::errors::InvalidArgument("Must have 1 inputs, "
                                                      "but got %d input(s). ",
                                                      nbInputs));
  PADDLE_ENFORCE_EQ(nbOutputs, getNbOutputs(),
                    platform::errors::InvalidArgument("Must have 2 output, "
                                                      "but got %d output(s). ",
                                                      nbOutputs));
  const auto& desc = inOut[pos];
  const bool is_linear = desc.format == nvinfer1::TensorFormat::kLINEAR;
  if (pos == 0) {  // input
    return is_linear;
  }
  // output0 (PosId), output1 (MaxSeqlen)
  return is_linear && desc.type == nvinfer1::DataType::kINT32;
}
// No-op: this plugin needs no per-configuration setup.
void TransformerInputConvertPlugin::configurePlugin(
    const nvinfer1::DynamicPluginTensorDesc* inputs, int nbInputs,
    const nvinfer1::DynamicPluginTensorDesc* outputs,
    int nbOutputs) TRT_NOEXCEPT {}
// No-op: no cuDNN/cuBLAS handles or allocator state are retained.
void TransformerInputConvertPlugin::attachToContext(
    cudnnContext* cudnnContext, cublasContext* cublasContext,
    nvinfer1::IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {}
// No-op counterpart of attachToContext.
void TransformerInputConvertPlugin::detachFromContext() TRT_NOEXCEPT {}
// No-op: no resources to release at termination.
void TransformerInputConvertPlugin::terminate() TRT_NOEXCEPT {}
// Launches the conversion kernel with one block per batch entry and one
// thread per sequence position; returns non-zero on launch failure.
int TransformerInputConvertPlugin::enqueue(
    const nvinfer1::PluginTensorDesc* inputDesc,
    const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs,
    void* const* outputs, void* workspace, hipStream_t stream) TRT_NOEXCEPT {
  const auto input_desc = inputDesc[0];
  const int64_t* input = static_cast<const int64_t*>(inputs[0]);
  int32_t* output0 = static_cast<int32_t*>(outputs[0]);  // PosId
  // int32_t* output1 = static_cast<int32_t*>(outputs[1]);  // MaxSeqlen
  const int32_t num_blocks = input_desc.dims.d[0];   // batchs
  const int32_t num_threads = input_desc.dims.d[1];  // max sequnce length
  // NOTE(review): assumes the max sequence length fits in one thread block
  // (<= 1024 threads) — confirm for long-sequence models.
  hipLaunchKernelGGL(( TransformerInputConvertKernel), dim3(num_blocks), dim3(num_threads), 0, stream,
      input, output0);
  // Non-zero return value reports the launch error to TensorRT.
  return hipGetLastError() != hipSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 6dbc27529f26cdb56bfe0ae21849aaa173736873.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/plugin/transformer_input_convert_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// Builds the PosId prefix-offset array for the fused transformer input:
// one block per batch entry, one thread per sequence position.
// output0[b+1] receives the token count of batch b; output0 is then turned
// into a running prefix sum.
__global__ void TransformerInputConvertKernel(const int64_t* input,
                                              int32_t* output0) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  __shared__ int32_t shared_data;
  // BUGFIX: shared memory is uninitialized on entry; zero it before any
  // thread accumulates into it, and barrier so the store is visible.
  if (threadIdx.x == 0) {
    shared_data = 0;
  }
  __syncthreads();
  // NOTE(review): each thread compares its lane index with input[tid];
  // presumably input holds per-position sequence lengths — confirm schema.
  if (threadIdx.x == static_cast<int>(input[tid])) {
    atomicAdd(&shared_data, 1);
  }
  // BUGFIX: the original read shared_data (and stored it) from every
  // thread without waiting for the atomics — a data race. Barrier first
  // and let a single thread publish the per-block count.
  __syncthreads();
  if (threadIdx.x == 0) {
    output0[0] = 0;
    output0[blockIdx.x + 1] = shared_data;
  }
  __syncthreads();
  // BUGFIX: the prefix pass must cover the gridDim.x per-batch counts; the
  // original looped up to blockDim.x (the sequence length), over-running
  // output0, and ran the serial scan concurrently from every thread.
  // NOTE(review): thread (0,0) still cannot reliably observe other blocks'
  // stores without a grid-wide sync; upstream should move this scan into a
  // second kernel or a cooperative launch — TODO confirm Paddle fix.
  if (blockIdx.x == 0 && threadIdx.x == 0) {
    for (int i = 0; i < gridDim.x; ++i) {
      output0[i + 1] += output0[i];
    }
  }
}
// Both outputs (the PosId offsets and MaxSeqlen) are int32 tensors,
// regardless of the int64 input dtype.
nvinfer1::DataType TransformerInputConvertPlugin::getOutputDataType(
    int index, const nvinfer1::DataType* input_types,
    int nb_inputs) const TRT_NOEXCEPT {
  return nvinfer1::DataType::kINT32;
}
// Shape inference: both outputs are rank-1 tensors.
nvinfer1::DimsExprs TransformerInputConvertPlugin::getOutputDimensions(
    int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs,
    nvinfer1::IExprBuilder& exprBuilder) TRT_NOEXCEPT {
  nvinfer1::DimsExprs output_dims{};
  output_dims.nbDims = 1;
  if (outputIndex == 0) {  // PosId
    // One offset per batch entry plus a leading slot: length = batch + 1.
    const auto* one = exprBuilder.constant(1);
    output_dims.d[0] = exprBuilder.operation(nvinfer1::DimensionOperation::kSUM,
                                             *inputs[0].d[0], *one);
  } else {  // MaxSeqlen
    // Mirrors the input's sequence-length dimension.
    output_dims.d[0] = inputs[0].d[1];
  }
  return output_dims;
}
// Format negotiation: the single input may be any dtype in linear layout;
// both outputs must be int32 in linear layout.
bool TransformerInputConvertPlugin::supportsFormatCombination(
    int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs,
    int nbOutputs) TRT_NOEXCEPT {
  PADDLE_ENFORCE_EQ(nbInputs, 1,
                    platform::errors::InvalidArgument("Must have 1 inputs, "
                                                      "but got %d input(s). ",
                                                      nbInputs));
  PADDLE_ENFORCE_EQ(nbOutputs, getNbOutputs(),
                    platform::errors::InvalidArgument("Must have 2 output, "
                                                      "but got %d output(s). ",
                                                      nbOutputs));
  if (pos == 0) {  // input: any dtype, linear layout
    return inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
  } else {  // output0, output1: int32 in linear layout
    return inOut[pos].type == nvinfer1::DataType::kINT32 &&
           inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
  }
}
// No-op: this plugin needs no per-configuration setup.
void TransformerInputConvertPlugin::configurePlugin(
    const nvinfer1::DynamicPluginTensorDesc* inputs, int nbInputs,
    const nvinfer1::DynamicPluginTensorDesc* outputs,
    int nbOutputs) TRT_NOEXCEPT {}
// No-op: no cuDNN/cuBLAS handles or allocator state are retained.
void TransformerInputConvertPlugin::attachToContext(
    cudnnContext* cudnnContext, cublasContext* cublasContext,
    nvinfer1::IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {}
// No-op counterpart of attachToContext.
void TransformerInputConvertPlugin::detachFromContext() TRT_NOEXCEPT {}
// No-op: no resources to release at termination.
void TransformerInputConvertPlugin::terminate() TRT_NOEXCEPT {}
// Launches the conversion kernel with one block per batch entry and one
// thread per sequence position; returns non-zero on launch failure.
int TransformerInputConvertPlugin::enqueue(
    const nvinfer1::PluginTensorDesc* inputDesc,
    const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs,
    void* const* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT {
  const auto input_desc = inputDesc[0];
  const int64_t* input = static_cast<const int64_t*>(inputs[0]);
  int32_t* output0 = static_cast<int32_t*>(outputs[0]);  // PosId
  // int32_t* output1 = static_cast<int32_t*>(outputs[1]);  // MaxSeqlen
  const int32_t num_blocks = input_desc.dims.d[0];   // batchs
  const int32_t num_threads = input_desc.dims.d[1];  // max sequnce length
  // NOTE(review): assumes the max sequence length fits in one thread block
  // (<= 1024 threads) — confirm for long-sequence models.
  TransformerInputConvertKernel<<<num_blocks, num_threads, 0, stream>>>(
      input, output0);
  // Non-zero return value reports the launch error to TensorRT.
  return cudaGetLastError() != cudaSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
acbf44d2db8ff3bd82cfbfb3e4e68db3b057d409.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <iostream>
#include "gpu-new-forward.h"
#define TILE_WIDTH 8
__global__ void conv_forward_kernel(float *y, const float *x, const float *k, const int B, const int M, const int C, const int H, const int W, const int K)
{
/*
Modify this function to implement the forward pass described in Chapter 16.
We have added an additional dimension to the tensors to support an entire mini-batch
The goal here is to be correct AND fast.
Function paramter definitions:
y - output
x - input
k - kernel
B - batch_size (number of images in x)
M - number of output feature maps
C - number of input feature maps
H - input height dimension
W - input width dimension
K - kernel height and width (K x K)
*/
const int H_out = H - K + 1;
const int W_out = W - K + 1;
const int W_grid = ceil(1.0 * W_out / TILE_WIDTH);
// We have some nice #defs for you below to simplify indexing. Feel free to use them, or create your own.
// An example use of these macros:
// float a = y4d(0,0,0,0)
// y4d(0,0,0,0) = a
#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
#define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
#define k4d(i3, i2, i1, i0) k[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0]
// Insert your GPU convolution kernel code here
int b = blockIdx.x;
int m = blockIdx.y;
int h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y;
int w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x;
if (h < H_out && w < W_out) {
float acc = 0.;
for (int c = 0; c < C; c++) {
for (int p = 0; p < K; p++) {
for (int q = 0; q < K; q++) {
acc += x4d(b, c, h+p, w+q) * k4d(m, c, p, q);
}
}
}
y4d(b, m, h, w) = acc;
}
#undef y4d
#undef x4d
#undef k4d
}
__host__ void GPUInterface::conv_forward_gpu_prolog(const float *host_y, const float *host_x, const float *host_k, float **device_y_ptr, float **device_x_ptr, float **device_k_ptr, const int B, const int M, const int C, const int H, const int W, const int K)
{
// Allocate memory and copy over the relevant data structures to the GPU
// We pass double pointers for you to initialize the relevant device pointers,
// which are passed to the other two functions.
const int H_out = H - K + 1;
const int W_out = W - K + 1;
hipMalloc((void **) device_x_ptr, B * C * W * H * sizeof(float));
hipMalloc((void **) device_k_ptr, M * C * K * K * sizeof(float));
hipMalloc((void **) device_y_ptr, B * M * W_out * H_out * sizeof(float));
hipMemcpy(*device_x_ptr, host_x, B * C * W * H * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(*device_k_ptr, host_k, M * C * K * K * sizeof(float), hipMemcpyHostToDevice);
// Useful snippet for error checking
hipError_t error = hipGetLastError();
if(error != hipSuccess) {
std::cout<<"CUDA error: "<<hipGetErrorString(error)<<std::endl;
exit(-1);
}
}
__host__ void GPUInterface::conv_forward_gpu(float *device_y, const float *device_x, const float *device_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
// Set the kernel dimensions and call the kernel
const int H_out = H - K + 1;
const int W_out = W - K + 1;
const int W_grid = ceil(1.0 * W_out / TILE_WIDTH);
const int H_grid = ceil(1.0 * H_out / TILE_WIDTH);
const int Z = W_grid * H_grid;
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
dim3 dimGrid(B, M, Z);
hipLaunchKernelGGL(( conv_forward_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, device_y, device_x, device_k, B, M, C, H, W, K);
}
__host__ void GPUInterface::conv_forward_gpu_epilog(float *host_y, float *device_y, float *device_x, float *device_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
const int H_out = H - K + 1;
const int W_out = W - K + 1;
// Copy the output back to host
hipMemcpy(host_y, device_y, B * M * W_out * H_out * sizeof(float), hipMemcpyDeviceToHost);
// Free device memory
hipFree(device_x);
hipFree(device_y);
hipFree(device_k);
}
__host__ void GPUInterface::get_device_properties()
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
for(int dev = 0; dev < deviceCount; dev++)
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl;
std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl;
std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl;
std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl;
std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl;
std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl;
std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl;
std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl;
std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl;
}
} | acbf44d2db8ff3bd82cfbfb3e4e68db3b057d409.cu | #include <cmath>
#include <iostream>
#include "gpu-new-forward.h"
#define TILE_WIDTH 8
__global__ void conv_forward_kernel(float *y, const float *x, const float *k, const int B, const int M, const int C, const int H, const int W, const int K)
{
/*
Modify this function to implement the forward pass described in Chapter 16.
We have added an additional dimension to the tensors to support an entire mini-batch
The goal here is to be correct AND fast.
Function paramter definitions:
y - output
x - input
k - kernel
B - batch_size (number of images in x)
M - number of output feature maps
C - number of input feature maps
H - input height dimension
W - input width dimension
K - kernel height and width (K x K)
*/
const int H_out = H - K + 1;
const int W_out = W - K + 1;
const int W_grid = ceil(1.0 * W_out / TILE_WIDTH);
// We have some nice #defs for you below to simplify indexing. Feel free to use them, or create your own.
// An example use of these macros:
// float a = y4d(0,0,0,0)
// y4d(0,0,0,0) = a
#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
#define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
#define k4d(i3, i2, i1, i0) k[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0]
// Insert your GPU convolution kernel code here
int b = blockIdx.x;
int m = blockIdx.y;
int h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y;
int w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x;
if (h < H_out && w < W_out) {
float acc = 0.;
for (int c = 0; c < C; c++) {
for (int p = 0; p < K; p++) {
for (int q = 0; q < K; q++) {
acc += x4d(b, c, h+p, w+q) * k4d(m, c, p, q);
}
}
}
y4d(b, m, h, w) = acc;
}
#undef y4d
#undef x4d
#undef k4d
}
__host__ void GPUInterface::conv_forward_gpu_prolog(const float *host_y, const float *host_x, const float *host_k, float **device_y_ptr, float **device_x_ptr, float **device_k_ptr, const int B, const int M, const int C, const int H, const int W, const int K)
{
// Allocate memory and copy over the relevant data structures to the GPU
// We pass double pointers for you to initialize the relevant device pointers,
// which are passed to the other two functions.
const int H_out = H - K + 1;
const int W_out = W - K + 1;
cudaMalloc((void **) device_x_ptr, B * C * W * H * sizeof(float));
cudaMalloc((void **) device_k_ptr, M * C * K * K * sizeof(float));
cudaMalloc((void **) device_y_ptr, B * M * W_out * H_out * sizeof(float));
cudaMemcpy(*device_x_ptr, host_x, B * C * W * H * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(*device_k_ptr, host_k, M * C * K * K * sizeof(float), cudaMemcpyHostToDevice);
// Useful snippet for error checking
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess) {
std::cout<<"CUDA error: "<<cudaGetErrorString(error)<<std::endl;
exit(-1);
}
}
__host__ void GPUInterface::conv_forward_gpu(float *device_y, const float *device_x, const float *device_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
// Set the kernel dimensions and call the kernel
const int H_out = H - K + 1;
const int W_out = W - K + 1;
const int W_grid = ceil(1.0 * W_out / TILE_WIDTH);
const int H_grid = ceil(1.0 * H_out / TILE_WIDTH);
const int Z = W_grid * H_grid;
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
dim3 dimGrid(B, M, Z);
conv_forward_kernel<<<dimGrid, dimBlock>>>(device_y, device_x, device_k, B, M, C, H, W, K);
}
__host__ void GPUInterface::conv_forward_gpu_epilog(float *host_y, float *device_y, float *device_x, float *device_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
const int H_out = H - K + 1;
const int W_out = W - K + 1;
// Copy the output back to host
cudaMemcpy(host_y, device_y, B * M * W_out * H_out * sizeof(float), cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(device_x);
cudaFree(device_y);
cudaFree(device_k);
}
__host__ void GPUInterface::get_device_properties()
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for(int dev = 0; dev < deviceCount; dev++)
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl;
std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl;
std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl;
std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl;
std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl;
std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl;
std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl;
std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl;
std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl;
}
} |
f17df19bf69fc5078c7ef10af824dddc2a437e34.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/cuda_check_numerics_kernel_observer.h"
#include "oneflow/core/kernel/kernel.h"
namespace oneflow {
namespace {
template<typename T>
__device__ bool IsNotFinite(T x) {
return !isfinite(x);
}
template<>
__device__ bool IsNotFinite<half>(half x) {
return (__hisinf(x) || __hisnan(x));
}
template<typename T>
__global__ void HasNotFiniteGpuKernel(const int64_t n, const T* x, volatile bool* has_not_finite) {
if (*has_not_finite) { return; }
CUDA_1D_KERNEL_LOOP_T(int64_t, i, n) {
if (IsNotFinite(x[i])) {
*has_not_finite = true;
return;
}
}
}
template<typename T>
bool HasNotFinite(DeviceCtx* device_ctx, const int64_t elem_cnt, const T* data_ptr,
bool* has_not_finite_host, bool* has_not_finite_device) {
OF_CUDA_CHECK(hipMemsetAsync(has_not_finite_device, 0, sizeof(bool), device_ctx->cuda_stream()));
hipLaunchKernelGGL(( HasNotFiniteGpuKernel<T>)
, dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, device_ctx->cuda_stream(),
elem_cnt, data_ptr, has_not_finite_device);
OF_CUDA_CHECK(hipMemcpyAsync(has_not_finite_host, has_not_finite_device, sizeof(bool),
hipMemcpyDefault, device_ctx->cuda_stream()));
OF_CUDA_CHECK(hipStreamSynchronize(device_ctx->cuda_stream()));
return *has_not_finite_host;
}
bool HasNotFiniteGpu(DeviceCtx* device_ctx, const Blob* blob, bool* has_not_finite_host,
bool* has_not_finite_device) {
const DataType dtype = blob->data_type();
const int64_t elem_cnt = blob->shape().elem_cnt();
if (dtype == kFloat) {
return HasNotFinite<float>(device_ctx, elem_cnt, blob->dptr<float>(), has_not_finite_host,
has_not_finite_device);
} else if (dtype == kDouble) {
return HasNotFinite<double>(device_ctx, elem_cnt, blob->dptr<double>(), has_not_finite_host,
has_not_finite_device);
} else if (dtype == kFloat16) {
return HasNotFinite<half>(device_ctx, elem_cnt, blob->dptr<half>(), has_not_finite_host,
has_not_finite_device);
} else {
return false;
}
}
} // namespace
CudaCheckNumericsKernelObserver::CudaCheckNumericsKernelObserver()
: has_not_finite_host_(nullptr), has_not_finite_device_(nullptr) {
OF_CUDA_CHECK(hipGetDevice(&device_id_));
OF_CUDA_CHECK(hipHostMalloc(&has_not_finite_host_, sizeof(bool)));
OF_CUDA_CHECK(hipMalloc(&has_not_finite_device_, sizeof(bool)));
}
CudaCheckNumericsKernelObserver::~CudaCheckNumericsKernelObserver() {
CudaCurrentDeviceGuard guard(device_id_);
OF_CUDA_CHECK(hipHostFree(has_not_finite_host_));
OF_CUDA_CHECK(hipFree(has_not_finite_device_));
}
void CudaCheckNumericsKernelObserver::DidForwardDataContent(KernelContext* ctx,
const Kernel* kernel) {
for (const auto& obn : kernel->op_attribute().output_bns()) {
Blob* blob = ctx->BnInOp2Blob(obn);
if (blob != nullptr) {
bool has_not_finite =
HasNotFiniteGpu(ctx->device_ctx(), blob, has_not_finite_host_, has_not_finite_device_);
CHECK(!has_not_finite) << kernel->op_conf().name() << " : " << obn << " has nan or inf";
}
}
}
} // namespace oneflow
| f17df19bf69fc5078c7ef10af824dddc2a437e34.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/cuda_check_numerics_kernel_observer.h"
#include "oneflow/core/kernel/kernel.h"
namespace oneflow {
namespace {
template<typename T>
__device__ bool IsNotFinite(T x) {
return !isfinite(x);
}
template<>
__device__ bool IsNotFinite<half>(half x) {
return (__hisinf(x) || __hisnan(x));
}
template<typename T>
__global__ void HasNotFiniteGpuKernel(const int64_t n, const T* x, volatile bool* has_not_finite) {
if (*has_not_finite) { return; }
CUDA_1D_KERNEL_LOOP_T(int64_t, i, n) {
if (IsNotFinite(x[i])) {
*has_not_finite = true;
return;
}
}
}
template<typename T>
bool HasNotFinite(DeviceCtx* device_ctx, const int64_t elem_cnt, const T* data_ptr,
bool* has_not_finite_host, bool* has_not_finite_device) {
OF_CUDA_CHECK(cudaMemsetAsync(has_not_finite_device, 0, sizeof(bool), device_ctx->cuda_stream()));
HasNotFiniteGpuKernel<T>
<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, device_ctx->cuda_stream()>>>(
elem_cnt, data_ptr, has_not_finite_device);
OF_CUDA_CHECK(cudaMemcpyAsync(has_not_finite_host, has_not_finite_device, sizeof(bool),
cudaMemcpyDefault, device_ctx->cuda_stream()));
OF_CUDA_CHECK(cudaStreamSynchronize(device_ctx->cuda_stream()));
return *has_not_finite_host;
}
bool HasNotFiniteGpu(DeviceCtx* device_ctx, const Blob* blob, bool* has_not_finite_host,
bool* has_not_finite_device) {
const DataType dtype = blob->data_type();
const int64_t elem_cnt = blob->shape().elem_cnt();
if (dtype == kFloat) {
return HasNotFinite<float>(device_ctx, elem_cnt, blob->dptr<float>(), has_not_finite_host,
has_not_finite_device);
} else if (dtype == kDouble) {
return HasNotFinite<double>(device_ctx, elem_cnt, blob->dptr<double>(), has_not_finite_host,
has_not_finite_device);
} else if (dtype == kFloat16) {
return HasNotFinite<half>(device_ctx, elem_cnt, blob->dptr<half>(), has_not_finite_host,
has_not_finite_device);
} else {
return false;
}
}
} // namespace
CudaCheckNumericsKernelObserver::CudaCheckNumericsKernelObserver()
: has_not_finite_host_(nullptr), has_not_finite_device_(nullptr) {
OF_CUDA_CHECK(cudaGetDevice(&device_id_));
OF_CUDA_CHECK(cudaMallocHost(&has_not_finite_host_, sizeof(bool)));
OF_CUDA_CHECK(cudaMalloc(&has_not_finite_device_, sizeof(bool)));
}
CudaCheckNumericsKernelObserver::~CudaCheckNumericsKernelObserver() {
CudaCurrentDeviceGuard guard(device_id_);
OF_CUDA_CHECK(cudaFreeHost(has_not_finite_host_));
OF_CUDA_CHECK(cudaFree(has_not_finite_device_));
}
void CudaCheckNumericsKernelObserver::DidForwardDataContent(KernelContext* ctx,
const Kernel* kernel) {
for (const auto& obn : kernel->op_attribute().output_bns()) {
Blob* blob = ctx->BnInOp2Blob(obn);
if (blob != nullptr) {
bool has_not_finite =
HasNotFiniteGpu(ctx->device_ctx(), blob, has_not_finite_host_, has_not_finite_device_);
CHECK(!has_not_finite) << kernel->op_conf().name() << " : " << obn << " has nan or inf";
}
}
}
} // namespace oneflow
|
49e89150c22f0690ba8bbdc2e7dde68dbd26a3bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void xMaxDeltaIntegralKernel( const float *intData, const int intDataStrideChannel, float *tmpArray, const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w, const float *xMax, const float *yMin, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
// const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMaxInt >= 1 and x+xMaxInt < h);
*tmpArray = delta;
}
} | 49e89150c22f0690ba8bbdc2e7dde68dbd26a3bf.cu | #include "includes.h"
__global__ void xMaxDeltaIntegralKernel( const float *intData, const int intDataStrideChannel, float *tmpArray, const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w, const float *xMax, const float *yMin, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
// const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMaxInt >= 1 and x+xMaxInt < h);
*tmpArray = delta;
}
} |
575f92c7992af35c8b8f350eeeedd64b7293e507.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void KNNSearch( float * result, const int * args, const float * pc1, const float * pc2)
{
int cudaNumBlocks = args[0];
int cudaNumThreads = args[1];
int pc1NumPts = args[2];
int pc2NumPts = args[3];
int pc2Idx = blockIdx.x * cudaNumThreads + threadIdx.x;
float currPtX = pc2[pc2NumPts * 0 + pc2Idx];
float currPtY = pc2[pc2NumPts * 1 + pc2Idx];
float currPtZ = pc2[pc2NumPts * 2 + pc2Idx];
if (pc2Idx < pc2NumPts) {
int nnIdx = 0;
float nnDist = 100000.0f;
for (int i = 0; i < pc1NumPts; i++) {
float otherPtX = pc1[pc1NumPts * 0 + i];
float otherPtY = pc1[pc1NumPts * 1 + i];
float otherPtZ = pc1[pc1NumPts * 2 + i];
float checkDist = (currPtX - otherPtX) * (currPtX - otherPtX) +
(currPtY - otherPtY) * (currPtY - otherPtY) +
(currPtZ - otherPtZ) * (currPtZ - otherPtZ);
if (checkDist < nnDist) {
nnDist = checkDist;
nnIdx = i;
}
}
result[pc2NumPts * 0 + pc2Idx] = nnIdx + 1;
result[pc2NumPts * 1 + pc2Idx] = nnDist;
}
} | 575f92c7992af35c8b8f350eeeedd64b7293e507.cu | __global__ void KNNSearch( float * result, const int * args, const float * pc1, const float * pc2)
{
int cudaNumBlocks = args[0];
int cudaNumThreads = args[1];
int pc1NumPts = args[2];
int pc2NumPts = args[3];
int pc2Idx = blockIdx.x * cudaNumThreads + threadIdx.x;
float currPtX = pc2[pc2NumPts * 0 + pc2Idx];
float currPtY = pc2[pc2NumPts * 1 + pc2Idx];
float currPtZ = pc2[pc2NumPts * 2 + pc2Idx];
if (pc2Idx < pc2NumPts) {
int nnIdx = 0;
float nnDist = 100000.0f;
for (int i = 0; i < pc1NumPts; i++) {
float otherPtX = pc1[pc1NumPts * 0 + i];
float otherPtY = pc1[pc1NumPts * 1 + i];
float otherPtZ = pc1[pc1NumPts * 2 + i];
float checkDist = (currPtX - otherPtX) * (currPtX - otherPtX) +
(currPtY - otherPtY) * (currPtY - otherPtY) +
(currPtZ - otherPtZ) * (currPtZ - otherPtZ);
if (checkDist < nnDist) {
nnDist = checkDist;
nnIdx = i;
}
}
result[pc2NumPts * 0 + pc2Idx] = nnIdx + 1;
result[pc2NumPts * 1 + pc2Idx] = nnDist;
}
} |
0a1536e7d1b476a35771cbabc0dc3de0388e5f70.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "makeKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *KernelPhase = NULL;
hipMalloc(&KernelPhase, XSIZE*YSIZE);
int row = 1;
int column = 1;
float *ImgProperties = NULL;
hipMalloc(&ImgProperties, XSIZE*YSIZE);
float MagXscaling = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
makeKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, KernelPhase,row,column,ImgProperties,MagXscaling);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
makeKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, KernelPhase,row,column,ImgProperties,MagXscaling);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
makeKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, KernelPhase,row,column,ImgProperties,MagXscaling);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0a1536e7d1b476a35771cbabc0dc3de0388e5f70.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "makeKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *KernelPhase = NULL;
cudaMalloc(&KernelPhase, XSIZE*YSIZE);
int row = 1;
int column = 1;
float *ImgProperties = NULL;
cudaMalloc(&ImgProperties, XSIZE*YSIZE);
float MagXscaling = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
makeKernel<<<gridBlock,threadBlock>>>(KernelPhase,row,column,ImgProperties,MagXscaling);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
makeKernel<<<gridBlock,threadBlock>>>(KernelPhase,row,column,ImgProperties,MagXscaling);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
makeKernel<<<gridBlock,threadBlock>>>(KernelPhase,row,column,ImgProperties,MagXscaling);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e0f664f535fcce5ad88b66571752b699853b2f81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author sgazeos@gmail.com
//
#include <array/NDArray.h>
#include <system/op_boilerplate.h>
namespace sd {
namespace ops {
namespace helpers {
typedef NDArray ColorTable_t;
static NDArray DefaultColorTable(int depth, sd::LaunchContext* context) {
// std::vector<std::vector<float>> colorTable;
const sd::LongType kDefaultTableLength = 10;
const sd::LongType kDefaultChannelLength = 4;
NDArray colorTable('c', {kDefaultTableLength, kDefaultChannelLength},
{
1, 1, 0, 1, // yellow
0, 0, 1, 1, // 1: blue
1, 0, 0, 1, // 2: red
0, 1, 0, 1, // 3: lime
0.5, 0, 0.5, 1, // 4: purple
0.5, 0.5, 0, 1, // 5: olive
0.5, 0, 0, 1, // 6: maroon
0, 0, 0.5, 1, // 7: navy blue
0, 1, 1, 1, // 8: aqua
1, 0, 1, 1 // 9: fuchsia
},
DataType::FLOAT32, context);
if (depth == 1) {
colorTable.assign(1.f); // all to white when black and white colors
}
return colorTable;
}
template <typename T>
static SD_KERNEL void drawBoundingBoxesKernel(T const* images, const sd::LongType* imagesShape, float const* boxes,
const sd::LongType* boxesShape, float const* colorTable,
const sd::LongType* colorTableShape, T* output,
const sd::LongType* outputShape, sd::LongType batchSize,
sd::LongType width, sd::LongType height, sd::LongType channels,
sd::LongType boxSize, sd::LongType colorTableLen) {
for (auto batch = blockIdx.x; batch < (int)batchSize; batch += gridDim.x) { // loop by batch
for (auto boxIndex = 0; boxIndex < boxSize; ++boxIndex) {
// box with shape
// auto internalBox = &boxes[b * colorSetSize * 4 + c * 4];//(*boxes)(b, {0})(c, {0});//internalBoxes->at(c);
auto colorIndex = boxIndex % colorTableLen; // colorSet->at(c);
// auto rowStart = sd::math::sd_max(sd::LongType (0), sd::LongType ((height - 1) *
// internalBox[0])); auto rowEnd = sd::math::sd_min(sd::LongType (height - 1), sd::LongType
// ((height - 1) * internalBox[2])); auto colStart = sd::math::sd_max(sd::LongType (0),
// sd::LongType ((width - 1) * internalBox[1])); auto colEnd = sd::math::sd_min(sd::LongType(width
// - 1), sd::LongType ((width - 1) * internalBox[3]));
sd::LongType indices0[] = {batch, boxIndex, 0};
sd::LongType indices1[] = {batch, boxIndex, 1};
sd::LongType indices2[] = {batch, boxIndex, 2};
sd::LongType indices3[] = {batch, boxIndex, 3};
auto rowStart = sd::LongType((height - 1) * boxes[shape::getOffset(boxesShape, indices0, 0)]);
auto rowStartBound = sd::math::sd_max(sd::LongType(0), rowStart);
auto rowEnd = sd::LongType((height - 1) * boxes[shape::getOffset(boxesShape, indices2, 0)]);
auto rowEndBound = sd::math::sd_min(sd::LongType(height - 1), rowEnd);
auto colStart = sd::LongType((width - 1) * boxes[shape::getOffset(boxesShape, indices1, 0)]);
auto colStartBound = sd::math::sd_max(sd::LongType(0), colStart);
auto colEnd = sd::LongType((width - 1) * boxes[shape::getOffset(boxesShape, indices3, 0)]);
auto colEndBound = sd::math::sd_min(sd::LongType(width - 1), colEnd);
if (rowStart > rowEnd || colStart > colEnd) {
// printf("helpers::drawBoundingBoxesFunctor: Bounding box (%lld, %lld, %lld, %lld) is
// inverted "
// "and will not be drawn\n", rowStart, colStart, rowEnd, colEnd);
continue;
}
if (rowStart >= height || rowEnd < 0 || colStart >= width || colEnd < 0) {
// printf("helpers::drawBoundingBoxesFunctor: Bounding box (%lld, %lld, %lld, %lld) is
// completely "
// "outside the image and not be drawn\n", rowStart, colStart, rowEnd, colEnd);
continue;
}
// Draw upper line
if (rowStart >= 0) {
for (auto j = colStartBound + threadIdx.x; j <= colEndBound; j += blockDim.x)
for (auto c = 0; c < channels; c++) {
sd::LongType zPos[] = {batch, rowStart, j, c};
sd::LongType cPos[] = {colorIndex, c};
auto cIndex = shape::getOffset(colorTableShape, cPos, 0);
auto zIndex = shape::getOffset(outputShape, zPos, 0);
output[zIndex] = (T)colorTable[cIndex];
}
}
// Draw bottom line.
if (rowEnd < height) {
for (auto j = colStartBound + threadIdx.x; j <= colEndBound; j += blockDim.x)
for (auto c = 0; c < channels; c++) {
sd::LongType zPos[] = {batch, rowEnd, j, c};
sd::LongType cPos[] = {colorIndex, c};
auto cIndex = shape::getOffset(colorTableShape, cPos, 0);
auto zIndex = shape::getOffset(outputShape, zPos, 0);
output[zIndex] = (T)colorTable[cIndex];
}
}
// Draw left line.
if (colStart >= 0) {
for (auto i = rowStartBound + threadIdx.x; i <= rowEndBound; i += blockDim.x)
for (auto c = 0; c < channels; c++) {
sd::LongType zPos[] = {batch, i, colStart, c};
sd::LongType cPos[] = {colorIndex, c};
auto cIndex = shape::getOffset(colorTableShape, cPos, 0);
auto zIndex = shape::getOffset(outputShape, zPos, 0);
output[zIndex] = (T)colorTable[cIndex];
}
}
// Draw right line.
if (colEnd < width) {
for (auto i = rowStartBound + threadIdx.x; i <= rowEndBound; i += blockDim.x)
for (auto c = 0; c < channels; c++) {
sd::LongType zPos[] = {batch, i, colEnd, c};
sd::LongType cPos[] = {colorIndex, c};
auto cIndex = shape::getOffset(colorTableShape, cPos, 0);
auto zIndex = shape::getOffset(outputShape, zPos, 0);
output[zIndex] = (T)colorTable[cIndex];
}
}
}
}
}
// Host-side launcher: instantiates the bounding-box drawing kernel for image type T.
// Images are laid out as (batch, height, width, channels); boxes and the color table
// are always float32 regardless of T.
template <typename T>
void drawBoundingBoxesH(sd::LaunchContext* context, NDArray const* images, NDArray const* boxes, NDArray const* colors,
NDArray* output) {
auto batchSize = images->sizeAt(0);
auto height = images->sizeAt(1);
auto width = images->sizeAt(2);
auto channels = images->sizeAt(3);
auto stream = context->getCudaStream();
auto boxSize = boxes->sizeAt(1); // number of boxes per batch entry
// Use the built-in palette unless the caller supplied a non-empty color set.
NDArray colorsTable = DefaultColorTable(channels, context);
if ((colors != nullptr && colors->lengthOf() > 0)) {
colorsTable = *colors;
}
auto imagesBuf = images->getDataBuffer()->specialAsT<T>();
auto boxesBuf = boxes->getDataBuffer()->specialAsT<float>(); // boxes should be float32
auto colorsTableBuf = colorsTable.getDataBuffer()->specialAsT<float>(); // color table is float32
auto outputBuf = output->dataBuffer()->specialAsT<T>();
// Fixed launch shape: 128 blocks x 128 threads, 1024 bytes of dynamic shared memory.
hipLaunchKernelGGL(( drawBoundingBoxesKernel), dim3(128), dim3(128), 1024, *stream,
imagesBuf, images->specialShapeInfo(), boxesBuf, boxes->specialShapeInfo(), colorsTableBuf,
colorsTable.specialShapeInfo(), outputBuf, output->specialShapeInfo(), batchSize, width, height, channels,
boxSize, colorsTable.lengthOf());
}
// Public entry point: copies `images` into `output` and draws each box frame on top.
void drawBoundingBoxesFunctor(sd::LaunchContext* context, NDArray* images, NDArray* boxes, NDArray* colors,
NDArray* output) {
// images - batch of 3D images with BW (last dim = 1), RGB (last dim = 3) or RGBA (last dim = 4) channel set
// boxes - batch of 2D bounds with last dim (y_start, x_start, y_end, x_end) to compute i and j as
// floor((height - 1 ) * y_start) => rowStart, floor((height - 1) * y_end) => rowEnd
// floor((width - 1 ) * x_start) => colStart, floor((width - 1) * x_end) => colEnd
// height = images->sizeAt(1), width = images->sizeAt(2)
// colors - colors for each box given
// set up color for each box as frame
NDArray::prepareSpecialUse({output}, {images, boxes, colors});
// Start from a copy of the input; the kernel only overwrites the frame pixels.
output->assign(images);
// Dispatch on the output element type (float types only).
BUILD_SINGLE_SELECTOR(output->dataType(), drawBoundingBoxesH, (context, images, boxes, colors, output),
SD_FLOAT_TYPES);
NDArray::registerSpecialUse({output}, {images, boxes, colors});
}
} // namespace helpers
} // namespace ops
} // namespace sd
| e0f664f535fcce5ad88b66571752b699853b2f81.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author sgazeos@gmail.com
//
#include <array/NDArray.h>
#include <system/op_boilerplate.h>
namespace sd {
namespace ops {
namespace helpers {
typedef NDArray ColorTable_t;
// Builds the fallback palette used when the caller provides no colors: ten RGBA rows
// (float32). Boxes cycle through the rows via boxIndex % tableLength in the kernel.
// For single-channel (black & white) images every entry collapses to white (1.0).
static NDArray DefaultColorTable(int depth, sd::LaunchContext* context) {
// std::vector<std::vector<float>> colorTable;
const sd::LongType kDefaultTableLength = 10;
const sd::LongType kDefaultChannelLength = 4;
NDArray colorTable('c', {kDefaultTableLength, kDefaultChannelLength},
{
1, 1, 0, 1, // yellow
0, 0, 1, 1, // 1: blue
1, 0, 0, 1, // 2: red
0, 1, 0, 1, // 3: lime
0.5, 0, 0.5, 1, // 4: purple
0.5, 0.5, 0, 1, // 5: olive
0.5, 0, 0, 1, // 6: maroon
0, 0, 0.5, 1, // 7: navy blue
0, 1, 1, 1, // 8: aqua
1, 0, 1, 1 // 9: fuchsia
},
DataType::FLOAT32, context);
if (depth == 1) {
colorTable.assign(1.f); // all to white when black and white colors
}
return colorTable;
}
// Draws one rectangular frame per (batch, box) pair onto `output` (assumed to already
// hold a copy of the input image). Blocks stripe over the batch dimension; within a
// block, threadIdx.x stripes over the pixels of each frame edge. Box coordinates are
// fractional (y_start, x_start, y_end, x_end) scaled by (height-1)/(width-1).
// Inverted or fully out-of-image boxes are skipped.
template <typename T>
static SD_KERNEL void drawBoundingBoxesKernel(T const* images, const sd::LongType* imagesShape, float const* boxes,
const sd::LongType* boxesShape, float const* colorTable,
const sd::LongType* colorTableShape, T* output,
const sd::LongType* outputShape, sd::LongType batchSize,
sd::LongType width, sd::LongType height, sd::LongType channels,
sd::LongType boxSize, sd::LongType colorTableLen) {
for (auto batch = blockIdx.x; batch < (int)batchSize; batch += gridDim.x) { // loop by batch
for (auto boxIndex = 0; boxIndex < boxSize; ++boxIndex) {
// box with shape
// auto internalBox = &boxes[b * colorSetSize * 4 + c * 4];//(*boxes)(b, {0})(c, {0});//internalBoxes->at(c);
// Cycle through the palette so every box gets a (possibly repeated) color.
auto colorIndex = boxIndex % colorTableLen; // colorSet->at(c);
// auto rowStart = sd::math::sd_max(sd::LongType (0), sd::LongType ((height - 1) *
// internalBox[0])); auto rowEnd = sd::math::sd_min(sd::LongType (height - 1), sd::LongType
// ((height - 1) * internalBox[2])); auto colStart = sd::math::sd_max(sd::LongType (0),
// sd::LongType ((width - 1) * internalBox[1])); auto colEnd = sd::math::sd_min(sd::LongType(width
// - 1), sd::LongType ((width - 1) * internalBox[3]));
// Offsets of the four box coordinates for this (batch, box) pair.
sd::LongType indices0[] = {batch, boxIndex, 0};
sd::LongType indices1[] = {batch, boxIndex, 1};
sd::LongType indices2[] = {batch, boxIndex, 2};
sd::LongType indices3[] = {batch, boxIndex, 3};
// Raw (possibly out-of-range) pixel coordinates plus their image-clamped bounds.
auto rowStart = sd::LongType((height - 1) * boxes[shape::getOffset(boxesShape, indices0, 0)]);
auto rowStartBound = sd::math::sd_max(sd::LongType(0), rowStart);
auto rowEnd = sd::LongType((height - 1) * boxes[shape::getOffset(boxesShape, indices2, 0)]);
auto rowEndBound = sd::math::sd_min(sd::LongType(height - 1), rowEnd);
auto colStart = sd::LongType((width - 1) * boxes[shape::getOffset(boxesShape, indices1, 0)]);
auto colStartBound = sd::math::sd_max(sd::LongType(0), colStart);
auto colEnd = sd::LongType((width - 1) * boxes[shape::getOffset(boxesShape, indices3, 0)]);
auto colEndBound = sd::math::sd_min(sd::LongType(width - 1), colEnd);
if (rowStart > rowEnd || colStart > colEnd) {
// printf("helpers::drawBoundingBoxesFunctor: Bounding box (%lld, %lld, %lld, %lld) is
// inverted "
// "and will not be drawn\n", rowStart, colStart, rowEnd, colEnd);
continue;
}
if (rowStart >= height || rowEnd < 0 || colStart >= width || colEnd < 0) {
// printf("helpers::drawBoundingBoxesFunctor: Bounding box (%lld, %lld, %lld, %lld) is
// completely "
// "outside the image and not be drawn\n", rowStart, colStart, rowEnd, colEnd);
continue;
}
// Each edge is drawn only when it actually lies inside the image; the loop bounds
// use the clamped values so partially visible boxes are clipped, not skipped.
// Draw upper line
if (rowStart >= 0) {
for (auto j = colStartBound + threadIdx.x; j <= colEndBound; j += blockDim.x)
for (auto c = 0; c < channels; c++) {
sd::LongType zPos[] = {batch, rowStart, j, c};
sd::LongType cPos[] = {colorIndex, c};
auto cIndex = shape::getOffset(colorTableShape, cPos, 0);
auto zIndex = shape::getOffset(outputShape, zPos, 0);
output[zIndex] = (T)colorTable[cIndex];
}
}
// Draw bottom line.
if (rowEnd < height) {
for (auto j = colStartBound + threadIdx.x; j <= colEndBound; j += blockDim.x)
for (auto c = 0; c < channels; c++) {
sd::LongType zPos[] = {batch, rowEnd, j, c};
sd::LongType cPos[] = {colorIndex, c};
auto cIndex = shape::getOffset(colorTableShape, cPos, 0);
auto zIndex = shape::getOffset(outputShape, zPos, 0);
output[zIndex] = (T)colorTable[cIndex];
}
}
// Draw left line.
if (colStart >= 0) {
for (auto i = rowStartBound + threadIdx.x; i <= rowEndBound; i += blockDim.x)
for (auto c = 0; c < channels; c++) {
sd::LongType zPos[] = {batch, i, colStart, c};
sd::LongType cPos[] = {colorIndex, c};
auto cIndex = shape::getOffset(colorTableShape, cPos, 0);
auto zIndex = shape::getOffset(outputShape, zPos, 0);
output[zIndex] = (T)colorTable[cIndex];
}
}
// Draw right line.
if (colEnd < width) {
for (auto i = rowStartBound + threadIdx.x; i <= rowEndBound; i += blockDim.x)
for (auto c = 0; c < channels; c++) {
sd::LongType zPos[] = {batch, i, colEnd, c};
sd::LongType cPos[] = {colorIndex, c};
auto cIndex = shape::getOffset(colorTableShape, cPos, 0);
auto zIndex = shape::getOffset(outputShape, zPos, 0);
output[zIndex] = (T)colorTable[cIndex];
}
}
}
}
}
// Host-side launcher: instantiates the bounding-box drawing kernel for image type T.
// Images are laid out as (batch, height, width, channels); boxes and the color table
// are always float32 regardless of T.
template <typename T>
void drawBoundingBoxesH(sd::LaunchContext* context, NDArray const* images, NDArray const* boxes, NDArray const* colors,
NDArray* output) {
auto batchSize = images->sizeAt(0);
auto height = images->sizeAt(1);
auto width = images->sizeAt(2);
auto channels = images->sizeAt(3);
auto stream = context->getCudaStream();
auto boxSize = boxes->sizeAt(1); // number of boxes per batch entry
// Use the built-in palette unless the caller supplied a non-empty color set.
NDArray colorsTable = DefaultColorTable(channels, context);
if ((colors != nullptr && colors->lengthOf() > 0)) {
colorsTable = *colors;
}
auto imagesBuf = images->getDataBuffer()->specialAsT<T>();
auto boxesBuf = boxes->getDataBuffer()->specialAsT<float>(); // boxes should be float32
auto colorsTableBuf = colorsTable.getDataBuffer()->specialAsT<float>(); // color table is float32
auto outputBuf = output->dataBuffer()->specialAsT<T>();
// Fixed launch shape: 128 blocks x 128 threads, 1024 bytes of dynamic shared memory.
drawBoundingBoxesKernel<<<128, 128, 1024, *stream>>>(
imagesBuf, images->specialShapeInfo(), boxesBuf, boxes->specialShapeInfo(), colorsTableBuf,
colorsTable.specialShapeInfo(), outputBuf, output->specialShapeInfo(), batchSize, width, height, channels,
boxSize, colorsTable.lengthOf());
}
// Public entry point: copies `images` into `output` and draws each box frame on top.
void drawBoundingBoxesFunctor(sd::LaunchContext* context, NDArray* images, NDArray* boxes, NDArray* colors,
NDArray* output) {
// images - batch of 3D images with BW (last dim = 1), RGB (last dim = 3) or RGBA (last dim = 4) channel set
// boxes - batch of 2D bounds with last dim (y_start, x_start, y_end, x_end) to compute i and j as
// floor((height - 1 ) * y_start) => rowStart, floor((height - 1) * y_end) => rowEnd
// floor((width - 1 ) * x_start) => colStart, floor((width - 1) * x_end) => colEnd
// height = images->sizeAt(1), width = images->sizeAt(2)
// colors - colors for each box given
// set up color for each box as frame
NDArray::prepareSpecialUse({output}, {images, boxes, colors});
// Start from a copy of the input; the kernel only overwrites the frame pixels.
output->assign(images);
// Dispatch on the output element type (float types only).
BUILD_SINGLE_SELECTOR(output->dataType(), drawBoundingBoxesH, (context, images, boxes, colors, output),
SD_FLOAT_TYPES);
NDArray::registerSpecialUse({output}, {images, boxes, colors});
}
} // namespace helpers
} // namespace ops
} // namespace sd
|
cfc4dbe63f045d8ca2c47467a8b8622066dd71d9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// Trivial demo kernel: each thread reports its own index within the block.
__global__ void hello_kernel()
{
    const int tid = threadIdx.x;
    printf("Hello World from Thread %d\n", tid);
}
// Demo driver: selects GPU 0, launches hello_kernel with 10 threads, and waits
// for completion. Returns non-zero when any runtime call fails.
int main(int argc, char *argv[])
{
    //set the CUDA device to the default CUDA GPU (device 0)
    hipError_t result = hipSetDevice(0);
    if (result != hipSuccess ){
        printf("Error setting default GPU device.\n");
        return 1;  // no usable device -- launching would fail anyway
    }
    //call a CUDA kernel
    dim3 blocksPerGrid(1, 1, 1);
    dim3 threadsPerBlock(10, 1, 1);
    hipLaunchKernelGGL(( hello_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, );
    // Launch-configuration errors are only reported via hipGetLastError().
    result = hipGetLastError();
    if (result != hipSuccess) {
        printf("Error launching kernel: %s\n", hipGetErrorString(result));
        return 1;
    }
    //synchronise - also surfaces asynchronous execution errors from the kernel
    result = hipDeviceSynchronize();
    if (result != hipSuccess) {
        printf("Error synchronizing with device: %s\n", hipGetErrorString(result));
        return 1;
    }
    return 0;
} | cfc4dbe63f045d8ca2c47467a8b8622066dd71d9.cu | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Trivial demo kernel: each thread reports its own index within the block.
__global__ void hello_kernel()
{
    const int tid = threadIdx.x;
    printf("Hello World from Thread %d\n", tid);
}
// Demo driver: selects GPU 0, launches hello_kernel with 10 threads, and waits
// for completion. Returns non-zero when any runtime call fails.
int main(int argc, char *argv[])
{
    //set the CUDA device to the default CUDA GPU (device 0)
    cudaError_t result = cudaSetDevice(0);
    if (result != cudaSuccess ){
        printf("Error setting default GPU device.\n");
        return 1;  // no usable device -- launching would fail anyway
    }
    //call a CUDA kernel
    dim3 blocksPerGrid(1, 1, 1);
    dim3 threadsPerBlock(10, 1, 1);
    hello_kernel<<<blocksPerGrid, threadsPerBlock>>>();
    // Launch-configuration errors are only reported via cudaGetLastError().
    result = cudaGetLastError();
    if (result != cudaSuccess) {
        printf("Error launching kernel: %s\n", cudaGetErrorString(result));
        return 1;
    }
    //synchronise - also surfaces asynchronous execution errors from the kernel
    result = cudaDeviceSynchronize();
    if (result != cudaSuccess) {
        printf("Error synchronizing with device: %s\n", cudaGetErrorString(result));
        return 1;
    }
    return 0;
} |
271928b1786264ae378e11aa883cdbf3aafbe4fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <transfer.h>
#include <gauge_field_order.h>
#include <color_spinor_field_order.h>
#include <index_helper.cuh>
#if __COMPUTE_CAPABILITY__ >= 300
#include <generics/shfl.h>
#endif
namespace quda {
#ifdef GPU_MULTIGRID
template <typename Float, typename F, typename G>
struct DslashCoarseArg {
F out;
const F inA;
const F inB;
const G Y;
const G X;
const Float kappa;
const int parity; // only use this for single parity fields
const int nParity; // number of parities we're working on
const int nFace; // hard code to 1 for now
const int dim[5]; // full lattice dimensions
const int commDim[4]; // whether a given dimension is partitioned or not
const int volumeCB;
DslashCoarseArg(F &out, const F &inA, const F &inB, const G &Y, const G &X,
Float kappa, int parity, const ColorSpinorField &meta)
: out(out), inA(inA), inB(inB), Y(Y), X(X), kappa(kappa), parity(parity),
nParity(meta.SiteSubset()), nFace(1),
dim{ (3-nParity) * meta.X(0), meta.X(1), meta.X(2), meta.X(3), meta.Ndim() == 5 ? meta.X(4) : 1 },
commDim{comm_dim_partitioned(0), comm_dim_partitioned(1), comm_dim_partitioned(2), comm_dim_partitioned(3)},
volumeCB(meta.VolumeCB()/dim[4])
{ }
};
/**
Applies the coarse dslash on a given parity and checkerboard site index
@param out The result - kappa * Dslash in
@param Y The coarse gauge field
@param kappa Kappa value
@param in The input field
@param parity The site parity
@param x_cb The checkerboarded site index
*/
// Dynamic shared memory used by the GPU path to combine partial sums from the
// forward/backward and per-dimension thread splits.
extern __shared__ float s[];
// Accumulates -kappa * Dslash(in) into out[] for one site/spin/color-block.
// On the GPU the work is split across threads: thread_dir selects forward (0) or
// backward (1) gathers and thread_dim staggers the dimensions (stride dim_stride);
// partial results are exchanged through shared memory and recombined by the
// (thread_dir==0, thread_dim==0) thread. The CPU path runs both halves inline.
template <typename Float, typename F, typename G, int nDim, int Ns, int Nc, int Mc, int color_stride, int dim_stride, int thread_dir, int thread_dim>
__device__ __host__ inline void applyDslash(complex<Float> out[], DslashCoarseArg<Float,F,G> &arg, int x_cb, int src_idx, int parity, int s_row, int color_block, int color_offset) {
const int their_spinor_parity = (arg.nParity == 2) ? (parity+1)&1 : 0;
int coord[5];
getCoords(coord, x_cb, arg.dim, parity);
coord[4] = src_idx;
#ifdef __CUDA_ARCH__
complex<Float> *shared_sum = (complex<Float>*)s;
if (!thread_dir) {
#endif
//Forward gather - compute fwd offset for spinor fetch
#pragma unroll
for(int d = thread_dim; d < nDim; d+=dim_stride) // loop over dimension
{
const int fwd_idx = linkIndexP1(coord, arg.dim, d);
// Site on the forward boundary of a partitioned dimension: read from the ghost zone.
if ( arg.commDim[d] && (coord[d] + arg.nFace >= arg.dim[d]) ) {
int ghost_idx = ghostFaceIndex<1>(coord, arg.dim, d, arg.nFace);
#pragma unroll
for(int color_local = 0; color_local < Mc; color_local++) { //Color row
int c_row = color_block + color_local; // global color index
int row = s_row*Nc + c_row;
#pragma unroll
for(int s_col = 0; s_col < Ns; s_col++) { //Spin column
#pragma unroll
for(int c_col = 0; c_col < Nc; c_col+=color_stride) { //Color column
int col = s_col*Nc + c_col + color_offset;
out[color_local] += arg.Y(d+4, parity, x_cb, row, col)
* arg.inA.Ghost(d, 1, their_spinor_parity, ghost_idx + src_idx*arg.volumeCB, s_col, c_col+color_offset);
}
}
}
} else {
#pragma unroll
for(int color_local = 0; color_local < Mc; color_local++) { //Color row
int c_row = color_block + color_local; // global color index
int row = s_row*Nc + c_row;
#pragma unroll
for(int s_col = 0; s_col < Ns; s_col++) { //Spin column
#pragma unroll
for(int c_col = 0; c_col < Nc; c_col+=color_stride) { //Color column
int col = s_col*Nc + c_col + color_offset;
out[color_local] += arg.Y(d+4, parity, x_cb, row, col)
* arg.inA(their_spinor_parity, fwd_idx + src_idx*arg.volumeCB, s_col, c_col+color_offset);
}
}
}
}
} // nDim
#if defined(__CUDA_ARCH__)
if (thread_dim > 0) { // only need to write to shared memory if not master thread
#pragma unroll
for (int color_local=0; color_local < Mc; color_local++) {
shared_sum[((color_local * blockDim.z + threadIdx.z )*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x] = out[color_local];
}
}
#endif
#ifdef __CUDA_ARCH__
} else {
#endif
//Backward gather - compute back offset for spinor and gauge fetch
#pragma unroll
for(int d = thread_dim; d < nDim; d+=dim_stride)
{
const int back_idx = linkIndexM1(coord, arg.dim, d);
const int gauge_idx = back_idx;
// Site on the backward boundary of a partitioned dimension: read from the ghost zone.
if ( arg.commDim[d] && (coord[d] - arg.nFace < 0) ) {
const int ghost_idx = ghostFaceIndex<0>(coord, arg.dim, d, arg.nFace);
#pragma unroll
for (int color_local=0; color_local<Mc; color_local++) {
int c_row = color_block + color_local;
int row = s_row*Nc + c_row;
#pragma unroll
for (int s_col=0; s_col<Ns; s_col++)
#pragma unroll
for (int c_col=0; c_col<Nc; c_col+=color_stride) {
int col = s_col*Nc + c_col + color_offset;
out[color_local] += conj(arg.Y.Ghost(d, (parity+1)&1, ghost_idx, col, row))
* arg.inA.Ghost(d, 0, their_spinor_parity, ghost_idx + src_idx*arg.volumeCB, s_col, c_col+color_offset);
}
}
} else {
#pragma unroll
for(int color_local = 0; color_local < Mc; color_local++) {
int c_row = color_block + color_local;
int row = s_row*Nc + c_row;
#pragma unroll
for(int s_col = 0; s_col < Ns; s_col++)
#pragma unroll
for(int c_col = 0; c_col < Nc; c_col+=color_stride) {
int col = s_col*Nc + c_col + color_offset;
out[color_local] += conj(arg.Y(d, (parity+1)&1, gauge_idx, col, row))
* arg.inA(their_spinor_parity, back_idx + src_idx*arg.volumeCB, s_col, c_col+color_offset);
}
}
}
} //nDim
#if defined(__CUDA_ARCH__)
// All backward-gather threads publish their partials for the master thread to sum.
#pragma unroll
for (int color_local=0; color_local < Mc; color_local++) {
shared_sum[ ((color_local * blockDim.z + threadIdx.z )*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x] = out[color_local];
}
} // forwards / backwards thread split
#endif
#ifdef __CUDA_ARCH__ // CUDA path has to recombine the foward and backward results
__syncthreads();
// (colorspin * dim_stride + dim * 2 + dir)
if (thread_dim == 0 && thread_dir == 0) {
// full split over dimension and direction
#pragma unroll
for (int d=1; d<dim_stride; d++) { // get remaining forward fathers (if any)
// 4-way 1,2,3 (stride = 4)
// 2-way 1 (stride = 2)
#pragma unroll
for (int color_local=0; color_local < Mc; color_local++) {
out[color_local] +=
shared_sum[(((color_local*blockDim.z/(2*dim_stride) + threadIdx.z/(2*dim_stride)) * 2 * dim_stride + d * 2 + 0)*blockDim.y+threadIdx.y)*blockDim.x+threadIdx.x];
}
}
#pragma unroll
for (int d=0; d<dim_stride; d++) { // get all backward gathers
#pragma unroll
for (int color_local=0; color_local < Mc; color_local++) {
out[color_local] +=
shared_sum[(((color_local*blockDim.z/(2*dim_stride) + threadIdx.z/(2*dim_stride)) * 2 * dim_stride + d * 2 + 1)*blockDim.y+threadIdx.y)*blockDim.x+threadIdx.x];
}
}
// apply kappa
#pragma unroll
for (int color_local=0; color_local<Mc; color_local++) out[color_local] *= -arg.kappa;
}
#else // !__CUDA_ARCH__
for (int color_local=0; color_local<Mc; color_local++) out[color_local] *= -arg.kappa;
#endif
}
/**
Applies the coarse clover matrix on a given parity and
checkerboard site index
@param out The result out += X * in
@param X The coarse clover field
@param in The input field
@param parity The site parity
@param x_cb The checkerboarded site index
*/
// Accumulates the coarse clover term out += X * inB for one site/spin/color-block.
// Only the clover (dir==0, dim==0) thread calls this, so no shared-memory reduction
// is needed; each thread handles Mc color rows and color_stride-split columns.
template <typename Float, typename F, typename G, int Ns, int Nc, int Mc, int color_stride>
__device__ __host__ inline void applyClover(complex<Float> out[], DslashCoarseArg<Float,F,G> &arg, int x_cb, int src_idx, int parity, int s, int color_block, int color_offset) {
const int spinor_parity = (arg.nParity == 2) ? parity : 0;
// M is number of colors per thread
#pragma unroll
for(int color_local = 0; color_local < Mc; color_local++) {//Color out
int c = color_block + color_local; // global color index
int row = s*Nc + c;
#pragma unroll
for(int s_col = 0; s_col < Ns; s_col++) //Spin in
#pragma unroll
for(int c_col = 0; c_col < Nc; c_col+=color_stride) { //Color in
//Factor of kappa and diagonal addition now incorporated in X
int col = s_col*Nc + c_col + color_offset;
out[color_local] += arg.X(0, parity, x_cb, row, col) * arg.inB(spinor_parity, x_cb+src_idx*arg.volumeCB, s_col, c_col+color_offset);
}
}
}
//out(x) = M*in = \sum_mu Y_{-\mu}(x)in(x+mu) + Y^\dagger_mu(x-mu)in(x-mu)
// Per-site worker: computes the Dslash and/or clover contribution for one
// (site, spin, color-block) and writes the result. Only the (dir==0, dim==0)
// thread performs the clover term and the final store; on sm_30+ the
// column-split (color_stride) partials are first reduced via warp shuffle.
template <typename Float, typename F, typename G, int nDim, int Ns, int Nc, int Mc, int color_stride,
int dim_thread_split, bool dslash, bool clover, int dir, int dim>
__device__ __host__ inline void coarseDslash(DslashCoarseArg<Float,F,G> &arg, int x_cb, int src_idx, int parity, int s, int color_block, int color_offset)
{
complex <Float> out[Mc];
#pragma unroll
for (int c=0; c<Mc; c++) out[c] = 0.0;
if (dslash) applyDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dir,dim>(out, arg, x_cb, src_idx, parity, s, color_block, color_offset);
if (clover && dir==0 && dim==0) applyClover<Float,F,G,Ns,Nc,Mc,color_stride>(out, arg, x_cb, src_idx, parity, s, color_block, color_offset);
if (dir==0 && dim==0) {
const int my_spinor_parity = (arg.nParity == 2) ? parity : 0;
#pragma unroll
for (int color_local=0; color_local<Mc; color_local++) {
#if __CUDA_ARCH__ >= 300
// reduce down to the first group of column-split threads
const int warp_size = 32; // FIXME - this is buggy when x-dim * color_stride < 32
#pragma unroll
for (int offset = warp_size/2; offset >= warp_size/color_stride; offset /= 2) out[color_local] += __shfl_down(out[color_local], offset);
#endif
int c = color_block + color_local; // global color index
// Only the first column-split lane holds the complete sum and writes it out.
if (color_offset == 0) arg.out(my_spinor_parity, x_cb+src_idx*arg.volumeCB, s, c) = out[color_local];
}
}
}
// CPU kernel for applying the coarse Dslash to a vector
// CPU reference implementation: serial loops over parity, 5th-dimension source
// index, 4-d checkerboarded volume, spin, and color blocks. The fine-grained GPU
// splitting parameters are pinned to their trivial values.
template <typename Float, typename F, typename G, int nDim, int Ns, int Nc, int Mc, bool dslash, bool clover>
void coarseDslash(DslashCoarseArg<Float,F,G> arg)
{
// the fine-grain parameters mean nothing for CPU variant
const int color_stride = 1;
const int color_offset = 0;
const int dim_thread_split = 1;
const int dir = 0;
const int dim = 0;
for (int parity= 0; parity < arg.nParity; parity++) {
// for full fields then set parity from loop else use arg setting
parity = (arg.nParity == 2) ? parity : arg.parity;
for (int src_idx = 0; src_idx < arg.dim[4]; src_idx++) {
//#pragma omp parallel for
for(int x_cb = 0; x_cb < arg.volumeCB; x_cb++) { // 4-d volume
for (int s=0; s<2; s++) {
for (int color_block=0; color_block<Nc; color_block+=Mc) { // Mc=Nc means all colors in a thread
coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,dir,dim>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
}
}
} // 4-d volumeCB
} // src index
} // parity
}
// GPU Kernel for applying the coarse Dslash to a vector
// GPU kernel: decodes the 3-d thread index into (site, color-split lane),
// (parity, source index), and (spin, color-block, dimension, direction), then
// dispatches to the per-site worker with dir/dim promoted to template arguments.
// x: blocks/warp lanes over sites, with color_stride lanes cooperating per site.
// y: parity * 5th-dimension source index.
// z: ((spin * (Nc/Mc) + color_block) * dim_thread_split + dim) * 2 + dir.
template <typename Float, typename F, typename G, int nDim, int Ns, int Nc, int Mc, int color_stride, int dim_thread_split, bool dslash, bool clover>
__global__ void coarseDslashKernel(DslashCoarseArg<Float,F,G> arg)
{
constexpr int warp_size = 32;
const int lane_id = threadIdx.x % warp_size;
const int warp_id = threadIdx.x / warp_size;
const int vector_site_width = warp_size / color_stride;
int x_cb = blockIdx.x*(blockDim.x/color_stride) + warp_id*(warp_size/color_stride) + lane_id % vector_site_width;
const int color_offset = lane_id / vector_site_width;
// for full fields set parity from y thread index else use arg setting
int paritySrc = blockDim.y*blockIdx.y + threadIdx.y;
int src_idx = (arg.nParity == 2) ? paritySrc / 2 : paritySrc; // maybe want to swap order or source and parity for improved locality of same parity
int parity = (arg.nParity == 2) ? paritySrc % 2 : arg.parity;
// z thread dimension is (( s*(Nc/Mc) + color_block )*dim_thread_split + dim)*2 + dir
int sMd = blockDim.z*blockIdx.z + threadIdx.z;
int dir = sMd & 1;
int sMdim = sMd >> 1;
int dim = sMdim % dim_thread_split;
int sM = sMdim / dim_thread_split;
int s = sM / (Nc/Mc);
int color_block = (sM % (Nc/Mc)) * Mc;
// Guard against the grid overshooting the problem size in x and y.
if (x_cb >= arg.volumeCB) return;
if (paritySrc >= arg.nParity * arg.dim[4]) return;
// Promote the runtime dir/dim to compile-time template parameters.
if (dir == 0) {
if (dim == 0) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,0,0>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
else if (dim == 1) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,0,1>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
else if (dim == 2) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,0,2>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
else if (dim == 3) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,0,3>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
} else if (dir == 1) {
if (dim == 0) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,1,0>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
else if (dim == 1) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,1,1>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
else if (dim == 2) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,1,2>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
else if (dim == 3) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,1,3>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
}
}
template <typename Float, typename F, typename G, int nDim, int Ns, int Nc, int Mc, bool dslash, bool clover>
class DslashCoarse : public Tunable {
protected:
DslashCoarseArg<Float,F,G> &arg;
const ColorSpinorField &meta;
const int max_color_col_stride = 4;
mutable int color_col_stride;
mutable int dim_threads;
long long flops() const
{
return ((dslash*2*nDim+clover*1)*(8*Ns*Nc*Ns*Nc)-2*Ns*Nc)*arg.nParity*(long long)meta.VolumeCB();
}
long long bytes() const
{
return (dslash||clover) * arg.out.Bytes() + dslash*8*arg.inA.Bytes() + clover*arg.inB.Bytes() +
arg.dim[4]*arg.nParity*(dslash*8*arg.Y.Bytes() + clover*arg.X.Bytes());
}
unsigned int sharedBytesPerThread() const { return (sizeof(complex<Float>) * Mc); }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions
unsigned int minThreads() const { return color_col_stride * arg.volumeCB; } // 4-d volume since this x threads only
unsigned int maxBlockSize() const { return deviceProp.maxThreadsPerBlock / (dim_threads * 2 * arg.nParity); }
bool advanceBlockDim(TuneParam ¶m) const
{
dim3 block = param.block;
dim3 grid = param.grid;
bool ret = Tunable::advanceBlockDim(param);
param.block.y = block.y; param.block.z = block.z;
param.grid.y = grid.y; param.grid.z = grid.z;
if (ret) { // we advanced the block.x so we're done
return true;
} else { // block.x (spacetime) was reset
if (param.block.y < (unsigned int)(arg.nParity * arg.dim[4])) { // advance parity / 5th dimension
param.block.y++;
param.grid.y = (arg.nParity * arg.dim[4] + param.block.y - 1) / param.block.y;
return true;
} else {
// reset parity / 5th dimension
param.block.y = 1;
param.grid.y = arg.nParity * arg.dim[4];
// let's try to advance spin/block-color
while(param.block.z <= (unsigned int)(dim_threads * 2 * 2 * (Nc/Mc))) {
param.block.z+=dim_threads * 2;
if ( (dim_threads*2*2*(Nc/Mc)) % param.block.z == 0) {
param.grid.z = (dim_threads * 2 * 2 * (Nc/Mc)) / param.block.z;
break;
}
}
// we can advance spin/block-color since this is valid
if (param.block.z <= (unsigned int)(dim_threads * 2 * 2 * (Nc/Mc)) &&
param.block.z <= (unsigned int)deviceProp.maxThreadsDim[2] ) { //
return true;
} else { // we have run off the end so let's reset
param.block.z = dim_threads * 2;
param.grid.z = 2 * (Nc/Mc);
return false;
}
}
}
}
int blockStep() const { return deviceProp.warpSize/4; }
int blockMin() const { return deviceProp.warpSize/4; }
// Experimental autotuning of the color column stride
bool advanceAux(TuneParam ¶m) const
{
#if __COMPUTE_CAPABILITY__ >= 300
// we can only split the dot product on Kepler and later since we need the __shfl instruction
if (2*param.aux.x <= max_color_col_stride && Nc % (2*param.aux.x) == 0 &&
param.block.x % deviceProp.warpSize == 0) {
// An x-dimension block size that is not a multiple of the
// warp size is incompatible with splitting the dot product
// across the warp so we must skip this
param.aux.x *= 2; // safe to advance
color_col_stride = param.aux.x;
// recompute grid size since minThreads() has now been updated
param.grid.x = (minThreads()+param.block.x-1)/param.block.x;
// check this grid size is valid before returning
if (param.grid.x < (unsigned int)deviceProp.maxGridSize[0]) return true;
}
#endif
// reset color column stride if too large or not divisible
param.aux.x = 1;
color_col_stride = param.aux.x;
// recompute grid size since minThreads() has now been updated
param.grid.x = (minThreads()+param.block.x-1)/param.block.x;
if (2*param.aux.y <= nDim) {
param.aux.y *= 2;
dim_threads = param.aux.y;
// need to reset z-block/grid size/shared_bytes since dim_threads has changed
param.block.z = dim_threads * 2;
param.grid.z = 2* (Nc / Mc);
param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param);
return true;
} else {
param.aux.y = 1;
dim_threads = param.aux.y;
// need to reset z-block/grid size/shared_bytes since
// dim_threads has changed. Strictly speaking this isn't needed
// since this is the outer dimension to tune, but would be
// needed if we added an aux.z tuning dimension
param.block.z = dim_threads * 2;
param.grid.z = 2* (Nc / Mc);
param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param);
return false;
}
}
virtual void initTuneParam(TuneParam ¶m) const
{
param.aux = make_int4(1,1,1,1);
color_col_stride = param.aux.x;
dim_threads = param.aux.y;
Tunable::initTuneParam(param);
param.block.y = 1;
param.grid.y = arg.nParity * arg.dim[4];
param.block.z = dim_threads * 2;
param.grid.z = 2*(Nc/Mc);
param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param);
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam ¶m) const
{
param.aux = make_int4(1,1,1,1);
color_col_stride = param.aux.x;
dim_threads = param.aux.y;
Tunable::defaultTuneParam(param);
param.block.y = 1;
param.grid.y = arg.nParity * arg.dim[4];
param.block.z = dim_threads * 2;
param.grid.z = 2*(Nc/Mc);
param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param);
}
public:
DslashCoarse(DslashCoarseArg<Float,F,G> &arg, const ColorSpinorField &meta)
: arg(arg), meta(meta) {
strcpy(aux, meta.AuxString());
#ifdef MULTI_GPU
char comm[5];
comm[0] = (arg.commDim[0] ? '1' : '0');
comm[1] = (arg.commDim[1] ? '1' : '0');
comm[2] = (arg.commDim[2] ? '1' : '0');
comm[3] = (arg.commDim[3] ? '1' : '0');
comm[4] = '\0';
strcat(aux,",comm=");
strcat(aux,comm);
#endif
}
virtual ~DslashCoarse() { }
// Launch the coarse dslash. CPU fields run the reference host
// implementation; GPU fields are launched with tuned parameters.
// The tuned aux values select compile-time template instantiations:
// aux.y = dimension-gather parallelisation, aux.x = color column stride.
void apply(const hipStream_t &stream) {
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,dslash,clover>(arg);
} else {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch (tp.aux.y) { // dimension gather parallelisation
case 1:
switch (tp.aux.x) { // this is color_col_stride
case 1:
hipLaunchKernelGGL(( coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,1,1,dslash,clover>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
break;
case 2:
hipLaunchKernelGGL(( coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,2,1,dslash,clover>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
break;
case 4:
hipLaunchKernelGGL(( coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,4,1,dslash,clover>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
break;
default:
errorQuda("Color column stride %d not valid", tp.aux.x);
}
break;
case 2:
switch (tp.aux.x) { // this is color_col_stride
case 1:
hipLaunchKernelGGL(( coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,1,2,dslash,clover>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
break;
case 2:
hipLaunchKernelGGL(( coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,2,2,dslash,clover>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
break;
case 4:
hipLaunchKernelGGL(( coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,4,2,dslash,clover>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
break;
default:
errorQuda("Color column stride %d not valid", tp.aux.x);
}
break;
case 4:
switch (tp.aux.x) { // this is color_col_stride
case 1:
hipLaunchKernelGGL(( coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,1,4,dslash,clover>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
break;
case 2:
hipLaunchKernelGGL(( coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,2,4,dslash,clover>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
break;
case 4:
hipLaunchKernelGGL(( coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,4,4,dslash,clover>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
break;
default:
errorQuda("Color column stride %d not valid", tp.aux.x);
}
break;
default:
errorQuda("Invalid dimension thread splitting %d", tp.aux.y);
}
}
}
// Tuning-cache key: volume string + mangled type name + aux (built in the ctor)
TuneKey tuneKey() const {
return TuneKey(meta.VolString(), typeid(*this).name(), aux);
}
};
// Instantiate the field accessors for the chosen compile-time orders and
// dispatch to the DslashCoarse worker matching the (dslash, clover) flags.
// The combination dslash=false, clover=false would be a no-op and is rejected.
template <typename Float, QudaFieldOrder csOrder, QudaGaugeFieldOrder gOrder, int coarseColor,
int coarseSpin, QudaFieldLocation location>
void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB,
const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover) {
typedef typename colorspinor::FieldOrderCB<Float,coarseSpin,coarseColor,1,csOrder> F;
typedef typename gauge::FieldOrder<Float,coarseColor*coarseSpin,coarseSpin,gOrder> G;
// accessors require non-const references even for read-only fields
F outAccessor(const_cast<ColorSpinorField&>(out));
F inAccessorA(const_cast<ColorSpinorField&>(inA));
F inAccessorB(const_cast<ColorSpinorField&>(inB));
G yAccessor(const_cast<GaugeField&>(Y));
G xAccessor(const_cast<GaugeField&>(X));
DslashCoarseArg<Float,F,G> arg(outAccessor, inAccessorA, inAccessorB, yAccessor, xAccessor, (Float)kappa, parity, inA);
const int colors_per_thread = 1;
// note: the worker is named to avoid shadowing the boolean parameter 'dslash'
if (dslash && clover) {
DslashCoarse<Float,F,G,4,coarseSpin,coarseColor,colors_per_thread,true,true> worker(arg, inA);
worker.apply(0);
} else if (dslash) {
DslashCoarse<Float,F,G,4,coarseSpin,coarseColor,colors_per_thread,true,false> worker(arg, inA);
worker.apply(0);
} else if (clover) {
DslashCoarse<Float,F,G,4,coarseSpin,coarseColor,colors_per_thread,false,true> worker(arg, inA);
worker.apply(0);
} else {
errorQuda("Unsupported dslash=false clover=false");
}
}
// Map the runtime field location onto the compile-time location template.
template <typename Float, QudaFieldOrder csOrder, QudaGaugeFieldOrder gOrder, int coarseColor, int coarseSpin>
void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB,
const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover) {
if (inA.Location() != QUDA_CUDA_FIELD_LOCATION) {
// anything not on the device runs the host path
ApplyCoarse<Float,csOrder,gOrder,coarseColor,coarseSpin,QUDA_CPU_FIELD_LOCATION>
(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else {
ApplyCoarse<Float,csOrder,gOrder,coarseColor,coarseSpin,QUDA_CUDA_FIELD_LOCATION>
(out, inA, inB, Y, X, kappa, parity, dslash, clover);
}
}
// template on the number of coarse colors
// Dispatches on inA.Ncolor(); most counts are compiled out with #if 0 to
// limit build time, leaving 2, 24 and 32 enabled.
template <typename Float, QudaFieldOrder csOrder, QudaGaugeFieldOrder gOrder>
void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB,
const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover) {
if (inA.Nspin() != 2)
errorQuda("Unsupported number of coarse spins %d\n",inA.Nspin());
if (inA.Ncolor() == 2) {
ApplyCoarse<Float,csOrder,gOrder,2,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
#if 0
} else if (inA.Ncolor() == 4) {
ApplyCoarse<Float,csOrder,gOrder,4,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else if (inA.Ncolor() == 8) {
ApplyCoarse<Float,csOrder,gOrder,8,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else if (inA.Ncolor() == 12) {
ApplyCoarse<Float,csOrder,gOrder,12,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else if (inA.Ncolor() == 16) {
ApplyCoarse<Float,csOrder,gOrder,16,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else if (inA.Ncolor() == 20) {
ApplyCoarse<Float,csOrder,gOrder,20,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
#endif
} else if (inA.Ncolor() == 24) {
ApplyCoarse<Float,csOrder,gOrder,24,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
#if 0
} else if (inA.Ncolor() == 28) {
ApplyCoarse<Float,csOrder,gOrder,28,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
#endif
} else if (inA.Ncolor() == 32) {
ApplyCoarse<Float,csOrder,gOrder,32,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else {
// report the value the dispatch chain actually tested; the previous code
// printed Y.Ncolor() (= coarseSpin*coarseColor), which never matched the
// color count being checked and made the error misleading
errorQuda("Unsupported number of coarse dof %d\n", inA.Ncolor());
}
}
// Dispatch on the runtime field orders of the colorspinor and gauge fields;
// only FLOAT2/FLOAT2 (device) and SPACE_SPIN_COLOR/QDP (host) are supported.
template <typename Float>
void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB,
const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover) {
if (Y.FieldOrder() != X.FieldOrder())
errorQuda("Field order mismatch Y = %d, X = %d", Y.FieldOrder(), X.FieldOrder());
if (inA.FieldOrder() != out.FieldOrder())
// bug fix: this branch tests inA vs out but previously reported Y and X
// (copy-paste from the check above), hiding the actual mismatch
errorQuda("Field order mismatch inA = %d, out = %d", inA.FieldOrder(), out.FieldOrder());
if (inA.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER && Y.FieldOrder() == QUDA_FLOAT2_GAUGE_ORDER) {
ApplyCoarse<Float,QUDA_FLOAT2_FIELD_ORDER, QUDA_FLOAT2_GAUGE_ORDER>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else if (inA.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER && Y.FieldOrder() == QUDA_QDP_GAUGE_ORDER) {
ApplyCoarse<Float,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER,QUDA_QDP_GAUGE_ORDER>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else {
errorQuda("Unsupported field order colorspinor=%d gauge=%d combination\n", inA.FieldOrder(), Y.FieldOrder());
}
}
// this is the Worker pointer that may issue additional work
// while we're waiting on communication to finish
namespace dslash {
extern Worker* aux_worker;
}
#endif // GPU_MULTIGRID
// Functor that captures all operands of a coarse-dslash application and,
// when invoked, performs the ghost exchange, runs any auxiliary work that
// overlaps communication, and dispatches on the gauge-field precision.
struct DslashCoarseLaunch {
ColorSpinorField &out;        // result field
const ColorSpinorField &inA;  // dslash input (ghost-exchanged)
const ColorSpinorField &inB;  // clover input
const GaugeField &Y;          // coarse link field
const GaugeField &X;          // coarse clover field
double kappa;                 // Wilson-normalization mass parameter
int parity;                   // parity to act on (single-parity fields)
bool dslash;                  // apply the hopping (off-diagonal) term?
bool clover;                  // apply the clover (diagonal) term?
DslashCoarseLaunch(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB,
const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover)
: out(out), inA(inA), inB(inB), Y(Y), X(X), kappa(kappa), parity(parity), dslash(dslash), clover(clover) { }
void operator()() {
#ifdef GPU_MULTIGRID
if (inA.V() == out.V()) errorQuda("Aliasing pointers");
if (out.Precision() != inA.Precision() || Y.Precision() != inA.Precision() || X.Precision() != inA.Precision())
errorQuda("Precision mismatch out=%d inA=%d inB=%d Y=%d X=%d",
out.Precision(), inA.Precision(), inB.Precision(), Y.Precision(), X.Precision());
// check all locations match
Location(out, inA, inB, Y, X);
// halo exchange on the opposite parity, which is what the dslash reads
inA.exchangeGhost((QudaParity)(1-parity), 0); // last parameter is dummy
// give any registered worker a chance to run while comms complete
if (dslash::aux_worker) dslash::aux_worker->apply(0);
if (Y.Precision() == QUDA_DOUBLE_PRECISION) {
#ifdef GPU_MULTIGRID_DOUBLE
ApplyCoarse<double>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
#else
errorQuda("Double precision multigrid has not been enabled");
#endif
} else if (Y.Precision() == QUDA_SINGLE_PRECISION) {
ApplyCoarse<float>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else {
errorQuda("Unsupported precision %d\n", Y.Precision());
}
#else
errorQuda("Multigrid has not been built");
#endif
}
};
// hooks into tune.cpp variables for policy tuning
typedef std::map<TuneKey, TuneParam> map;
const map& getTuneCache();
void disableProfileCount();
void enableProfileCount();
// Policy-level tuner: wraps a DslashCoarseLaunch so the launch as a whole can
// be timed/tuned by the tune.cpp machinery. It has no tunable parameters of
// its own (advanceTuneParam returns false); it exists so flops/bytes of the
// full operator application get recorded under a single key.
// Fixes mojibake: two parameters were corrupted to "¶m" ("&param").
class DslashCoarsePolicyTune : public Tunable {
DslashCoarseLaunch &dslash;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
public:
DslashCoarsePolicyTune(DslashCoarseLaunch &dslash) : dslash(dslash)
{
strcpy(aux,"policy,");
if (dslash.dslash) strcat(aux,"dslash");
strcat(aux, dslash.clover ? "clover," : ",");
strcat(aux,dslash.inA.AuxString());
#ifdef MULTI_GPU
char comm[5];
comm[0] = (comm_dim_partitioned(0) ? '1' : '0');
comm[1] = (comm_dim_partitioned(1) ? '1' : '0');
comm[2] = (comm_dim_partitioned(2) ? '1' : '0');
comm[3] = (comm_dim_partitioned(3) ? '1' : '0');
comm[4] = '\0';
strcat(aux,",comm=");
strcat(aux,comm);
#endif
// before we do policy tuning we must ensure the kernel
// constituents have been tuned since we can't do nested tuning
if (getTuneCache().find(tuneKey()) == getTuneCache().end()) {
disableProfileCount();
dslash();
enableProfileCount();
}
}
virtual ~DslashCoarsePolicyTune() { }
void apply(const cudaStream_t &stream) {
// tuneLaunch is invoked for its timing side effects; tp itself is unused
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dslash();
}
int tuningIter() const { return 10; }
bool advanceTuneParam(TuneParam &param) const { return false; }
TuneKey tuneKey() const {
return TuneKey(dslash.inA.VolString(), typeid(*this).name(), aux);
}
// flop count of the full operator application (dslash + clover terms)
long long flops() const {
int nDim = 4;
int Ns = dslash.inA.Nspin();
int Nc = dslash.inA.Ncolor();
int nParity = dslash.inA.SiteSubset();
int volumeCB = dslash.inA.VolumeCB();
return ((dslash.dslash*2*nDim+dslash.clover*1)*(8*Ns*Nc*Ns*Nc)-2*Ns*Nc)*nParity*volumeCB;
}
long long bytes() const {
int nParity = dslash.inA.SiteSubset();
return (dslash.dslash||dslash.clover) * dslash.out.Bytes() +
dslash.dslash*8*dslash.inA.Bytes() + dslash.clover*dslash.inB.Bytes() +
nParity*(dslash.dslash*dslash.Y.Bytes()*dslash.Y.VolumeCB()/(2*dslash.Y.Stride())
+ dslash.clover*dslash.X.Bytes()/2);
// multiply Y by volume / stride to correct for pad
}
};
//Apply the coarse Dirac matrix to a coarse grid vector
//out(x) = M*in = X*in - kappa*\sum_mu Y_{-\mu}(x)in(x+mu) + Y^\dagger_mu(x-mu)in(x-mu)
//Uses the kappa normalization for the Wilson operator.
void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB,
const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover) {
// bundle the operands, then let the policy tuner time and invoke the launch
DslashCoarseLaunch launcher(out, inA, inB, Y, X, kappa, parity, dslash, clover);
DslashCoarsePolicyTune policyTuner(launcher);
policyTuner.apply(0);
}//ApplyCoarse
} // namespace quda
| 271928b1786264ae378e11aa883cdbf3aafbe4fd.cu | #include <transfer.h>
#include <gauge_field_order.h>
#include <color_spinor_field_order.h>
#include <index_helper.cuh>
#if __COMPUTE_CAPABILITY__ >= 300
#include <generics/shfl.h>
#endif
namespace quda {
#ifdef GPU_MULTIGRID
// Kernel argument struct for the coarse dslash: field accessors plus the
// geometry/communication metadata the kernels need, copied by value.
template <typename Float, typename F, typename G>
struct DslashCoarseArg {
F out;        // output colorspinor accessor
const F inA;  // dslash input accessor
const F inB;  // clover input accessor
const G Y;    // coarse link-field accessor
const G X;    // coarse clover-field accessor
const Float kappa;
const int parity; // only use this for single parity fields
const int nParity; // number of parities we're working on
const int nFace; // hard code to 1 for now
const int dim[5]; // full lattice dimensions
const int commDim[4]; // whether a given dimension is partitioned or not
const int volumeCB;
DslashCoarseArg(F &out, const F &inA, const F &inB, const G &Y, const G &X,
Float kappa, int parity, const ColorSpinorField &meta)
: out(out), inA(inA), inB(inB), Y(Y), X(X), kappa(kappa), parity(parity),
nParity(meta.SiteSubset()), nFace(1),
// dim[0] is doubled for single-parity fields to give the full extent;
// dim[4] is the 5th (source) dimension, 1 for 4-d fields
dim{ (3-nParity) * meta.X(0), meta.X(1), meta.X(2), meta.X(3), meta.Ndim() == 5 ? meta.X(4) : 1 },
commDim{comm_dim_partitioned(0), comm_dim_partitioned(1), comm_dim_partitioned(2), comm_dim_partitioned(3)},
volumeCB(meta.VolumeCB()/dim[4])
{ }
};
/**
Applies the coarse dslash on a given parity and checkerboard site index
@param out The result - kappa * Dslash in
@param Y The coarse gauge field
@param kappa Kappa value
@param in The input field
@param parity The site parity
@param x_cb The checkerboarded site index
*/
extern __shared__ float s[];
// Hopping-term gather for one site: threads are (optionally) split over
// forward/backward direction (thread_dir) and over dimensions (thread_dim /
// dim_stride); on the device, partial sums from non-master threads are
// combined through dynamic shared memory (the file-scope array s[]).
template <typename Float, typename F, typename G, int nDim, int Ns, int Nc, int Mc, int color_stride, int dim_stride, int thread_dir, int thread_dim>
__device__ __host__ inline void applyDslash(complex<Float> out[], DslashCoarseArg<Float,F,G> &arg, int x_cb, int src_idx, int parity, int s_row, int color_block, int color_offset) {
const int their_spinor_parity = (arg.nParity == 2) ? (parity+1)&1 : 0;
int coord[5];
getCoords(coord, x_cb, arg.dim, parity);
coord[4] = src_idx;
#ifdef __CUDA_ARCH__
// reinterpret the dynamic shared-memory pool as the reduction buffer
complex<Float> *shared_sum = (complex<Float>*)s;
if (!thread_dir) {
#endif
//Forward gather - compute fwd offset for spinor fetch
#pragma unroll
for(int d = thread_dim; d < nDim; d+=dim_stride) // loop over dimension
{
const int fwd_idx = linkIndexP1(coord, arg.dim, d);
if ( arg.commDim[d] && (coord[d] + arg.nFace >= arg.dim[d]) ) {
// neighbor lies in the forward halo: read from the ghost buffer
int ghost_idx = ghostFaceIndex<1>(coord, arg.dim, d, arg.nFace);
#pragma unroll
for(int color_local = 0; color_local < Mc; color_local++) { //Color row
int c_row = color_block + color_local; // global color index
int row = s_row*Nc + c_row;
#pragma unroll
for(int s_col = 0; s_col < Ns; s_col++) { //Spin column
#pragma unroll
for(int c_col = 0; c_col < Nc; c_col+=color_stride) { //Color column
int col = s_col*Nc + c_col + color_offset;
out[color_local] += arg.Y(d+4, parity, x_cb, row, col)
* arg.inA.Ghost(d, 1, their_spinor_parity, ghost_idx + src_idx*arg.volumeCB, s_col, c_col+color_offset);
}
}
}
} else {
// interior neighbor: read directly from the bulk field
#pragma unroll
for(int color_local = 0; color_local < Mc; color_local++) { //Color row
int c_row = color_block + color_local; // global color index
int row = s_row*Nc + c_row;
#pragma unroll
for(int s_col = 0; s_col < Ns; s_col++) { //Spin column
#pragma unroll
for(int c_col = 0; c_col < Nc; c_col+=color_stride) { //Color column
int col = s_col*Nc + c_col + color_offset;
out[color_local] += arg.Y(d+4, parity, x_cb, row, col)
* arg.inA(their_spinor_parity, fwd_idx + src_idx*arg.volumeCB, s_col, c_col+color_offset);
}
}
}
}
} // nDim
#if defined(__CUDA_ARCH__)
if (thread_dim > 0) { // only need to write to shared memory if not master thread
#pragma unroll
for (int color_local=0; color_local < Mc; color_local++) {
shared_sum[((color_local * blockDim.z + threadIdx.z )*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x] = out[color_local];
}
}
#endif
#ifdef __CUDA_ARCH__
} else {
#endif
//Backward gather - compute back offset for spinor and gauge fetch
#pragma unroll
for(int d = thread_dim; d < nDim; d+=dim_stride)
{
const int back_idx = linkIndexM1(coord, arg.dim, d);
const int gauge_idx = back_idx;
if ( arg.commDim[d] && (coord[d] - arg.nFace < 0) ) {
// neighbor lies in the backward halo: read from the ghost buffers,
// using the conjugate of the link field stored on the other parity
const int ghost_idx = ghostFaceIndex<0>(coord, arg.dim, d, arg.nFace);
#pragma unroll
for (int color_local=0; color_local<Mc; color_local++) {
int c_row = color_block + color_local;
int row = s_row*Nc + c_row;
#pragma unroll
for (int s_col=0; s_col<Ns; s_col++)
#pragma unroll
for (int c_col=0; c_col<Nc; c_col+=color_stride) {
int col = s_col*Nc + c_col + color_offset;
out[color_local] += conj(arg.Y.Ghost(d, (parity+1)&1, ghost_idx, col, row))
* arg.inA.Ghost(d, 0, their_spinor_parity, ghost_idx + src_idx*arg.volumeCB, s_col, c_col+color_offset);
}
}
} else {
#pragma unroll
for(int color_local = 0; color_local < Mc; color_local++) {
int c_row = color_block + color_local;
int row = s_row*Nc + c_row;
#pragma unroll
for(int s_col = 0; s_col < Ns; s_col++)
#pragma unroll
for(int c_col = 0; c_col < Nc; c_col+=color_stride) {
int col = s_col*Nc + c_col + color_offset;
out[color_local] += conj(arg.Y(d, (parity+1)&1, gauge_idx, col, row))
* arg.inA(their_spinor_parity, back_idx + src_idx*arg.volumeCB, s_col, c_col+color_offset);
}
}
}
} //nDim
#if defined(__CUDA_ARCH__)
// all backward-gather threads publish their partials to shared memory
#pragma unroll
for (int color_local=0; color_local < Mc; color_local++) {
shared_sum[ ((color_local * blockDim.z + threadIdx.z )*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x] = out[color_local];
}
} // forwards / backwards thread split
#endif
#ifdef __CUDA_ARCH__ // CUDA path has to recombine the foward and backward results
__syncthreads();
// (colorspin * dim_stride + dim * 2 + dir)
if (thread_dim == 0 && thread_dir == 0) {
// full split over dimension and direction
#pragma unroll
for (int d=1; d<dim_stride; d++) { // get remaining forward fathers (if any)
// 4-way 1,2,3 (stride = 4)
// 2-way 1 (stride = 2)
#pragma unroll
for (int color_local=0; color_local < Mc; color_local++) {
out[color_local] +=
shared_sum[(((color_local*blockDim.z/(2*dim_stride) + threadIdx.z/(2*dim_stride)) * 2 * dim_stride + d * 2 + 0)*blockDim.y+threadIdx.y)*blockDim.x+threadIdx.x];
}
}
#pragma unroll
for (int d=0; d<dim_stride; d++) { // get all backward gathers
#pragma unroll
for (int color_local=0; color_local < Mc; color_local++) {
out[color_local] +=
shared_sum[(((color_local*blockDim.z/(2*dim_stride) + threadIdx.z/(2*dim_stride)) * 2 * dim_stride + d * 2 + 1)*blockDim.y+threadIdx.y)*blockDim.x+threadIdx.x];
}
}
// apply kappa
#pragma unroll
for (int color_local=0; color_local<Mc; color_local++) out[color_local] *= -arg.kappa;
}
#else // !__CUDA_ARCH__
for (int color_local=0; color_local<Mc; color_local++) out[color_local] *= -arg.kappa;
#endif
}
/**
Applies the coarse clover matrix on a given parity and
checkerboard site index
@param out The result out += X * in
@param X The coarse clover field
@param in The input field
@param parity The site parity
@param x_cb The checkerboarded site index
*/
// Accumulate the coarse clover term: out += X * inB for the Mc colors
// owned by this thread (kappa and the diagonal are folded into X).
template <typename Float, typename F, typename G, int Ns, int Nc, int Mc, int color_stride>
__device__ __host__ inline void applyClover(complex<Float> out[], DslashCoarseArg<Float,F,G> &arg, int x_cb, int src_idx, int parity, int s, int color_block, int color_offset) {
const int spinor_parity = (arg.nParity == 2) ? parity : 0;
// M is number of colors per thread
#pragma unroll
for(int color_local = 0; color_local < Mc; color_local++) {//Color out
int c = color_block + color_local; // global color index
int row = s*Nc + c;
#pragma unroll
for(int s_col = 0; s_col < Ns; s_col++) //Spin in
#pragma unroll
for(int c_col = 0; c_col < Nc; c_col+=color_stride) { //Color in
//Factor of kappa and diagonal addition now incorporated in X
int col = s_col*Nc + c_col + color_offset;
out[color_local] += arg.X(0, parity, x_cb, row, col) * arg.inB(spinor_parity, x_cb+src_idx*arg.volumeCB, s_col, c_col+color_offset);
}
}
}
//out(x) = M*in = \sum_mu Y_{-\mu}(x)in(x+mu) + Y^\dagger_mu(x-mu)in(x-mu)
// Per-site driver: accumulates dslash and clover contributions and, on the
// (dir=0, dim=0) master thread, reduces across the color-column split and
// writes the result.
template <typename Float, typename F, typename G, int nDim, int Ns, int Nc, int Mc, int color_stride,
int dim_thread_split, bool dslash, bool clover, int dir, int dim>
__device__ __host__ inline void coarseDslash(DslashCoarseArg<Float,F,G> &arg, int x_cb, int src_idx, int parity, int s, int color_block, int color_offset)
{
complex <Float> out[Mc];
#pragma unroll
for (int c=0; c<Mc; c++) out[c] = 0.0;
if (dslash) applyDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dir,dim>(out, arg, x_cb, src_idx, parity, s, color_block, color_offset);
// clover (and the final write below) are done once, by the master thread only
if (clover && dir==0 && dim==0) applyClover<Float,F,G,Ns,Nc,Mc,color_stride>(out, arg, x_cb, src_idx, parity, s, color_block, color_offset);
if (dir==0 && dim==0) {
const int my_spinor_parity = (arg.nParity == 2) ? parity : 0;
#pragma unroll
for (int color_local=0; color_local<Mc; color_local++) {
#if __CUDA_ARCH__ >= 300
// reduce down to the first group of column-split threads
const int warp_size = 32; // FIXME - this is buggy when x-dim * color_stride < 32
#pragma unroll
for (int offset = warp_size/2; offset >= warp_size/color_stride; offset /= 2) out[color_local] += __shfl_down(out[color_local], offset);
#endif
int c = color_block + color_local; // global color index
if (color_offset == 0) arg.out(my_spinor_parity, x_cb+src_idx*arg.volumeCB, s, c) = out[color_local];
}
}
}
// CPU kernel for applying the coarse Dslash to a vector
// Serial reference implementation: loops over parity, source index, 4-d
// volume, spin and color blocks with all fine-grain splits disabled.
template <typename Float, typename F, typename G, int nDim, int Ns, int Nc, int Mc, bool dslash, bool clover>
void coarseDslash(DslashCoarseArg<Float,F,G> arg)
{
// the fine-grain parameters mean nothing for CPU variant
const int color_stride = 1;
const int color_offset = 0;
const int dim_thread_split = 1;
const int dir = 0;
const int dim = 0;
for (int parity= 0; parity < arg.nParity; parity++) {
// for full fields then set parity from loop else use arg setting
parity = (arg.nParity == 2) ? parity : arg.parity;
for (int src_idx = 0; src_idx < arg.dim[4]; src_idx++) {
//#pragma omp parallel for
for(int x_cb = 0; x_cb < arg.volumeCB; x_cb++) { // 4-d volume
for (int s=0; s<2; s++) {
for (int color_block=0; color_block<Nc; color_block+=Mc) { // Mc=Nc means all colors in a thread
coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,dir,dim>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
}
}
} // 4-d volumeCB
} // src index
} // parity
}
// GPU Kernel for applying the coarse Dslash to a vector
// Thread mapping: x = checkerboard site (with color-column split within a
// warp), y = parity * 5th dimension, z = (spin/color block, dimension split,
// direction). dir/dim are decoded at runtime and dispatched to compile-time
// template instances so the inner loops stay fully unrolled.
template <typename Float, typename F, typename G, int nDim, int Ns, int Nc, int Mc, int color_stride, int dim_thread_split, bool dslash, bool clover>
__global__ void coarseDslashKernel(DslashCoarseArg<Float,F,G> arg)
{
constexpr int warp_size = 32;
const int lane_id = threadIdx.x % warp_size;
const int warp_id = threadIdx.x / warp_size;
const int vector_site_width = warp_size / color_stride;
int x_cb = blockIdx.x*(blockDim.x/color_stride) + warp_id*(warp_size/color_stride) + lane_id % vector_site_width;
const int color_offset = lane_id / vector_site_width;
// for full fields set parity from y thread index else use arg setting
int paritySrc = blockDim.y*blockIdx.y + threadIdx.y;
int src_idx = (arg.nParity == 2) ? paritySrc / 2 : paritySrc; // maybe want to swap order or source and parity for improved locality of same parity
int parity = (arg.nParity == 2) ? paritySrc % 2 : arg.parity;
// z thread dimension is (( s*(Nc/Mc) + color_block )*dim_thread_split + dim)*2 + dir
int sMd = blockDim.z*blockIdx.z + threadIdx.z;
int dir = sMd & 1;
int sMdim = sMd >> 1;
int dim = sMdim % dim_thread_split;
int sM = sMdim / dim_thread_split;
int s = sM / (Nc/Mc);
int color_block = (sM % (Nc/Mc)) * Mc;
// guard the grid tail in x and y
if (x_cb >= arg.volumeCB) return;
if (paritySrc >= arg.nParity * arg.dim[4]) return;
if (dir == 0) {
if (dim == 0) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,0,0>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
else if (dim == 1) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,0,1>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
else if (dim == 2) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,0,2>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
else if (dim == 3) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,0,3>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
} else if (dir == 1) {
if (dim == 0) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,1,0>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
else if (dim == 1) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,1,1>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
else if (dim == 2) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,1,2>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
else if (dim == 3) coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,color_stride,dim_thread_split,dslash,clover,1,3>(arg, x_cb, src_idx, parity, s, color_block, color_offset);
}
}
// Tunable launcher for the coarse dslash. Tunes the block shape plus two aux
// parameters: aux.x (color column stride, i.e. splitting the color dot
// product across warp lanes) and aux.y (splitting the dimension gather
// across threads). Both map onto compile-time template variants in apply().
// Fixes mojibake: five parameters were corrupted to "¶m" ("&param").
template <typename Float, typename F, typename G, int nDim, int Ns, int Nc, int Mc, bool dslash, bool clover>
class DslashCoarse : public Tunable {
protected:
DslashCoarseArg<Float,F,G> &arg;
const ColorSpinorField &meta;
const int max_color_col_stride = 4;
mutable int color_col_stride; // mirrors param.aux.x during tuning
mutable int dim_threads;      // mirrors param.aux.y during tuning
long long flops() const
{
return ((dslash*2*nDim+clover*1)*(8*Ns*Nc*Ns*Nc)-2*Ns*Nc)*arg.nParity*(long long)meta.VolumeCB();
}
long long bytes() const
{
return (dslash||clover) * arg.out.Bytes() + dslash*8*arg.inA.Bytes() + clover*arg.inB.Bytes() +
arg.dim[4]*arg.nParity*(dslash*8*arg.Y.Bytes() + clover*arg.X.Bytes());
}
// dynamic shared memory holds the per-thread partial sums (Mc complex values)
unsigned int sharedBytesPerThread() const { return (sizeof(complex<Float>) * Mc); }
unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions
unsigned int minThreads() const { return color_col_stride * arg.volumeCB; } // 4-d volume since this x threads only
unsigned int maxBlockSize() const { return deviceProp.maxThreadsPerBlock / (dim_threads * 2 * arg.nParity); }
// Advance block.x via the base class, then parity/5th-dim (block.y) and
// finally the spin/color-block split (block.z).
bool advanceBlockDim(TuneParam &param) const
{
dim3 block = param.block;
dim3 grid = param.grid;
bool ret = Tunable::advanceBlockDim(param);
param.block.y = block.y; param.block.z = block.z;
param.grid.y = grid.y; param.grid.z = grid.z;
if (ret) { // we advanced the block.x so we're done
return true;
} else { // block.x (spacetime) was reset
if (param.block.y < (unsigned int)(arg.nParity * arg.dim[4])) { // advance parity / 5th dimension
param.block.y++;
param.grid.y = (arg.nParity * arg.dim[4] + param.block.y - 1) / param.block.y;
return true;
} else {
// reset parity / 5th dimension
param.block.y = 1;
param.grid.y = arg.nParity * arg.dim[4];
// let's try to advance spin/block-color
while(param.block.z <= (unsigned int)(dim_threads * 2 * 2 * (Nc/Mc))) {
param.block.z+=dim_threads * 2;
if ( (dim_threads*2*2*(Nc/Mc)) % param.block.z == 0) {
param.grid.z = (dim_threads * 2 * 2 * (Nc/Mc)) / param.block.z;
break;
}
}
// we can advance spin/block-color since this is valid
if (param.block.z <= (unsigned int)(dim_threads * 2 * 2 * (Nc/Mc)) &&
param.block.z <= (unsigned int)deviceProp.maxThreadsDim[2] ) { //
return true;
} else { // we have run off the end so let's reset
param.block.z = dim_threads * 2;
param.grid.z = 2 * (Nc/Mc);
return false;
}
}
}
}
int blockStep() const { return deviceProp.warpSize/4; }
int blockMin() const { return deviceProp.warpSize/4; }
// Experimental autotuning of the color column stride
bool advanceAux(TuneParam &param) const
{
#if __COMPUTE_CAPABILITY__ >= 300
// we can only split the dot product on Kepler and later since we need the __shfl instruction
if (2*param.aux.x <= max_color_col_stride && Nc % (2*param.aux.x) == 0 &&
param.block.x % deviceProp.warpSize == 0) {
// An x-dimension block size that is not a multiple of the
// warp size is incompatible with splitting the dot product
// across the warp so we must skip this
param.aux.x *= 2; // safe to advance
color_col_stride = param.aux.x;
// recompute grid size since minThreads() has now been updated
param.grid.x = (minThreads()+param.block.x-1)/param.block.x;
// check this grid size is valid before returning
if (param.grid.x < (unsigned int)deviceProp.maxGridSize[0]) return true;
}
#endif
// reset color column stride if too large or not divisible
param.aux.x = 1;
color_col_stride = param.aux.x;
// recompute grid size since minThreads() has now been updated
param.grid.x = (minThreads()+param.block.x-1)/param.block.x;
if (2*param.aux.y <= nDim) {
param.aux.y *= 2;
dim_threads = param.aux.y;
// need to reset z-block/grid size/shared_bytes since dim_threads has changed
param.block.z = dim_threads * 2;
param.grid.z = 2* (Nc / Mc);
param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param);
return true;
} else {
param.aux.y = 1;
dim_threads = param.aux.y;
// need to reset z-block/grid size/shared_bytes since
// dim_threads has changed. Strictly speaking this isn't needed
// since this is the outer dimension to tune, but would be
// needed if we added an aux.z tuning dimension
param.block.z = dim_threads * 2;
param.grid.z = 2* (Nc / Mc);
param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param);
return false;
}
}
// start tuning from the most conservative fine-grain settings
virtual void initTuneParam(TuneParam &param) const
{
param.aux = make_int4(1,1,1,1);
color_col_stride = param.aux.x;
dim_threads = param.aux.y;
Tunable::initTuneParam(param);
param.block.y = 1;
param.grid.y = arg.nParity * arg.dim[4];
param.block.z = dim_threads * 2;
param.grid.z = 2*(Nc/Mc);
param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param);
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam &param) const
{
param.aux = make_int4(1,1,1,1);
color_col_stride = param.aux.x;
dim_threads = param.aux.y;
Tunable::defaultTuneParam(param);
param.block.y = 1;
param.grid.y = arg.nParity * arg.dim[4];
param.block.z = dim_threads * 2;
param.grid.z = 2*(Nc/Mc);
param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param);
}
public:
DslashCoarse(DslashCoarseArg<Float,F,G> &arg, const ColorSpinorField &meta)
: arg(arg), meta(meta) {
strcpy(aux, meta.AuxString());
#ifdef MULTI_GPU
// encode the comm-partitioning pattern into the tuning key
char comm[5];
comm[0] = (arg.commDim[0] ? '1' : '0');
comm[1] = (arg.commDim[1] ? '1' : '0');
comm[2] = (arg.commDim[2] ? '1' : '0');
comm[3] = (arg.commDim[3] ? '1' : '0');
comm[4] = '\0';
strcat(aux,",comm=");
strcat(aux,comm);
#endif
}
virtual ~DslashCoarse() { }
// Launch the kernel: CPU fields run the reference implementation; GPU
// fields dispatch the tuned aux values to compile-time template variants.
void apply(const cudaStream_t &stream) {
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
coarseDslash<Float,F,G,nDim,Ns,Nc,Mc,dslash,clover>(arg);
} else {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
switch (tp.aux.y) { // dimension gather parallelisation
case 1:
switch (tp.aux.x) { // this is color_col_stride
case 1:
coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,1,1,dslash,clover> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
break;
case 2:
coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,2,1,dslash,clover> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
break;
case 4:
coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,4,1,dslash,clover> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
break;
default:
errorQuda("Color column stride %d not valid", tp.aux.x);
}
break;
case 2:
switch (tp.aux.x) { // this is color_col_stride
case 1:
coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,1,2,dslash,clover> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
break;
case 2:
coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,2,2,dslash,clover> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
break;
case 4:
coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,4,2,dslash,clover> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
break;
default:
errorQuda("Color column stride %d not valid", tp.aux.x);
}
break;
case 4:
switch (tp.aux.x) { // this is color_col_stride
case 1:
coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,1,4,dslash,clover> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
break;
case 2:
coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,2,4,dslash,clover> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
break;
case 4:
coarseDslashKernel<Float,F,G,nDim,Ns,Nc,Mc,4,4,dslash,clover> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
break;
default:
errorQuda("Color column stride %d not valid", tp.aux.x);
}
break;
default:
errorQuda("Invalid dimension thread splitting %d", tp.aux.y);
}
}
}
TuneKey tuneKey() const {
return TuneKey(meta.VolString(), typeid(*this).name(), aux);
}
};
// Instantiate the field accessors for the given orders and dispatch to the
// DslashCoarse tuning class for the requested dslash/clover combination.
// NOTE(review): the 'location' template parameter is not referenced in this
// body — presumably consumed by DslashCoarse internally; confirm at its
// definition.
template <typename Float, QudaFieldOrder csOrder, QudaGaugeFieldOrder gOrder, int coarseColor,
int coarseSpin, QudaFieldLocation location>
void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB,
const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover) {
typedef typename colorspinor::FieldOrderCB<Float,coarseSpin,coarseColor,1,csOrder> F;
typedef typename gauge::FieldOrder<Float,coarseColor*coarseSpin,coarseSpin,gOrder> G;
// const_casts: the accessor constructors take non-const references even
// for read-only use.
F outAccessor(const_cast<ColorSpinorField&>(out));
F inAccessorA(const_cast<ColorSpinorField&>(inA));
F inAccessorB(const_cast<ColorSpinorField&>(inB));
G yAccessor(const_cast<GaugeField&>(Y));
G xAccessor(const_cast<GaugeField&>(X));
DslashCoarseArg<Float,F,G> arg(outAccessor, inAccessorA, inAccessorB, yAccessor, xAccessor, (Float)kappa, parity, inA);
const int colors_per_thread = 1;
// The local 'dslash' objects below shadow the 'bool dslash' parameter;
// each branch is a distinct template instantiation of the operator.
if (dslash) {
if (clover) {
DslashCoarse<Float,F,G,4,coarseSpin,coarseColor,colors_per_thread,true,true> dslash(arg, inA);
dslash.apply(0);
} else {
DslashCoarse<Float,F,G,4,coarseSpin,coarseColor,colors_per_thread,true,false> dslash(arg, inA);
dslash.apply(0);
}
} else {
if (clover) {
DslashCoarse<Float,F,G,4,coarseSpin,coarseColor,colors_per_thread,false,true> dslash(arg, inA);
dslash.apply(0);
} else {
errorQuda("Unsupported dslash=false clover=false");
}
}
}
// Runtime dispatch on field location: CUDA fields go to the GPU
// instantiation, everything else to the CPU instantiation.
template <typename Float, QudaFieldOrder csOrder, QudaGaugeFieldOrder gOrder, int coarseColor, int coarseSpin>
void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB,
const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover) {
if (inA.Location() == QUDA_CUDA_FIELD_LOCATION) {
ApplyCoarse<Float,csOrder,gOrder,coarseColor,coarseSpin,QUDA_CUDA_FIELD_LOCATION>
(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else {
ApplyCoarse<Float,csOrder,gOrder,coarseColor,coarseSpin,QUDA_CPU_FIELD_LOCATION>
(out, inA, inB, Y, X, kappa, parity, dslash, clover);
}
}
// template on the number of coarse colors
// Dispatch on the number of coarse colors (coarse spin is fixed at 2).
// Color counts other than 24 and 32 are compiled out (#if 0) to limit build
// time and binary size.
template <typename Float, QudaFieldOrder csOrder, QudaGaugeFieldOrder gOrder>
void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB,
const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover) {
if (inA.Nspin() != 2)
errorQuda("Unsupported number of coarse spins %d\n",inA.Nspin());
if (inA.Ncolor() == 2) {
ApplyCoarse<Float,csOrder,gOrder,2,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
#if 0
} else if (inA.Ncolor() == 4) {
ApplyCoarse<Float,csOrder,gOrder,4,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else if (inA.Ncolor() == 8) {
ApplyCoarse<Float,csOrder,gOrder,8,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else if (inA.Ncolor() == 12) {
ApplyCoarse<Float,csOrder,gOrder,12,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else if (inA.Ncolor() == 16) {
ApplyCoarse<Float,csOrder,gOrder,16,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else if (inA.Ncolor() == 20) {
ApplyCoarse<Float,csOrder,gOrder,20,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
#endif
} else if (inA.Ncolor() == 24) {
ApplyCoarse<Float,csOrder,gOrder,24,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
#if 0
} else if (inA.Ncolor() == 28) {
ApplyCoarse<Float,csOrder,gOrder,28,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
#endif
} else if (inA.Ncolor() == 32) {
ApplyCoarse<Float,csOrder,gOrder,32,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else {
// Report the value that was actually tested (inA.Ncolor()); the coarse
// gauge field's Ncolor() is color*spin and would be misleading here.
errorQuda("Unsupported number of coarse dof %d\n", inA.Ncolor());
}
}
// Dispatch on the colorspinor/gauge field ordering after verifying that the
// orderings of the operands are mutually consistent.
template <typename Float>
void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB,
const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover) {
if (Y.FieldOrder() != X.FieldOrder())
errorQuda("Field order mismatch Y = %d, X = %d", Y.FieldOrder(), X.FieldOrder());
// Fix: this diagnostic previously printed Y/X gauge orders (copy-paste
// from the check above) even though the condition tests inA vs out.
if (inA.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch inA = %d, out = %d", inA.FieldOrder(), out.FieldOrder());
if (inA.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER && Y.FieldOrder() == QUDA_FLOAT2_GAUGE_ORDER) {
ApplyCoarse<Float,QUDA_FLOAT2_FIELD_ORDER, QUDA_FLOAT2_GAUGE_ORDER>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else if (inA.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER && Y.FieldOrder() == QUDA_QDP_GAUGE_ORDER) {
ApplyCoarse<Float,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER,QUDA_QDP_GAUGE_ORDER>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else {
errorQuda("Unsupported field order colorspinor=%d gauge=%d combination\n", inA.FieldOrder(), Y.FieldOrder());
}
}
// this is the Worker pointer that may have issue additional work
// while we're waiting on communication to finish
namespace dslash {
extern Worker* aux_worker;
}
#endif // GPU_MULTIGRID
// Functor bundling all operands of one coarse-dslash application so it can
// be invoked (and re-invoked) by the policy tuner via operator().
struct DslashCoarseLaunch {
ColorSpinorField &out;
const ColorSpinorField &inA;   // spinor the stencil acts on
const ColorSpinorField &inB;   // spinor the clover/X term acts on
const GaugeField &Y;           // coarse link field
const GaugeField &X;           // coarse clover field
double kappa;
int parity;
bool dslash;                   // apply the stencil term
bool clover;                   // apply the clover term
DslashCoarseLaunch(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB,
const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover)
: out(out), inA(inA), inB(inB), Y(Y), X(X), kappa(kappa), parity(parity), dslash(dslash), clover(clover) { }
// Perform one complete application: sanity checks, ghost exchange,
// optional auxiliary work during communication, then the kernel dispatch.
void operator()() {
#ifdef GPU_MULTIGRID
if (inA.V() == out.V()) errorQuda("Aliasing pointers");
if (out.Precision() != inA.Precision() || Y.Precision() != inA.Precision() || X.Precision() != inA.Precision())
errorQuda("Precision mismatch out=%d inA=%d inB=%d Y=%d X=%d",
out.Precision(), inA.Precision(), inB.Precision(), Y.Precision(), X.Precision());
// check all locations match
Location(out, inA, inB, Y, X);
inA.exchangeGhost((QudaParity)(1-parity), 0); // last parameter is dummy
// Overlap any registered auxiliary work with the halo exchange.
if (dslash::aux_worker) dslash::aux_worker->apply(0);
if (Y.Precision() == QUDA_DOUBLE_PRECISION) {
#ifdef GPU_MULTIGRID_DOUBLE
ApplyCoarse<double>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
#else
errorQuda("Double precision multigrid has not been enabled");
#endif
} else if (Y.Precision() == QUDA_SINGLE_PRECISION) {
ApplyCoarse<float>(out, inA, inB, Y, X, kappa, parity, dslash, clover);
} else {
errorQuda("Unsupported precision %d\n", Y.Precision());
}
#else
errorQuda("Multigrid has not been built");
#endif
}
};
// hooks into tune.cpp variables for policy tuning
typedef std::map<TuneKey, TuneParam> map;
const map& getTuneCache();
void disableProfileCount();
void enableProfileCount();
// Policy-level tuner: times the complete DslashCoarseLaunch functor (halo
// exchange + kernels) rather than a single kernel. advanceTuneParam()
// returns false, so only one "policy" configuration exists; the constructor
// pre-tunes the constituent kernels because nested tuning is unsupported.
// Fix: the two "¶m" parameter names were HTML-entity corruption of
// "&param" and did not compile; restored.
class DslashCoarsePolicyTune : public Tunable {
DslashCoarseLaunch &dslash;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
public:
DslashCoarsePolicyTune(DslashCoarseLaunch &dslash) : dslash(dslash)
{
// Build the aux key: operator flags, field metadata and (for MULTI_GPU)
// the comm partitioning bitmap, e.g. ",comm=1100".
strcpy(aux,"policy,");
if (dslash.dslash) strcat(aux,"dslash");
strcat(aux, dslash.clover ? "clover," : ",");
strcat(aux,dslash.inA.AuxString());
#ifdef MULTI_GPU
char comm[5];
comm[0] = (comm_dim_partitioned(0) ? '1' : '0');
comm[1] = (comm_dim_partitioned(1) ? '1' : '0');
comm[2] = (comm_dim_partitioned(2) ? '1' : '0');
comm[3] = (comm_dim_partitioned(3) ? '1' : '0');
comm[4] = '\0';
strcat(aux,",comm=");
strcat(aux,comm);
#endif
// before we do policy tuning we must ensure the kernel
// constituents have been tuned since we can't do nested tuning
if (getTuneCache().find(tuneKey()) == getTuneCache().end()) {
disableProfileCount();
dslash();
enableProfileCount();
}
}
virtual ~DslashCoarsePolicyTune() { }
// Each tuning iteration simply re-runs the full launch functor; tuneLaunch
// is invoked for its timing/caching side effects.
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dslash();
}
int tuningIter() const { return 10; }
bool advanceTuneParam(TuneParam &param) const { return false; }
TuneKey tuneKey() const {
return TuneKey(dslash.inA.VolString(), typeid(*this).name(), aux);
}
// Flop count of the coarse operator: 8*Ns*Nc complex matrix-vector work per
// stencil direction/clover term, minus the redundant initial accumulate.
long long flops() const {
int nDim = 4;
int Ns = dslash.inA.Nspin();
int Nc = dslash.inA.Ncolor();
int nParity = dslash.inA.SiteSubset();
int volumeCB = dslash.inA.VolumeCB();
return ((dslash.dslash*2*nDim+dslash.clover*1)*(8*Ns*Nc*Ns*Nc)-2*Ns*Nc)*nParity*volumeCB;
}
long long bytes() const {
int nParity = dslash.inA.SiteSubset();
return (dslash.dslash||dslash.clover) * dslash.out.Bytes() +
dslash.dslash*8*dslash.inA.Bytes() + dslash.clover*dslash.inB.Bytes() +
nParity*(dslash.dslash*dslash.Y.Bytes()*dslash.Y.VolumeCB()/(2*dslash.Y.Stride())
+ dslash.clover*dslash.X.Bytes()/2);
// multiply Y by volume / stride to correct for pad
}
};
//Apply the coarse Dirac matrix to a coarse grid vector
//out(x) = M*in = X*in - kappa*\sum_mu Y_{-\mu}(x)in(x+mu) + Y^\dagger_mu(x-mu)in(x-mu)
//Uses the kappa normalization for the Wilson operator.
// Public entry point: wrap the operands in a launch functor, then run it
// through the policy tuner (which executes the operator as a side effect).
void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB,
const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover) {
DslashCoarseLaunch Dslash(out, inA, inB, Y, X, kappa, parity, dslash, clover);
DslashCoarsePolicyTune policy(Dslash);
policy.apply(0);
}//ApplyCoarse
} // namespace quda
|
e62fd1d8e100762abbf88c9bda235b0bf0cfa57b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "KernelTilesMul.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: sweeps every matrix size in matrices_
// against the 20 launch shapes in blocks_, timing 1000 launches of
// KernelTilesMul per combination and printing
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per line.
int main(int argc, char **argv) {
hipSetDevice(0);
// NOTE(review): argv[1] is parsed without an argc check — running with no
// argument dereferences NULL.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *Mat1 = NULL;
// NOTE(review): allocates XSIZE*YSIZE *bytes*, not ints — a sizeof(int)
// factor appears to be missing; the buffers are also never hipFree'd, so
// device memory leaks on every iteration.
hipMalloc(&Mat1, XSIZE*YSIZE);
int *Mat2 = NULL;
hipMalloc(&Mat2, XSIZE*YSIZE);
int *Mat3 = NULL;
hipMalloc(&Mat3, XSIZE*YSIZE);
int rowM1 = 1;
int colM1 = 1;
int colM2 = 1;
// Round the launch domain up to a multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) establishes the context; first launch + sync is a warm-up.
hipFree(0);hipLaunchKernelGGL((
KernelTilesMul), dim3(gridBlock),dim3(threadBlock), 0, 0, Mat1,Mat2,Mat3,rowM1,colM1,colM2);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
KernelTilesMul), dim3(gridBlock),dim3(threadBlock), 0, 0, Mat1,Mat2,Mat3,rowM1,colM1,colM2);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
KernelTilesMul), dim3(gridBlock),dim3(threadBlock), 0, 0, Mat1,Mat2,Mat3,rowM1,colM1,colM2);
}
// NOTE(review): launches are asynchronous and no hipDeviceSynchronize()
// precedes this timestamp, so the interval measures launch overhead
// rather than kernel execution time.
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e62fd1d8e100762abbf88c9bda235b0bf0cfa57b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "KernelTilesMul.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps every matrix size in matrices_ against the 20
// launch shapes in blocks_, timing 1000 launches of KernelTilesMul per
// combination and printing [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per line.
// Fixes: device allocations now carry the sizeof(int) factor, buffers are
// released each iteration (previously leaked), the timed region is closed
// with a device sync (launches are asynchronous), and argv[1] is guarded.
int main(int argc, char **argv) {
cudaSetDevice(0);
// Guard against a missing argument: strtol(argv[1], ...) would otherwise
// dereference NULL.
if (argc < 2) {
fprintf(stderr, "usage: %s <number_of_matrix_sizes>\n", argv[0]);
return 1;
}
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// Allocate XSIZE*YSIZE ints (the original omitted sizeof(int),
// under-allocating by a factor of four).
int *Mat1 = NULL;
cudaMalloc(&Mat1, XSIZE*YSIZE*sizeof(int));
int *Mat2 = NULL;
cudaMalloc(&Mat2, XSIZE*YSIZE*sizeof(int));
int *Mat3 = NULL;
cudaMalloc(&Mat3, XSIZE*YSIZE*sizeof(int));
int rowM1 = 1;
int colM1 = 1;
int colM2 = 1;
// Round the launch domain up to a multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0); // establish the CUDA context before timing
KernelTilesMul<<<gridBlock,threadBlock>>>(Mat1,Mat2,Mat3,rowM1,colM1,colM2);
cudaDeviceSynchronize();
// Warm-up launches.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
KernelTilesMul<<<gridBlock,threadBlock>>>(Mat1,Mat2,Mat3,rowM1,colM1,colM2);
}
cudaDeviceSynchronize();
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
KernelTilesMul<<<gridBlock,threadBlock>>>(Mat1,Mat2,Mat3,rowM1,colM1,colM2);
}
// Kernel launches are asynchronous: wait for completion so the measured
// interval covers execution, not just launch overhead.
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Release device buffers before the next configuration (the original
// leaked them on every iteration).
cudaFree(Mat1);
cudaFree(Mat2);
cudaFree(Mat3);
}
}}
96cea21b2279f18e140a07ba41aae5777491b578.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Babak Poursartip
// 02/21/2021
// CUDA
//topic: shared memory
#include <cstdio>
#include <ctime>
#include <iostream>
#include <hiprand/hiprand.h>
// ==============================
// Single-block tree reduction: sums 2*blockDim.x ints from d into d[0],
// using dynamic shared memory as scratch (caller passes 2*blockDim.x*sizeof(int)
// as the shared-memory launch argument).
// Fix: added the missing __syncthreads() barriers — without them the
// staging copy and each reduction round race on shared memory whenever the
// block spans more than one warp (and on Volta+ even within a warp).
__global__ void sumSingleBlock(int *d)
{
extern __shared__ int dcopy[];
int tid = threadIdx.x;
// Stage the input into shared memory; each thread copies two elements.
dcopy[tid*2] = d[tid*2];
dcopy[tid*2+1] = d[tid*2+1];
// All elements must be staged before any thread starts reducing.
__syncthreads();
// tc: number of participating threads, halved each round;
// stepSize: distance between the two elements a thread adds, doubled each round.
for (int tc = blockDim.x, stepSize = 1; tc > 0; tc /=2, stepSize *=2)
{
if (tid < tc)
{
int pa = tid * stepSize * 2;
int pb = pa + stepSize;
dcopy[pa] += dcopy[pb];
# if __CUDA_ARCH__>=200
printf("%d, %d, %d, %d, %d \n", tid, tc, stepSize, pa, pb);
#endif
}
// Barrier between rounds; placed outside the divergent branch so every
// thread in the block reaches it.
__syncthreads();
}
if (tid == 0) d[0] = dcopy[0];
}
// ==============================
// Host driver: builds h = [1..32], reduces it on the device with a single
// block of count/2 threads (each thread handles two elements) and prints the
// sum. The third launch argument is the dynamic shared-memory size in bytes.
int main()
{
printf(" starts \n");
const int count = 32;
const int size = count * sizeof(int);
int h[count];
for (int i = 0; i < count; ++i)
h[i] = i + 1;
int *d;
hipMalloc(&d, size);
hipMemcpy(d, h, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL((
sumSingleBlock), dim3(1), dim3(count/2), size, 0, d);
int result;
// Blocking copy of d[0] — also synchronizes with the kernel above.
hipMemcpy(&result, d, sizeof(int), hipMemcpyDeviceToHost);
std::cout << " sum: "<< result << std::endl;
hipFree(d);
printf(" done \n");
return 0;
}
| 96cea21b2279f18e140a07ba41aae5777491b578.cu |
// Babak Poursartip
// 02/21/2021
// CUDA
//topic: shared memory
#include <cstdio>
#include <ctime>
#include <iostream>
#include <curand.h>
// ==============================
// Single-block tree reduction: sums 2*blockDim.x ints from d into d[0],
// using dynamic shared memory as scratch (caller passes 2*blockDim.x*sizeof(int)
// as the shared-memory launch argument).
// Fix: added the missing __syncthreads() barriers — without them the
// staging copy and each reduction round race on shared memory whenever the
// block spans more than one warp (and on Volta+ even within a warp).
__global__ void sumSingleBlock(int *d)
{
extern __shared__ int dcopy[];
int tid = threadIdx.x;
// Stage the input into shared memory; each thread copies two elements.
dcopy[tid*2] = d[tid*2];
dcopy[tid*2+1] = d[tid*2+1];
// All elements must be staged before any thread starts reducing.
__syncthreads();
// tc: number of participating threads, halved each round;
// stepSize: distance between the two elements a thread adds, doubled each round.
for (int tc = blockDim.x, stepSize = 1; tc > 0; tc /=2, stepSize *=2)
{
if (tid < tc)
{
int pa = tid * stepSize * 2;
int pb = pa + stepSize;
dcopy[pa] += dcopy[pb];
# if __CUDA_ARCH__>=200
printf("%d, %d, %d, %d, %d \n", tid, tc, stepSize, pa, pb);
#endif
}
// Barrier between rounds; placed outside the divergent branch so every
// thread in the block reaches it.
__syncthreads();
}
if (tid == 0) d[0] = dcopy[0];
}
// ==============================
// Host driver: builds h = [1..32], reduces it on the device with a single
// block of count/2 threads (each thread handles two elements) and prints the
// sum. The third launch parameter is the dynamic shared-memory size in bytes.
int main()
{
printf(" starts \n");
const int count = 32;
const int size = count * sizeof(int);
int h[count];
for (int i = 0; i < count; ++i)
h[i] = i + 1;
int *d;
cudaMalloc(&d, size);
cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
sumSingleBlock<<<1, count/2, size>>>(d);
int result;
// Blocking copy of d[0] — also synchronizes with the kernel above.
cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost);
std::cout << " sum: "<< result << std::endl;
cudaFree(d);
printf(" done \n");
return 0;
}
|
2c0265dafcad5357b7f8ef8cf75709d8c5f6279d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* saxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
// c += alpha * b for fixed 16-element register vectors b and c.
// The compiler fully unrolls the loop, producing the same 16 FMAs in the
// same order as the original hand-unrolled sequence.
static __device__ void saxpy(
    float alpha,
    const float* __restrict__ b,
    float* __restrict__ c )
{
#pragma unroll
    for (int i = 0; i < 16; ++i) {
        c[i] += alpha * b[i];
    }
}
// SGEMM micro-kernel (non-transposed A, non-transposed B): each 16x4 thread
// block computes a 64x16 tile of C, staging 16x16 panels of B through shared
// memory while streaming A from global memory four rows at a time.
__global__ void
sgemm_kernel_N_N_64_16_16_16_4_special(
float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta )
{
/* -- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Purpose:
========
This routine computes
C = alpha * A*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This kernel is for matrices divisible by the corresponding
blocking sizes.
=============================================================== */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;   // flattened thread id, 0..63: one C row each
B += tx + __mul24(iby+ty, ldb);
A += ibx + idt;
C += ibx + idt + __mul24(iby, ldc);
const float *Bend = B + k;
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// m and n are dead after the pointer setup above; reuse them as the
// 2*lda / 3*lda strides for the 4-row A fetches.
m = 2*lda;
n = 3*lda;
do {
//float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
float Ab[4] = {A[0], A[lda], A[m], A[n]};
// 17-column padding avoids shared-memory bank conflicts on column access.
__shared__ float Bb[16][17];
Bb[tx][ty+0 ] = B[0];
Bb[tx][ty+4 ] = B[4*ldb];
Bb[tx][ty+8 ] = B[8*ldb];
Bb[tx][ty+12] = B[12*ldb];
__syncthreads();
A += 4 * lda;
// Rank-1 updates: overlap the next A fetch with the current saxpy.
saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0];
saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[lda];
saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[m];
saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[n];
A += 4 * lda;
saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0];
saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[lda];
saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[m];
saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[n];
A += 4 * lda;
saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0];
saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[lda];
saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[m];
saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[n];
A += 4 * lda;
saxpy( Ab[0], &Bb[12][0], Cb );
saxpy( Ab[1], &Bb[13][0], Cb );
saxpy( Ab[2], &Bb[14][0], Cb );
saxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
// Barrier before the next iteration overwrites Bb.
__syncthreads();
} while (B < Bend);
// Write back the 16 accumulated results of this thread's C row.
#pragma unroll 16
for(int i=0; i < 16; i++) {
C[0] = alpha * Cb[i] + beta * C[0];
C += ldc;
}
}
// Host wrapper: launches the special-case SGEMM kernel on magma_stream with
// 16x4 thread blocks over an m/64 x n/16 grid. Requires m divisible by 64
// and n by 16 (see kernel header).
extern "C" void
magmablas_sgemm_N_N_64_16_16_16_4_special(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
dim3 threads( 16, 4 );
dim3 grid( m/64, n/16 );
hipLaunchKernelGGL(( sgemm_kernel_N_N_64_16_16_16_4_special), dim3(grid), dim3(threads), 0, magma_stream ,
C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
| 2c0265dafcad5357b7f8ef8cf75709d8c5f6279d.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* saxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
// c += alpha * b for fixed 16-element register vectors b and c.
// The compiler fully unrolls the loop, producing the same 16 FMAs in the
// same order as the original hand-unrolled sequence.
static __device__ void saxpy(
    float alpha,
    const float* __restrict__ b,
    float* __restrict__ c )
{
#pragma unroll
    for (int i = 0; i < 16; ++i) {
        c[i] += alpha * b[i];
    }
}
// SGEMM micro-kernel (non-transposed A, non-transposed B): each 16x4 thread
// block computes a 64x16 tile of C, staging 16x16 panels of B through shared
// memory while streaming A from global memory four rows at a time.
__global__ void
sgemm_kernel_N_N_64_16_16_16_4_special(
float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta )
{
/* -- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Purpose:
========
This routine computes
C = alpha * A*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This kernel is for matrices divisible by the corresponding
blocking sizes.
=============================================================== */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;   // flattened thread id, 0..63: one C row each
B += tx + __mul24(iby+ty, ldb);
A += ibx + idt;
C += ibx + idt + __mul24(iby, ldc);
const float *Bend = B + k;
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// m and n are dead after the pointer setup above; reuse them as the
// 2*lda / 3*lda strides for the 4-row A fetches.
m = 2*lda;
n = 3*lda;
do {
//float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
float Ab[4] = {A[0], A[lda], A[m], A[n]};
// 17-column padding avoids shared-memory bank conflicts on column access.
__shared__ float Bb[16][17];
Bb[tx][ty+0 ] = B[0];
Bb[tx][ty+4 ] = B[4*ldb];
Bb[tx][ty+8 ] = B[8*ldb];
Bb[tx][ty+12] = B[12*ldb];
__syncthreads();
A += 4 * lda;
// Rank-1 updates: overlap the next A fetch with the current saxpy.
saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0];
saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[lda];
saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[m];
saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[n];
A += 4 * lda;
saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0];
saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[lda];
saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[m];
saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[n];
A += 4 * lda;
saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0];
saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[lda];
saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[m];
saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[n];
A += 4 * lda;
saxpy( Ab[0], &Bb[12][0], Cb );
saxpy( Ab[1], &Bb[13][0], Cb );
saxpy( Ab[2], &Bb[14][0], Cb );
saxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
// Barrier before the next iteration overwrites Bb.
__syncthreads();
} while (B < Bend);
// Write back the 16 accumulated results of this thread's C row.
#pragma unroll 16
for(int i=0; i < 16; i++) {
C[0] = alpha * Cb[i] + beta * C[0];
C += ldc;
}
}
// Host wrapper: launches the special-case SGEMM kernel on magma_stream with
// 16x4 thread blocks over an m/64 x n/16 grid. Requires m divisible by 64
// and n by 16 (see kernel header).
extern "C" void
magmablas_sgemm_N_N_64_16_16_16_4_special(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
dim3 threads( 16, 4 );
dim3 grid( m/64, n/16 );
sgemm_kernel_N_N_64_16_16_16_4_special<<< grid, threads, 0, magma_stream >>>
( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
2a47e35ceb22fa3b0bdae490757b0ad258afa023.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by mrjakobdk on 6/11/20.
//
#include "ClusteringGpuBlocksMem.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "../../utils/util.h"
#include "../../utils/TmpMalloc.cuh"
#include "../../structures/ScyTreeArray.h"
#define BLOCK_SIZE 1024
#define PI 3.14
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed HIP API call (file/line plus the readable error string)
// and, unless abort is false, terminate with the error code as exit status.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) {
    if (code == hipSuccess)
        return; // fast path: nothing to report
    fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
using namespace std;
// Euclidean distance between points p_id and q_id of X (row-major, d
// columns), restricted to the subspace_size dimensions listed in 'subspace'.
// Accumulates in double, returns float.
__device__
float dist_gpu_blocks_mem(int p_id, int q_id, float *X, int *subspace, int subspace_size, int d) {
float *p = &X[p_id * d];
float *q = &X[q_id * d];
double distance = 0;
for (int i = 0; i < subspace_size; i++) {
int d_i = subspace[i];
double diff = p[d_i] - q[d_i];
distance += diff * diff;
}
//printf("dinstance = %f\n", distance);
return sqrt(distance);//todo squared can be removed by sqrt(x)<=y => x<=y*y if x>=0, y>=0
}
// Fill the upper triangle (i < j) of a per-restriction n x n distance matrix
// for every (dimension, restriction) pair. Grid: blockIdx.x iterates the
// restricted dimensions, blockIdx.y the rows; threads stride the columns.
// NOTE(review): d_neighborhoods / d_number_of_neighbors are computed but
// never written here — they appear to exist only for signature symmetry
// with find_neighborhood_blocks_mem.
__global__
void
compute_distances_blocks_mem(float *d_distance_matrix_full, int *d_restricteds_pr_dim, int restricted_dims,
int *d_neighborhoods_full,
int *d_number_of_neighbors_full,
float *X,
int **d_points_full, int *d_number_of_points, float neighborhood_size,
int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d, int number_of_cells,
int n) {
for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) {
for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
// printf("test-1\n");
int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest];
int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest];
int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n;
float *d_distance_matrix = d_distance_matrix_full + i_dim * number_of_cells * n * n + i_rest * n * n;
int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n;
// printf("test0\n");
for (int i = blockIdx.y; i < number_of_points; i += gridDim.y) {
int p_id = d_points[i];
for (int j = threadIdx.x; j < number_of_points; j += blockDim.x) {
int q_id = d_points[j];
// Only the upper triangle is stored; readers mirror i/j as needed.
if (i < j) {
float distance = dist_gpu_blocks_mem(p_id, q_id, X, subspace, subspace_size, d);
d_distance_matrix[i * number_of_points + j] = distance;
}
}
}
}
}
}
// Build per-point neighbor lists from the precomputed (upper-triangular)
// distance matrices: for each (dimension, restriction) pair, thread i
// records the indices j whose distance to point i is <= neighborhood_size,
// and stores the count in d_number_of_neighbors[i]. A point is never its
// own neighbor (p_id != q_id guard).
__global__
void
find_neighborhood_blocks_mem(float *d_distance_matrix_full, int *d_restricteds_pr_dim, int restricted_dims,
int *d_neighborhoods_full, int *d_number_of_neighbors_full, float *X,
int **d_points_full, int *d_number_of_points, float neighborhood_size,
int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d, int number_of_cells,
int n) {
for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) {
for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
// printf("test-1\n");
int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest];
int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest];
int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n;
float *d_distance_matrix = d_distance_matrix_full + i_dim * number_of_cells * n * n + i_rest * n * n;
int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n;
// printf("test0\n");
for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
int *d_neighborhood = &d_neighborhoods[i * number_of_points];
int number_of_neighbors = 0;
int p_id = d_points[i];
for (int j = 0; j < number_of_points; j++) {
int q_id = d_points[j];
if (p_id != q_id) {
float distance = 0;//dist_gpu_blocks(p_id, q_id, X, subspace, subspace_size, d);
// Mirror into the stored upper triangle (i==j never reaches here).
if (i < j) {
distance = d_distance_matrix[i * number_of_points + j];
} else if (j < i) {
distance = d_distance_matrix[j * number_of_points + i];
}
if (neighborhood_size >= distance) {
d_neighborhood[number_of_neighbors] = j;
number_of_neighbors++;
}
}
}
d_number_of_neighbors[i] = number_of_neighbors;
}
}
}
}
//
//__global__
//void
//find_neighborhood_blocks(float *d_distance_matrix_full, int *d_restricteds_pr_dim, int restricted_dims, int *d_neighborhoods_full,
// int *d_number_of_neighbors_full,
// float *X,
// int **d_points_full, int *d_number_of_points, float neighborhood_size,
// int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d, int number_of_cells,
// int n) {
// for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) {
// for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
//
//// printf("test-1\n");
// int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
// int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
// int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest];
// int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest];
// int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n;
// int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n;
//// printf("test0\n");
//
//
// for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
// int *d_neighborhood = &d_neighborhoods[i * number_of_points];
// int number_of_neighbors = 0;
// int p_id = d_points[i];
// for (int j = 0; j < number_of_points; j++) {
// int q_id = d_points[j];
// if (p_id != q_id) {
// float distance = dist_gpu_blocks(p_id, q_id, X, subspace, subspace_size, d);
// if (neighborhood_size >= distance) {
// d_neighborhood[number_of_neighbors] = j;
// number_of_neighbors++;
// }
// }
// }
// d_number_of_neighbors[i] = number_of_neighbors;
// }
// }
// }
//}
// Epanechnikov-style density contribution of p_id's neighborhood:
// sum over neighbors of (1 - (dist/eps)^2). Entries of d_neighborhood are
// indices into d_points; negative entries are treated as deleted and skipped.
__device__
float phi_gpu_blocks_mem(int p_id, int *d_neighborhood, float neighborhood_size, int number_of_neighbors,
float *X, int *d_points, int *subspace, int subspace_size, int d) {
float sum = 0;
for (int j = 0; j < number_of_neighbors; j++) {
int q_id = d_points[d_neighborhood[j]];
if (q_id >= 0) {
float distance = dist_gpu_blocks_mem(p_id, q_id, X, subspace, subspace_size, d) / neighborhood_size;
float sq = distance * distance;
sum += (1. - sq);
}
}
return sum;
}
// Recursive gamma function for integer and half-integer arguments:
// Gamma(1) = 1, Gamma(1/2) = sqrt(pi), Gamma(n) = (n-1)*Gamma(n-1).
// Uses the file-level PI (3.14) approximation.
__device__
float gamma_gpu_blocks_mem(double n) {
if (round(n) == 1) {//todo not nice cond n==1
return 1.;
} else if (n < 1) {//todo not nice cond n==1/2
return sqrt(PI);
}
return (n - 1.) * gamma_gpu_blocks_mem(n - 1.);
}
// Gamma(n/2) with the argument encoded as the integer n (twice the actual
// argument): Gamma(1) = 1 (n==2), Gamma(1/2) = sqrt(pi) (n==1),
// Gamma(n/2) = (n/2 - 1) * Gamma(n/2 - 1) otherwise. Integer encoding keeps
// the recursion exact (no floating-point comparisons).
__device__
double gamma_gpu_blocks_mem(int n) {
if (n == 2) {
return 1.;
} else if (n == 1) {
return sqrt(PI);
}
return (n / 2. - 1.) * gamma_gpu_blocks_mem(n - 2);
}
// Volume constant of the unit ball in 'subspace_size' dimensions:
// pi^(s/2) / Gamma(s/2 + 1), with the Gamma evaluated via the integer
// encoding (s + 2 encodes argument s/2 + 1). PI is the coarse 3.14 macro.
__device__
float c_gpu_blocks_mem(int subspace_size) {
float r = pow(PI, subspace_size / 2.);
//r = r / gamma_gpu_blocks(subspace_size / 2. + 1.);
r = r / gamma_gpu_blocks_mem(subspace_size + 2);
return r;
}
// Expected density threshold alpha for a subspace of the given size:
// 2*n*eps^s*c(s) / (v^s * (s+2)); v is a placeholder volume factor fixed
// at 1 (see the todo).
__device__
float alpha_gpu_blocks_mem(int subspace_size, float neighborhood_size, int n) {
float v = 1.;//todo v is missing?? what is it??
float r = 2 * n * pow(neighborhood_size, subspace_size) * c_gpu_blocks_mem(subspace_size);
r = r / (pow(v, subspace_size) * (subspace_size + 2));
return r;
}
// Weight 2/(s+2) used to scale the minimum-object-count density threshold.
__device__
float omega_gpu_blocks_mem(int subspace_size) {
return 2.0 / (subspace_size + 2.0);
}
// For each (dimension blockIdx.x, restriction, point blockIdx.y): collect
// the point's neighbors into its slot of the packed neighborhood buffer
// (slot boundaries come from d_neighborhood_end_position_full), accumulate
// the Epanechnikov density p, and mark the point dense when
// p >= max(F*alpha, num_obj*omega). Threads of a block cooperate on one
// point via the block-shared accumulators p / number_of_neighbors.
__global__
void
compute_is_dense_blocks_mem(int *d_restricteds_pr_dim, bool *d_is_dense_full, int **d_points_full,
int *d_number_of_points, int *d_neighborhood_end_position_full,
int *d_neighborhoods_full, float neighborhood_size,
float *X, int **d_restricted_dims_full, int *d_number_of_restricted_dims, float F, int n,
int num_obj, int d, int number_of_cells) {//todo change name of subspace
// Block-shared accumulators for the current point.
__shared__ float p;
__shared__ unsigned int number_of_neighbors;
int i_dim = blockIdx.x;
for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest];
int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells +
i_rest];//todo not needed this is constant for each clustering
bool *d_is_dense = d_is_dense_full + i_dim * number_of_cells * n + i_rest * n;
float a = alpha_gpu_blocks_mem(subspace_size, neighborhood_size, n);
float w = omega_gpu_blocks_mem(subspace_size);
for (int i = blockIdx.y; i < number_of_points; i += gridDim.y) {
// Locate this point's slot in the packed neighborhood buffer via the
// exclusive prefix of end positions.
int point_index = i_dim * number_of_cells * n + i_rest * n + i;
int neighborhood_start = point_index == 0 ? 0 : d_neighborhood_end_position_full[point_index - 1];
int *d_neighborhood = &d_neighborhoods_full[neighborhood_start];
int p_id = d_points[i];
__syncthreads();
p = 0;
number_of_neighbors = 0;
__syncthreads();
for (int j = threadIdx.x; j < number_of_points; j += blockDim.x) {
if (i != j) {
int q_id = d_points[j];
float distance = dist_gpu_blocks_mem(p_id, q_id, X, subspace, subspace_size, d);
if (neighborhood_size >= distance) {
// atomicInc both counts neighbors and hands out the write slot;
// the cap number_of_points can never be reached since j != i.
unsigned int tmp = atomicInc(&number_of_neighbors, number_of_points);
d_neighborhood[tmp] = j;//q_id;
distance /= neighborhood_size;
float sq = distance * distance;
atomicAdd(&p, (1. - sq));
}
}
}
__syncthreads();
d_is_dense[i] = p >= max(F * a, num_obj * w);
}
}
}
//
//__global__
//void
//compute_is_dense_new_blocks(int *d_restricteds_pr_dim, bool *d_is_dense_full, int **d_points_full,
// int *d_number_of_points,
// float neighborhood_size,
// float *X, int **d_restricted_dims_full, int *d_number_of_restricted_dims, float F, int n,
// int num_obj, int d, int number_of_cells) {//todo change name of subspace
//
//
// int i_dim = blockIdx.x;
// for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
// int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
// int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
// int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest];
// int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest];
// bool *d_is_dense = d_is_dense_full + i_dim * number_of_cells * n + i_rest * n;
//
//
// for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
// int p_id = d_points[i];
//// float p = phi_gpu_blocks(p_id, d_neighborhood, neighborhood_size, d_number_of_neighbors[i], X, d_points,
//// subspace, subspace_size, d);
//
// float p = 0;
//
// for (int j = 0; j < n; j++) {
// int q_id = j;
// if (p_id != q_id) {
// float distance = dist_gpu_blocks_mem(p_id, q_id, X, subspace, subspace_size, d);
// if (neighborhood_size >= distance) {
// distance = distance / neighborhood_size;
// float sq = distance * distance;
// p += (1. - sq);
// }
// }
// }
//
// float a = alpha_gpu_blocks_mem(subspace_size, neighborhood_size, n);
// float w = omega_gpu_blocks_mem(subspace_size);
//// printf("%d:%d, %f>=%f\n", p_id, subspace_size, p, max(F * a, num_obj * w));
//// printf("%d:%d, F=%f, a=%f, num_obj=%d, w=%f\n", p_id, subspace_size, F, a, num_obj, w);
// d_is_dense[i] = p >= max(F * a, num_obj * w);
// }
// }
//}
//for ref see: http://hpcg.purdue.edu/papers/Stava2011CCL.pdf
// Kernel: block-wide union-find (label propagation) over dense points.
//
// One block per restricted dimension (blockIdx.x). For each cell, every
// dense point starts as its own root; the loop repeatedly pulls the
// smallest root seen among a point's dense neighbors (pass 1) and then
// flattens chains by pointer jumping (pass 2) until a full iteration makes
// no change. Finally each point's global id in d_clustering is set to the
// global id of its cluster root, or -1 for non-dense (noise) points.
//
// The shared `changed` flag is written uniformly by all threads between
// barriers (benign race) and raised via atomicMax inside pass 1, so every
// thread of the block evaluates the while-condition identically.
__global__
void disjoint_set_clustering_blocks_mem(int *d_restricteds_pr_dim, int *d_clustering_full, int *d_disjoint_set_full,
                                        int *d_neighborhoods_full, int *d_number_of_neighbors_full,
                                        int *d_neighborhood_end_position_full,
                                        bool *d_is_dense_full, int **d_points_full, int *d_number_of_points,
                                        int number_of_cells, int n) {

    int i_dim = blockIdx.x;
    for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
        int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
        int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
        int *d_clustering = d_clustering_full + i_dim * n;
        bool *d_is_dense = d_is_dense_full + i_dim * number_of_cells * n + i_rest * n;
        int *d_disjoint_set = d_disjoint_set_full + i_dim * number_of_cells * n + i_rest * n;
        int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n;

        __shared__ int changed;
        changed = 1;
        __syncthreads();

        // init: dense points are their own root, non-dense are marked -1
        for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
            if (d_is_dense[i]) {
                d_disjoint_set[i] = i;
            } else {
                d_disjoint_set[i] = -1;
            }
        }
        __syncthreads();

        //for (int itr = 1; itr < number_of_points; itr *= 2) {
        while (changed) {
            //disjoint_set_pass1: hook each point to the smallest root among
            //its dense neighbors
            __syncthreads();
            changed = 0;
            __syncthreads();
            for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
                if (!d_is_dense[i]) continue;
                int root = d_disjoint_set[i];
                // neighborhood slice for this point, via the scan offsets
                int point_index = i_dim * number_of_cells * n + i_rest * n + i;
                int neighborhood_start = point_index == 0 ? 0 : d_neighborhood_end_position_full[point_index - 1];
                int *d_neighborhood = &d_neighborhoods_full[neighborhood_start];
                for (int j = 0; j < d_number_of_neighbors[i]; j++) {
                    if (d_is_dense[d_neighborhood[j]]) {
                        if (d_disjoint_set[d_neighborhood[j]] < root) {
                            root = d_disjoint_set[d_neighborhood[j]];
                            atomicMax(&changed, 1);
                        }
                    }
                }
                d_disjoint_set[i] = root;
            }
            __syncthreads();
            //disjoint_set_pass2: pointer jumping flattens root chains
            for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
                int root = d_disjoint_set[i];
                while (root >= 0 && root != d_disjoint_set[root]) {
                    root = d_disjoint_set[root];
                }
                d_disjoint_set[i] = root;
            }
            __syncthreads();
        }

        //gather_clustering: translate local root indices to global point ids
        for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
            if (d_is_dense[i]) {
                d_clustering[d_points[i]] = d_points[d_disjoint_set[i]];
            } else {
                d_clustering[d_points[i]] = -1;
            }
        }
    }
}
// Kernel: counts, per point, how many other points of the same cell lie
// within neighborhood_size in the cell's restricted subspace.
//
// Launch layout: blockIdx.x strides over restricted dimensions, blockIdx.y
// strides over the points of a cell, and blockDim.x threads cooperate on
// the distance checks of a single point, accumulating into a shared
// counter. The per-point counts feed the inclusive scan that sizes the
// packed neighborhood buffer (see the host driver).
__global__
void
compute_number_of_neighbors_blocks_mem(int *d_restricteds_pr_dim, int restricted_dims, int *d_number_of_neighbors_full,
                                       float *X, int **d_points_full, int *d_number_of_points, float neighborhood_size,
                                       int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d,
                                       int number_of_cells, int n) {

    // Block-wide neighbor counter for the point currently being processed.
    __shared__ int number_of_neighbors;

    for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) {
        for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
            int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
            int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
            int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest];
            int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest];
            int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n;

            for (int i = blockIdx.y; i < number_of_points; i += gridDim.y) {
                int p_id = d_points[i];
                __syncthreads();
                // Benign race: all threads write the same zero between barriers.
                number_of_neighbors = 0;
                __syncthreads();
                for (int j = threadIdx.x; j < number_of_points; j += blockDim.x) {
                    if (i != j) {
                        int q_id = d_points[j];
                        float distance = dist_gpu_blocks_mem(p_id, q_id, X, subspace, subspace_size, d);
                        if (neighborhood_size >= distance) {
                            atomicAdd(&number_of_neighbors, 1);
                        }
                    }
                }
                __syncthreads();
                // All threads store the same final count.
                d_number_of_neighbors[i] = number_of_neighbors;
            }
        }
    }
}
// Host driver for the block-parallel clustering stage (memory-reuse variant).
//
// Flattens the per-(restricted dimension, cell) metadata of L_pruned into
// device arrays, then runs the pipeline:
//   1. compute_number_of_neighbors_blocks_mem - counts eps-neighbors per point,
//   2. inclusive scan of the counts           - slice offsets + total size of
//      the packed neighborhood buffer,
//   3. compute_is_dense_blocks_mem            - fills neighborhoods, marks dense,
//   4. disjoint_set_clustering_blocks_mem     - joins dense neighbors into
//      clusters, writing labels (global id of the cluster root, -1 = noise)
//      into d_clustering_full, n ints per restricted dimension.
//
// Device scratch comes from the TmpMalloc pool and is not freed here; only
// the host-side staging arrays are released. Every HIP runtime call is
// wrapped in gpuErrchk so a failed copy/memset surfaces immediately instead
// of as a later mysterious kernel fault.
void
ClusteringGPUBlocksMem(TmpMalloc *tmps, int *d_clustering_full, vector <vector<ScyTreeArray *>> L_pruned, float *d_X,
                       int n, int d, float neighborhood_size, float F, int num_obj, int number_of_cells) {
    tmps->reset_counters();

    int restricted_dims = L_pruned.size();

    // Host staging for the flattened (dimension, cell) metadata.
    int *h_restricteds_pr_dim = new int[restricted_dims];
    int **h_points_full = new int *[restricted_dims * number_of_cells];
    int **h_restricted_dims_full = new int *[restricted_dims * number_of_cells];
    int *h_number_of_points = new int[restricted_dims * number_of_cells];
    int *h_number_of_restricted_dims = new int[restricted_dims * number_of_cells];

    // avg/min point counts are only used to size the kernel launches below.
    int avg_number_of_points = 0;
    int min_number_of_points = n;
    int count = 0;
    for (int i = 0; i < restricted_dims; i++) {
        h_restricteds_pr_dim[i] = L_pruned[i].size();
        for (int j = 0; j < L_pruned[i].size(); j++) {
            avg_number_of_points += L_pruned[i][j]->number_of_points;
            count++;
            if (min_number_of_points > L_pruned[i][j]->number_of_points)
                min_number_of_points = L_pruned[i][j]->number_of_points;
            h_points_full[i * number_of_cells + j] = L_pruned[i][j]->d_points;
            h_restricted_dims_full[i * number_of_cells + j] = L_pruned[i][j]->d_restricted_dims;
            h_number_of_points[i * number_of_cells + j] = L_pruned[i][j]->number_of_points;
            h_number_of_restricted_dims[i * number_of_cells + j] = L_pruned[i][j]->number_of_restricted_dims;
        }
    }
    if (count > 0)
        avg_number_of_points /= count;

    // Upload the flattened metadata to the device.
    int *d_restricteds_pr_dim = tmps->get_int_array(tmps->int_array_counter++, restricted_dims);
    gpuErrchk(hipMemcpy(d_restricteds_pr_dim, h_restricteds_pr_dim, restricted_dims * sizeof(int),
                        hipMemcpyHostToDevice));

    int **d_points_full = tmps->get_int_pointer_array(tmps->int_pointer_array_counter++,
                                                      restricted_dims * number_of_cells);
    int **d_restricted_dims_full = tmps->get_int_pointer_array(tmps->int_pointer_array_counter++,
                                                               restricted_dims * number_of_cells);
    int *d_number_of_points = tmps->get_int_array(tmps->int_array_counter++, restricted_dims * number_of_cells);
    int *d_number_of_restricted_dims = tmps->get_int_array(tmps->int_array_counter++,
                                                           restricted_dims * number_of_cells);
    gpuErrchk(hipPeekAtLastError());
    gpuErrchk(hipMemcpy(d_points_full, h_points_full, restricted_dims * number_of_cells * sizeof(int *),
                        hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_restricted_dims_full, h_restricted_dims_full,
                        restricted_dims * number_of_cells * sizeof(int *), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_number_of_points, h_number_of_points, restricted_dims * number_of_cells * sizeof(int),
                        hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_number_of_restricted_dims, h_number_of_restricted_dims,
                        restricted_dims * number_of_cells * sizeof(int), hipMemcpyHostToDevice));

    // Scratch buffers, sized with n per cell.
    // todo: number_of_points could be used instead of n.
    int *d_number_of_neighbors_full = tmps->get_int_array(tmps->int_array_counter++, n * restricted_dims *
                                                                                     number_of_cells);
    gpuErrchk(hipMemset(d_number_of_neighbors_full, 0, restricted_dims * number_of_cells * n * sizeof(int)));
    bool *d_is_dense_full = tmps->get_bool_array(tmps->bool_array_counter++,
                                                 n * restricted_dims * number_of_cells); // number_of_points
    int *d_disjoint_set_full = tmps->get_int_array(tmps->int_array_counter++,
                                                   n * restricted_dims * number_of_cells); // number_of_points
    int *d_neighborhood_end_position_full = tmps->get_int_array(tmps->int_array_counter++,
                                                                restricted_dims * number_of_cells * n);
    gpuErrchk(hipMemset(d_neighborhood_end_position_full, 0, restricted_dims * number_of_cells * n * sizeof(int)));

    int number_of_threads = min(max(1, avg_number_of_points), BLOCK_SIZE);

    if (restricted_dims > 0) {
        // 1) Count neighbors per point.
        dim3 block(min(64, max(1, avg_number_of_points)));
        dim3 grid(restricted_dims, max(1, avg_number_of_points));
        hipLaunchKernelGGL((compute_number_of_neighbors_blocks_mem), dim3(grid), dim3(block), 0, 0,
                           d_restricteds_pr_dim, restricted_dims, d_number_of_neighbors_full, d_X, d_points_full,
                           d_number_of_points, neighborhood_size, d_restricted_dims_full,
                           d_number_of_restricted_dims, d, number_of_cells, n);
        gpuErrchk(hipPeekAtLastError());

        // 2) Inclusive scan of the counts gives each point's slice of the
        //    packed neighborhood buffer; the last element is the total size.
        inclusive_scan(d_number_of_neighbors_full, d_neighborhood_end_position_full,
                       restricted_dims * number_of_cells * n);//todo bad to use n here
        gpuErrchk(hipPeekAtLastError());

        int total_size_of_neighborhoods;
        gpuErrchk(hipMemcpy(&total_size_of_neighborhoods,
                            d_neighborhood_end_position_full + restricted_dims * number_of_cells * n - 1,
                            sizeof(int), hipMemcpyDeviceToHost));
        int *d_neighborhoods_full = tmps->get_int_array(tmps->int_array_counter++, total_size_of_neighborhoods);

        // 3) Fill the neighborhoods and mark dense points.
        hipLaunchKernelGGL((compute_is_dense_blocks_mem), dim3(grid), dim3(block), 0, 0,
                           d_restricteds_pr_dim, d_is_dense_full, d_points_full, d_number_of_points,
                           d_neighborhood_end_position_full, d_neighborhoods_full, neighborhood_size, d_X,
                           d_restricted_dims_full, d_number_of_restricted_dims, F, n, num_obj, d, number_of_cells);
        gpuErrchk(hipPeekAtLastError());

        // 4) Union-find over dense neighbors -> cluster labels.
        hipLaunchKernelGGL((disjoint_set_clustering_blocks_mem), dim3(restricted_dims), dim3(number_of_threads), 0, 0,
                           d_restricteds_pr_dim, d_clustering_full, d_disjoint_set_full, d_neighborhoods_full,
                           d_number_of_neighbors_full, d_neighborhood_end_position_full, d_is_dense_full,
                           d_points_full, d_number_of_points, number_of_cells, n);
        gpuErrchk(hipDeviceSynchronize());
        gpuErrchk(hipPeekAtLastError());
    }

    delete[] h_restricteds_pr_dim;
    delete[] h_points_full;
    delete[] h_restricted_dims_full;
    delete[] h_number_of_points;
    delete[] h_number_of_restricted_dims;
}
// Alternate entry point kept for callers that used the "All" variant.
//
// The previous body was a byte-for-byte duplicate of ClusteringGPUBlocksMem
// (same staging, same kernel pipeline, same cleanup), so it now delegates to
// avoid maintaining the same 120 lines twice. Behavior and signature are
// unchanged.
void
ClusteringGPUBlocksMemAll(TmpMalloc *tmps, int *d_clustering_full, vector <vector<ScyTreeArray *>> L_pruned, float *d_X,
                          int n, int d, float neighborhood_size, float F, int num_obj, int number_of_cells) {
    ClusteringGPUBlocksMem(tmps, d_clustering_full, L_pruned, d_X, n, d, neighborhood_size, F, num_obj,
                           number_of_cells);
}
// Created by mrjakobdk on 6/11/20.
//
#include "ClusteringGpuBlocksMem.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include "../../utils/util.h"
#include "../../utils/TmpMalloc.cuh"
#include "../../structures/ScyTreeArray.h"
#define BLOCK_SIZE 1024
#define PI 3.14
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace std;
__device__
float dist_gpu_blocks_mem(int p_id, int q_id, float *X, int *subspace, int subspace_size, int d) {
float *p = &X[p_id * d];
float *q = &X[q_id * d];
double distance = 0;
for (int i = 0; i < subspace_size; i++) {
int d_i = subspace[i];
double diff = p[d_i] - q[d_i];
distance += diff * diff;
}
//printf("dinstance = %f\n", distance);
return sqrt(distance);//todo squared can be removed by sqrt(x)<=y => x<=y*y if x>=0, y>=0
}
__global__
void
compute_distances_blocks_mem(float *d_distance_matrix_full, int *d_restricteds_pr_dim, int restricted_dims,
int *d_neighborhoods_full,
int *d_number_of_neighbors_full,
float *X,
int **d_points_full, int *d_number_of_points, float neighborhood_size,
int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d, int number_of_cells,
int n) {
for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) {
for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
// printf("test-1\n");
int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest];
int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest];
int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n;
float *d_distance_matrix = d_distance_matrix_full + i_dim * number_of_cells * n * n + i_rest * n * n;
int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n;
// printf("test0\n");
for (int i = blockIdx.y; i < number_of_points; i += gridDim.y) {
int p_id = d_points[i];
for (int j = threadIdx.x; j < number_of_points; j += blockDim.x) {
int q_id = d_points[j];
if (i < j) {
float distance = dist_gpu_blocks_mem(p_id, q_id, X, subspace, subspace_size, d);
d_distance_matrix[i * number_of_points + j] = distance;
}
}
}
}
}
}
__global__
void
find_neighborhood_blocks_mem(float *d_distance_matrix_full, int *d_restricteds_pr_dim, int restricted_dims,
int *d_neighborhoods_full, int *d_number_of_neighbors_full, float *X,
int **d_points_full, int *d_number_of_points, float neighborhood_size,
int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d, int number_of_cells,
int n) {
for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) {
for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
// printf("test-1\n");
int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest];
int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest];
int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n;
float *d_distance_matrix = d_distance_matrix_full + i_dim * number_of_cells * n * n + i_rest * n * n;
int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n;
// printf("test0\n");
for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
int *d_neighborhood = &d_neighborhoods[i * number_of_points];
int number_of_neighbors = 0;
int p_id = d_points[i];
for (int j = 0; j < number_of_points; j++) {
int q_id = d_points[j];
if (p_id != q_id) {
float distance = 0;//dist_gpu_blocks(p_id, q_id, X, subspace, subspace_size, d);
if (i < j) {
distance = d_distance_matrix[i * number_of_points + j];
} else if (j < i) {
distance = d_distance_matrix[j * number_of_points + i];
}
if (neighborhood_size >= distance) {
d_neighborhood[number_of_neighbors] = j;
number_of_neighbors++;
}
}
}
d_number_of_neighbors[i] = number_of_neighbors;
}
}
}
}
//
//__global__
//void
//find_neighborhood_blocks(float *d_distance_matrix_full, int *d_restricteds_pr_dim, int restricted_dims, int *d_neighborhoods_full,
// int *d_number_of_neighbors_full,
// float *X,
// int **d_points_full, int *d_number_of_points, float neighborhood_size,
// int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d, int number_of_cells,
// int n) {
// for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) {
// for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
//
//// printf("test-1\n");
// int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
// int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
// int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest];
// int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest];
// int *d_neighborhoods = d_neighborhoods_full + i_dim * number_of_cells * n * n + i_rest * n * n;
// int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n;
//// printf("test0\n");
//
//
// for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
// int *d_neighborhood = &d_neighborhoods[i * number_of_points];
// int number_of_neighbors = 0;
// int p_id = d_points[i];
// for (int j = 0; j < number_of_points; j++) {
// int q_id = d_points[j];
// if (p_id != q_id) {
// float distance = dist_gpu_blocks(p_id, q_id, X, subspace, subspace_size, d);
// if (neighborhood_size >= distance) {
// d_neighborhood[number_of_neighbors] = j;
// number_of_neighbors++;
// }
// }
// }
// d_number_of_neighbors[i] = number_of_neighbors;
// }
// }
// }
//}
__device__
float phi_gpu_blocks_mem(int p_id, int *d_neighborhood, float neighborhood_size, int number_of_neighbors,
float *X, int *d_points, int *subspace, int subspace_size, int d) {
float sum = 0;
for (int j = 0; j < number_of_neighbors; j++) {
int q_id = d_points[d_neighborhood[j]];
if (q_id >= 0) {
float distance = dist_gpu_blocks_mem(p_id, q_id, X, subspace, subspace_size, d) / neighborhood_size;
float sq = distance * distance;
sum += (1. - sq);
}
}
return sum;
}
__device__
float gamma_gpu_blocks_mem(double n) {
if (round(n) == 1) {//todo not nice cond n==1
return 1.;
} else if (n < 1) {//todo not nice cond n==1/2
return sqrt(PI);
}
return (n - 1.) * gamma_gpu_blocks_mem(n - 1.);
}
__device__
double gamma_gpu_blocks_mem(int n) {
if (n == 2) {
return 1.;
} else if (n == 1) {
return sqrt(PI);
}
return (n / 2. - 1.) * gamma_gpu_blocks_mem(n - 2);
}
__device__
float c_gpu_blocks_mem(int subspace_size) {
float r = pow(PI, subspace_size / 2.);
//r = r / gamma_gpu_blocks(subspace_size / 2. + 1.);
r = r / gamma_gpu_blocks_mem(subspace_size + 2);
return r;
}
__device__
float alpha_gpu_blocks_mem(int subspace_size, float neighborhood_size, int n) {
float v = 1.;//todo v is missing?? what is it??
float r = 2 * n * pow(neighborhood_size, subspace_size) * c_gpu_blocks_mem(subspace_size);
r = r / (pow(v, subspace_size) * (subspace_size + 2));
return r;
}
__device__
float omega_gpu_blocks_mem(int subspace_size) {
return 2.0 / (subspace_size + 2.0);
}
__global__
void
compute_is_dense_blocks_mem(int *d_restricteds_pr_dim, bool *d_is_dense_full, int **d_points_full,
int *d_number_of_points, int *d_neighborhood_end_position_full,
int *d_neighborhoods_full, float neighborhood_size,
float *X, int **d_restricted_dims_full, int *d_number_of_restricted_dims, float F, int n,
int num_obj, int d, int number_of_cells) {//todo change name of subspace
__shared__ float p;
__shared__ unsigned int number_of_neighbors;
int i_dim = blockIdx.x;
for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest];
int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells +
i_rest];//todo not needed this is constant for each clustering
bool *d_is_dense = d_is_dense_full + i_dim * number_of_cells * n + i_rest * n;
float a = alpha_gpu_blocks_mem(subspace_size, neighborhood_size, n);
float w = omega_gpu_blocks_mem(subspace_size);
for (int i = blockIdx.y; i < number_of_points; i += gridDim.y) {
int point_index = i_dim * number_of_cells * n + i_rest * n + i;
int neighborhood_start = point_index == 0 ? 0 : d_neighborhood_end_position_full[point_index - 1];
int *d_neighborhood = &d_neighborhoods_full[neighborhood_start];
int p_id = d_points[i];
__syncthreads();
p = 0;
number_of_neighbors = 0;
__syncthreads();
for (int j = threadIdx.x; j < number_of_points; j += blockDim.x) {
if (i != j) {
int q_id = d_points[j];
float distance = dist_gpu_blocks_mem(p_id, q_id, X, subspace, subspace_size, d);
if (neighborhood_size >= distance) {
unsigned int tmp = atomicInc(&number_of_neighbors, number_of_points);
d_neighborhood[tmp] = j;//q_id;
distance /= neighborhood_size;
float sq = distance * distance;
atomicAdd(&p, (1. - sq));
}
}
}
__syncthreads();
d_is_dense[i] = p >= max(F * a, num_obj * w);
}
}
}
//
//__global__
//void
//compute_is_dense_new_blocks(int *d_restricteds_pr_dim, bool *d_is_dense_full, int **d_points_full,
// int *d_number_of_points,
// float neighborhood_size,
// float *X, int **d_restricted_dims_full, int *d_number_of_restricted_dims, float F, int n,
// int num_obj, int d, int number_of_cells) {//todo change name of subspace
//
//
// int i_dim = blockIdx.x;
// for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
// int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
// int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
// int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest];
// int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest];
// bool *d_is_dense = d_is_dense_full + i_dim * number_of_cells * n + i_rest * n;
//
//
// for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
// int p_id = d_points[i];
//// float p = phi_gpu_blocks(p_id, d_neighborhood, neighborhood_size, d_number_of_neighbors[i], X, d_points,
//// subspace, subspace_size, d);
//
// float p = 0;
//
// for (int j = 0; j < n; j++) {
// int q_id = j;
// if (p_id != q_id) {
// float distance = dist_gpu_blocks_mem(p_id, q_id, X, subspace, subspace_size, d);
// if (neighborhood_size >= distance) {
// distance = distance / neighborhood_size;
// float sq = distance * distance;
// p += (1. - sq);
// }
// }
// }
//
// float a = alpha_gpu_blocks_mem(subspace_size, neighborhood_size, n);
// float w = omega_gpu_blocks_mem(subspace_size);
//// printf("%d:%d, %f>=%f\n", p_id, subspace_size, p, max(F * a, num_obj * w));
//// printf("%d:%d, F=%f, a=%f, num_obj=%d, w=%f\n", p_id, subspace_size, F, a, num_obj, w);
// d_is_dense[i] = p >= max(F * a, num_obj * w);
// }
// }
//}
//for ref see: http://hpcg.purdue.edu/papers/Stava2011CCL.pdf
__global__
void disjoint_set_clustering_blocks_mem(int *d_restricteds_pr_dim, int *d_clustering_full, int *d_disjoint_set_full,
int *d_neighborhoods_full, int *d_number_of_neighbors_full,
int *d_neighborhood_end_position_full,
bool *d_is_dense_full, int **d_points_full, int *d_number_of_points,
int number_of_cells, int n) {
int i_dim = blockIdx.x;
for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
int *d_clustering = d_clustering_full + i_dim * n;
bool *d_is_dense = d_is_dense_full + i_dim * number_of_cells * n + i_rest * n;
int *d_disjoint_set = d_disjoint_set_full + i_dim * number_of_cells * n + i_rest * n;
int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n;
__shared__ int changed;
changed = 1;
__syncthreads();
//init
for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
if (d_is_dense[i]) {
d_disjoint_set[i] = i;
} else {
d_disjoint_set[i] = -1;
}
}
__syncthreads();
//for (int itr = 1; itr < number_of_points; itr *= 2) {
while (changed) {
//disjoint_set_pass1
__syncthreads();
changed = 0;
__syncthreads();
for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
if (!d_is_dense[i]) continue;
int root = d_disjoint_set[i];
int point_index = i_dim * number_of_cells * n + i_rest * n + i;
int neighborhood_start = point_index == 0 ? 0 : d_neighborhood_end_position_full[point_index - 1];
int *d_neighborhood = &d_neighborhoods_full[neighborhood_start];
for (int j = 0; j < d_number_of_neighbors[i]; j++) {
if (d_is_dense[d_neighborhood[j]]) {
if (d_disjoint_set[d_neighborhood[j]] < root) {
root = d_disjoint_set[d_neighborhood[j]];
atomicMax(&changed, 1);
}
}
}
d_disjoint_set[i] = root;
}
__syncthreads();
//disjoint_set_pass2
for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
int root = d_disjoint_set[i];
while (root >= 0 && root != d_disjoint_set[root]) {
root = d_disjoint_set[root];
}
d_disjoint_set[i] = root;
}
__syncthreads();
}
//gather_clustering
for (int i = threadIdx.x; i < number_of_points; i += blockDim.x) {
if (d_is_dense[i]) {
d_clustering[d_points[i]] = d_points[d_disjoint_set[i]];
} else {
d_clustering[d_points[i]] = -1;
}
}
}
}
// Counts, for every point of every restricted region, how many other points of
// the same region lie within `neighborhood_size` (distance taken over the
// region's restricted subspace via dist_gpu_blocks_mem), and writes the count
// into d_number_of_neighbors_full.
//
// Launch layout (matches the host-side launcher in this file):
//   - blockIdx.x strides over the `restricted_dims` outer entries,
//   - blockIdx.y strides over the points of the current region,
//   - threadIdx.x strides over candidate neighbors of that point.
// All per-region slices of the n-sized output arrays are addressed as
// i_dim * number_of_cells * n + i_rest * n.
__global__
void
compute_number_of_neighbors_blocks_mem(int *d_restricteds_pr_dim, int restricted_dims, int *d_number_of_neighbors_full,
                                       float *X, int **d_points_full, int *d_number_of_points, float neighborhood_size,
                                       int **d_restricted_dims_full, int *d_number_of_restricted_dims, int d,
                                       int number_of_cells, int n) {
    // Block-wide accumulator for the neighbor count of the current point.
    __shared__ int number_of_neighbors;
    for (int i_dim = blockIdx.x; i_dim < restricted_dims; i_dim += gridDim.x) {
        for (int i_rest = 0; i_rest < d_restricteds_pr_dim[i_dim]; i_rest++) {
            // Per-region views: point ids, point count, subspace dims, output slice.
            int *d_points = d_points_full[i_dim * number_of_cells + i_rest];
            int number_of_points = d_number_of_points[i_dim * number_of_cells + i_rest];
            int *subspace = d_restricted_dims_full[i_dim * number_of_cells + i_rest];
            int subspace_size = d_number_of_restricted_dims[i_dim * number_of_cells + i_rest];
            int *d_number_of_neighbors = d_number_of_neighbors_full + i_dim * number_of_cells * n + i_rest * n;
            for (int i = blockIdx.y; i < number_of_points; i += gridDim.y) {
                int p_id = d_points[i];
                // Reset the shared counter between points; the surrounding barriers
                // keep the reset from racing with the previous iteration's
                // accumulation and final read.
                __syncthreads();
                number_of_neighbors = 0;
                __syncthreads();
                for (int j = threadIdx.x; j < number_of_points; j += blockDim.x) {
                    if (i != j) {
                        int q_id = d_points[j];
                        float distance = dist_gpu_blocks_mem(p_id, q_id, X, subspace, subspace_size, d);
                        if (neighborhood_size >= distance) {
                            atomicAdd(&number_of_neighbors, 1);
                        }
                    }
                }
                __syncthreads();
                // Every thread stores the same final value; the redundant write is benign.
                d_number_of_neighbors[i] = number_of_neighbors;
            }
        }
    }
}
// Host-side driver for GPU subspace clustering ("blocks" variant backed by the
// TmpMalloc pool).  Pipeline:
//   1. flatten per-region metadata from L_pruned (device point lists, sizes,
//      restricted-dimension lists) into host arrays and upload them,
//   2. count each point's neighbors (compute_number_of_neighbors_blocks_mem),
//   3. inclusive-scan the counts into packed neighborhood end positions,
//   4. mark dense points (compute_is_dense_blocks_mem),
//   5. union dense neighbors with a disjoint set and gather cluster labels
//      into d_clustering_full (disjoint_set_clustering_blocks_mem).
// Device scratch buffers come from `tmps`; only the host-side flattening
// arrays are heap-allocated here and freed before returning.
void
ClusteringGPUBlocksMem(TmpMalloc *tmps, int *d_clustering_full, vector <vector<ScyTreeArray *>> L_pruned, float *d_X,
                       int n, int d, float neighborhood_size, float F, int num_obj, int number_of_cells) {
    tmps->reset_counters();
    int restricted_dims = L_pruned.size();
    // Flattened host views of L_pruned, indexed [i * number_of_cells + j].
    int *h_restricteds_pr_dim = new int[restricted_dims];
    int **h_points_full = new int *[restricted_dims * number_of_cells];
    int **h_restricted_dims_full = new int *[restricted_dims * number_of_cells];
    int *h_number_of_points = new int[restricted_dims * number_of_cells];
    int *h_number_of_restricted_dims = new int[restricted_dims * number_of_cells];
    int avg_number_of_points = 0;
    // NOTE(review): min_number_of_points is computed but never used below.
    int min_number_of_points = n;
    int count = 0;
    for (int i = 0; i < restricted_dims; i++) {
        h_restricteds_pr_dim[i] = L_pruned[i].size();
        for (int j = 0; j < L_pruned[i].size(); j++) {
            avg_number_of_points += L_pruned[i][j]->number_of_points;
            count++;
            if (min_number_of_points > L_pruned[i][j]->number_of_points)
                min_number_of_points = L_pruned[i][j]->number_of_points;
            // Device pointers owned by each ScyTreeArray are shared, not copied.
            h_points_full[i * number_of_cells + j] = L_pruned[i][j]->d_points;
            h_restricted_dims_full[i * number_of_cells + j] = L_pruned[i][j]->d_restricted_dims;
            h_number_of_points[i * number_of_cells + j] = L_pruned[i][j]->number_of_points;
            h_number_of_restricted_dims[i * number_of_cells + j] = L_pruned[i][j]->number_of_restricted_dims;
        }
    }
    if (count > 0)
        avg_number_of_points /= count;
    avg_number_of_points = (int) avg_number_of_points;  // no-op cast: already an int
    // Upload flattened metadata into pooled device arrays.
    int *d_restricteds_pr_dim = tmps->get_int_array(tmps->int_array_counter++, restricted_dims);
    cudaMemcpy(d_restricteds_pr_dim, h_restricteds_pr_dim, restricted_dims * sizeof(int), cudaMemcpyHostToDevice);
    gpuErrchk(cudaPeekAtLastError());
    int **d_points_full = tmps->get_int_pointer_array(tmps->int_pointer_array_counter++,
                                                      restricted_dims * number_of_cells);
    int **d_restricted_dims_full = tmps->get_int_pointer_array(tmps->int_pointer_array_counter++,
                                                               restricted_dims * number_of_cells);
    int *d_number_of_points = tmps->get_int_array(tmps->int_array_counter++, restricted_dims * number_of_cells);
    int *d_number_of_restricted_dims = tmps->get_int_array(tmps->int_array_counter++,
                                                           restricted_dims * number_of_cells);
    gpuErrchk(cudaPeekAtLastError());
    cudaMemcpy(d_points_full, h_points_full, restricted_dims * number_of_cells * sizeof(int *), cudaMemcpyHostToDevice);
    cudaMemcpy(d_restricted_dims_full, h_restricted_dims_full, restricted_dims * number_of_cells * sizeof(int *),
               cudaMemcpyHostToDevice);
    cudaMemcpy(d_number_of_points, h_number_of_points, restricted_dims * number_of_cells * sizeof(int),
               cudaMemcpyHostToDevice);
    cudaMemcpy(d_number_of_restricted_dims, h_number_of_restricted_dims,
               restricted_dims * number_of_cells * sizeof(int), cudaMemcpyHostToDevice);
    gpuErrchk(cudaPeekAtLastError());
    // Per-point work buffers: one n-sized slot per (dim, cell) region.
    int *d_number_of_neighbors_full = tmps->get_int_array(tmps->int_array_counter++, n * restricted_dims *
                                                          number_of_cells);//todo number_of_points can be used instead of n
    cudaMemset(d_number_of_neighbors_full, 0, restricted_dims * number_of_cells * n * sizeof(int));
    bool *d_is_dense_full = tmps->get_bool_array(tmps->bool_array_counter++,
                                                 n * restricted_dims * number_of_cells); // number_of_points
    int *d_disjoint_set_full = tmps->get_int_array(tmps->int_array_counter++,
                                                   n * restricted_dims * number_of_cells); // number_of_points
    // cudaMemset(d_disjoint_set_full, -1, n * restricted_dims * number_of_cells * sizeof(int));
    // cudaDeviceSynchronize();
    gpuErrchk(cudaPeekAtLastError());
    int *d_neighborhood_end_position_full = tmps->get_int_array(tmps->int_array_counter++,
                                                                restricted_dims * number_of_cells * n);
    cudaMemset(d_neighborhood_end_position_full, 0, restricted_dims * number_of_cells * n * sizeof(int));
    // cudaDeviceSynchronize();
    gpuErrchk(cudaPeekAtLastError());
    int number_of_threads = min(max(1,avg_number_of_points), BLOCK_SIZE);
    if (restricted_dims > 0) {
        // printf("%d, %d\n", rrestricted_dims, max(1,avg_number_of_points));
        //compute number of neighbors
        dim3 block(min(64, max(1,avg_number_of_points)));
        dim3 grid(restricted_dims, max(1,avg_number_of_points));
        compute_number_of_neighbors_blocks_mem<<< grid, block>>>(d_restricteds_pr_dim,
                                                                 restricted_dims,
                                                                 d_number_of_neighbors_full,
                                                                 d_X, d_points_full,
                                                                 d_number_of_points,
                                                                 neighborhood_size,
                                                                 d_restricted_dims_full,
                                                                 d_number_of_restricted_dims, d,
                                                                 number_of_cells, n);
        // cudaDeviceSynchronize();
        gpuErrchk(cudaPeekAtLastError());
        //inclusive scan of number of neighbors to find indexing positions
        inclusive_scan(d_number_of_neighbors_full, d_neighborhood_end_position_full,
                       restricted_dims * number_of_cells * n);//todo bad to use n here
        // cudaDeviceSynchronize();
        gpuErrchk(cudaPeekAtLastError());
        //get/allocate memory for neighbors
        // The last scan entry is the total packed size of all neighborhoods.
        int total_size_of_neighborhoods;
        int *d_neighborhoods_full;
        cudaMemcpy(&total_size_of_neighborhoods,
                   d_neighborhood_end_position_full + restricted_dims * number_of_cells * n - 1, sizeof(int),
                   cudaMemcpyDeviceToHost);
        d_neighborhoods_full = tmps->get_int_array(tmps->int_array_counter++, total_size_of_neighborhoods);
        // cudaDeviceSynchronize();
        gpuErrchk(cudaPeekAtLastError());
        //compute is dense
        // block = dim3(min(64, avg_number_of_points));
        // grid = dim3(restricted_dims, avg_number_of_points);
        compute_is_dense_blocks_mem<<<grid, block>>>(d_restricteds_pr_dim, d_is_dense_full,
                                                     d_points_full, d_number_of_points,
                                                     d_neighborhood_end_position_full,
                                                     d_neighborhoods_full, neighborhood_size,
                                                     d_X, d_restricted_dims_full,
                                                     d_number_of_restricted_dims, F, n,
                                                     num_obj, d, number_of_cells);
        // cudaDeviceSynchronize();
        gpuErrchk(cudaPeekAtLastError());
        //gather clustering from dense neighbors
        disjoint_set_clustering_blocks_mem<<< restricted_dims, number_of_threads>>>(d_restricteds_pr_dim,
                                                                                    d_clustering_full,
                                                                                    d_disjoint_set_full,
                                                                                    d_neighborhoods_full,
                                                                                    d_number_of_neighbors_full,
                                                                                    d_neighborhood_end_position_full,
                                                                                    d_is_dense_full, d_points_full,
                                                                                    d_number_of_points,
                                                                                    number_of_cells, n);
        cudaDeviceSynchronize();
        gpuErrchk(cudaPeekAtLastError());
    }
    delete[] h_restricteds_pr_dim;
    delete[] h_points_full;
    delete[] h_restricted_dims_full;
    delete[] h_number_of_points;
    delete[] h_number_of_restricted_dims;
}
// Host-side driver for GPU subspace clustering.  NOTE(review): this function
// is textually identical to ClusteringGPUBlocksMem in this file; it is kept as
// a separate entry point.  Pipeline:
//   1. flatten per-region metadata from L_pruned and upload it,
//   2. count each point's neighbors (compute_number_of_neighbors_blocks_mem),
//   3. inclusive-scan the counts into packed neighborhood end positions,
//   4. mark dense points (compute_is_dense_blocks_mem),
//   5. union dense neighbors with a disjoint set and gather cluster labels
//      into d_clustering_full (disjoint_set_clustering_blocks_mem).
// Device scratch comes from `tmps`; host flattening arrays are freed here.
void
ClusteringGPUBlocksMemAll(TmpMalloc *tmps, int *d_clustering_full, vector <vector<ScyTreeArray *>> L_pruned, float *d_X,
                          int n, int d, float neighborhood_size, float F, int num_obj, int number_of_cells) {
    tmps->reset_counters();
    int restricted_dims = L_pruned.size();
    // Flattened host views of L_pruned, indexed [i * number_of_cells + j].
    int *h_restricteds_pr_dim = new int[restricted_dims];
    int **h_points_full = new int *[restricted_dims * number_of_cells];
    int **h_restricted_dims_full = new int *[restricted_dims * number_of_cells];
    int *h_number_of_points = new int[restricted_dims * number_of_cells];
    int *h_number_of_restricted_dims = new int[restricted_dims * number_of_cells];
    int avg_number_of_points = 0;
    // NOTE(review): min_number_of_points is computed but never used below.
    int min_number_of_points = n;
    int count = 0;
    for (int i = 0; i < restricted_dims; i++) {
        h_restricteds_pr_dim[i] = L_pruned[i].size();
        for (int j = 0; j < L_pruned[i].size(); j++) {
            avg_number_of_points += L_pruned[i][j]->number_of_points;
            count++;
            if (min_number_of_points > L_pruned[i][j]->number_of_points)
                min_number_of_points = L_pruned[i][j]->number_of_points;
            // Device pointers owned by each ScyTreeArray are shared, not copied.
            h_points_full[i * number_of_cells + j] = L_pruned[i][j]->d_points;
            h_restricted_dims_full[i * number_of_cells + j] = L_pruned[i][j]->d_restricted_dims;
            h_number_of_points[i * number_of_cells + j] = L_pruned[i][j]->number_of_points;
            h_number_of_restricted_dims[i * number_of_cells + j] = L_pruned[i][j]->number_of_restricted_dims;
        }
    }
    if (count > 0)
        avg_number_of_points /= count;
    avg_number_of_points = (int) avg_number_of_points;  // no-op cast: already an int
    // Upload flattened metadata into pooled device arrays.
    int *d_restricteds_pr_dim = tmps->get_int_array(tmps->int_array_counter++, restricted_dims);
    cudaMemcpy(d_restricteds_pr_dim, h_restricteds_pr_dim, restricted_dims * sizeof(int), cudaMemcpyHostToDevice);
    gpuErrchk(cudaPeekAtLastError());
    int **d_points_full = tmps->get_int_pointer_array(tmps->int_pointer_array_counter++,
                                                      restricted_dims * number_of_cells);
    int **d_restricted_dims_full = tmps->get_int_pointer_array(tmps->int_pointer_array_counter++,
                                                               restricted_dims * number_of_cells);
    int *d_number_of_points = tmps->get_int_array(tmps->int_array_counter++, restricted_dims * number_of_cells);
    int *d_number_of_restricted_dims = tmps->get_int_array(tmps->int_array_counter++,
                                                           restricted_dims * number_of_cells);
    gpuErrchk(cudaPeekAtLastError());
    cudaMemcpy(d_points_full, h_points_full, restricted_dims * number_of_cells * sizeof(int *), cudaMemcpyHostToDevice);
    cudaMemcpy(d_restricted_dims_full, h_restricted_dims_full, restricted_dims * number_of_cells * sizeof(int *),
               cudaMemcpyHostToDevice);
    cudaMemcpy(d_number_of_points, h_number_of_points, restricted_dims * number_of_cells * sizeof(int),
               cudaMemcpyHostToDevice);
    cudaMemcpy(d_number_of_restricted_dims, h_number_of_restricted_dims,
               restricted_dims * number_of_cells * sizeof(int), cudaMemcpyHostToDevice);
    gpuErrchk(cudaPeekAtLastError());
    // Per-point work buffers: one n-sized slot per (dim, cell) region.
    int *d_number_of_neighbors_full = tmps->get_int_array(tmps->int_array_counter++, n * restricted_dims *
                                                          number_of_cells);//todo number_of_points can be used instead of n
    cudaMemset(d_number_of_neighbors_full, 0, restricted_dims * number_of_cells * n * sizeof(int));
    bool *d_is_dense_full = tmps->get_bool_array(tmps->bool_array_counter++,
                                                 n * restricted_dims * number_of_cells); // number_of_points
    int *d_disjoint_set_full = tmps->get_int_array(tmps->int_array_counter++,
                                                   n * restricted_dims * number_of_cells); // number_of_points
    // cudaMemset(d_disjoint_set_full, -1, n * restricted_dims * number_of_cells * sizeof(int));
    // cudaDeviceSynchronize();
    gpuErrchk(cudaPeekAtLastError());
    int *d_neighborhood_end_position_full = tmps->get_int_array(tmps->int_array_counter++,
                                                                restricted_dims * number_of_cells * n);
    cudaMemset(d_neighborhood_end_position_full, 0, restricted_dims * number_of_cells * n * sizeof(int));
    // cudaDeviceSynchronize();
    gpuErrchk(cudaPeekAtLastError());
    int number_of_threads = min(max(1,avg_number_of_points), BLOCK_SIZE);
    if (restricted_dims > 0) {
        // printf("%d, %d\n", rrestricted_dims, max(1,avg_number_of_points));
        //compute number of neighbors
        dim3 block(min(64, max(1,avg_number_of_points)));
        dim3 grid(restricted_dims, max(1,avg_number_of_points));
        compute_number_of_neighbors_blocks_mem<<< grid, block>>>(d_restricteds_pr_dim,
                                                                 restricted_dims,
                                                                 d_number_of_neighbors_full,
                                                                 d_X, d_points_full,
                                                                 d_number_of_points,
                                                                 neighborhood_size,
                                                                 d_restricted_dims_full,
                                                                 d_number_of_restricted_dims, d,
                                                                 number_of_cells, n);
        // cudaDeviceSynchronize();
        gpuErrchk(cudaPeekAtLastError());
        //inclusive scan of number of neighbors to find indexing positions
        inclusive_scan(d_number_of_neighbors_full, d_neighborhood_end_position_full,
                       restricted_dims * number_of_cells * n);//todo bad to use n here
        // cudaDeviceSynchronize();
        gpuErrchk(cudaPeekAtLastError());
        //get/allocate memory for neighbors
        // The last scan entry is the total packed size of all neighborhoods.
        int total_size_of_neighborhoods;
        int *d_neighborhoods_full;
        cudaMemcpy(&total_size_of_neighborhoods,
                   d_neighborhood_end_position_full + restricted_dims * number_of_cells * n - 1, sizeof(int),
                   cudaMemcpyDeviceToHost);
        d_neighborhoods_full = tmps->get_int_array(tmps->int_array_counter++, total_size_of_neighborhoods);
        // cudaDeviceSynchronize();
        gpuErrchk(cudaPeekAtLastError());
        //compute is dense
        // block = dim3(min(64, avg_number_of_points));
        // grid = dim3(restricted_dims, avg_number_of_points);
        compute_is_dense_blocks_mem<<<grid, block>>>(d_restricteds_pr_dim, d_is_dense_full,
                                                     d_points_full, d_number_of_points,
                                                     d_neighborhood_end_position_full,
                                                     d_neighborhoods_full, neighborhood_size,
                                                     d_X, d_restricted_dims_full,
                                                     d_number_of_restricted_dims, F, n,
                                                     num_obj, d, number_of_cells);
        // cudaDeviceSynchronize();
        gpuErrchk(cudaPeekAtLastError());
        //gather clustering from dense neighbors
        disjoint_set_clustering_blocks_mem<<< restricted_dims, number_of_threads>>>(d_restricteds_pr_dim,
                                                                                    d_clustering_full,
                                                                                    d_disjoint_set_full,
                                                                                    d_neighborhoods_full,
                                                                                    d_number_of_neighbors_full,
                                                                                    d_neighborhood_end_position_full,
                                                                                    d_is_dense_full, d_points_full,
                                                                                    d_number_of_points,
                                                                                    number_of_cells, n);
        cudaDeviceSynchronize();
        gpuErrchk(cudaPeekAtLastError());
    }
    delete[] h_restricteds_pr_dim;
    delete[] h_points_full;
    delete[] h_restricted_dims_full;
    delete[] h_number_of_points;
    delete[] h_number_of_restricted_dims;
}
09229cbca6535a9409d160c92089622acaaea2e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 -emit-llvm %s -o - -fcuda-is-device -triple ptx32-unknown-unknown | FileCheck %s
#include "../SemaCUDA/cuda.h"
// CHECK: @i = global
__device__ int i;
// CHECK: @j = addrspace(1) global
__constant__ int j;
// CHECK: @k = addrspace(4) global
__shared__ int k;
__device__ void foo() {
// CHECK: load i32* @i
i++;
// CHECK: load i32* bitcast (i32 addrspace(1)* @j to i32*)
j++;
// CHECK: load i32* bitcast (i32 addrspace(4)* @k to i32*)
k++;
}
| 09229cbca6535a9409d160c92089622acaaea2e4.cu | // RUN: %clang_cc1 -emit-llvm %s -o - -fcuda-is-device -triple ptx32-unknown-unknown | FileCheck %s
#include "../SemaCUDA/cuda.h"
// CHECK: @i = global
__device__ int i;
// CHECK: @j = addrspace(1) global
__constant__ int j;
// CHECK: @k = addrspace(4) global
__shared__ int k;
__device__ void foo() {
// CHECK: load i32* @i
i++;
// CHECK: load i32* bitcast (i32 addrspace(1)* @j to i32*)
j++;
// CHECK: load i32* bitcast (i32 addrspace(4)* @k to i32*)
k++;
}
|
6c7f11e3655533520cc0917b14d17c9db44d88e4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
// Prints the queried properties of one GPU to stdout.
// Fix: hipDeviceProp_t's totalGlobalMem, sharedMemPerBlock, memPitch,
// totalConstMem and textureAlignment are size_t; printing them with "%d"
// is undefined behavior and truncates on 64-bit builds, so they now use "%zu".
void printDeviceProp(hipDeviceProp_t prop) {
    printf("Device Name : %s.\n", prop.name);
    printf("totalGlobalMem : %zu.\n", prop.totalGlobalMem);
    printf("sharedMemPerBlock : %zu.\n", prop.sharedMemPerBlock);
    printf("regsPerBlock : %d.\n", prop.regsPerBlock);
    printf("warpSize : %d.\n", prop.warpSize);
    printf("memPitch : %zu.\n", prop.memPitch);
    printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
    printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
    printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    printf("totalConstMem : %zu.\n", prop.totalConstMem);
    printf("major.minor : %d.%d.\n", prop.major, prop.minor);
    printf("clockRate : %d.\n", prop.clockRate);
    printf("textureAlignment : %zu.\n", prop.textureAlignment);
    printf("deviceOverlap : %d.\n", prop.deviceOverlap);
    printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
}
int main() {
int count;
// cuda
hipGetDeviceCount(&count);
if (count == 0) {
printf("There is no device.\n");
return -1;
}
for (int i = 0; i< count; i += 1) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
//
printDeviceProp(prop);
if (hipGetDeviceProperties(&prop, i) == hipSuccess) {
if (prop.major >= 1) {
break;
}
}
}
} | 6c7f11e3655533520cc0917b14d17c9db44d88e4.cu | #include <stdio.h>
#include <cuda_runtime.h>
// Prints the queried properties of one GPU to stdout.
// Fix: cudaDeviceProp's totalGlobalMem, sharedMemPerBlock, memPitch,
// totalConstMem and textureAlignment are size_t; printing them with "%d"
// is undefined behavior and truncates on 64-bit builds, so they now use "%zu".
void printDeviceProp(cudaDeviceProp prop) {
    printf("Device Name : %s.\n", prop.name);
    printf("totalGlobalMem : %zu.\n", prop.totalGlobalMem);
    printf("sharedMemPerBlock : %zu.\n", prop.sharedMemPerBlock);
    printf("regsPerBlock : %d.\n", prop.regsPerBlock);
    printf("warpSize : %d.\n", prop.warpSize);
    printf("memPitch : %zu.\n", prop.memPitch);
    printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
    printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
    printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    printf("totalConstMem : %zu.\n", prop.totalConstMem);
    printf("major.minor : %d.%d.\n", prop.major, prop.minor);
    printf("clockRate : %d.\n", prop.clockRate);
    printf("textureAlignment : %zu.\n", prop.textureAlignment);
    printf("deviceOverlap : %d.\n", prop.deviceOverlap);
    printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
}
int main() {
int count;
// 取得支持 cuda 的设备数目
cudaGetDeviceCount(&count);
if (count == 0) {
printf("There is no device.\n");
return -1;
}
for (int i = 0; i< count; i += 1) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
//打印设备信息
printDeviceProp(prop);
if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if (prop.major >= 1) {
break;
}
}
}
} |
65e27ba3c3c469259a29dddc699dd7de66b85033.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<math.h>
#include "parse.cu"
#include <time.h>
#define TILE_WIDTH 16
__constant__ short int Gx[][3]={{-1,0,1},{-2,0,+2},{-1,0,+1}};
__constant__ short int Gy[][3]={{1,2,1},{0,0,0},{-1,-2,-1}};
// Sobel edge detector: one thread per pixel of a rows x columns 8-bit image.
// Convolves the 3x3 constant-memory masks Gx/Gy around each interior pixel,
// uses |sumx| + |sumy| (L1 norm) as the gradient magnitude, clamps it to 255,
// and writes the inverted value (255 - sum), so strong edges come out dark.
// Border pixels have no full 3x3 neighborhood and get magnitude 0 (output 255).
__global__ void sobel_edge_detection(unsigned char *device_p, unsigned char *device_edge,int rows, int columns)
{
    // Global pixel coordinates (TILE_WIDTH x TILE_WIDTH thread blocks).
    int ty=threadIdx.y;
    int tx=threadIdx.x;
    int by=blockIdx.y;
    int bx=blockIdx.x;
    int row=by*TILE_WIDTH+ty;
    int column=bx*TILE_WIDTH+tx;
    int sumx;
    int sumy;
    int sum;
    int I;
    int J;
    // Guard: the ceil-div grid may overshoot the image on the last tiles.
    if(row<rows && column<columns)
    {
    if(row==0 || row==rows-1|| column==0 || column==columns-1)
    sum=0;
    else{
    // Horizontal gradient.
    sumx=0;
    for(I=-1;I<=1;I++)
    for(J=-1;J<=1;J++){
    sumx+=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
    }
    // Vertical gradient.
    sumy=0;
    for(I=-1;I<=1;I++)
    for(J=-1;J<=1;J++){
    sumy+=(int)(*(device_p+(I+row)*columns+J+column))*(Gy[I+1][J+1]);
    }
    // L1 approximation of the gradient magnitude.
    sum=abs(sumx)+abs(sumy);
    }
    if(sum>255)sum=255;
    *(device_edge+row*columns+column)=255-sum;
    }
}
// Sobel edge-detection driver (HIP).  Usage: <prog> <input> <output>.
// Reads a text image header (magic string, width/height, color depth --
// presumably a PGM-style format; pixel I/O is delegated to the parse.cu
// helpers fetch_image_data/copy_fetch_data), runs the Sobel kernel on the
// GPU, and writes header + result to the output file.
// NOTE(review): header is only char[3] and fscanf uses an unbounded "%s" --
// a longer magic string overflows the buffer ("%2s" would bound it).
// NOTE(review): the second hipMalloc and all hipMemcpy calls are unchecked;
// t_start/t_end are declared but never used.
int main(int argc, char **argv)
{
    FILE *bmpinput;
    FILE *bmpoutput;
    unsigned long int num_rows;
    unsigned long int num_columns;
    unsigned long int num_colors;
    unsigned char *host_p;       // host copy of the input pixels
    unsigned char *device_p;     // device copy of the input pixels
    unsigned char *host_edge;    // host buffer for the result
    unsigned char *device_edge;  // device buffer for the result
    hipError_t err;
    char header[3];
    clock_t t_start;
    clock_t t_end;
    if(argc!=3)
    {
    printf("<usuage> agruments mismatched\n");
    exit(0);
    }
    if((bmpinput=fopen(argv[1],"rb"))==NULL)
    {
    printf("could not open input bitmap file\n");
    exit(0);
    }
    if((bmpoutput=fopen(argv[2],"wb"))==NULL)
    {
    printf("could not open output bitmap file\n");
    exit(0);
    }
    //saving header information
    fscanf(bmpinput,"%s",header);
    fscanf(bmpinput,"%lu %lu",&num_columns, &num_rows);
    fscanf(bmpinput,"%lu",&num_colors);
    printf("num_columns:%lu\n",num_columns);
    printf("num_rows:%lu\n",num_rows);
    printf("num_colors:%lu\n",num_colors);
    fprintf(bmpoutput,"%s\n",header);
    fprintf(bmpoutput,"%lu %lu\n",num_columns,num_rows);
    fprintf(bmpoutput,"%lu\n",num_colors);
    host_p=(unsigned char *)malloc(sizeof(unsigned char)*num_rows*num_columns);
    fetch_image_data(bmpinput,host_p,num_rows,num_columns);
    //print_read_data(p,num_rows,num_columns);
    //memory allocation for host to store the final result
    host_edge=(unsigned char *)malloc(sizeof(unsigned char)*num_rows*num_columns);
    //memory allocation for device pointer used by kernel
    err=hipMalloc((void**)&device_p,sizeof(unsigned char)*num_rows*num_columns);
    if (err != hipSuccess) {
    printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__,__LINE__);
    return 0;
    }
    hipMalloc((void**)&device_edge,sizeof(unsigned char)*num_rows*num_columns);
    hipMemcpy(device_p,host_p,sizeof(unsigned char)*num_rows*num_columns,hipMemcpyHostToDevice);
    //grid and thread block allocation
    // Ceil-div grid: one thread per pixel, odd sizes covered by the kernel's guard.
    dim3 dimGrid( (num_columns-1) / TILE_WIDTH + 1 , (num_rows-1) / TILE_WIDTH + 1,1);
    dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
    //Launching kernel
    hipLaunchKernelGGL(( sobel_edge_detection), dim3(dimGrid),dim3(dimBlock), 0, 0, device_p,device_edge,num_rows,num_columns);
    hipDeviceSynchronize();
    hipMemcpy(host_edge,device_edge,sizeof(unsigned char)*num_rows*num_columns,hipMemcpyDeviceToHost);
    print_read_data(host_edge,num_rows,num_columns);
    copy_fetch_data(bmpoutput,host_edge,num_rows,num_columns);
    hipFree(device_p);
    hipFree(device_edge);
    free(host_p);
    free(host_edge);
    fclose(bmpinput);
    fclose(bmpoutput);
    return 0;
}
| 65e27ba3c3c469259a29dddc699dd7de66b85033.cu | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<math.h>
#include "parse.cu"
#include <time.h>
#define TILE_WIDTH 16
__constant__ short int Gx[][3]={{-1,0,1},{-2,0,+2},{-1,0,+1}};
__constant__ short int Gy[][3]={{1,2,1},{0,0,0},{-1,-2,-1}};
// Sobel edge detector: one thread per pixel of a rows x columns 8-bit image.
// Interior pixels get the L1 gradient magnitude |Gx*I| + |Gy*I| from the 3x3
// constant-memory masks, clamped to 255; the stored value is inverted
// (255 - magnitude), so strong edges come out dark.  Border pixels keep
// magnitude 0 (output 255) since they lack a full neighborhood.
__global__ void sobel_edge_detection(unsigned char *device_p, unsigned char *device_edge, int rows, int columns)
{
    // Global pixel coordinates for this thread (TILE_WIDTH x TILE_WIDTH blocks).
    int r = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int c = blockIdx.x * TILE_WIDTH + threadIdx.x;
    // The ceil-div grid may overshoot the image on the last tiles.
    if (r >= rows || c >= columns)
        return;
    int magnitude = 0;
    if (r != 0 && r != rows - 1 && c != 0 && c != columns - 1) {
        // Accumulate both directional gradients in a single sweep of the
        // 3x3 neighborhood (the two separate loops of the original fold
        // into one; the sums are identical).
        int gx = 0;
        int gy = 0;
        for (int di = -1; di <= 1; di++) {
            for (int dj = -1; dj <= 1; dj++) {
                int pixel = (int) device_p[(r + di) * columns + (c + dj)];
                gx += pixel * Gx[di + 1][dj + 1];
                gy += pixel * Gy[di + 1][dj + 1];
            }
        }
        // L1 approximation of the gradient magnitude.
        magnitude = abs(gx) + abs(gy);
    }
    if (magnitude > 255) magnitude = 255;
    device_edge[r * columns + c] = 255 - magnitude;
}
/*
 * Sobel edge-detection driver (CUDA).  Usage: <prog> <input> <output>.
 * Reads a text image header (magic string, width/height, color depth --
 * pixel I/O is delegated to the parse.cu helpers fetch_image_data /
 * copy_fetch_data), runs the Sobel kernel on the GPU, and writes
 * header + result to the output file.
 *
 * Fixes relative to the original:
 *  - fscanf uses "%2s" so the magic string cannot overflow char header[3],
 *  - the second cudaMalloc is checked like the first,
 *  - cudaGetLastError() is checked after the kernel launch,
 *  - the unused clock_t locals were removed.
 */
int main(int argc, char **argv)
{
    FILE *bmpinput;
    FILE *bmpoutput;
    unsigned long int num_rows;
    unsigned long int num_columns;
    unsigned long int num_colors;
    unsigned char *host_p;       /* host copy of the input pixels   */
    unsigned char *device_p;     /* device copy of the input pixels */
    unsigned char *host_edge;    /* host buffer for the result      */
    unsigned char *device_edge;  /* device buffer for the result    */
    cudaError_t err;
    char header[3];
    if(argc!=3)
    {
        printf("<usuage> agruments mismatched\n");
        exit(0);
    }
    if((bmpinput=fopen(argv[1],"rb"))==NULL)
    {
        printf("could not open input bitmap file\n");
        exit(0);
    }
    if((bmpoutput=fopen(argv[2],"wb"))==NULL)
    {
        printf("could not open output bitmap file\n");
        exit(0);
    }
    //saving header information; "%2s" bounds the read to header[3] (2 chars + NUL)
    fscanf(bmpinput,"%2s",header);
    fscanf(bmpinput,"%lu %lu",&num_columns, &num_rows);
    fscanf(bmpinput,"%lu",&num_colors);
    printf("num_columns:%lu\n",num_columns);
    printf("num_rows:%lu\n",num_rows);
    printf("num_colors:%lu\n",num_colors);
    fprintf(bmpoutput,"%s\n",header);
    fprintf(bmpoutput,"%lu %lu\n",num_columns,num_rows);
    fprintf(bmpoutput,"%lu\n",num_colors);
    host_p=(unsigned char *)malloc(sizeof(unsigned char)*num_rows*num_columns);
    fetch_image_data(bmpinput,host_p,num_rows,num_columns);
    //memory allocation for host to store the final result
    host_edge=(unsigned char *)malloc(sizeof(unsigned char)*num_rows*num_columns);
    //memory allocation for device pointers used by kernel (both checked)
    err=cudaMalloc((void**)&device_p,sizeof(unsigned char)*num_rows*num_columns);
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__,__LINE__);
        return 0;
    }
    err=cudaMalloc((void**)&device_edge,sizeof(unsigned char)*num_rows*num_columns);
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__,__LINE__);
        return 0;
    }
    cudaMemcpy(device_p,host_p,sizeof(unsigned char)*num_rows*num_columns,cudaMemcpyHostToDevice);
    //grid and thread block allocation: ceil-div grid, one thread per pixel
    dim3 dimGrid( (num_columns-1) / TILE_WIDTH + 1 , (num_rows-1) / TILE_WIDTH + 1,1);
    dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
    //Launching kernel
    sobel_edge_detection<<<dimGrid,dimBlock>>>(device_p,device_edge,num_rows,num_columns);
    //surface launch-configuration errors before blocking on completion
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__,__LINE__);
        return 0;
    }
    cudaDeviceSynchronize();
    cudaMemcpy(host_edge,device_edge,sizeof(unsigned char)*num_rows*num_columns,cudaMemcpyDeviceToHost);
    print_read_data(host_edge,num_rows,num_columns);
    copy_fetch_data(bmpoutput,host_edge,num_rows,num_columns);
    cudaFree(device_p);
    cudaFree(device_edge);
    free(host_p);
    free(host_edge);
    fclose(bmpinput);
    fclose(bmpoutput);
    return 0;
}
|
f8b08fd92112e7640d52407302d6cc82f0a6dcc8.hip | // !!! This is a file automatically generated by hipify!!!
//Linear image interpolatin using GPU and Optimization with register memory
//Here every pixel is stored in register memory and then image is inteplated
#include<Windows.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<cuda.h>
#include <stdio.h>
#include<time.h>
#include <iostream>
#include<device_atomic_functions.h>
#include<hip/device_functions.h>
// 2x nearest-neighbour image upscale: each thread caches one source pixel in
// a register (`a`) and replicates it into a 2x2 block of `new_image`.
// The indexing implies rows x cols is the size of the *output* image and the
// source `image` is (rows/2) x (cols/2) -- TODO confirm against the launcher.
// Fix: added a bounds guard -- the original performed out-of-bounds reads and
// writes whenever the launch grid did not divide the image exactly.
__global__ void lneigh(unsigned char *new_image, const unsigned char *image, int rows, int cols)
{
    unsigned char a;
    int col = threadIdx.x + blockDim.x*blockIdx.x;   // x in the half-resolution source grid
    int row = threadIdx.y + blockDim.y*blockIdx.y;   // y in the half-resolution source grid
    // Guard against threads beyond the source image.
    if (row >= rows / 2 || col >= cols / 2)
        return;
    int index = row*cols / 2 + col;                  // source pixel ((row*cols)/2 + col)
    row *= 2;                                        // switch to output coordinates
    col *= 2;
    a = image[index];                                // single global read, kept in a register
    new_image[(row)*cols + col] = a;
    new_image[(row)*cols + col + 1] = a;
    new_image[(row + 1)*cols + col] = a;
    new_image[(row + 1)*cols + col + 1] = a;
}
| f8b08fd92112e7640d52407302d6cc82f0a6dcc8.cu | //Linear image interpolatin using GPU and Optimization with register memory
//Here every pixel is stored in register memory and then image is inteplated
#include<Windows.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<cuda.h>
#include <stdio.h>
#include<time.h>
#include <iostream>
#include<device_atomic_functions.h>
#include<device_functions.h>
// 2x nearest-neighbour image upscale: each thread caches one source pixel in
// a register (`a`) and replicates it into a 2x2 block of `new_image`.
// The indexing implies rows x cols is the size of the *output* image and the
// source `image` is (rows/2) x (cols/2) -- TODO confirm against the launcher.
// NOTE(review): there is no bounds check on row/col; the launch configuration
// must cover the image exactly or this reads/writes out of bounds.
__global__ void lneigh(unsigned char *new_image, const unsigned char *image, int rows, int cols)
{
    unsigned char a;
    int col = threadIdx.x + blockDim.x*blockIdx.x;   // x in the half-resolution source grid
    int row = threadIdx.y + blockDim.y*blockIdx.y;   // y in the half-resolution source grid
    int index = row*cols / 2 + col;                  // source pixel ((row*cols)/2 + col)
    row *= 2;                                        // switch to output coordinates
    col *= 2;
    a = image[index];                                // single global read, kept in a register
    new_image[(row)*cols + col] = a;
    new_image[(row)*cols + col + 1] = a;
    new_image[(row + 1)*cols + col] = a;
    new_image[(row + 1)*cols + col + 1] = a;
}
|
d000cc36894b013894c16ea90dc7828d9493c304.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
#define SIGNED_SATURATE_MAX 2047
#define SIGNED_SATURATE_MIN -2048
#define UNSIGNED_SATURATE_MAX 4095
#define SIGNED_8BIT_SATURATE_MAX 127
#define SIGNED_8BIT_SATURATE_MIN -128
#define UNSIGNED_8BIT_SATURATE_MAX 255
namespace caffe {
// GEMM: C = alpha * op(A) * op(B) + beta * C for caffe's row-major matrices
// (op(A) is M x K, op(B) is K x N, C is M x N).  hipBLAS is column-major
// ("fortran order"), so the call swaps the operand order -- effectively
// computing C^T = op(B)^T * op(A)^T -- which yields the row-major C directly.
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const float alpha, const float* A, const float* B, const float beta,
    float* C) {
  // Note that cublas follows fortran order.
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  hipblasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// Double-precision twin of the float specialization above.
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const double alpha, const double* A, const double* B, const double beta,
    double* C) {
  // Note that cublas follows fortran order.
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  hipblasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// GEMV: y = alpha * op(A) * x + beta * y for a row-major M x N matrix A.
// Because hipBLAS is column-major, the transpose flag is inverted
// (CblasNoTrans -> HIPBLAS_OP_T): row-major A viewed column-major is A^T.
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const float alpha, const float* A, const float* x,
    const float beta, float* y) {
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
      A, N, x, 1, &beta, y, 1));
}
// Double-precision twin of the float specialization above.
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const double alpha, const double* A, const double* x,
    const double beta, double* y) {
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
      A, N, x, 1, &beta, y, 1));
}
// AXPY: Y = alpha * X + Y over N elements (thin hipBLAS wrapper).
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
    float* Y) {
  CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Double-precision twin of the float specialization above.
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
    double* Y) {
  CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Copies N bytes from X to Y; hipMemcpyDefault lets the runtime infer the
// transfer direction from the pointers.  Self-copies are skipped.
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
  if (X != Y) {
    CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault));  // NOLINT(caffe/alt_fn)
  }
}
// SCAL: X = alpha * X over N elements (thin hipBLAS wrappers).
template <>
void caffe_gpu_scal<float, float>(const int N, const float alpha, float *X) {
  CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// Mixed-type specialization: a double alpha scaling float data.
template <>
void caffe_gpu_scal<float, double>(const int N, const double alpha, float *X) {
  // workaround to feed input/output scale in GPU mode:
  // narrow alpha to float so hipblasSscal can take its address.
  float alph = alpha;
  CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alph, X, 1));
}
template <>
void caffe_gpu_scal<double, double>(const int N, const double alpha, double *X) {
  CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// Stream-aware variant: runs the scal on `str`, then restores the handle's
// previous stream so callers see no state change.
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
    hipStream_t str) {
  hipStream_t initial_stream;
  CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
  CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
  CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
  CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
// Double-precision twin of the stream-aware variant above.
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
    hipStream_t str) {
  hipStream_t initial_stream;
  CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
  CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
  CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
  CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
// AXPBY: Y = alpha * X + beta * Y, composed from scal (Y *= beta) then axpy.
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
    const float beta, float* Y) {
  caffe_gpu_scal<float>(N, beta, Y);
  caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
    const double beta, double* Y) {
  caffe_gpu_scal<double>(N, beta, Y);
  caffe_gpu_axpy<double>(N, alpha, X, Y);
}
// DOT: *out = sum_i x[i] * y[i].
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
    float* out) {
  CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
    double * out) {
  CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
// ASUM: *y = sum_i |x[i]|.
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
  CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
  CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
// SCALE: y = alpha * x (out-of-place), composed from copy then scal.
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
    float* y) {
  CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
  CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
    double* y) {
  CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
  CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Element-wise rounding / truncation / saturation kernels operating in place
// on y. CUDA_KERNEL_LOOP is a project macro (presumably a grid-stride loop
// over [0, n) -- defined elsewhere in caffe; TODO confirm).
template <typename Dtype>
__global__ void round_kernel(const int n, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = rint(y[index]);  // round to nearest, ties to even
}
}
template <>
void caffe_gpu_round<float>(const int N, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( round_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, y);
}
template <>
void caffe_gpu_round<double>(const int N, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( round_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, y);
}
// Truncate toward zero via conversion through int (can overflow for values
// outside int range -- callers are expected to stay in range).
template <typename Dtype>
__global__ void int_kernel(const int n, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = int(y[index]);
}
}
template <>
void caffe_gpu_int<float>(const int N, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( int_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, y);
}
template <>
void caffe_gpu_int<double>(const int N, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( int_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, y);
}
// Clamp to the 12-bit signed range [-2048, 2047] (per the file's
// SIGNED_SATURATE_MIN/MAX #defines).
template <typename Dtype>
__global__ void signed_saturate_kernel(const int n, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
if(y[index] > SIGNED_SATURATE_MAX)
y[index] = SIGNED_SATURATE_MAX;
if(y[index] < SIGNED_SATURATE_MIN)
y[index] = SIGNED_SATURATE_MIN;
}
}
template <>
void caffe_gpu_signed_saturate<float>(const int N, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( signed_saturate_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, y);
}
template <>
void caffe_gpu_signed_saturate<double>(const int N, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( signed_saturate_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, y);
}
// Clamp to the 8-bit signed range [-128, 127].
template <typename Dtype>
__global__ void signed_8bit_saturate_kernel(const int n, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
if(y[index] > SIGNED_8BIT_SATURATE_MAX)
y[index] = SIGNED_8BIT_SATURATE_MAX;
if(y[index] < SIGNED_8BIT_SATURATE_MIN)
y[index] = SIGNED_8BIT_SATURATE_MIN;
}
}
template <>
void caffe_gpu_signed_8bit_saturate<float>(const int N, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( signed_8bit_saturate_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, y);
}
template <>
void caffe_gpu_signed_8bit_saturate<double>(const int N, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( signed_8bit_saturate_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, y);
}
// Clamp to the 12-bit unsigned range [0, 4095].
template <typename Dtype>
__global__ void unsigned_saturate_kernel(const int n, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
if(y[index] > UNSIGNED_SATURATE_MAX)
y[index] = UNSIGNED_SATURATE_MAX;
if(y[index] < 0)
y[index] = 0;
}
}
template <>
void caffe_gpu_unsigned_saturate<float>(const int N, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( unsigned_saturate_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, y);
}
template <>
void caffe_gpu_unsigned_saturate<double>(const int N, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( unsigned_saturate_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, y);
}
// Clamp to the 8-bit unsigned range [0, 255].
template <typename Dtype>
__global__ void unsigned_8bit_saturate_kernel(const int n, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
if(y[index] > UNSIGNED_8BIT_SATURATE_MAX)
y[index] = UNSIGNED_8BIT_SATURATE_MAX;
if(y[index] < 0)
y[index] = 0;
}
}
template <>
void caffe_gpu_unsigned_8bit_saturate<float>(const int N, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( unsigned_8bit_saturate_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, y);
}
template <>
void caffe_gpu_unsigned_8bit_saturate<double>(const int N, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( unsigned_8bit_saturate_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, y);
}
// Fill Y with a constant.
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
// Sets all N elements of Y to alpha. The alpha == 0 case is special-cased to
// hipMemset, which is valid because all-zero bytes represent 0 for the
// instantiated types (int, float, double).
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N));  // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
// Y[i] += alpha, in place.
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
// Element-wise binary ops: y = a OP b (y may alias a or b since access is
// purely element-wise).
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
// No guard against b[index] == 0; division by zero yields inf/nan per IEEE.
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
// Element-wise unary math ops: y = f(a). These rely on CUDA's overloaded
// device math functions (abs/exp/log/pow/sqrt) resolving per Dtype.
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
// Natural logarithm; non-positive inputs yield -inf/nan per IEEE.
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
// y = a ** alpha, with a scalar exponent shared by all elements.
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
// sign: -1/0/+1 of x; sgnbit: sign bit of x. Both generated by a project
// macro that emits the kernel and host wrapper for float and double.
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
// Fills r with n raw 32-bit random integers.
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
// Uniform on [a, b): generate on [0, 1) then affinely rescale in place,
// skipping the scale/shift when they are identity.
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
// NOTE(review): cuRAND's GenerateNormal requires n to be even; presumably
// hiprandGenerateNormal inherits that restriction -- verify callers never
// pass odd n.
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| d000cc36894b013894c16ea90dc7828d9493c304.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
#define SIGNED_SATURATE_MAX 2047
#define SIGNED_SATURATE_MIN -2048
#define UNSIGNED_SATURATE_MAX 4095
#define SIGNED_8BIT_SATURATE_MAX 127
#define SIGNED_8BIT_SATURATE_MIN -128
#define UNSIGNED_8BIT_SATURATE_MAX 255
namespace caffe {
// C = alpha * op(A) * op(B) + beta * C for row-major matrices.
// cuBLAS is column-major, so the call computes C^T = op(B)^T * op(A)^T by
// swapping the operand order and dimensions; the column-major C^T it writes
// is exactly the row-major C the caller expects.
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// y = alpha * op(A) * x + beta * y for a row-major M x N matrix A.
// The transpose flag is inverted because cuBLAS sees row-major A as A^T.
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
// Y += alpha * X (BLAS axpy).
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// N-byte copy; cudaMemcpyDefault lets the driver infer the direction, so
// this works for any host/device pointer combination. Self-copy is a no-op.
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault));  // NOLINT(caffe/alt_fn)
}
}
// In-place X *= alpha on the shared cuBLAS handle. The two-type
// specializations allow a double-valued scale factor applied to float data.
template <>
void caffe_gpu_scal<float, float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<float, double>(const int N, const double alpha, float *X) {
// workaround to feed input/output scale in GPU mode
float alph = alpha;  // narrow to float; cublasSscal takes a float scalar
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alph, X, 1));
}
template <>
void caffe_gpu_scal<double, double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// Stream-aware scal: run on `str`, then restore the handle's prior stream.
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
cudaStream_t str) {
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
cudaStream_t str) {
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
// Y = alpha*X + beta*Y, composed from scal (Y *= beta) then axpy (Y += alpha*X).
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
// *out = dot(x, y); cuBLAS writes the scalar result through `out`.
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
// *y = sum_i |x[i]| (BLAS asum).
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
// y = alpha * x, implemented as copy followed by in-place scal.
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Element-wise rounding / truncation / saturation kernels operating in place
// on y. CUDA_KERNEL_LOOP is a project macro (presumably a grid-stride loop
// over [0, n) -- defined elsewhere in caffe; TODO confirm).
template <typename Dtype>
__global__ void round_kernel(const int n, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = rint(y[index]);  // round to nearest, ties to even
}
}
template <>
void caffe_gpu_round<float>(const int N, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
round_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, y);
}
template <>
void caffe_gpu_round<double>(const int N, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
round_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, y);
}
// Truncate toward zero via conversion through int (can overflow for values
// outside int range -- callers are expected to stay in range).
template <typename Dtype>
__global__ void int_kernel(const int n, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = int(y[index]);
}
}
template <>
void caffe_gpu_int<float>(const int N, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
int_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, y);
}
template <>
void caffe_gpu_int<double>(const int N, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
int_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, y);
}
// Clamp to the 12-bit signed range [-2048, 2047] (per the #defines above).
template <typename Dtype>
__global__ void signed_saturate_kernel(const int n, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
if(y[index] > SIGNED_SATURATE_MAX)
y[index] = SIGNED_SATURATE_MAX;
if(y[index] < SIGNED_SATURATE_MIN)
y[index] = SIGNED_SATURATE_MIN;
}
}
template <>
void caffe_gpu_signed_saturate<float>(const int N, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
signed_saturate_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, y);
}
template <>
void caffe_gpu_signed_saturate<double>(const int N, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
signed_saturate_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, y);
}
// Clamp to the 8-bit signed range [-128, 127].
template <typename Dtype>
__global__ void signed_8bit_saturate_kernel(const int n, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
if(y[index] > SIGNED_8BIT_SATURATE_MAX)
y[index] = SIGNED_8BIT_SATURATE_MAX;
if(y[index] < SIGNED_8BIT_SATURATE_MIN)
y[index] = SIGNED_8BIT_SATURATE_MIN;
}
}
template <>
void caffe_gpu_signed_8bit_saturate<float>(const int N, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
signed_8bit_saturate_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, y);
}
template <>
void caffe_gpu_signed_8bit_saturate<double>(const int N, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
signed_8bit_saturate_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, y);
}
// Clamp to the 12-bit unsigned range [0, 4095].
template <typename Dtype>
__global__ void unsigned_saturate_kernel(const int n, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
if(y[index] > UNSIGNED_SATURATE_MAX)
y[index] = UNSIGNED_SATURATE_MAX;
if(y[index] < 0)
y[index] = 0;
}
}
template <>
void caffe_gpu_unsigned_saturate<float>(const int N, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
unsigned_saturate_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, y);
}
template <>
void caffe_gpu_unsigned_saturate<double>(const int N, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
unsigned_saturate_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, y);
}
// Clamp to the 8-bit unsigned range [0, 255].
template <typename Dtype>
__global__ void unsigned_8bit_saturate_kernel(const int n, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
if(y[index] > UNSIGNED_8BIT_SATURATE_MAX)
y[index] = UNSIGNED_8BIT_SATURATE_MAX;
if(y[index] < 0)
y[index] = 0;
}
}
template <>
void caffe_gpu_unsigned_8bit_saturate<float>(const int N, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
unsigned_8bit_saturate_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, y);
}
template <>
void caffe_gpu_unsigned_8bit_saturate<double>(const int N, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
unsigned_8bit_saturate_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, y);
}
// Fill Y with a constant.
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
// Sets all N elements of Y to alpha. The alpha == 0 case is special-cased to
// cudaMemset, which is valid because all-zero bytes represent 0 for the
// instantiated types (int, float, double).
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N));  // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
// Y[i] += alpha, in place.
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
// Element-wise binary ops: y = a OP b (y may alias a or b since access is
// purely element-wise).
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
// No guard against b[index] == 0; division by zero yields inf/nan per IEEE.
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
// Element-wise unary math ops: y = f(a). These rely on CUDA's overloaded
// device math functions (abs/exp/log/pow/sqrt) resolving per Dtype.
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
// Natural logarithm; non-positive inputs yield -inf/nan per IEEE.
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
// y = a ** alpha, with a scalar exponent shared by all elements.
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
// sign: -1/0/+1 of x; sgnbit: sign bit of x. Both generated by a project
// macro that emits the kernel and host wrapper for float and double.
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
// Fills r with n raw 32-bit random integers.
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
// Uniform on [a, b): generate on [0, 1) then affinely rescale in place,
// skipping the scale/shift when they are identity.
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
// NOTE(review): curandGenerateNormal requires n to be even per the cuRAND
// docs -- verify callers never pass odd n.
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
780db28f92e1edc8a5d404ff1275e3cac1d6a6d9.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zgeisai_16.cu, normal z -> d, Sun Nov 20 20:20:42 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_d
#define REAL
#define BLOCKSIZE 16
#define WARP_SIZE 16
#define WRP 16
#define WRQ 4
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#if (TORCH_HIP_VERSION >= 7000)
// Forward substitution for the j-th dense WARP_SIZE x WARP_SIZE (here 16x16,
// per the #defines above) lower-triangular system, solved entirely in
// registers: lane `idn` holds two row entries (rows idn and idn+WARP_SIZE,
// hence rB[2]/rA[2]) and the pivot row's solution value is broadcast with
// __shfl. Only the leading N = sizes[j] rows are active. Body compiles to a
// no-op unless REAL is defined and __CUDA_ARCH__ >= 300 (shuffle support).
// NOTE(review): legacy mask-less __shfl (removed on CUDA 9+/Volta in favor
// of __shfl_sync) and implicit lockstep of 16 lanes are assumed here --
// verify against the targeted toolchain/architecture.
__device__
void dtrsv_lower_16kernel_general(double *dA, double *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;  // system index
int idn = threadIdx.x;                        // lane within the system
double rB[ 2 ];
double rA[ 2 ];
int n;
int k;
int N = sizes[j];
dA += (j)*WARP_SIZE*WARP_SIZE;  // advance to this system's matrix
dB += (j)*WARP_SIZE;            // and its right-hand side
// Read B to regs.
#pragma unroll
for (n = 0; n < 2; n++)
rB[n] = dB[n*WARP_SIZE+idn];
// Triangular solve in regs.
#pragma unroll
for (k = 0; k < N; k++)
{
#pragma unroll
for (n = 0; n < 2; n++)
rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];  // divide pivot row by diagonal
double top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);  // broadcast x[k]
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn > k)
rB[n] -= (top*rA[n]);  // eliminate column k from rows below
}
// Drop B to dev mem.
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < N)
dB[n*WARP_SIZE+idn] = rB[n];
#endif
}
__device__
// Backward substitution for one small dense upper-triangular system per
// block: dB <- inv(dA) * dB for system j, active dimension N = sizes[j].
// Mirror image of dtrsv_lower_16kernel_general: iterates columns from N-1
// down to 0 and eliminates rows ABOVE the diagonal.
// NOTE(review): same half-warp / __shfl-width caveat as the lower variant.
void dtrsv_upper_16kernel_general(double *dA, double *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;   // system handled by this block
    int idn = threadIdx.x;                         // lane id within the system
    double rB[ 2 ];   // register tile of the right-hand side
    double rA[ 2 ];   // register tile of the current column of dA
    int n;
    int N = sizes[j];
    dA += (j)*WARP_SIZE*WARP_SIZE;   // jump to system j's matrix
    dB += (j)*WARP_SIZE;             // jump to system j's rhs
    // Read B to regs.
    #pragma unroll
    for (n = 0; n < 2; n++)
        rB[n] = dB[n*WARP_SIZE+idn];
    // Triangular solve in regs, last column first.
    #pragma unroll
    for (int k = N-1; k > -1; k--)
    {
        #pragma unroll
        for (n = 0; n < 2; n++)
            rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
        // Lane holding diagonal (k,k) solves x_k, then broadcasts it.
        if (k%WARP_SIZE == idn)
            rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
        double top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);
        // Eliminate x_k from every row above k.
        #pragma unroll
        for (n = 0; n < 2; n++)
            if (n*WARP_SIZE+idn < k)
                rB[n] -= (top*rA[n]);
    }
    // Drop B to dev mem (only the N active entries).
    #pragma unroll
    for (n = 0; n < 2; n++)
        if (n*WARP_SIZE+idn < N)
            dB[n*WARP_SIZE+idn] = rB[n];
#endif
}
__device__
// Specialization of the lower (forward-substitution) solve for system size
// N == 1: the loop bound is a compile-time constant so it fully unrolls.
// One 16-thread block handles one system; each lane holds a single rhs entry.
// Sibling kernels _2.._16 are identical except for the loop bound; see
// dtrsv_lower_16kernel_general for the algorithm description.
void dtrsv_lower_16kernel_1(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;   // system index
    int idn = threadIdx.x;                         // lane index
    double rB;   // this lane's rhs entry
    double rA;   // this lane's entry of column k
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 1; k++)
    {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn)
            rB /= rA;                          // lane k solves x_k
        double top = __shfl(rB, k%WARP_SIZE); // broadcast x_k
        if ( idn > k)
            rB -= (top*rA);                   // eliminate below the diagonal
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_2(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 2; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_3(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 3; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_4(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 4; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_5(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 5; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_6(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 6; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_7(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 7; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_8(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 8; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_9(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 9; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_10(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 10; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_11(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 11; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_12(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 12; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_13(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 13; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_14(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 14; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_15(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 15; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
// Specialization of the lower (forward-substitution) solve for the maximum
// supported size N == 16 (the full BLOCKSIZE).  Fully unrolled; one
// 16-thread block per system, one rhs entry per lane.  See
// dtrsv_lower_16kernel_general for the algorithm description.
void dtrsv_lower_16kernel_16(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;   // system index
    int idn = threadIdx.x;                         // lane index
    double rB;   // this lane's rhs entry
    double rA;   // this lane's entry of column k
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 16; k++)
    {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn)
            rB /= rA;                          // lane k solves x_k
        double top = __shfl(rB, k%WARP_SIZE); // broadcast x_k
        if ( idn > k)
            rB -= (top*rA);                   // eliminate below the diagonal
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}
__global__
// Dispatcher: for each system j (one per block), selects the fully-unrolled
// forward-substitution kernel matching its size N = sizes[j] so the inner
// loop is a compile-time constant; sizes outside 1..16 fall back to the
// general (register-tiled) variant.
void dtrsv_lower_16kernel_switch(double *dA, double *dB, int *sizes, int num_rows )
{
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    if (j < num_rows) {
        int N = sizes[j];
        switch( N ) {
            case  1:
                dtrsv_lower_16kernel_1( dA, dB ); break;
            case  2:
                dtrsv_lower_16kernel_2( dA, dB ); break;
            case  3:
                dtrsv_lower_16kernel_3( dA, dB ); break;
            case  4:
                dtrsv_lower_16kernel_4( dA, dB ); break;
            case  5:
                dtrsv_lower_16kernel_5( dA, dB ); break;
            case  6:
                dtrsv_lower_16kernel_6( dA, dB ); break;
            case  7:
                dtrsv_lower_16kernel_7( dA, dB ); break;
            case  8:
                dtrsv_lower_16kernel_8( dA, dB ); break;
            case  9:
                dtrsv_lower_16kernel_9( dA, dB ); break;
            case  10:
                dtrsv_lower_16kernel_10( dA, dB ); break;
            case  11:
                dtrsv_lower_16kernel_11( dA, dB ); break;
            case  12:
                dtrsv_lower_16kernel_12( dA, dB ); break;
            case  13:
                dtrsv_lower_16kernel_13( dA, dB ); break;
            case  14:
                dtrsv_lower_16kernel_14( dA, dB ); break;
            case  15:
                dtrsv_lower_16kernel_15( dA, dB ); break;
            case  16:
                dtrsv_lower_16kernel_16( dA, dB ); break;
            default:
                dtrsv_lower_16kernel_general( dA, dB, sizes ); break;
        }
    }
}
__device__
// Specialization of the upper (backward-substitution) solve for system size
// N == 1; the loop bound is a compile-time constant so it fully unrolls.
// Iterates columns from N-1 down to 0 and eliminates rows ABOVE the
// diagonal.  Sibling kernels _2.._16 differ only in the loop bound; see
// dtrsv_upper_16kernel_general for the algorithm description.
void dtrsv_upper_16kernel_1(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;   // system index
    int idn = threadIdx.x;                         // lane index
    double rB;   // this lane's rhs entry
    double rA;   // this lane's entry of column k
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs, last column first.
    #pragma unroll
    for (int k = 1-1; k >-1; k--)
    {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn)
            rB /= rA;                             // lane k solves x_k
        double bottom = __shfl(rB, k%WARP_SIZE); // broadcast x_k
        if ( idn < k)
            rB -= (bottom*rA);                   // eliminate above the diagonal
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_2(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 2-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_3(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 3-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_4(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 4-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_5(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 5-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_6(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 6-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_7(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 7-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_8(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 8-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_9(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 9-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_10(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 10-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_11(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 11-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_12(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 12-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_13(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 13-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_14(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 14-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_15(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 15-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
// Specialization of the upper (backward-substitution) solve for the maximum
// supported size N == 16 (the full BLOCKSIZE).  Fully unrolled; one
// 16-thread block per system, one rhs entry per lane.  See
// dtrsv_upper_16kernel_general for the algorithm description.
void dtrsv_upper_16kernel_16(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;   // system index
    int idn = threadIdx.x;                         // lane index
    double rB;   // this lane's rhs entry
    double rA;   // this lane's entry of column k
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs, last column first.
    #pragma unroll
    for (int k = 16-1; k >-1; k--)
    {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn)
            rB /= rA;                             // lane k solves x_k
        double bottom = __shfl(rB, k%WARP_SIZE); // broadcast x_k
        if ( idn < k)
            rB -= (bottom*rA);                   // eliminate above the diagonal
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}
__global__
// Dispatcher for the upper-triangular (backward-substitution) solves: per
// system j (one per block), selects the fully-unrolled kernel matching
// N = sizes[j]; sizes outside 1..16 fall back to the general variant.
void dtrsv_upper_16kernel_switch(double *dA, double *dB, int *sizes, int num_rows )
{
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    if (j < num_rows) {
        int N = sizes[j];
        switch( N ) {
            case  1:
                dtrsv_upper_16kernel_1( dA, dB ); break;
            case  2:
                dtrsv_upper_16kernel_2( dA, dB ); break;
            case  3:
                dtrsv_upper_16kernel_3( dA, dB ); break;
            case  4:
                dtrsv_upper_16kernel_4( dA, dB ); break;
            case  5:
                dtrsv_upper_16kernel_5( dA, dB ); break;
            case  6:
                dtrsv_upper_16kernel_6( dA, dB ); break;
            case  7:
                dtrsv_upper_16kernel_7( dA, dB ); break;
            case  8:
                dtrsv_upper_16kernel_8( dA, dB ); break;
            case  9:
                dtrsv_upper_16kernel_9( dA, dB ); break;
            case  10:
                dtrsv_upper_16kernel_10( dA, dB ); break;
            case  11:
                dtrsv_upper_16kernel_11( dA, dB ); break;
            case  12:
                dtrsv_upper_16kernel_12( dA, dB ); break;
            case  13:
                dtrsv_upper_16kernel_13( dA, dB ); break;
            case  14:
                dtrsv_upper_16kernel_14( dA, dB ); break;
            case  15:
                dtrsv_upper_16kernel_15( dA, dB ); break;
            case  16:
                dtrsv_upper_16kernel_16( dA, dB ); break;
            default:
                dtrsv_upper_16kernel_general( dA, dB, sizes ); break;
        }
    }
}
// initialize arrays with zero
__global__ void
magma_dgpumemzero_16kernel(
    double * d,
    int n,
    int dim_x,
    int dim_y )
{
    // Each block clears one dim_x-by-dim_y tile of d; thread idx walks the
    // fast index, the loop walks the slow one.
    // NOTE(review): the intra-tile offset uses j*dim_y (not j*dim_x), kept
    // exactly as in the original; both call sites pass dim_x == WARP_SIZE so
    // the addressing is consistent there — confirm before reusing elsewhere.
    const int tile = blockIdx.y * gridDim.x + blockIdx.x;
    const int lane = threadIdx.x;
    if ( tile >= n || lane >= dim_x ) {
        return;
    }
    double *dst = d + tile*dim_x*dim_y;
    for ( int r = 0; r < dim_y; r++ ) {
        dst[ r*dim_y + lane ] = MAGMA_D_MAKE( 0.0, 0.0 );
    }
}
// For each row j of the pattern matrix M (one block per row, thread i per
// entry), record the row's metadata for the lower-triangular ISAI setup:
//   sizes[j]                     <- nnz of row j
//   locations[j*WARP_SIZE + i]   <- column indices of row j (i < nnz)
//   rhs[j*WARP_SIZE]             <- 1, i.e. the unit vector e_1
// Assumes nnz(row j) <= WARP_SIZE; use the _trunc_ variant otherwise.
__global__ void
magma_dlocations_lower_16kernel(
    magma_int_t n,
    magma_index_t *row,
    magma_index_t *col,
    double *val,
    magma_index_t *sizes,
    magma_index_t *locations,
    double *trisystems,
    double *rhs )
{
    int i = threadIdx.x;
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    if( j >= n ){
        return;
    }
    int start = row[j];
    int end = row[j+1];
    int count = end-start;
    // Thread 0 writes the scalar per-row outputs.
    if( i == 0 ){
        sizes[j] = count;
        rhs[ j*WARP_SIZE ] = MAGMA_D_ONE;
    }
    if ( i<count ){
        locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
    }
}// kernel
// Like magma_dlocations_lower_16kernel, but rows with more than BLOCKSIZE
// nonzeros are truncated to the LAST BLOCKSIZE entries of the row — for a
// lower-triangular matrix these are the columns closest to the diagonal.
__global__ void
magma_dlocations_trunc_lower_16kernel(
    magma_int_t n,
    magma_index_t *row,
    magma_index_t *col,
    double *val,
    magma_index_t *sizes,
    magma_index_t *locations,
    double *trisystems,
    double *rhs )
{
    int i = threadIdx.x;
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    if( j >= n ){
        return;
    }
    int start = row[j];
    int end = row[j+1];
    int count = end-start;
    // normal case
    if( count <= BLOCKSIZE ){ // normal case
        if( i == 0 ){
            sizes[j] = count;
            rhs[ j*WARP_SIZE ] = MAGMA_D_ONE;
        }
        if ( i<count ){
            locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
        }
    }
    else {
        // truncate in this row to the blocksize,
        // take only the 16 elements close to the main diagonal into account
        count = BLOCKSIZE;
        if (i == 0) {
            sizes[j] = count;
            rhs[ j*WARP_SIZE ] = MAGMA_D_ONE;
        }
        // all BLOCKSIZE threads write; indices are the row's tail entries
        locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ];
    }
}// kernel
// Upper-triangular counterpart of magma_dlocations_lower_16kernel: records
// per-row nnz and column pattern, and sets the rhs to the LAST unit vector
// e_count (rhs[j*WARP_SIZE + count-1] = 1), as needed for the backward solve.
// Assumes nnz(row j) <= WARP_SIZE; use the _trunc_ variant otherwise.
__global__ void
magma_dlocations_upper_16kernel(
    magma_int_t n,
    magma_index_t *row,
    magma_index_t *col,
    double *val,
    magma_index_t *sizes,
    magma_index_t *locations,
    double *trisystems,
    double *rhs )
{
    int i = threadIdx.x;
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    if( j >= n ){
        return;
    }
    int start = row[j];
    int end = row[j+1];
    int count = end-start;
    // Thread 0 writes the scalar per-row outputs.
    if( i == 0 ){
        sizes[j] = count;
        rhs[ j*WARP_SIZE+count-1 ] = MAGMA_D_ONE;
    }
    if ( i<count ){
        locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
    }
}// kernel
// Like magma_dlocations_upper_16kernel, but rows with more than BLOCKSIZE
// nonzeros are truncated to the FIRST BLOCKSIZE entries of the row — for an
// upper-triangular matrix these are the columns closest to the diagonal.
__global__ void
magma_dlocations_trunc_upper_16kernel(
    magma_int_t n,
    magma_index_t *row,
    magma_index_t *col,
    double *val,
    magma_index_t *sizes,
    magma_index_t *locations,
    double *trisystems,
    double *rhs )
{
    int i = threadIdx.x;
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    if( j >= n ){
        return;
    }
    int start = row[j];
    int end = row[j+1];
    int count = end-start;
    // normal case
    if( count <= BLOCKSIZE ){ // normal case
        if( i == 0 ){
            sizes[j] = count;
            rhs[ j*WARP_SIZE+count-1 ] = MAGMA_D_ONE;
        }
        if ( i<count ){
            locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
        }
    }
    else {
        // truncate in this row to the blocksize,
        // take only the 16 elements close to the main diagonal into account
        count = BLOCKSIZE;
        if (i == 0) {
            sizes[j] = count;
            rhs[ j*WARP_SIZE+count-1 ] = MAGMA_D_ONE;
        }
        // all BLOCKSIZE threads write; indices are the row's head entries
        locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
    }
}// kernel
// Gathers the dense triangular systems for one chunk of rows: one thread per
// pattern row (i = global row index, ii = index within the current chunk).
// For each pattern entry j of row i it merges L's CSR row
// locations[i*WARP_SIZE + j] against the row's own sparsity pattern
// (two-cursor merge over sorted column indices) and copies matching values
// of L into the dense tile trisystems[ii*WARP_SIZE^2 + j*WARP_SIZE + ...].
// Pattern positions absent from L are simply skipped — the tile was zeroed
// beforehand by magma_dgpumemzero_16kernel.
__global__ void
magma_dfilltrisystems_16kernel(
    magma_int_t offset,
    magma_int_t limit,
    magma_index_t *row,
    magma_index_t *col,
    double *val,
    magma_index_t *sizes,
    magma_index_t *locations,
    double *trisystems,
    double *rhs )
{
    int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset;   // global pattern row
    int ii = (blockDim.x * blockIdx.x + threadIdx.x);          // row within chunk
    if ( ii>=limit ){
        return;
    }
    //if ( i<offset ){
    //    return;
    //}
    for( int j=0; j<sizes[ i ]; j++ ){// no need for first
        int k = row[ locations[ j+i*WARP_SIZE ] ];   // cursor into L's CSR row
        int l = i*WARP_SIZE;                         // cursor into the pattern
        int idx = 0;                                 // dense row index in the tile
        while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done
            if( locations[ l ] == col[k] ){ //match
                // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
                trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ]
                        = val[ k ];
                k++;
                l++;
                idx++;
            } else if( col[k] < locations[ l ] ){// need to check next element
                k++;
            } else { // element does not exist, i.e. l < LC.col[k]
                // printf("increment l\n");
                l++; // check next elment in the sparsity pattern
                idx++; // leave this element equal zero
            }
        }
    }
}// kernel
// Scatters the solved right-hand sides back into the preconditioner: for
// system j (one block per row, one thread per entry),
//   val[row[j]+i] = rhs[j*WARP_SIZE+i]   for i < sizes[j].
//
// Fix: bounds-check j BEFORE reading sizes[j].  The launch grid is built
// with ceiling division, so blocks with j >= n do exist, and the original
// order (sizes[j] read first) performed an out-of-bounds global read for
// those blocks.
__global__ void
magma_dbackinsert_16kernel(
    magma_int_t n,
    magma_index_t *row,
    magma_index_t *col,
    double *val,
    magma_index_t *sizes,
    double *rhs )
{
    int i = threadIdx.x;
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    if( j >= n ){
        return;
    }
    int end = sizes[j];   // safe: j is in range here
    if ( i>=end ){
        return;
    }
    val[row[j]+i] = rhs[j*WARP_SIZE+i];
}// kernel
#endif
/**
Purpose
-------
    This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_d_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_d_matrix*
SPAI preconditioner CSR col-major
@param[out]
sizes magma_int_t*
Number of Elements that are replaced.
@param[out]
locations magma_int_t*
Array indicating the locations.
@param[out]
trisystems double*
trisystems
@param[out]
rhs double*
right-hand sides
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_disaigenerator_16_gpu(
    magma_uplo_t uplotype,
    magma_trans_t transtype,
    magma_diag_t diagtype,
    magma_d_matrix L,
    magma_d_matrix *M,
    magma_index_t *sizes,
    magma_index_t *locations,
    double *trisystems,
    double *rhs,
    magma_queue_t queue )
{
    magma_int_t info = 0;
#if (TORCH_HIP_VERSION >= 7000)
    magma_int_t arch = magma_getdevice_arch();
    hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
    // routine 1: one WARP_SIZE-thread block per row of M; the grid is spread
    // over (x, y, z) because each grid dimension is capped at 65535.
    int r1bs1 = WARP_SIZE;
    int r1bs2 = 1;
    int r1dg1 = min( int( sqrt( double( M->num_rows ))), 65535 );
    int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535);
    int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 );
    dim3 r1block( r1bs1, r1bs2, 1 );
    dim3 r1grid( r1dg1, r1dg2, r1dg3 );
    // NOTE(review): r2block/r2grid are computed but not used by any launch
    // below — confirm whether this is leftover configuration.
    int r2bs1 = WARP_SIZE;
    int r2bs2 = 1;
    int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 );
    int r2dg2 = 1;
    int r2dg3 = 1;
    dim3 r2block( r2bs1, r2bs2, 1 );
    dim3 r2grid( r2dg1, r2dg2, r2dg3 );
    // routine 2/3 fill config: 1D grid over one 32000-row chunk.
    int r3bs1 = WARP_SIZE;
    int r3bs2 = 1;
    int r3dg1 = magma_ceildiv( 32000, r2bs1 );
    int r3dg2 = 1;
    int r3dg3 = 1;
    dim3 r3block( r3bs1, r3bs2, 1 );
    dim3 r3grid( r3dg1, r3dg2, r3dg3 );
    int recursive = magma_ceildiv( M->num_rows, 32000 );
    if (arch >= 300) {
        // Zero the rhs workspace, then record per-row sizes/pattern and unit
        // right-hand sides for the requested triangle.
        hipLaunchKernelGGL(( magma_dgpumemzero_16kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
            rhs, L.num_rows, WARP_SIZE, 1);
        if (uplotype == MagmaLower) {
            hipLaunchKernelGGL(( magma_dlocations_lower_16kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
                M->num_rows,
                M->drow,
                M->dcol,
                M->dval,
                sizes,
                locations,
                trisystems,
                rhs );
        }
        else {
            hipLaunchKernelGGL(( magma_dlocations_upper_16kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
                M->num_rows,
                M->drow,
                M->dcol,
                M->dval,
                sizes,
                locations,
                trisystems,
                rhs );
        }
        // Process the rows in chunks of (at most) 32000 systems so the
        // trisystems workspace can be reused: zero it, gather the dense
        // systems from L, then solve them with the size-dispatched kernels.
        for( int z=0; z<recursive; z++ ){
            int limit = min(32000, L.num_rows-32000*z);
            hipLaunchKernelGGL(( magma_dgpumemzero_16kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
                trisystems, limit, WARP_SIZE, WARP_SIZE );
            hipLaunchKernelGGL(( magma_dfilltrisystems_16kernel), dim3(r3grid), dim3(r3block), 0, queue->cuda_stream() ,
                32000*z,
                limit,
                L.drow,
                L.dcol,
                L.dval,
                sizes,
                locations,
                trisystems,
                rhs );
            // routine 2: per-system triangular solves (rhs/sizes advanced to
            // the current chunk).
            if (uplotype == MagmaLower) {
                hipLaunchKernelGGL(( dtrsv_lower_16kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
                    trisystems,
                    rhs+32000*16*z,
                    sizes+32000*z,
                    limit );
            }
            else {
                hipLaunchKernelGGL(( dtrsv_upper_16kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
                    trisystems,
                    rhs+32000*16*z,
                    sizes+32000*z,
                    limit );
            }
        }
        // routine 3: scatter the solved rhs vectors back into M's values.
        hipLaunchKernelGGL(( magma_dbackinsert_16kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
            M->num_rows,
            M->drow,
            M->dcol,
            M->dval,
            sizes,
            rhs );
    }
    else {
        info = MAGMA_ERR_NOT_SUPPORTED;
    }
#else
    // CUDA < 7000
    printf( "%% error: ISAI preconditioner requires CUDA > 6.0.\n" );
    info = MAGMA_ERR_NOT_SUPPORTED;
#endif
    return info;
}
| 780db28f92e1edc8a5d404ff1275e3cac1d6a6d9.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zgeisai_16.cu, normal z -> d, Sun Nov 20 20:20:42 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_d
#define REAL
#define BLOCKSIZE 16
#define WARP_SIZE 16
#define WRP 16
#define WRQ 4
#include <cuda.h> // for CUDA_VERSION
#if (CUDA_VERSION >= 7000)
__device__
void dtrsv_lower_16kernel_general(double *dA, double *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB[ 2 ];
double rA[ 2 ];
int n;
int k;
int N = sizes[j];
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
#pragma unroll
for (n = 0; n < 2; n++)
rB[n] = dB[n*WARP_SIZE+idn];
// Triangular solve in regs.
#pragma unroll
for (k = 0; k < N; k++)
{
#pragma unroll
for (n = 0; n < 2; n++)
rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
double top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn > k)
rB[n] -= (top*rA[n]);
}
// Drop B to dev mem.
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < N)
dB[n*WARP_SIZE+idn] = rB[n];
#endif
}
__device__
void dtrsv_upper_16kernel_general(double *dA, double *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB[ 2 ];
double rA[ 2 ];
int n;
int N = sizes[j];
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
#pragma unroll
for (n = 0; n < 2; n++)
rB[n] = dB[n*WARP_SIZE+idn];
// Triangular solve in regs.
#pragma unroll
for (int k = N-1; k > -1; k--)
{
#pragma unroll
for (n = 0; n < 2; n++)
rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
double top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < k)
rB[n] -= (top*rA[n]);
}
// Drop B to dev mem.
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < N)
dB[n*WARP_SIZE+idn] = rB[n];
#endif
}
__device__
void dtrsv_lower_16kernel_1(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 1; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_2(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 2; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_3(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 3; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_4(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 4; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_5(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 5; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_6(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 6; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_7(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 7; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_8(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 8; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_9(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 9; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_10(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 10; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_11(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 11; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_12(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 12; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_13(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 13; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_14(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 14; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_15(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 15; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_lower_16kernel_16(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 16; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
// Dispatch kernel for the lower-triangular solves: each block j (flattened
// 2D grid, one triangular system per block) selects the fixed-size forward
// substitution matching its system size sizes[j] in 1..16; any other size
// falls back to the general register-blocked variant.
// Launch with blockDim.x == WARP_SIZE.
__global__
void dtrsv_lower_16kernel_switch(double *dA, double *dB, int *sizes, int num_rows )
{
int j = blockIdx.y * gridDim.x + blockIdx.x;
if (j < num_rows) {
int N = sizes[j];
switch( N ) {
case 1:
dtrsv_lower_16kernel_1( dA, dB ); break;
case 2:
dtrsv_lower_16kernel_2( dA, dB ); break;
case 3:
dtrsv_lower_16kernel_3( dA, dB ); break;
case 4:
dtrsv_lower_16kernel_4( dA, dB ); break;
case 5:
dtrsv_lower_16kernel_5( dA, dB ); break;
case 6:
dtrsv_lower_16kernel_6( dA, dB ); break;
case 7:
dtrsv_lower_16kernel_7( dA, dB ); break;
case 8:
dtrsv_lower_16kernel_8( dA, dB ); break;
case 9:
dtrsv_lower_16kernel_9( dA, dB ); break;
case 10:
dtrsv_lower_16kernel_10( dA, dB ); break;
case 11:
dtrsv_lower_16kernel_11( dA, dB ); break;
case 12:
dtrsv_lower_16kernel_12( dA, dB ); break;
case 13:
dtrsv_lower_16kernel_13( dA, dB ); break;
case 14:
dtrsv_lower_16kernel_14( dA, dB ); break;
case 15:
dtrsv_lower_16kernel_15( dA, dB ); break;
case 16:
dtrsv_lower_16kernel_16( dA, dB ); break;
default:
dtrsv_lower_16kernel_general( dA, dB, sizes ); break;
}
}
}
__device__
void dtrsv_upper_16kernel_1(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 1-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_2(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 2-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_3(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 3-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_4(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 4-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_5(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 5-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_6(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 6-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_7(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 7-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_8(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 8-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_9(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 9-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_10(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 10-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_11(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 11-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_12(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 12-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_13(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 13-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_14(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 14-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_15(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 15-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void dtrsv_upper_16kernel_16(double *dA, double *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
double rB;
double rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 16-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
double bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__global__
void dtrsv_upper_16kernel_switch(double *dA, double *dB, int *sizes, int num_rows )
{
int j = blockIdx.y * gridDim.x + blockIdx.x;
if (j < num_rows) {
int N = sizes[j];
switch( N ) {
case 1:
dtrsv_upper_16kernel_1( dA, dB ); break;
case 2:
dtrsv_upper_16kernel_2( dA, dB ); break;
case 3:
dtrsv_upper_16kernel_3( dA, dB ); break;
case 4:
dtrsv_upper_16kernel_4( dA, dB ); break;
case 5:
dtrsv_upper_16kernel_5( dA, dB ); break;
case 6:
dtrsv_upper_16kernel_6( dA, dB ); break;
case 7:
dtrsv_upper_16kernel_7( dA, dB ); break;
case 8:
dtrsv_upper_16kernel_8( dA, dB ); break;
case 9:
dtrsv_upper_16kernel_9( dA, dB ); break;
case 10:
dtrsv_upper_16kernel_10( dA, dB ); break;
case 11:
dtrsv_upper_16kernel_11( dA, dB ); break;
case 12:
dtrsv_upper_16kernel_12( dA, dB ); break;
case 13:
dtrsv_upper_16kernel_13( dA, dB ); break;
case 14:
dtrsv_upper_16kernel_14( dA, dB ); break;
case 15:
dtrsv_upper_16kernel_15( dA, dB ); break;
case 16:
dtrsv_upper_16kernel_16( dA, dB ); break;
default:
dtrsv_upper_16kernel_general( dA, dB, sizes ); break;
}
}
}
// Initialize a batched device array with zeros: entry i of n owns a
// dim_x x dim_y slab; threadIdx.x sweeps the dim_x direction and the j-loop
// sweeps dim_y.  Launch with blockDim.x >= dim_x.
// NOTE(review): the index uses j*dim_y rather than j*dim_x.  Both call sites
// in this file pass either dim_x == dim_y or dim_y == 1, where the two
// formulas coincide -- confirm before reusing with dim_x != dim_y.
__global__ void
magma_dgpumemzero_16kernel(
double * d,
int n,
int dim_x,
int dim_y )
{
int i = blockIdx.y * gridDim.x + blockIdx.x;
int idx = threadIdx.x;
if( i >= n ){
return;
}
if( idx >= dim_x ){
return;
}
for( int j=0; j<dim_y; j++)
d[ i*dim_x*dim_y + j*dim_y + idx ] = MAGMA_D_MAKE( 0.0, 0.0 );
}
// For each row j of the lower-triangular CSR factor: publish the row's
// nonzero count in sizes[j], copy the row's column indices into the padded
// locations slab (WARP_SIZE entries per row), and set the first entry of the
// row's RHS slot to one.  One block per row, blockDim.x == WARP_SIZE.
// Assumes every row has at most WARP_SIZE nonzeros (no truncation here; see
// the _trunc_ variant below).
__global__ void
magma_dlocations_lower_16kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
// Thread 0 publishes the row size and seeds the right-hand side.
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_D_ONE;
}
// One thread per nonzero copies the column index.
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
// Truncating variant of magma_dlocations_lower_16kernel: rows with more than
// BLOCKSIZE nonzeros keep only the last BLOCKSIZE entries of the row (the
// entries closest to the main diagonal of a lower factor).
// One block per row, blockDim.x == WARP_SIZE.
__global__ void
magma_dlocations_trunc_lower_16kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
// normal case
if( count <= BLOCKSIZE ){ // normal case
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_D_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}
else {
// truncate in this row to the blocksize,
// take only the 16 elements close to the main diagonal into account
count = BLOCKSIZE;
if (i == 0) {
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_D_ONE;
}
// NOTE(review): unlike the normal case above, this store has no i<count
// guard, so all blockDim.x threads read col[]; safe only when
// blockDim.x == BLOCKSIZE -- confirm the launch configuration.
locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ];
}
}// kernel
__global__ void
magma_dlocations_upper_16kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_D_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
__global__ void
magma_dlocations_trunc_upper_16kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
// normal case
if( count <= BLOCKSIZE ){ // normal case
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_D_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}
else {
// truncate in this row to the blocksize,
// take only the 16 elements close to the main diagonal into account
count = BLOCKSIZE;
if (i == 0) {
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_D_ONE;
}
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
// Gather the dense triangular systems from the sparse factor.  For local
// system ii (global row i = ii + offset), column j of the sparsity pattern is
// filled by merging two sorted index lists: the CSR entries of column
// locations[j + i*WARP_SIZE] (walked by k) against row i's own sparsity
// pattern (walked by l).  Matches copy val[k] into the dense
// WARP_SIZE x WARP_SIZE slab trisystems[ii]; pattern entries with no CSR
// match are left at zero (the slab is pre-zeroed by the caller).
// One thread per system; limit is the number of systems in this chunk.
__global__ void
magma_dfilltrisystems_16kernel(
magma_int_t offset,
magma_int_t limit,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs )
{
// i: global system index; ii: index within the current chunk.
int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset;
int ii = (blockDim.x * blockIdx.x + threadIdx.x);
if ( ii>=limit ){
return;
}
//if ( i<offset ){
// return;
//}
for( int j=0; j<sizes[ i ]; j++ ){// no need for first
// k: CSR entry cursor for this pattern column; l: cursor into row i's
// pattern; idx: destination row inside the dense system.
int k = row[ locations[ j+i*WARP_SIZE ] ];
int l = i*WARP_SIZE;
int idx = 0;
while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == col[k] ){ //match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ]
= val[ k ];
k++;
l++;
idx++;
} else if( col[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
// printf("increment l\n");
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}// kernel
// Scatter the solved right-hand sides back into the values array of the
// sparse factor: CSR row j receives the first sizes[j] entries of its padded
// (WARP_SIZE-strided) RHS slot.
// One block per row (flattened 2D grid), blockDim.x == WARP_SIZE.
//
// Fix: the original read sizes[j] BEFORE the j >= n bounds check, which is an
// out-of-bounds read for the padding blocks of the rounded-up 2D grid.  The
// bounds check now comes first.
__global__ void
magma_dbackinsert_16kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
double *val,
magma_index_t *sizes,
double *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
// Safe to read sizes[j] only after the bounds check above.
int end = sizes[j];
if ( i>=end ){
return;
}
val[row[j]+i] = rhs[j*WARP_SIZE+i];
}// kernel
#endif
/**
Purpose
-------
This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_d_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_d_matrix*
SPAI preconditioner CSR col-major
@param[out]
sizes magma_int_t*
Number of Elements that are replaced.
@param[out]
locations magma_int_t*
Array indicating the locations.
@param[out]
trisystems double*
trisystems
@param[out]
rhs double*
right-hand sides
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_disaigenerator_16_gpu(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_d_matrix L,
magma_d_matrix *M,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (CUDA_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
// routine 1
// Launch geometry 1: one WARP_SIZE-thread block per matrix row, with the
// grid split over up to three dimensions to respect per-dimension limits.
int r1bs1 = WARP_SIZE;
int r1bs2 = 1;
int r1dg1 = min( int( sqrt( double( M->num_rows ))), 65535 );
int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535);
int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 );
dim3 r1block( r1bs1, r1bs2, 1 );
dim3 r1grid( r1dg1, r1dg2, r1dg3 );
// Launch geometry 2: 1D grid over L's rows (currently unused below).
int r2bs1 = WARP_SIZE;
int r2bs2 = 1;
int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 );
int r2dg2 = 1;
int r2dg3 = 1;
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
// Launch geometry 3: 1D grid sized for one 32000-system chunk (one thread
// per system in magma_dfilltrisystems_16kernel).
int r3bs1 = WARP_SIZE;
int r3bs2 = 1;
int r3dg1 = magma_ceildiv( 32000, r2bs1 );
int r3dg2 = 1;
int r3dg3 = 1;
dim3 r3block( r3bs1, r3bs2, 1 );
dim3 r3grid( r3dg1, r3dg2, r3dg3 );
// Number of 32000-system chunks needed to cover all rows of M.
int recursive = magma_ceildiv( M->num_rows, 32000 );
if (arch >= 300) {
// Zero the padded RHS array, then record the per-row sizes, sparsity
// locations and unit right-hand sides for the requested triangle.
magma_dgpumemzero_16kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
rhs, L.num_rows, WARP_SIZE, 1);
if (uplotype == MagmaLower) {
magma_dlocations_lower_16kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
locations,
trisystems,
rhs );
}
else {
magma_dlocations_upper_16kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
locations,
trisystems,
rhs );
}
// chunk it recursively into batches of 32000 systems
// (the trisystems/rhs workspaces only hold one chunk at a time)
for( int z=0; z<recursive; z++ ){
int limit = min(32000, L.num_rows-32000*z);
// Zero this chunk's dense workspace, gather the triangular systems
// from the sparse factor, then solve them batched in registers.
magma_dgpumemzero_16kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
trisystems, limit, WARP_SIZE, WARP_SIZE );
magma_dfilltrisystems_16kernel<<< r3grid, r3block, 0, queue->cuda_stream() >>>(
32000*z,
limit,
L.drow,
L.dcol,
L.dval,
sizes,
locations,
trisystems,
rhs );
// routine 2
if (uplotype == MagmaLower) {
dtrsv_lower_16kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
trisystems,
rhs+32000*16*z,
sizes+32000*z,
limit );
}
else {
dtrsv_upper_16kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
trisystems,
rhs+32000*16*z,
sizes+32000*z,
limit );
}
}
// routine 3
// Scatter the solved right-hand sides back into M's value array.
magma_dbackinsert_16kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
rhs );
}
else {
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA > 6.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
|
ce1e6bedd2aa576942915572415665f5705b6f0e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "aux_fields.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *V = NULL;
hipMalloc(&V, XSIZE*YSIZE);
double *K = NULL;
hipMalloc(&K, XSIZE*YSIZE);
double gdt = 1;
double dt = 1;
double *Ax = NULL;
hipMalloc(&Ax, XSIZE*YSIZE);
double *Ay = NULL;
hipMalloc(&Ay, XSIZE*YSIZE);
double *Az = NULL;
hipMalloc(&Az, XSIZE*YSIZE);
double *px = NULL;
hipMalloc(&px, XSIZE*YSIZE);
double *py = NULL;
hipMalloc(&py, XSIZE*YSIZE);
double *pz = NULL;
hipMalloc(&pz, XSIZE*YSIZE);
double *pAx = NULL;
hipMalloc(&pAx, XSIZE*YSIZE);
double *pAy = NULL;
hipMalloc(&pAy, XSIZE*YSIZE);
double *pAz = NULL;
hipMalloc(&pAz, XSIZE*YSIZE);
double2 *GV = NULL;
hipMalloc(&GV, XSIZE*YSIZE);
double2 *EV = NULL;
hipMalloc(&EV, XSIZE*YSIZE);
double2 *GK = NULL;
hipMalloc(&GK, XSIZE*YSIZE);
double2 *EK = NULL;
hipMalloc(&EK, XSIZE*YSIZE);
double2 *GpAx = NULL;
hipMalloc(&GpAx, XSIZE*YSIZE);
double2 *GpAy = NULL;
hipMalloc(&GpAy, XSIZE*YSIZE);
double2 *GpAz = NULL;
hipMalloc(&GpAz, XSIZE*YSIZE);
double2 *EpAx = NULL;
hipMalloc(&EpAx, XSIZE*YSIZE);
double2 *EpAy = NULL;
hipMalloc(&EpAy, XSIZE*YSIZE);
double2 *EpAz = NULL;
hipMalloc(&EpAz, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
aux_fields), dim3(gridBlock),dim3(threadBlock), 0, 0, V,K,gdt,dt,Ax,Ay,Az,px,py,pz,pAx,pAy,pAz,GV,EV,GK,EK,GpAx,GpAy,GpAz,EpAx,EpAy,EpAz);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
aux_fields), dim3(gridBlock),dim3(threadBlock), 0, 0, V,K,gdt,dt,Ax,Ay,Az,px,py,pz,pAx,pAy,pAz,GV,EV,GK,EK,GpAx,GpAy,GpAz,EpAx,EpAy,EpAz);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
aux_fields), dim3(gridBlock),dim3(threadBlock), 0, 0, V,K,gdt,dt,Ax,Ay,Az,px,py,pz,pAx,pAy,pAz,GV,EV,GK,EK,GpAx,GpAy,GpAz,EpAx,EpAy,EpAz);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ce1e6bedd2aa576942915572415665f5705b6f0e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "aux_fields.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweep matrix sizes x block shapes, timing 1000 launches
// of aux_fields per configuration after a warm-up phase.
//
// Fixes relative to the original (auto-generated) code:
//  - cudaMalloc was passed the element count, not bytes (missing
//    sizeof(double) / sizeof(double2)) -> under-allocation.
//  - kernel launches are asynchronous: the timed loop now synchronizes
//    before stopping the clock (the original only measured launch overhead).
//  - device buffers are freed each iteration (the original leaked ~22
//    allocations per configuration).
int main(int argc, char **argv) {
    cudaSetDevice(0);
    if (argc < 2) {
        cerr << "usage: " << argv[0] << " <matrix_len>" << endl;
        return 1;
    }
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Buffer sizes in BYTES (the original passed raw element counts).
            size_t nbytes  = (size_t)XSIZE * YSIZE * sizeof(double);
            size_t nbytes2 = (size_t)XSIZE * YSIZE * sizeof(double2);
            double gdt = 1;
            double dt = 1;
            double *V = NULL;     cudaMalloc(&V, nbytes);
            double *K = NULL;     cudaMalloc(&K, nbytes);
            double *Ax = NULL;    cudaMalloc(&Ax, nbytes);
            double *Ay = NULL;    cudaMalloc(&Ay, nbytes);
            double *Az = NULL;    cudaMalloc(&Az, nbytes);
            double *px = NULL;    cudaMalloc(&px, nbytes);
            double *py = NULL;    cudaMalloc(&py, nbytes);
            double *pz = NULL;    cudaMalloc(&pz, nbytes);
            double *pAx = NULL;   cudaMalloc(&pAx, nbytes);
            double *pAy = NULL;   cudaMalloc(&pAy, nbytes);
            double *pAz = NULL;   cudaMalloc(&pAz, nbytes);
            double2 *GV = NULL;   cudaMalloc(&GV, nbytes2);
            double2 *EV = NULL;   cudaMalloc(&EV, nbytes2);
            double2 *GK = NULL;   cudaMalloc(&GK, nbytes2);
            double2 *EK = NULL;   cudaMalloc(&EK, nbytes2);
            double2 *GpAx = NULL; cudaMalloc(&GpAx, nbytes2);
            double2 *GpAy = NULL; cudaMalloc(&GpAy, nbytes2);
            double2 *GpAz = NULL; cudaMalloc(&GpAz, nbytes2);
            double2 *EpAx = NULL; cudaMalloc(&EpAx, nbytes2);
            double2 *EpAy = NULL; cudaMalloc(&EpAy, nbytes2);
            double2 *EpAz = NULL; cudaMalloc(&EpAz, nbytes2);
            // Round the problem size up to a multiple of the block dims.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force context creation outside the timed region
            aux_fields<<<gridBlock, threadBlock>>>(V, K, gdt, dt, Ax, Ay, Az, px, py, pz, pAx, pAy, pAz, GV, EV, GK, EK, GpAx, GpAy, GpAz, EpAx, EpAy, EpAz);
            cudaDeviceSynchronize();
            // Warm-up launches, excluded from timing.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                aux_fields<<<gridBlock, threadBlock>>>(V, K, gdt, dt, Ax, Ay, Az, px, py, pz, pAx, pAy, pAz, GV, EV, GK, EK, GpAx, GpAy, GpAz, EpAx, EpAy, EpAz);
            }
            cudaDeviceSynchronize();  // drain warm-up work before starting the clock
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                aux_fields<<<gridBlock, threadBlock>>>(V, K, gdt, dt, Ax, Ay, Az, px, py, pz, pAx, pAy, pAz, GV, EV, GK, EK, GpAx, GpAy, GpAz, EpAx, EpAy, EpAz);
            }
            cudaDeviceSynchronize();  // launches are async: wait before stopping the clock
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Release this configuration's device buffers (original leaked them).
            cudaFree(V);    cudaFree(K);
            cudaFree(Ax);   cudaFree(Ay);   cudaFree(Az);
            cudaFree(px);   cudaFree(py);   cudaFree(pz);
            cudaFree(pAx);  cudaFree(pAy);  cudaFree(pAz);
            cudaFree(GV);   cudaFree(EV);   cudaFree(GK);   cudaFree(EK);
            cudaFree(GpAx); cudaFree(GpAy); cudaFree(GpAz);
            cudaFree(EpAx); cudaFree(EpAy); cudaFree(EpAz);
        }
    }
    return 0;
}
56f5ed39942a74e6aa7e9cca2ed5227bcaeb60fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void global_scan(float* d_out, float* d_in) {
int idx = threadIdx.x;
float out = 0.00f;
d_out[idx] = d_in[idx];
__syncthreads();
for (int interpre = 1; interpre<sizeof(d_in); interpre *= 2) {
if (idx - interpre >= 0) {
out = d_out[idx] + d_out[idx - interpre];
}
__syncthreads();
if (idx - interpre >= 0) {
d_out[idx] = out;
out = 0.00f;
}
}
}
__global__ void shmem_scan_kernel(float * d_out, const float * d_in)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
int idx = threadIdx.x;
float out = 0.00f;
// load shared mem from global mem
sdata[idx] = d_in[idx];
__syncthreads();
for (int interpre = 1; interpre<sizeof(d_in); interpre *= 2) {
if (idx - interpre >= 0) {
out = sdata[idx] + sdata[idx - interpre];
}
__syncthreads();
if (idx - interpre >= 0) {
sdata[idx] = out;
out = 0.00f;
}
}
// writes all thread result for this block back to global mem
d_out[idx] = sdata[idx];
}
int main(int argc, char** argv) {
const int ARRAY_SIZE = 8;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i<ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float* d_in;
float* d_out;
// allocate GPU memory
hipMalloc((void**)&d_in, ARRAY_BYTES);
hipMalloc((void**)&d_out, ARRAY_BYTES);
// transfer the array to GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
// global_scan << <1, ARRAY_SIZE >> >(d_out, d_in);
hipLaunchKernelGGL(( shmem_scan_kernel) , dim3(1), dim3(ARRAY_SIZE), ARRAY_SIZE * sizeof(float) , 0, d_out, d_in);
// shmem_scan_kernel <<<1, ARRAY_SIZE >>>(d_out, d_in);
// copy back the result array to the GPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i = 0; i<ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
// free GPU memory allocation
hipFree(d_in);
hipFree(d_out);
system("pause");
return 0;
}
| 56f5ed39942a74e6aa7e9cca2ed5227bcaeb60fd.cu | #include <stdio.h>
__global__ void global_scan(float* d_out, float* d_in) {
int idx = threadIdx.x;
float out = 0.00f;
d_out[idx] = d_in[idx];
__syncthreads();
for (int interpre = 1; interpre<sizeof(d_in); interpre *= 2) {
if (idx - interpre >= 0) {
out = d_out[idx] + d_out[idx - interpre];
}
__syncthreads();
if (idx - interpre >= 0) {
d_out[idx] = out;
out = 0.00f;
}
}
}
__global__ void shmem_scan_kernel(float * d_out, const float * d_in)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
int idx = threadIdx.x;
float out = 0.00f;
// load shared mem from global mem
sdata[idx] = d_in[idx];
__syncthreads();
for (int interpre = 1; interpre<sizeof(d_in); interpre *= 2) {
if (idx - interpre >= 0) {
out = sdata[idx] + sdata[idx - interpre];
}
__syncthreads();
if (idx - interpre >= 0) {
sdata[idx] = out;
out = 0.00f;
}
}
// writes all thread result for this block back to global mem
d_out[idx] = sdata[idx];
}
int main(int argc, char** argv) {
const int ARRAY_SIZE = 8;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i<ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float* d_in;
float* d_out;
// allocate GPU memory
cudaMalloc((void**)&d_in, ARRAY_BYTES);
cudaMalloc((void**)&d_out, ARRAY_BYTES);
// transfer the array to GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
// global_scan << <1, ARRAY_SIZE >> >(d_out, d_in);
shmem_scan_kernel <<<1, ARRAY_SIZE, ARRAY_SIZE * sizeof(float) >>>(d_out, d_in);
// shmem_scan_kernel <<<1, ARRAY_SIZE >>>(d_out, d_in);
// copy back the result array to the GPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i = 0; i<ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out);
system("pause");
return 0;
}
|
7ef257bc99b2d3fbc38e3ad99ec3dbe4ef9a90ea.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************
GpuShareSat -- Copyright (c) 2020, Nicolas Prevot
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************************************/
#define BOOST_TEST_MODULE perftest_module
#include <boost/test/unit_test.hpp>
#include "gpuShareLib/Helper.cuh"
#include "gpuShareLib/Assigs.cuh"
#include "gpuShareLib/Clauses.cuh"
#include "../gpu/GpuHelpedSolver.h"
#include "gpuShareLib/GpuRunner.cuh"
#include "../satUtils/SolverTypes.h"
#include "../core/Solver.h"
#include "gpuShareLib/Utils.h"
#include <hip/hip_runtime.h>
#include "../mtl/Vec.h"
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <memory>
#include "gpuShareLib/GpuRunner.cuh"
#include "../testUtils/TestHelper.cuh"
#include "gpuShareLib/Utils.h"
#include "gpuShareLib/my_make_unique.h"
#include "gpuShareLib/Reported.cuh"
#include "../utils/Utils.h"
using namespace GpuShare;
int getDiffMicros(timespec begin, timespec end) {
return (end.tv_sec - begin.tv_sec) * 1000000 + (end.tv_nsec - begin.tv_nsec) / 1000;
}
std::unique_ptr<GpuClauseSharerOptions> getOptions(int clCount, int clMinSize, int clMaxSize, int initReportCount) {
auto ptr = my_make_unique<GpuClauseSharerOptions>();
ptr -> gpuBlockCountGuideline = 10;
#ifndef NDEBUG
ptr -> gpuThreadsPerBlockGuideline = 150;
#else
ptr -> gpuThreadsPerBlockGuideline = 1024;
#endif
ptr -> initReportCountPerCategory = initReportCount;
return ptr;
}
class PerfFixture : public GpuFixture {
public:
int clauseCount;
int clMinSize;
int clMaxSize;
Logger logger;
PerfFixture(int clCount = 1000000, int clMinSize = 12, int clMaxSize = 20, int varCount = 500, int solverCount = 1);
};
void maybeSetVariable(double &seed, Glucose::GpuHelpedSolver &solver, int var) {
int p = Glucose::irand(seed, 3);
if (p == 0 || p == 1) {
solver.newDecisionLevel();
solver.uncheckedEnqueue(Glucose::mkLit(var, p == 1));
}
}
void resetAllVariables(double &seed, Glucose::GpuHelpedSolver &solver) {
solver.cancelUntil(0);
for (int i = 0; i < solver.nVars(); i++) {
maybeSetVariable(seed, solver, i);
}
}
// This has to be set before the gpu starts, so at the beginning of each test
void setDeviceFlags() {
unsigned int flags;
hipGetDeviceFlags(&flags);
if (flags & hipDeviceScheduleBlockingSync == 0) {
exitIfError(hipSetDeviceFlags(hipDeviceScheduleBlockingSync), POSITION);
}
}
Lit randomLit(double& seed, int varCount) {
return mkLit(Glucose::irand(seed, varCount), Glucose::irand(seed, 2));
}
PerfFixture::PerfFixture(int _clauseCount, int _clMinSize, int _clMaxSize, int nVars, int solverCount) :
clauseCount(_clauseCount),
clMinSize(_clMinSize),
clMaxSize(_clMaxSize),
GpuFixture(*(getOptions(_clauseCount, _clMinSize, _clMaxSize, 2000)), nVars, solverCount),
logger { 2, directPrint} {
srand(25);
std::vector<Lit> lits;
ContigCopier cc(logger, true);
hipStream_t &stream = gpuClauseSharer.sp.get();
GpuDims gpuDims {10, 256};
double seed = 0.4;
for (int cl = 0; cl < clauseCount; cl++) {
lits.clear();
int size = Glucose::irand(seed, clMinSize, clMaxSize);
for (int l = 0; l < size; l++) {
lits.push_back(randomLit(seed, nVars));
}
gpuClauseSharer.clauses->addClause(MinHArr<Lit>(lits.size(), &lits[0]), 5);
// HClauses is designed to copy clauses in small chunks, not a large amount at once
if (cl % 5000 == 0) {
copyToDeviceAsync(*gpuClauseSharer.clauses, stream, cc, gpuDims);
exitIfError(hipStreamSynchronize(stream), POSITION);
}
}
copyToDeviceAsync(*gpuClauseSharer.clauses, stream, cc, gpuDims);
exitIfError(hipStreamSynchronize(stream), POSITION);
}
// print all the wrong clauses
BOOST_AUTO_TEST_CASE(testPrintClauses) {
setDeviceFlags();
PerfFixture fx(300000, 10, 11);
double seed = 0.6;
resetAllVariables(seed, *(fx.solvers[0]));
fx.solvers[0]->tryCopyTrailForGpu(fx.solvers[0]->decisionLevel());
execute(fx.gpuClauseSharer);
Lit array[MAX_CL_SIZE];
long gpuClauseId;
MinHArr<Lit> lits;
while (fx.gpuClauseSharer.reported->popReportedClause(0, lits, gpuClauseId)) {
// vec doesn't have a sort method, so let's use an array instead
for (int j = 0; j < lits.size(); j++) {
array[j] = lits[j];
}
std::sort(array, array + lits.size());
printf(">> ");
for (int j = 0; j < lits.size(); j++) {
printC(array[j]);
}
printf("\n");
}
}
BOOST_AUTO_TEST_CASE(testPerf) {
setDeviceFlags();
PerfFixture fx(2000000, 12, 20, 500, 1);
exitIfLastError(POSITION);
timespec begin, gpuDone, end;
long gpuExecTimeMicros = 0;
long importTimeMicros = 0;
exitIfLastError(POSITION);
// having n = 2000 is really to slow if we're in debug
// But in release, to have a consistent result, we need a big enough
// value for n
#ifdef NDEBUG
long n = 2000;
#else
long n = 15;
#endif
double seed = 0.6;
printf("solver count: %ld\n", fx.solvers.size());
for (int i = 0; i < n; i++) {
for (int j = 0; j < fx.solvers.size(); j++) {
resetAllVariables(seed, *(fx.solvers[j]));
fx.solvers[j]->tryCopyTrailForGpu(fx.solvers[j]->decisionLevel());
}
clock_gettime(CLOCK_REALTIME, &begin);
execute(fx.gpuClauseSharer);
clock_gettime(CLOCK_REALTIME, &gpuDone);
// This is partly because we can't add more assignments unless we read clauses for existing assignments
bool a;
for (int j = 0; j < fx.solvers.size(); j++) fx.solvers[j]->gpuImportClauses(a);
exitIfLastError(POSITION);
clock_gettime(CLOCK_REALTIME, &end);
gpuExecTimeMicros += getDiffMicros(begin, gpuDone);
importTimeMicros += getDiffMicros(gpuDone, end);
}
if (gpuExecTimeMicros + importTimeMicros == 0) {
printf("no time passed");
}
else {
printf("gpu exec time taken: %ld micros\n", gpuExecTimeMicros);
printf("import time taken: %ld micros\n", importTimeMicros);
printf("wrong clause count: %ld\n", fx.gpuClauseSharer.getGlobalStat(gpuReports));
printf("clause count: %d\n", fx.clauseCount);
printf("executions per seconds: %ld\n", (n * 1000000)/ (gpuExecTimeMicros + importTimeMicros));
printf("reads per microsecond: %ld\n", n * fx.clauseCount * (1 + (fx.clMinSize + fx.clMaxSize) / 2) / (gpuExecTimeMicros));
}
#ifdef NDEBUG
BOOST_CHECK_EQUAL(19739, fx.gpuClauseSharer.getGlobalStat(gpuReports));
#else
BOOST_CHECK_EQUAL(143, fx.gpuClauseSharer.getGlobalStat(gpuReports));
#endif
exitIfLastError(POSITION);
}
BOOST_AUTO_TEST_CASE(testReportedAreValid) {
setDeviceFlags();
PerfFixture fx(1000000, 10, 11, 500);
Glucose::GpuHelpedSolver &solver = *(fx.solvers[0]);
exitIfLastError(POSITION);
bool foundEmptyClause = false;
int importedValidLastTime = 0;
int importedLastTime = 0;
double seed = 0.8;
resetAllVariables(seed, *(fx.solvers[0]));
// If the gpu reports some clauses: at least one of them must be valid
// Because the cpu solver then changes its variables because of this one,
// the next clauses may not be valid
while (true) {
fx.solvers[0]->tryCopyTrailForGpu(fx.solvers[0]->decisionLevel());
// the first maybExecute will only start the run but not get the results, so execute twice
execute(fx.gpuClauseSharer);
Glucose::CRef conflict = solver.gpuImportClauses(foundEmptyClause);
int reported = solver.stats[Glucose::nbImported], importedValid = solver.stats[Glucose::nbImportedValid];
printf("%d clauses imported out of which %d valid\n", reported, importedValid);
// continue as long as we get some clauses
if (solver.stats[Glucose::nbImported] == importedLastTime) {
break;
}
importedLastTime = solver.stats[Glucose::nbImported];
ASSERT_OP(solver.stats[Glucose::nbImportedValid], >, importedValidLastTime);
importedValidLastTime = solver.stats[Glucose::nbImportedValid];
// If the solver got a conflict at level n, it's still at level n.
// We need to cancel it until the previous level because otherwise, it will get the same conflict over and over
if (conflict != Glucose::CRef_Undef) {
if (solver.decisionLevel() == 0) break;
solver.cancelUntil(solver.decisionLevel() - 1);
}
}
exitIfError(hipStreamSynchronize(fx.gpuClauseSharer.sp.get()), POSITION);
exitIfLastError(POSITION);
}
| 7ef257bc99b2d3fbc38e3ad99ec3dbe4ef9a90ea.cu | /***************************************************************************************
GpuShareSat -- Copyright (c) 2020, Nicolas Prevot
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************************************/
#define BOOST_TEST_MODULE perftest_module
#include <boost/test/unit_test.hpp>
#include "gpuShareLib/Helper.cuh"
#include "gpuShareLib/Assigs.cuh"
#include "gpuShareLib/Clauses.cuh"
#include "../gpu/GpuHelpedSolver.h"
#include "gpuShareLib/GpuRunner.cuh"
#include "../satUtils/SolverTypes.h"
#include "../core/Solver.h"
#include "gpuShareLib/Utils.h"
#include <cuda.h>
#include "../mtl/Vec.h"
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <memory>
#include "gpuShareLib/GpuRunner.cuh"
#include "../testUtils/TestHelper.cuh"
#include "gpuShareLib/Utils.h"
#include "gpuShareLib/my_make_unique.h"
#include "gpuShareLib/Reported.cuh"
#include "../utils/Utils.h"
using namespace GpuShare;
int getDiffMicros(timespec begin, timespec end) {
return (end.tv_sec - begin.tv_sec) * 1000000 + (end.tv_nsec - begin.tv_nsec) / 1000;
}
std::unique_ptr<GpuClauseSharerOptions> getOptions(int clCount, int clMinSize, int clMaxSize, int initReportCount) {
auto ptr = my_make_unique<GpuClauseSharerOptions>();
ptr -> gpuBlockCountGuideline = 10;
#ifndef NDEBUG
ptr -> gpuThreadsPerBlockGuideline = 150;
#else
ptr -> gpuThreadsPerBlockGuideline = 1024;
#endif
ptr -> initReportCountPerCategory = initReportCount;
return ptr;
}
class PerfFixture : public GpuFixture {
public:
int clauseCount;
int clMinSize;
int clMaxSize;
Logger logger;
PerfFixture(int clCount = 1000000, int clMinSize = 12, int clMaxSize = 20, int varCount = 500, int solverCount = 1);
};
void maybeSetVariable(double &seed, Glucose::GpuHelpedSolver &solver, int var) {
int p = Glucose::irand(seed, 3);
if (p == 0 || p == 1) {
solver.newDecisionLevel();
solver.uncheckedEnqueue(Glucose::mkLit(var, p == 1));
}
}
void resetAllVariables(double &seed, Glucose::GpuHelpedSolver &solver) {
solver.cancelUntil(0);
for (int i = 0; i < solver.nVars(); i++) {
maybeSetVariable(seed, solver, i);
}
}
// This has to be set before the gpu starts, so at the beginning of each test
void setDeviceFlags() {
unsigned int flags;
cudaGetDeviceFlags(&flags);
if (flags & cudaDeviceBlockingSync == 0) {
exitIfError(cudaSetDeviceFlags(cudaDeviceBlockingSync), POSITION);
}
}
Lit randomLit(double& seed, int varCount) {
return mkLit(Glucose::irand(seed, varCount), Glucose::irand(seed, 2));
}
PerfFixture::PerfFixture(int _clauseCount, int _clMinSize, int _clMaxSize, int nVars, int solverCount) :
clauseCount(_clauseCount),
clMinSize(_clMinSize),
clMaxSize(_clMaxSize),
GpuFixture(*(getOptions(_clauseCount, _clMinSize, _clMaxSize, 2000)), nVars, solverCount),
logger { 2, directPrint} {
srand(25);
std::vector<Lit> lits;
ContigCopier cc(logger, true);
cudaStream_t &stream = gpuClauseSharer.sp.get();
GpuDims gpuDims {10, 256};
double seed = 0.4;
for (int cl = 0; cl < clauseCount; cl++) {
lits.clear();
int size = Glucose::irand(seed, clMinSize, clMaxSize);
for (int l = 0; l < size; l++) {
lits.push_back(randomLit(seed, nVars));
}
gpuClauseSharer.clauses->addClause(MinHArr<Lit>(lits.size(), &lits[0]), 5);
// HClauses is designed to copy clauses in small chunks, not a large amount at once
if (cl % 5000 == 0) {
copyToDeviceAsync(*gpuClauseSharer.clauses, stream, cc, gpuDims);
exitIfError(cudaStreamSynchronize(stream), POSITION);
}
}
copyToDeviceAsync(*gpuClauseSharer.clauses, stream, cc, gpuDims);
exitIfError(cudaStreamSynchronize(stream), POSITION);
}
// print all the wrong clauses
BOOST_AUTO_TEST_CASE(testPrintClauses) {
setDeviceFlags();
PerfFixture fx(300000, 10, 11);
double seed = 0.6;
resetAllVariables(seed, *(fx.solvers[0]));
fx.solvers[0]->tryCopyTrailForGpu(fx.solvers[0]->decisionLevel());
execute(fx.gpuClauseSharer);
Lit array[MAX_CL_SIZE];
long gpuClauseId;
MinHArr<Lit> lits;
while (fx.gpuClauseSharer.reported->popReportedClause(0, lits, gpuClauseId)) {
// vec doesn't have a sort method, so let's use an array instead
for (int j = 0; j < lits.size(); j++) {
array[j] = lits[j];
}
std::sort(array, array + lits.size());
printf(">> ");
for (int j = 0; j < lits.size(); j++) {
printC(array[j]);
}
printf("\n");
}
}
BOOST_AUTO_TEST_CASE(testPerf) {
setDeviceFlags();
PerfFixture fx(2000000, 12, 20, 500, 1);
exitIfLastError(POSITION);
timespec begin, gpuDone, end;
long gpuExecTimeMicros = 0;
long importTimeMicros = 0;
exitIfLastError(POSITION);
// having n = 2000 is really to slow if we're in debug
// But in release, to have a consistent result, we need a big enough
// value for n
#ifdef NDEBUG
long n = 2000;
#else
long n = 15;
#endif
double seed = 0.6;
printf("solver count: %ld\n", fx.solvers.size());
for (int i = 0; i < n; i++) {
for (int j = 0; j < fx.solvers.size(); j++) {
resetAllVariables(seed, *(fx.solvers[j]));
fx.solvers[j]->tryCopyTrailForGpu(fx.solvers[j]->decisionLevel());
}
clock_gettime(CLOCK_REALTIME, &begin);
execute(fx.gpuClauseSharer);
clock_gettime(CLOCK_REALTIME, &gpuDone);
// This is partly because we can't add more assignments unless we read clauses for existing assignments
bool a;
for (int j = 0; j < fx.solvers.size(); j++) fx.solvers[j]->gpuImportClauses(a);
exitIfLastError(POSITION);
clock_gettime(CLOCK_REALTIME, &end);
gpuExecTimeMicros += getDiffMicros(begin, gpuDone);
importTimeMicros += getDiffMicros(gpuDone, end);
}
if (gpuExecTimeMicros + importTimeMicros == 0) {
printf("no time passed");
}
else {
printf("gpu exec time taken: %ld micros\n", gpuExecTimeMicros);
printf("import time taken: %ld micros\n", importTimeMicros);
printf("wrong clause count: %ld\n", fx.gpuClauseSharer.getGlobalStat(gpuReports));
printf("clause count: %d\n", fx.clauseCount);
printf("executions per seconds: %ld\n", (n * 1000000)/ (gpuExecTimeMicros + importTimeMicros));
printf("reads per microsecond: %ld\n", n * fx.clauseCount * (1 + (fx.clMinSize + fx.clMaxSize) / 2) / (gpuExecTimeMicros));
}
#ifdef NDEBUG
BOOST_CHECK_EQUAL(19739, fx.gpuClauseSharer.getGlobalStat(gpuReports));
#else
BOOST_CHECK_EQUAL(143, fx.gpuClauseSharer.getGlobalStat(gpuReports));
#endif
exitIfLastError(POSITION);
}
BOOST_AUTO_TEST_CASE(testReportedAreValid) {
setDeviceFlags();
PerfFixture fx(1000000, 10, 11, 500);
Glucose::GpuHelpedSolver &solver = *(fx.solvers[0]);
exitIfLastError(POSITION);
bool foundEmptyClause = false;
int importedValidLastTime = 0;
int importedLastTime = 0;
double seed = 0.8;
resetAllVariables(seed, *(fx.solvers[0]));
// If the gpu reports some clauses: at least one of them must be valid
// Because the cpu solver then changes its variables because of this one,
// the next clauses may not be valid
while (true) {
fx.solvers[0]->tryCopyTrailForGpu(fx.solvers[0]->decisionLevel());
// the first maybExecute will only start the run but not get the results, so execute twice
execute(fx.gpuClauseSharer);
Glucose::CRef conflict = solver.gpuImportClauses(foundEmptyClause);
int reported = solver.stats[Glucose::nbImported], importedValid = solver.stats[Glucose::nbImportedValid];
printf("%d clauses imported out of which %d valid\n", reported, importedValid);
// continue as long as we get some clauses
if (solver.stats[Glucose::nbImported] == importedLastTime) {
break;
}
importedLastTime = solver.stats[Glucose::nbImported];
ASSERT_OP(solver.stats[Glucose::nbImportedValid], >, importedValidLastTime);
importedValidLastTime = solver.stats[Glucose::nbImportedValid];
// If the solver got a conflict at level n, it's still at level n.
// We need to cancel it until the previous level because otherwise, it will get the same conflict over and over
if (conflict != Glucose::CRef_Undef) {
if (solver.decisionLevel() == 0) break;
solver.cancelUntil(solver.decisionLevel() - 1);
}
}
exitIfError(cudaStreamSynchronize(fx.gpuClauseSharer.sp.get()), POSITION);
exitIfLastError(POSITION);
}
|
867f7cffd675b10957d9fa14414486617afd7219.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <thrust/system/hip/detail/hipcub/hipcub.hpp>
#include <iomanip>
#include <io.h>
#include "caffe/FRCNN/frcnn_proposal_layer.hpp"
#include "caffe/FRCNN/util/frcnn_utils.hpp"
#include "caffe/FRCNN/util/frcnn_helper.hpp"
#include "caffe/FRCNN/util/frcnn_param.hpp"
#include "caffe/FRCNN/util/frcnn_gpu_nms.hpp"
#include <iostream>
namespace caffe {
namespace Frcnn {
using std::vector;
__global__ void GetIndex(const int n, int *indices){
CUDA_KERNEL_LOOP(index, n){
indices[index] = index;
}
}
template <typename Dtype>
__global__ void BBoxTransformInv(const int nthreads, const Dtype* const bottom_rpn_bbox,
const float im_height, const float im_width, const int* sorted_indices,
const Dtype* const prior_bboxes, const Dtype* const prior_variances, float* const transform_bbox,
const Dtype* const match_info) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int score_idx = sorted_indices[index];
int match_gt_index = match_info[2 * score_idx];
float *box = transform_bbox + index * 7;
if (match_gt_index < -1){ //
box[0] = 0;
box[1] = 0;
box[2] = 0;
box[3] = 0;
box[4] = score_idx;
box[5] = match_info[score_idx * 2];
box[6] = match_info[score_idx * 2 + 1];
}
else
{
box[0] = prior_bboxes[score_idx * 4 + 0];
box[1] = prior_bboxes[score_idx * 4 + 1];
box[2] = prior_bboxes[score_idx * 4 + 2];
box[3] = prior_bboxes[score_idx * 4 + 3];
box[4] = score_idx;
box[5] = match_info[score_idx * 2];
box[6] = match_info[score_idx * 2 + 1];
if (match_gt_index < 0){ //
box[0] = max(0.0f, min(box[0] * im_width, im_width)); // im_width - 1.0
box[1] = max(0.0f, min(box[1] * im_height, im_height)); // im_height - 1.0
box[2] = max(0.0f, min(box[2] * im_width, im_width)); // im_width - 1.0
box[3] = max(0.0f, min(box[3] * im_height, im_height)); // im_height - 1.0
}
else
{
Dtype det[4] = {
det[0] = bottom_rpn_bbox[score_idx * 4 + 0],
det[1] = bottom_rpn_bbox[score_idx * 4 + 1],
det[2] = bottom_rpn_bbox[score_idx * 4 + 2],
det[3] = bottom_rpn_bbox[score_idx * 4 + 3]
};
float src_w = box[2] - box[0];// + 1 / im_width;
float src_h = box[3] - box[1];// + 1 / im_height;
float src_ctr_x = box[0] + 0.5 * src_w;
float src_ctr_y = box[1] + 0.5 * src_h;
float pred_ctr_x = prior_variances[score_idx * 4] * det[0] * src_w + src_ctr_x;
float pred_ctr_y = prior_variances[score_idx * 4 + 1] * det[1] * src_h + src_ctr_y;
float pred_w = exp(prior_variances[score_idx * 4 + 2] * det[2]) * src_w;
float pred_h = exp(prior_variances[score_idx * 4 + 3] * det[3]) * src_h;
box[0] = (pred_ctr_x - 0.5 * pred_w)*im_width;;
box[1] = (pred_ctr_y - 0.5 * pred_h)*im_height;;
box[2] = (pred_ctr_x + 0.5 * pred_w)*im_width;;
box[3] = (pred_ctr_y + 0.5 * pred_h)*im_height;;
box[0] = max(0.0f, min(box[0], im_width)); // im_width - 1.0
box[1] = max(0.0f, min(box[1], im_height)); // im_height - 1.0
box[2] = max(0.0f, min(box[2], im_width)); // im_width - 1.0
box[3] = max(0.0f, min(box[3], im_height)); // im_height - 1.0
}
}
}
}
__global__ void SelectBox(const int nthreads, const float *box, float min_size,
int *flags) {
CUDA_KERNEL_LOOP(index, nthreads) {
if ((box[index * 7 + 2] - box[index * 7 + 0] < min_size) ||
(box[index * 7 + 3] - box[index * 7 + 1] < min_size)) {
flags[index] = 0;
}
else {
flags[index] = 1;
}
}
}
template <typename Dtype>
__global__ void SelectBoxByIndices(const int nthreads, const float *in_box, int *selected_indices,
float *out_box, const Dtype *in_score, Dtype *out_score) {
CUDA_KERNEL_LOOP(index, nthreads) {
if ((index == 0 && selected_indices[index] == 1) ||
(index > 0 && selected_indices[index] == selected_indices[index - 1] + 1)) {
out_box[(selected_indices[index] - 1) * 7 + 0] = in_box[index * 7 + 0];
out_box[(selected_indices[index] - 1) * 7 + 1] = in_box[index * 7 + 1];
out_box[(selected_indices[index] - 1) * 7 + 2] = in_box[index * 7 + 2];
out_box[(selected_indices[index] - 1) * 7 + 3] = in_box[index * 7 + 3];
out_box[(selected_indices[index] - 1) * 7 + 4] = in_box[index * 7 + 4];
out_box[(selected_indices[index] - 1) * 7 + 5] = in_box[index * 7 + 5];
out_box[(selected_indices[index] - 1) * 7 + 6] = in_box[index * 7 + 6];
if (in_score != NULL && out_score != NULL) {
out_score[selected_indices[index] - 1] = in_score[index];
}
}
}
}
template <typename Dtype>
__global__ void SelectBoxAftNMS(int batch_index, int box_begin, const int nthreads, const float *in_box, int *keep_indices,
Dtype *top_data, const Dtype *in_score, Dtype* top_info) {
CUDA_KERNEL_LOOP(index, nthreads) {
int keep_idx = keep_indices[index];
top_data[box_begin * 5 + index * 5] = batch_index;// batch_index
for (int j = 1; j < 5; ++j) {
top_data[box_begin * 5 + index * 5 + j] = in_box[keep_idx * 7 + j - 1];
}
if (top_info != NULL) {
top_info[box_begin * 2 + index * 2] = in_box[keep_idx * 7 + 4];
top_info[box_begin * 2 + index * 2 + 1] = in_box[keep_idx * 7 + 5];
top_info[box_begin * 2 + index * 2 + 2] = in_box[keep_idx * 7 + 6];
}
}
}
template <typename Dtype>
void FrcnnProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
DLOG(ERROR) << "========== enter proposal layer";
const Dtype *bottom_rpn_score = bottom[0]->gpu_data();
const Dtype *bottom_rpn_bbox = bottom[1]->gpu_data();
const Dtype *prior_data = bottom[2]->gpu_data(); // prior box
const Dtype* match_info = bottom[3]->gpu_data(); // match_info
const int num = bottom[1]->num();// batch size
const int channes = bottom[1]->channels();
const int height = bottom[1]->height();
const int width = bottom[1]->width();
/*--------------------------------------------------*/
CHECK(channes % 4 == 0) << "rpn bbox pred channels should be divided by 4";
const float im_height = FrcnnParam::im_height;
const float im_width = FrcnnParam::im_width;
/*--------------------------------------------------*/
int rpn_pre_nms_top_n;
int rpn_post_nms_top_n;
float rpn_nms_thresh;
int rpn_min_size;
if (this->phase_ == TRAIN) {
rpn_pre_nms_top_n = FrcnnParam::rpn_pre_nms_top_n;
rpn_post_nms_top_n = FrcnnParam::rpn_post_nms_top_n;
rpn_nms_thresh = FrcnnParam::rpn_nms_thresh;
rpn_min_size = FrcnnParam::rpn_min_size;
}
else {
rpn_pre_nms_top_n = FrcnnParam::test_rpn_pre_nms_top_n;
rpn_post_nms_top_n = FrcnnParam::test_rpn_post_nms_top_n;
rpn_nms_thresh = FrcnnParam::test_rpn_nms_thresh;
rpn_min_size = FrcnnParam::test_rpn_min_size;
}
LOG_IF(ERROR, rpn_pre_nms_top_n <= 0) << "rpn_pre_nms_top_n : " << rpn_pre_nms_top_n;
LOG_IF(ERROR, rpn_post_nms_top_n <= 0) << "rpn_post_nms_top_n : " << rpn_post_nms_top_n;
if (rpn_pre_nms_top_n <= 0 || rpn_post_nms_top_n <= 0) return;
const int config_n_anchors = FrcnnParam::anchors.size() / 4;
const int total_anchor_num = bottom[2]->height() / 4;
/*--------------------------------------------------*/
vector<int> batch_keep_num;
vector<float*> batch_transform_bbox_;
vector<int *>batch_gpu_keep_indices_;
vector<Dtype *>batch_bbox_score_;
vector<int*> batch_match_gt_;// add output
for (int batch_index = 0; batch_index < num; batch_index++) {
//Step 1. -------------------------------Sort the rpn result----------------------
// the first half of rpn_score is the bg score
// Note that the sorting operator will change the order fg_scores (bottom_rpn_score)
const int fg_begin = batch_index * total_anchor_num;
const int bbox_begin = (4 * batch_index)*total_anchor_num;
const int transform_bbox_begin = 7 * batch_index * rpn_pre_nms_top_n;//
const int selected_flags_begin = batch_index * rpn_pre_nms_top_n;
const int gpu_keep_indices_begin = batch_index * rpn_post_nms_top_n;
Dtype *fg_scores = (Dtype*)(&bottom_rpn_score[fg_begin]);
Dtype *rpn_bbox = (Dtype*)(&bottom_rpn_bbox[bbox_begin]);
Dtype *fg_info = (Dtype*)(&match_info[fg_begin]); //
Dtype *sorted_scores = NULL;
CUDA_CHECK(hipMalloc((void**)&sorted_scores, sizeof(Dtype) * total_anchor_num));
cub::DoubleBuffer<Dtype> d_keys(fg_scores, sorted_scores);
int *indices = NULL;
CUDA_CHECK(hipMalloc((void**)&indices, sizeof(int) * total_anchor_num));
GetIndex << <caffe::CAFFE_GET_BLOCKS(total_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS >> >(
total_anchor_num, indices);
hipDeviceSynchronize();
int *sorted_indices = NULL;
CUDA_CHECK(hipMalloc((void**)&sorted_indices, sizeof(int) * total_anchor_num));
cub::DoubleBuffer<int> d_values(indices, sorted_indices);
void *sort_temp_storage_ = NULL;
size_t sort_temp_storage_bytes_ = 0;
// calculate the temp_storage_bytes
hipcub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_,
d_keys, d_values, total_anchor_num);
DLOG(ERROR) << "sort_temp_storage_bytes_ : " << sort_temp_storage_bytes_;
CUDA_CHECK(hipMalloc(&sort_temp_storage_, sort_temp_storage_bytes_));
// sorting
hipcub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_,
d_keys, d_values, total_anchor_num);
hipDeviceSynchronize();
//Step 2. ---------------------------bbox transform----------------------------
const int retained_anchor_num = ::min(total_anchor_num, rpn_pre_nms_top_n);//3000?
// gt=-2box0filter out small box
BBoxTransformInv<Dtype> << <caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS >> >(
retained_anchor_num, rpn_bbox, im_height, im_width, sorted_indices,
prior_data, &prior_data[total_anchor_num * 4], &transform_bbox_[transform_bbox_begin],
fg_info);// rpn_pre_nms_top_nrpn_pre_nms_top_nrpn_min_size
hipDeviceSynchronize();
/*--------------------------------------------------*/
//if (retained_anchor_num > 0) {
// std::ofstream outfile;
// if (_access("anchor_before_nmsGPU.txt", 0) != -1) //
// remove("anchor_before_nmsGPU.txt");
// outfile.open("anchor_before_nmsGPU.txt", ios::out | ios::app);
// vector<float> boxes(rpn_pre_nms_top_n * 7);
// vector<Dtype> scores(total_anchor_num);
// /*vector<int> indices(total_anchor_num);*/
// hipMemcpy(&boxes[0], &transform_bbox_[transform_bbox_begin], 7 * rpn_pre_nms_top_n * sizeof(float), hipMemcpyDeviceToHost);
// hipMemcpy(&scores[0], sorted_scores, total_anchor_num * sizeof(Dtype), hipMemcpyDeviceToHost);
// /*hipMemcpy(&indices[0], sorted_indices, total_anchor_num * sizeof(int), hipMemcpyDeviceToHost);*/
// outfile << retained_anchor_num << std::endl;
// for (int i = 0; i < retained_anchor_num; i++) {
// outfile << scores[i] << " ";
// outfile << boxes[7 * i] << " "
// << boxes[7 * i + 1] << " "
// << boxes[7 * i + 2] << " "
// << boxes[7 * i + 3] << " "
// << boxes[7 * i + 4] << " "
// << boxes[7 * i + 5] << " "
// << boxes[7 * i + 6] << std::endl;
// }
// outfile.close();
//}
/*--------------------------------------------------*/
//Step 3. -------------------------filter out small box-----------------------
SelectBox << <caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS >> >(
retained_anchor_num, &transform_bbox_[transform_bbox_begin], rpn_min_size, &selected_flags_[selected_flags_begin]);
hipDeviceSynchronize();
/*--------------------------------------------------*/
//if (selected_flags_ != NULL)
//{
// vector<int> flags(retained_anchor_num);
// hipMemcpy(&flags[0], &selected_flags_[selected_flags_begin], retained_anchor_num * sizeof(int), hipMemcpyDeviceToHost);
// std::ofstream outfile;
// if (_access("flags_GPU.txt", 0) != -1) //
// remove("flags_GPU.txt");
// outfile.open("flags_GPU.txt", ios::out | ios::app);
// for (int i = 0; i < retained_anchor_num; i++) {
// outfile << flags[i] << std::endl;
// }
// outfile.close();
//}
/*--------------------------------------------------*/
// cumulative sum up the flags to get the copy index
int *selected_indices_ = NULL;
CUDA_CHECK(hipMalloc((void**)&selected_indices_, sizeof(int) * retained_anchor_num));
void *cumsum_temp_storage_ = NULL;
size_t cumsum_temp_storage_bytes_ = 0;
hipcub::DeviceScan::InclusiveSum(cumsum_temp_storage_, cumsum_temp_storage_bytes_,
&selected_flags_[selected_flags_begin], selected_indices_, retained_anchor_num);
DLOG(ERROR) << "cumsum_temp_storage_bytes : " << cumsum_temp_storage_bytes_;
CUDA_CHECK(hipMalloc(&cumsum_temp_storage_, cumsum_temp_storage_bytes_));
hipcub::DeviceScan::InclusiveSum(sort_temp_storage_, cumsum_temp_storage_bytes_,
&selected_flags_[selected_flags_begin], selected_indices_, retained_anchor_num);
/*--------------------------------------------------*/
//if (selected_indices_ != NULL)
//{
// vector<int> indices_(retained_anchor_num);
// hipMemcpy(&indices_[0], &selected_indices_[selected_flags_begin], retained_anchor_num * sizeof(int), hipMemcpyDeviceToHost);
// std::ofstream outfile;
// if (_access("indices_.txt", 0) != -1) //
// remove("indices_.txt");
// outfile.open("indices_.txt", ios::out | ios::app);
// for (int i = 0; i < retained_anchor_num; i++) {
// outfile << indices_[i] << std::endl;
// }
// outfile.close();
//}
/*--------------------------------------------------*/
int selected_num = -1;
hipMemcpy(&selected_num, &selected_indices_[retained_anchor_num - 1], sizeof(int), hipMemcpyDeviceToHost);
CHECK_GT(selected_num, 0);
float* tmp_transform_bbox = NULL;
CUDA_CHECK(hipMalloc(&tmp_transform_bbox, 7 * sizeof(Dtype) * rpn_pre_nms_top_n));//retained_anchor_num
hipMemcpy(tmp_transform_bbox, &transform_bbox_[transform_bbox_begin], rpn_pre_nms_top_n * sizeof(Dtype) * 7, hipMemcpyDeviceToDevice);
Dtype *bbox_score_ = NULL;
if (top.size() > 1)
{
CUDA_CHECK(hipMalloc(&bbox_score_, sizeof(Dtype) * rpn_pre_nms_top_n));//retained_anchor_num
}
SelectBoxByIndices << <caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS >> >(
retained_anchor_num, &tmp_transform_bbox[transform_bbox_begin], &selected_indices_[selected_flags_begin], &transform_bbox_[transform_bbox_begin], sorted_scores, bbox_score_);
hipDeviceSynchronize();
/*--------------------------------------------------*/
// if (selected_num > 0) {
// std::ofstream outfile;
// if (_access("anchor_before_nmsGPU.txt", 0) != -1) //
// remove("anchor_before_nmsGPU.txt");
// outfile.open("anchor_before_nmsGPU.txt", ios::out | ios::app);
// vector<float> boxes(retained_anchor_num * 7);
// vector<float> scores(retained_anchor_num);
// /*vector<int> flags(retained_anchor_num);
// hipMemcpy(&flags[0], &selected_flags_[selected_flags_begin], retained_anchor_num * sizeof(int), hipMemcpyDeviceToHost);*/
// hipMemcpy(&boxes[0], &transform_bbox_[transform_bbox_begin], 7 * retained_anchor_num * sizeof(float), hipMemcpyDeviceToHost);
// hipMemcpy(&scores[0], bbox_score_, retained_anchor_num * sizeof(float), hipMemcpyDeviceToHost);
// /*vector<float>::iterator itBox;
// vector<float>::iterator itScore;
// vector<int>::iterator itFlag;
// for (itFlag = flags.begin(), itBox = boxes.begin(), itScore = scores.begin(); itFlag != flags.end();)
// {
// if ((*itFlag) == 0)
// {
// itFlag = flags.erase(itFlag);
// itBox = boxes.erase(itBox);
// itBox = boxes.erase(itBox);
// itBox = boxes.erase(itBox);
// itBox = boxes.erase(itBox);
// itBox = boxes.erase(itBox);
// itBox = boxes.erase(itBox);
// itBox = boxes.erase(itBox);
// itScore = scores.erase(itScore);
// }
// else
// {
// itFlag++;
// itBox = itBox + 7;
// itScore++;
// }
// }
//*/
// outfile << selected_num << std::endl;
// for (int i = 0; i < selected_num; i++) {
// outfile << scores[i] << " ";
// outfile << boxes[7 * i] << " "
// << boxes[7 * i + 1] << " "
// << boxes[7 * i + 2] << " "
// << boxes[7 * i + 3] << " "
// << boxes[7 * i + 4] << " "
// << boxes[7 * i + 5] << " "
// << boxes[7 * i + 6] << std::endl;
// }
// outfile.close();
// }
/*--------------------------------------------------*/
//Step 4. -----------------------------apply nms-------------------------------
DLOG(ERROR) << "========== apply nms with rpn_nms_thresh : " << rpn_nms_thresh;
vector<int> keep_indices(selected_num);
int keep_num = -1;
gpu_nms(&keep_indices[0], &keep_num, &transform_bbox_[transform_bbox_begin], selected_num, 4, rpn_nms_thresh);
DLOG(ERROR) << "rpn num after gpu nms: " << keep_num;
keep_num = ::min(keep_num, rpn_post_nms_top_n);
DLOG(ERROR) << "========== copy to top";
hipMemcpy(&gpu_keep_indices_[gpu_keep_indices_begin], &keep_indices[0], sizeof(int) * keep_num, hipMemcpyHostToDevice);
////////////////////////////////////
// do not forget to free the malloc memory
CUDA_CHECK(hipFree(sorted_scores));
CUDA_CHECK(hipFree(indices));
CUDA_CHECK(hipFree(sorted_indices));
CUDA_CHECK(hipFree(sort_temp_storage_));
CUDA_CHECK(hipFree(cumsum_temp_storage_));
CUDA_CHECK(hipFree(selected_indices_));
CUDA_CHECK(hipFree(tmp_transform_bbox));
batch_keep_num.push_back(keep_num);
batch_bbox_score_.push_back(bbox_score_);
}
int total_boxes = 0;
for (size_t batch_index = 0; batch_index < batch_keep_num.size(); batch_index++) {
total_boxes += batch_keep_num[batch_index];
}
top[0]->Reshape(total_boxes, 5, 1, 1);
Dtype *top_data = top[0]->mutable_gpu_data();
Dtype *top_info = NULL;
if (top.size() > 1) {
top[1]->Reshape(total_boxes, 3, 1, 1);
top_info = top[1]->mutable_gpu_data();
}
int box_begin = 0;
/*--------------------------------------------------*/
//std::ofstream outfile;
//if (_access("frcnn_proposal_layer_outputGPU.txt", 0) != -1) //
// remove("frcnn_proposal_layer_outputGPU.txt");
//outfile.open("frcnn_proposal_layer_outputGPU.txt", ios::out | ios::app);
/*--------------------------------------------------*/
for (size_t batch_index = 0; batch_index < batch_keep_num.size(); batch_index++) {
const int keep_num = batch_keep_num[batch_index];
SelectBoxAftNMS << <caffe::CAFFE_GET_BLOCKS(keep_num), caffe::CAFFE_CUDA_NUM_THREADS >> >(
batch_index, box_begin, keep_num,
&transform_bbox_[rpn_pre_nms_top_n * batch_index * 7],
&gpu_keep_indices_[rpn_post_nms_top_n * batch_index],
top_data, batch_bbox_score_[batch_index], top_info);
/*--------------------------------------------------*/
/*vector<float> boxes(rpn_pre_nms_top_n * 7);
vector<Dtype> scores(rpn_pre_nms_top_n);
vector<int> indexes(keep_num);
hipMemcpy(&boxes[0], &transform_bbox_[rpn_pre_nms_top_n * batch_index * 7], 7 * rpn_pre_nms_top_n * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&scores[0], batch_bbox_score_[batch_index], rpn_pre_nms_top_n * sizeof(Dtype), hipMemcpyDeviceToHost);
hipMemcpy(&indexes[0], &gpu_keep_indices_[rpn_post_nms_top_n * batch_index], keep_num * sizeof(int), hipMemcpyDeviceToHost);
outfile << "batch index : " << batch_index << " " << keep_num << std::endl;
for (int i = 0; i < keep_num; i++) {
outfile << scores[indexes[i]] << " ";
outfile << boxes[7 * indexes[i]] << " "
<< boxes[7 * indexes[i] + 1] << " "
<< boxes[7 * indexes[i] + 2] << " "
<< boxes[7 * indexes[i] + 3] << " "
<< boxes[7 * indexes[i] + 4] << " "
<< boxes[7 * indexes[i] + 5] << " "
<< boxes[7 * indexes[i] + 6] << std::endl;
}*/
/*--------------------------------------------------*/
box_begin += keep_num;
}
/*outfile.close();*/
DLOG(ERROR) << "========== exit proposal layer";
////////////////////////////////////
// do not forget to free the malloc memory
for (size_t batch_index = 0; batch_index < batch_keep_num.size(); batch_index++) {
if (batch_bbox_score_[batch_index] != NULL)
CUDA_CHECK(hipFree(batch_bbox_score_[batch_index]));
}
}
template <typename Dtype>
void FrcnnProposalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
    const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) {
  // Proposal generation is not differentiable; any backprop request is an error.
  for (bool needs_grad : propagate_down) {
    if (needs_grad) { NOT_IMPLEMENTED; }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(FrcnnProposalLayer);
} // namespace frcnn
} // namespace caffe
| 867f7cffd675b10957d9fa14414486617afd7219.cu | // ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <thrust/system/cuda/detail/cub/cub.cuh>
#include <iomanip>
#include <io.h>
#include "caffe/FRCNN/frcnn_proposal_layer.hpp"
#include "caffe/FRCNN/util/frcnn_utils.hpp"
#include "caffe/FRCNN/util/frcnn_helper.hpp"
#include "caffe/FRCNN/util/frcnn_param.hpp"
#include "caffe/FRCNN/util/frcnn_gpu_nms.hpp"
#include <iostream>
namespace caffe {
namespace Frcnn {
using std::vector;
// Iota kernel: writes indices[i] = i for every i in [0, n).
// Grid-stride loop, so any launch configuration covers all n elements.
__global__ void GetIndex(const int n, int *indices){
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    indices[i] = i;
  }
}
// Decodes RPN bbox deltas against prior (anchor) boxes into image-space corner
// boxes, clipped to [0, im_width] x [0, im_height].
// Output layout (transform_bbox): 7 floats per box:
//   [x1, y1, x2, y2, anchor_id, match_info[2*id], match_info[2*id + 1]].
// match_gt_index (read from match_info[2 * anchor_id]) semantics:
//   < -1 : discarded negative sample -> box zeroed so SelectBox filters it out
//   == -1: kept negative sample      -> prior box scaled to pixels and clipped
//   >= 0 : positive sample           -> deltas decoded with per-prior variances
// Prior coordinates appear to be normalized to [0,1] (they are multiplied by
// the image size before clipping) — NOTE(review): confirm against the prior
// box generator.
template <typename Dtype>
__global__ void BBoxTransformInv(const int nthreads, const Dtype* const bottom_rpn_bbox,
    const float im_height, const float im_width, const int* sorted_indices,
    const Dtype* const prior_bboxes, const Dtype* const prior_variances, float* const transform_bbox,
    const Dtype* const match_info) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int score_idx = sorted_indices[index];
    int match_gt_index = match_info[2 * score_idx];
    float *box = transform_bbox + index * 7;
    if (match_gt_index < -1){ // discarded negative sample: zeroed-out box
      box[0] = 0;
      box[1] = 0;
      box[2] = 0;
      box[3] = 0;
      box[4] = score_idx;
      box[5] = match_info[score_idx * 2];
      box[6] = match_info[score_idx * 2 + 1];
    }
    else
    {
      box[0] = prior_bboxes[score_idx * 4 + 0];
      box[1] = prior_bboxes[score_idx * 4 + 1];
      box[2] = prior_bboxes[score_idx * 4 + 2];
      box[3] = prior_bboxes[score_idx * 4 + 3];
      box[4] = score_idx;
      box[5] = match_info[score_idx * 2];
      box[6] = match_info[score_idx * 2 + 1];
      if (match_gt_index < 0){ // kept negative sample: scale prior to pixels, clip
        box[0] = max(0.0f, min(box[0] * im_width, im_width));
        box[1] = max(0.0f, min(box[1] * im_height, im_height));
        box[2] = max(0.0f, min(box[2] * im_width, im_width));
        box[3] = max(0.0f, min(box[3] * im_height, im_height));
      }
      else
      {
        // BUGFIX: the original wrote `Dtype det[4] = { det[0] = ..., ... };`,
        // assigning into `det` inside its own aggregate initializer. Plain
        // aggregate initialization yields the same values without the
        // self-referential writes.
        Dtype det[4] = {
          bottom_rpn_bbox[score_idx * 4 + 0],
          bottom_rpn_bbox[score_idx * 4 + 1],
          bottom_rpn_bbox[score_idx * 4 + 2],
          bottom_rpn_bbox[score_idx * 4 + 3]
        };
        // Standard center/size decoding scaled by the per-prior variances.
        float src_w = box[2] - box[0];
        float src_h = box[3] - box[1];
        float src_ctr_x = box[0] + 0.5 * src_w;
        float src_ctr_y = box[1] + 0.5 * src_h;
        float pred_ctr_x = prior_variances[score_idx * 4] * det[0] * src_w + src_ctr_x;
        float pred_ctr_y = prior_variances[score_idx * 4 + 1] * det[1] * src_h + src_ctr_y;
        float pred_w = exp(prior_variances[score_idx * 4 + 2] * det[2]) * src_w;
        float pred_h = exp(prior_variances[score_idx * 4 + 3] * det[3]) * src_h;
        box[0] = (pred_ctr_x - 0.5 * pred_w) * im_width;
        box[1] = (pred_ctr_y - 0.5 * pred_h) * im_height;
        box[2] = (pred_ctr_x + 0.5 * pred_w) * im_width;
        box[3] = (pred_ctr_y + 0.5 * pred_h) * im_height;
        // Clip the decoded box to the image.
        box[0] = max(0.0f, min(box[0], im_width));
        box[1] = max(0.0f, min(box[1], im_height));
        box[2] = max(0.0f, min(box[2], im_width));
        box[3] = max(0.0f, min(box[3], im_height));
      }
    }
  }
}
// Flags each 7-float box row: flag 0 when its width or height falls below
// min_size, flag 1 otherwise. The flags feed the prefix-sum compaction step.
__global__ void SelectBox(const int nthreads, const float *box, float min_size,
    int *flags) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const float *b = box + index * 7;
    const float w = b[2] - b[0];
    const float h = b[3] - b[1];
    // Same comparison direction as the filter contract: strictly-below fails.
    flags[index] = (w < min_size || h < min_size) ? 0 : 1;
  }
}
// Stream compaction of 7-float box rows (and, optionally, their scores).
// selected_indices is the inclusive prefix sum of the keep flags, so a kept
// element is exactly one whose prefix-sum value exceeds its predecessor's by
// one; its destination slot is selected_indices[index] - 1.
template <typename Dtype>
__global__ void SelectBoxByIndices(const int nthreads, const float *in_box, int *selected_indices,
    float *out_box, const Dtype *in_score, Dtype *out_score) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int rank = selected_indices[index];
    const bool kept = (index == 0) ? (rank == 1)
                                   : (rank == selected_indices[index - 1] + 1);
    if (kept) {
      const int dst = rank - 1;
      for (int k = 0; k < 7; ++k) {
        out_box[dst * 7 + k] = in_box[index * 7 + k];
      }
      if (in_score != NULL && out_score != NULL) {
        out_score[dst] = in_score[index];
      }
    }
  }
}
// Copies one image's NMS survivors into the layer outputs.
// top_data rows are 5 wide: [batch_index, x1, y1, x2, y2].
// top_info rows are 3 wide: [anchor_id, match_info0, match_info1], matching
// top[1]->Reshape(total_boxes, 3, 1, 1) in Forward_gpu.
// in_score is currently unused here (kept for interface compatibility).
template <typename Dtype>
__global__ void SelectBoxAftNMS(int batch_index, int box_begin, const int nthreads, const float *in_box, int *keep_indices,
    Dtype *top_data, const Dtype *in_score, Dtype* top_info) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int keep_idx = keep_indices[index];
    top_data[box_begin * 5 + index * 5] = batch_index; // column 0: image index
    for (int j = 1; j < 5; ++j) {
      top_data[box_begin * 5 + index * 5 + j] = in_box[keep_idx * 7 + j - 1];
    }
    if (top_info != NULL) {
      // BUGFIX: top_info rows are 3 elements wide, but the original indexed
      // with a stride of 2 while writing 3 values, so consecutive boxes
      // overwrote each other's first element. Use the correct stride of 3.
      top_info[(box_begin + index) * 3 + 0] = in_box[keep_idx * 7 + 4];
      top_info[(box_begin + index) * 3 + 1] = in_box[keep_idx * 7 + 5];
      top_info[(box_begin + index) * 3 + 2] = in_box[keep_idx * 7 + 6];
    }
  }
}
template <typename Dtype>
void FrcnnProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
    const vector<Blob<Dtype> *> &top) {
  // Turns RPN outputs into RoIs, per image:
  //   Step 1: sort anchors by foreground score (descending).
  //   Step 2: decode bbox deltas against the prior boxes; clip to the image.
  //   Step 3: drop boxes below rpn_min_size (flag + prefix-sum compaction).
  //   Step 4: NMS; keep at most rpn_post_nms_top_n boxes per image.
  // top[0] is reshaped to (total_boxes, 5): [batch_index, x1, y1, x2, y2].
  // top[1] (optional) is reshaped to (total_boxes, 3): per-box match info.
  DLOG(ERROR) << "========== enter proposal layer";
  const Dtype *bottom_rpn_score = bottom[0]->gpu_data();
  const Dtype *bottom_rpn_bbox = bottom[1]->gpu_data();
  const Dtype *prior_data = bottom[2]->gpu_data(); // prior boxes, then variances
  const Dtype* match_info = bottom[3]->gpu_data(); // anchor/gt match info
  const int num = bottom[1]->num(); // batch size
  const int channes = bottom[1]->channels();
  const int height = bottom[1]->height();
  const int width = bottom[1]->width();
  CHECK(channes % 4 == 0) << "rpn bbox pred channels should be divided by 4";
  const float im_height = FrcnnParam::im_height;
  const float im_width = FrcnnParam::im_width;
  // NMS configuration differs between the TRAIN and TEST phases.
  int rpn_pre_nms_top_n;
  int rpn_post_nms_top_n;
  float rpn_nms_thresh;
  int rpn_min_size;
  if (this->phase_ == TRAIN) {
    rpn_pre_nms_top_n = FrcnnParam::rpn_pre_nms_top_n;
    rpn_post_nms_top_n = FrcnnParam::rpn_post_nms_top_n;
    rpn_nms_thresh = FrcnnParam::rpn_nms_thresh;
    rpn_min_size = FrcnnParam::rpn_min_size;
  }
  else {
    rpn_pre_nms_top_n = FrcnnParam::test_rpn_pre_nms_top_n;
    rpn_post_nms_top_n = FrcnnParam::test_rpn_post_nms_top_n;
    rpn_nms_thresh = FrcnnParam::test_rpn_nms_thresh;
    rpn_min_size = FrcnnParam::test_rpn_min_size;
  }
  LOG_IF(ERROR, rpn_pre_nms_top_n <= 0) << "rpn_pre_nms_top_n : " << rpn_pre_nms_top_n;
  LOG_IF(ERROR, rpn_post_nms_top_n <= 0) << "rpn_post_nms_top_n : " << rpn_post_nms_top_n;
  if (rpn_pre_nms_top_n <= 0 || rpn_post_nms_top_n <= 0) return;
  const int config_n_anchors = FrcnnParam::anchors.size() / 4;
  const int total_anchor_num = bottom[2]->height() / 4;
  vector<int> batch_keep_num;
  vector<float*> batch_transform_bbox_;
  vector<int *> batch_gpu_keep_indices_;
  vector<Dtype *> batch_bbox_score_;
  vector<int*> batch_match_gt_;
  for (int batch_index = 0; batch_index < num; batch_index++) {
    // Per-image offsets into the batched blobs / member scratch buffers.
    const int fg_begin = batch_index * total_anchor_num;
    const int bbox_begin = (4 * batch_index)*total_anchor_num;
    const int transform_bbox_begin = 7 * batch_index * rpn_pre_nms_top_n;
    const int selected_flags_begin = batch_index * rpn_pre_nms_top_n;
    const int gpu_keep_indices_begin = batch_index * rpn_post_nms_top_n;
    Dtype *fg_scores = (Dtype*)(&bottom_rpn_score[fg_begin]);
    Dtype *rpn_bbox = (Dtype*)(&bottom_rpn_bbox[bbox_begin]);
    Dtype *fg_info = (Dtype*)(&match_info[fg_begin]);
    // Step 1: sort fg scores descending, carrying anchor indices as values.
    // NOTE: CUB's radix sort may reorder fg_scores (bottom_rpn_score) in place.
    Dtype *sorted_scores = NULL;
    CUDA_CHECK(cudaMalloc((void**)&sorted_scores, sizeof(Dtype) * total_anchor_num));
    cub::DoubleBuffer<Dtype> d_keys(fg_scores, sorted_scores);
    int *indices = NULL;
    CUDA_CHECK(cudaMalloc((void**)&indices, sizeof(int) * total_anchor_num));
    GetIndex << <caffe::CAFFE_GET_BLOCKS(total_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS >> >(
        total_anchor_num, indices);
    cudaDeviceSynchronize();
    int *sorted_indices = NULL;
    CUDA_CHECK(cudaMalloc((void**)&sorted_indices, sizeof(int) * total_anchor_num));
    cub::DoubleBuffer<int> d_values(indices, sorted_indices);
    void *sort_temp_storage_ = NULL;
    size_t sort_temp_storage_bytes_ = 0;
    // First call with NULL storage only computes the required temp-storage size.
    cub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_,
        d_keys, d_values, total_anchor_num);
    DLOG(ERROR) << "sort_temp_storage_bytes_ : " << sort_temp_storage_bytes_;
    CUDA_CHECK(cudaMalloc(&sort_temp_storage_, sort_temp_storage_bytes_));
    cub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_,
        d_keys, d_values, total_anchor_num);
    cudaDeviceSynchronize();
    // BUGFIX: with cub::DoubleBuffer the sorted data ends up in whichever
    // buffer Current() selects after the sort; the original code assumed the
    // results always landed in sorted_scores / sorted_indices.
    Dtype *cur_sorted_scores = d_keys.Current();
    int *cur_sorted_indices = d_values.Current();
    //Step 2. ---------------------------bbox transform----------------------------
    const int retained_anchor_num = std::min(total_anchor_num, rpn_pre_nms_top_n);
    // Boxes matched to gt index -2 are zeroed here so the min-size filter in
    // Step 3 removes them.
    BBoxTransformInv<Dtype> << <caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS >> >(
        retained_anchor_num, rpn_bbox, im_height, im_width, cur_sorted_indices,
        prior_data, &prior_data[total_anchor_num * 4], &transform_bbox_[transform_bbox_begin],
        fg_info);
    cudaDeviceSynchronize();
    //Step 3. -------------------------filter out small box-----------------------
    SelectBox << <caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS >> >(
        retained_anchor_num, &transform_bbox_[transform_bbox_begin], rpn_min_size, &selected_flags_[selected_flags_begin]);
    cudaDeviceSynchronize();
    // Inclusive prefix sum of the keep flags gives each survivor's output slot.
    int *selected_indices_ = NULL;
    CUDA_CHECK(cudaMalloc((void**)&selected_indices_, sizeof(int) * retained_anchor_num));
    void *cumsum_temp_storage_ = NULL;
    size_t cumsum_temp_storage_bytes_ = 0;
    cub::DeviceScan::InclusiveSum(cumsum_temp_storage_, cumsum_temp_storage_bytes_,
        &selected_flags_[selected_flags_begin], selected_indices_, retained_anchor_num);
    DLOG(ERROR) << "cumsum_temp_storage_bytes : " << cumsum_temp_storage_bytes_;
    CUDA_CHECK(cudaMalloc(&cumsum_temp_storage_, cumsum_temp_storage_bytes_));
    // BUGFIX: the actual scan previously passed sort_temp_storage_ (sized for
    // the radix sort) instead of the cumsum_temp_storage_ allocated just above.
    cub::DeviceScan::InclusiveSum(cumsum_temp_storage_, cumsum_temp_storage_bytes_,
        &selected_flags_[selected_flags_begin], selected_indices_, retained_anchor_num);
    // The survivor count is the last element of the inclusive prefix sum.
    int selected_num = -1;
    cudaMemcpy(&selected_num, &selected_indices_[retained_anchor_num - 1], sizeof(int), cudaMemcpyDeviceToHost);
    CHECK_GT(selected_num, 0);
    // Compact survivors back into transform_bbox_ via a per-image staging copy.
    float* tmp_transform_bbox = NULL;
    CUDA_CHECK(cudaMalloc(&tmp_transform_bbox, 7 * sizeof(Dtype) * rpn_pre_nms_top_n));
    cudaMemcpy(tmp_transform_bbox, &transform_bbox_[transform_bbox_begin], rpn_pre_nms_top_n * sizeof(Dtype) * 7, cudaMemcpyDeviceToDevice);
    Dtype *bbox_score_ = NULL;
    if (top.size() > 1)
    {
      CUDA_CHECK(cudaMalloc(&bbox_score_, sizeof(Dtype) * rpn_pre_nms_top_n));
    }
    // BUGFIX: tmp_transform_bbox and selected_indices_ are per-image buffers
    // that start at offset 0; the original added the batch offsets
    // (transform_bbox_begin / selected_flags_begin) here, reading out of
    // bounds for every batch_index > 0.
    SelectBoxByIndices << <caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS >> >(
        retained_anchor_num, tmp_transform_bbox, selected_indices_,
        &transform_bbox_[transform_bbox_begin], cur_sorted_scores, bbox_score_);
    cudaDeviceSynchronize();
    //Step 4. -----------------------------apply nms-------------------------------
    DLOG(ERROR) << "========== apply nms with rpn_nms_thresh : " << rpn_nms_thresh;
    vector<int> keep_indices(selected_num);
    int keep_num = -1;
    gpu_nms(&keep_indices[0], &keep_num, &transform_bbox_[transform_bbox_begin], selected_num, 4, rpn_nms_thresh);
    DLOG(ERROR) << "rpn num after gpu nms: " << keep_num;
    keep_num = std::min(keep_num, rpn_post_nms_top_n);
    DLOG(ERROR) << "========== copy to top";
    cudaMemcpy(&gpu_keep_indices_[gpu_keep_indices_begin], &keep_indices[0], sizeof(int) * keep_num, cudaMemcpyHostToDevice);
    // Release per-image scratch memory. bbox_score_ stays alive until the
    // output copy below and is freed at the end of this function.
    CUDA_CHECK(cudaFree(sorted_scores));
    CUDA_CHECK(cudaFree(indices));
    CUDA_CHECK(cudaFree(sorted_indices));
    CUDA_CHECK(cudaFree(sort_temp_storage_));
    CUDA_CHECK(cudaFree(cumsum_temp_storage_));
    CUDA_CHECK(cudaFree(selected_indices_));
    CUDA_CHECK(cudaFree(tmp_transform_bbox));
    batch_keep_num.push_back(keep_num);
    batch_bbox_score_.push_back(bbox_score_);
  }
  // Size the outputs from the per-image keep counts.
  int total_boxes = 0;
  for (size_t batch_index = 0; batch_index < batch_keep_num.size(); batch_index++) {
    total_boxes += batch_keep_num[batch_index];
  }
  top[0]->Reshape(total_boxes, 5, 1, 1);
  Dtype *top_data = top[0]->mutable_gpu_data();
  Dtype *top_info = NULL;
  if (top.size() > 1) {
    top[1]->Reshape(total_boxes, 3, 1, 1);
    top_info = top[1]->mutable_gpu_data();
  }
  // Copy every image's surviving boxes into the (now correctly sized) tops.
  int box_begin = 0;
  for (size_t batch_index = 0; batch_index < batch_keep_num.size(); batch_index++) {
    const int keep_num = batch_keep_num[batch_index];
    SelectBoxAftNMS << <caffe::CAFFE_GET_BLOCKS(keep_num), caffe::CAFFE_CUDA_NUM_THREADS >> >(
        batch_index, box_begin, keep_num,
        &transform_bbox_[rpn_pre_nms_top_n * batch_index * 7],
        &gpu_keep_indices_[rpn_post_nms_top_n * batch_index],
        top_data, batch_bbox_score_[batch_index], top_info);
    box_begin += keep_num;
  }
  DLOG(ERROR) << "========== exit proposal layer";
  // Free the per-image score buffers kept alive for the output copy.
  for (size_t batch_index = 0; batch_index < batch_keep_num.size(); batch_index++) {
    if (batch_bbox_score_[batch_index] != NULL)
      CUDA_CHECK(cudaFree(batch_bbox_score_[batch_index]));
  }
}
template <typename Dtype>
void FrcnnProposalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
    const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) {
  // The proposal layer has no learnable parameters; fail loudly if any bottom
  // requests a gradient.
  for (size_t i = 0; i < propagate_down.size(); ++i) {
    if (propagate_down[i]) { NOT_IMPLEMENTED; }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(FrcnnProposalLayer);
} // namespace frcnn
} // namespace caffe
|
5cffb771e69de06a36e49f229c78d74de7f2d2b0.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/device/cuda_util.h"
namespace oneflow {
// GPU specialization: enqueues an asynchronous copy on the stream owned by
// ctx. A self-copy (dst == src) is a no-op.
template<>
void Memcpy<DeviceType::kGPU>(DeviceCtx* ctx, void* dst, const void* src, size_t sz) {
  if (dst != src) {
    OF_CUDA_CHECK(hipMemcpyAsync(dst, src, sz, hipMemcpyDefault, ctx->cuda_stream()));
  }
}
// GPU specialization: asynchronously fills sz bytes at dst with the byte
// `value` on the stream owned by ctx (byte-wise fill, like hipMemset).
template<>
void Memset<DeviceType::kGPU>(DeviceCtx* ctx, void* dst, const char value, size_t sz) {
  OF_CUDA_CHECK(hipMemsetAsync(dst, value, sz, ctx->cuda_stream()));
}
} // namespace oneflow
| 5cffb771e69de06a36e49f229c78d74de7f2d2b0.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/device/cuda_util.h"
namespace oneflow {
template<>
void Memcpy<DeviceType::kGPU>(DeviceCtx* ctx, void* dst, const void* src, size_t sz) {
  // GPU specialization of Memcpy: copies `sz` bytes asynchronously on the
  // stream owned by `ctx`.  Early-out when the ranges alias exactly — the
  // copy would be a no-op and overlapping async copies are not allowed.
  if (dst == src) { return; }
  // cudaMemcpyDefault lets the runtime infer the transfer direction from the
  // pointer attributes; OF_CUDA_CHECK aborts on a non-success return code.
  OF_CUDA_CHECK(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDefault, ctx->cuda_stream()));
}
template<>
void Memset<DeviceType::kGPU>(DeviceCtx* ctx, void* dst, const char value, size_t sz) {
  // GPU specialization of Memset: byte-wise fill of `sz` bytes with `value`,
  // asynchronous on the ctx stream.  NOTE: the fill is per byte, so only
  // repeated-byte patterns (e.g. zero) are meaningful for multi-byte types.
  OF_CUDA_CHECK(cudaMemsetAsync(dst, value, sz, ctx->cuda_stream()));
}
} // namespace oneflow
|
db0872781c7f5a3c4d60ab07668a08f1b1cc9833.hip | // !!! This is a file automatically generated by hipify!!!
//
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixPathTracer.h"
#include "random.h"
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
#include <hip/hip_runtime.h>
//#include <stdio.h>
#define EPS 1.19209290E-07F
#define TWO_PI 6.28318530717959f //2*pi
/*const unsigned int WIDTH = 600;
const unsigned int HEIGHT = 600;
const unsigned int DEPTH = 1100;*/
const unsigned int WIDTH = 400;
const unsigned int HEIGHT = 400;
const unsigned int DEPTH = 500;
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
struct RadiancePRD
{
    // Per-ray payload; a pointer to it is packed into payload registers 0/1
    // (see packPointer/getPRD) so hit/miss programs can read and update it.
    // TODO: move some state directly into payload registers?
    float3 emitted;          // surface emission picked up this segment
    float3 radiance;         // direct-light / background radiance this segment
    float3 attenuation;      // running path throughput
    float3 origin;           // next segment origin (written by closest-hit)
    float3 direction;        // next segment direction (written by closest-hit)
    unsigned int seed;       // RNG state used for light sampling
    int countEmitted;        // count emission only on the first segment
    int done;                // path terminated (miss or out of bounds)
    float slen;              // sampled scattering length (dimensionless)
    float dist_so_far;       // optical distance accumulated toward slen
    unsigned int mc_seed[4]; // xorshift128+ state split into 32-bit words
    float weight;            // photon packet weight for absorption deposits
    int print;               // not referenced in this file -- TODO confirm use
    int depth;               // not referenced here; raygen tracks depth locally
    //int pad;
};
struct Onb
{
    // Orthonormal basis (tangent, binormal, normal) around a given normal;
    // inverse_transform maps a vector from this local frame into world space.
    __forceinline__ __device__ Onb(const float3& normal)
    {
        m_normal = normal;
        // Branch on the larger of |x| and |z| so the constructed binormal can
        // never be parallel to the normal (avoids a degenerate cross product).
        if (fabs(m_normal.x) > fabs(m_normal.z))
        {
            m_binormal.x = -m_normal.y;
            m_binormal.y = m_normal.x;
            m_binormal.z = 0;
        }
        else
        {
            m_binormal.x = 0;
            m_binormal.y = -m_normal.z;
            m_binormal.z = m_normal.y;
        }
        m_binormal = normalize(m_binormal);
        m_tangent = cross(m_binormal, m_normal);
    }
    // Express local-frame vector p (tangent/binormal/normal coords) in world space.
    __forceinline__ __device__ void inverse_transform(float3& p) const
    {
        p = p.x * m_tangent + p.y * m_binormal + p.z * m_normal;
    }
    float3 m_tangent;
    float3 m_binormal;
    float3 m_normal;
};
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
static __forceinline__ __device__ float xorshift128p_nextf(unsigned long t[2]) {
    // Advance an xorshift128+ RNG state `t` in place and map the 64-bit
    // result to a float in [0, 1).  The union lets the top random bits be
    // reinterpreted as a float mantissa without extra arithmetic.
    // NOTE(review): relies on `unsigned long` being 64 bits — true on LP64
    // Linux, not on LLP64 Windows; confirm target platforms.
    union {
        unsigned long i;
        float f[2];
        unsigned int u[2];
    } s1;
    const unsigned long s0 = t[1];
    s1.i = t[0];
    t[0] = s0;
    s1.i ^= s1.i << 23; // a
    t[1] = s1.i ^ s0 ^ (s1.i >> 18) ^ (s0 >> 5); // b, c
    s1.i = t[1] + s0;
    // Force the exponent to the [1, 2) range, keep 23 random mantissa bits,
    // then subtract 1.0f to land in [0, 1).
    s1.u[0] = 0x3F800000U | (s1.u[0] >> 9);
    return s1.f[0] - 1.0f;
}
static __forceinline__ __device__ float mc_next_scatter(float g, unsigned long ran[2], float3* dir) {
    // Sample the next scattering event of a photon packet: draw an
    // exponentially distributed (dimensionless) scattering length and rotate
    // *dir in place by a Henyey-Greenstein polar angle with anisotropy `g`
    // (uniform cos(theta) when g is ~0) and a uniform azimuth.
    // Returns the sampled length; the caller rescales it by 1/mu_s.
    float nextslen;
    float sphi, cphi, tmp0, theta, stheta, ctheta, tmp1;
    float3 p;
    //random scattering length (normalized); EPS keeps log() finite
    nextslen = -log(xorshift128p_nextf(ran) + EPS);
    tmp0 = TWO_PI * xorshift128p_nextf(ran); //next azimuth angle
    sphi = sin(tmp0);
    cphi = cos(tmp0);
    if (g > EPS) { //if g is too small, the distribution of theta is bad
        // Henyey-Greenstein inverse-CDF sample of cos(theta), clamped for safety.
        tmp0 = (1.f - g * g) / (1.f - g + 2.f * g * xorshift128p_nextf(ran));
        tmp0 *= tmp0;
        tmp0 = (1.f + g * g - tmp0) / (2.f * g);
        tmp0 = clamp(tmp0, -1.f, 1.f);
        theta = acos(tmp0);
        stheta = sqrt(1.f - tmp0 * tmp0);
        //stheta=MCX_MATHFUN(sin)(theta);
        ctheta = tmp0;
    }
    else {
        // Near-isotropic: cos(theta) uniform in [-1, 1].
        theta = acos(2.f * xorshift128p_nextf(ran) - 1.f);
        stheta = sin(theta);
        ctheta = cos(theta);
    }
    // Rotate the local scattering frame onto the current direction.
    if (dir->z > -1.f + EPS && dir->z < 1.f - EPS) {
        tmp0 = 1.f - dir->z * dir->z; //reuse tmp to minimize registers
        tmp1 = 1 / sqrt(tmp0);
        tmp1 = stheta * tmp1;
        p.x = tmp1 * (dir->x * dir->z * cphi - dir->y * sphi) + dir->x * ctheta;
        p.y = tmp1 * (dir->y * dir->z * cphi + dir->x * sphi) + dir->y * ctheta;
        p.z = -tmp1 * tmp0 * cphi + dir->z * ctheta;
    }
    else {
        // Degenerate case: direction (anti)parallel to the z axis.
        p.x = stheta * cphi;
        p.y = stheta * sphi;
        p.z = (dir->z > 0.f) ? ctheta : -ctheta;
    }
    dir->x = p.x;
    dir->y = p.y;
    dir->z = p.z;
    return nextslen;
}
static __forceinline__ __device__ void* unpackPointer(unsigned int i0, unsigned int i1)
{
    // Reassemble a 64-bit pointer from the two 32-bit OptiX payload registers
    // (i0 holds the high word, i1 the low word).
    const unsigned long long raw =
        (static_cast<unsigned long long>(i0) << 32) | static_cast<unsigned long long>(i1);
    return reinterpret_cast<void*>(raw);
}
static __forceinline__ __device__ void packPointer(void* ptr, unsigned int& i0, unsigned int& i1)
{
    // Split a 64-bit pointer across two 32-bit OptiX payload registers
    // (high word into i0, low word into i1); inverse of unpackPointer.
    const unsigned long long raw = reinterpret_cast<unsigned long long>(ptr);
    i0 = static_cast<unsigned int>(raw >> 32);
    i1 = static_cast<unsigned int>(raw & 0xffffffffull);
}
static __forceinline__ __device__ RadiancePRD* getPRD()
{
    // Recover the per-ray payload pointer that traceRadiance() packed into
    // payload registers 0/1 before calling optixTrace().
    const unsigned int u0 = optixGetPayload_0();
    const unsigned int u1 = optixGetPayload_1();
    return reinterpret_cast<RadiancePRD*>(unpackPointer(u0, u1));
}
static __forceinline__ __device__ void setPayloadOcclusion(bool occluded)
{
    // Occlusion rays carry a single boolean flag in payload register 0.
    optixSetPayload_0(static_cast<unsigned int>(occluded));
}
static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p)
{
    // Cosine-weighted hemisphere sample from two uniform variates:
    // sample the unit disk, then project up onto the hemisphere.
    const float radius = sqrtf(u1);
    const float azimuth = 2.0f * M_PIf * u2;
    p.x = radius * cosf(azimuth);
    p.y = radius * sinf(azimuth);
    // z follows from x^2 + y^2 + z^2 = 1; fmaxf guards tiny negatives.
    p.z = sqrtf(fmaxf(0.0f, 1.0f - p.x * p.x - p.y * p.y));
}
static __forceinline__ __device__ void traceRadiance(
    OptixTraversableHandle handle,
    float3 ray_origin,
    float3 ray_direction,
    float tmin,
    float tmax,
    RadiancePRD* prd
)
{
    // Trace a radiance ray; the PRD pointer is split across payload
    // registers 0/1 so the hit/miss programs can recover it via getPRD().
    // TODO: deduce stride from num ray-types passed in params
    unsigned int u0, u1;
    packPointer(prd, u0, u1);
    optixTrace(
        handle,
        ray_origin,
        ray_direction,
        tmin,
        tmax,
        0.0f, // rayTime
        OptixVisibilityMask(1),
        OPTIX_RAY_FLAG_NONE,
        RAY_TYPE_RADIANCE, // SBT offset
        RAY_TYPE_COUNT, // SBT stride
        RAY_TYPE_RADIANCE, // missSBTIndex
        u0, u1);
}
static __forceinline__ __device__ bool traceOcclusion(
    OptixTraversableHandle handle,
    float3 ray_origin,
    float3 ray_direction,
    float tmin,
    float tmax
)
{
    // Shadow ray: returns true when any geometry lies in (tmin, tmax).
    // TERMINATE_ON_FIRST_HIT stops traversal at the first intersection;
    // __closesthit__occlusion sets payload register 0 on a hit.
    unsigned int occluded = 0u;
    optixTrace(
        handle,
        ray_origin,
        ray_direction,
        tmin,
        tmax,
        0.0f, // rayTime
        OptixVisibilityMask(1),
        OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
        RAY_TYPE_OCCLUSION, // SBT offset
        RAY_TYPE_COUNT, // SBT stride
        RAY_TYPE_OCCLUSION, // missSBTIndex
        occluded);
    return occluded;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__rg()
{
    // Ray-generation program: one launch index per output pixel.  Traces a
    // path of up to 6 segments (depth 0..5), accumulating per-segment
    // emitted and direct-light contributions, then blends the result into
    // params.accum_buffer as a running average over subframes.
    const int w = params.width;
    const int h = params.height;
    const float3 eye = params.eye;
    const float3 U = params.U;
    const float3 V = params.V;
    const float3 W = params.W;
    const uint3 idx = optixGetLaunchIndex();
    const int subframe_index = params.subframe_index;
    // Per-pixel RNG streams: `seed` drives camera jitter and light sampling;
    // seed..seed3 provide the 128 bits of xorshift state used for scattering.
    unsigned int seed = tea<4>(idx.y * w + idx.x, subframe_index);
    unsigned int seed1 = tea<4>((idx.y * w + idx.x) + 1, subframe_index);
    unsigned int seed2 = tea<4>((idx.y * w + idx.x) + 2, subframe_index);
    unsigned int seed3 = tea<4>((idx.y * w + idx.x) + 3, subframe_index);
    float3 result = make_float3(0.0f);
    int i = params.samples_per_launch;  // retained for the disabled multi-sample loop below
    //do
    {
        // The center of each pixel is at fraction (0.5,0.5)
        const float2 subpixel_jitter = make_float2(rnd(seed), rnd(seed));
        const float2 d = 2.0f * make_float2(
            (static_cast<float>(idx.x) + subpixel_jitter.x) / static_cast<float>(w),
            (static_cast<float>(idx.y) + subpixel_jitter.y) / static_cast<float>(h)
        ) - 1.0f;
        // Debug override: a fixed direction instead of the per-pixel ray `d`.
        float3 ray_direction = normalize(-0.3 * U + 0.5 * V + W); //normalize(d.x * U + d.y * V + W);
        float3 ray_origin = eye;
        RadiancePRD prd;
        prd.emitted = make_float3(0.f);
        prd.radiance = make_float3(0.f);
        prd.attenuation = make_float3(1.f);
        prd.countEmitted = true;
        prd.done = false;
        prd.mc_seed[0] = seed;
        prd.mc_seed[1] = seed1;
        prd.mc_seed[2] = seed2;
        prd.mc_seed[3] = seed3;
        prd.slen = rnd(seed) * 10;
        // FIX: prd.seed was previously left uninitialized, but
        // __closesthit__radiance reads prd->seed for its light sampling.
        prd.seed = seed;
        prd.dist_so_far = 0.0f;
        prd.weight = 5.0f;
        prd.origin = ray_origin;
        prd.direction = ray_direction;
        int depth = 0;
        for (;; )
        {
            printf("origin: %f, %f, %f; direction: %f, %f, %f \n", ray_origin.x, ray_origin.y, ray_origin.z,
                ray_direction.x, ray_direction.y, ray_direction.z);
            traceRadiance(
                params.handle,
                ray_origin,
                ray_direction,
                0.01f, // tmin // TODO: smarter offset
                1e16f, // tmax
                &prd);
            result += prd.emitted;
            result += prd.radiance * prd.attenuation;
            if (prd.done || depth >= 5) // TODO RR, variable for depth
                break;
            // Ray origin and direction are updated from the trace
            ray_origin = prd.origin;
            ray_direction = prd.direction;
            ++depth;
        }
    }
    //while (--i);
    const uint3 launch_index = optixGetLaunchIndex();
    const unsigned int image_index = launch_index.y * params.width + launch_index.x;
    float3 accum_color = result / static_cast<float>(params.samples_per_launch);
    // Progressive refinement: running average across subframes.
    if (subframe_index > 0)
    {
        const float a = 1.0f / static_cast<float>(subframe_index + 1);
        const float3 accum_color_prev = make_float3(params.accum_buffer[image_index]);
        accum_color = lerp(accum_color_prev, accum_color, a);
    }
    params.accum_buffer[image_index] = make_float4(accum_color, 1.0f);
    params.frame_buffer[image_index] = make_color(accum_color);
}
extern "C" __global__ void __miss__radiance()
{
    // Ray escaped the scene: report the background color and end the path.
    MissData* rt_data = reinterpret_cast<MissData*>(optixGetSbtDataPointer());
    RadiancePRD* prd = getPRD();
    prd->radiance = make_float3(rt_data->bg_color);
    prd->done = true;
}
extern "C" __global__ void __closesthit__occlusion()
{
    // Any hit along a shadow ray marks the light sample as occluded.
    setPayloadOcclusion(true);
}
extern "C" __global__ void __closesthit__radiance()
{
    // Closest-hit for radiance rays.  Instead of classic surface shading,
    // this advances a volumetric photon walk: the ray either jumps to its
    // sampled scatter point (drawing a new direction from the phase
    // function) or continues to the hit surface, depositing absorbed weight
    // into a voxel grid (params.atten_buffer) along the traversed segment.
    // An area-light term is then added to prd->radiance.
    HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer();
    const int prim_idx = optixGetPrimitiveIndex();
    const float3 ray_dir = optixGetWorldRayDirection();
    // Fetch the hit triangle (3 vertices per primitive) and its geometric normal.
    const int vert_idx_offset = prim_idx * 3;
    const float3 v0 = make_float3(rt_data->vertices[vert_idx_offset + 0]);
    const float3 v1 = make_float3(rt_data->vertices[vert_idx_offset + 1]);
    const float3 v2 = make_float3(rt_data->vertices[vert_idx_offset + 2]);
    const float3 N_0 = normalize(cross(v1 - v0, v2 - v0));
    const float3 N = faceforward(N_0, -ray_dir, N_0); // normal facing the incoming ray
    const float dist_travelled = optixGetRayTmax();   // ray-parameter distance to the hit
    const float3 inters_point = optixGetWorldRayOrigin() + dist_travelled * ray_dir;
    RadiancePRD* prd = getPRD();
    printf("hitPoint: %f, %f, %f \n", inters_point.x, inters_point.y, inters_point.z);
    // Smaller scene
    // x:100-500 ; y:0-400 ; z: 300-800
    // Terminate paths that leave the simulation bounding box.
    if ( (inters_point.x > 500 || inters_point.x < 100) || (inters_point.y > 400 || inters_point.y < 0) || (inters_point.z > 800 || inters_point.z < 300) )
    {
        prd->radiance = make_float3(0.0, 0.0, 0.0);
        printf("out of bounds \n");
        prd->done = true;
        return;
    }
    // Surface emission is only counted on the first (camera) segment.
    if (prd->countEmitted)
        prd->emitted = rt_data->emission_color;
    else
        prd->emitted = make_float3(0.0f);
    // NOTE(review): prd->seed is read here, but __raygen__rg never writes it,
    // so on the first hit this RNG starts from an uninitialized value.
    unsigned int seed = prd->seed;
    float3 prev_origin = prd->origin;
    float3 prev_dir = prd->direction;
    // CHECK g and medium ID
    //printf("g: %f , ID: %f \n", rt_data->g, rt_data->medium_id);
    // Choose the medium on the side of the surface the ray arrived from.
    int medium_id = rt_data->medium_id_down;
    if(dot(-ray_dir, N_0) < 0)
        medium_id = rt_data->medium_id_up;
    // Update ray origin and direction
    // Ray has travelled past its scattering length
    // Repack the four 32-bit seed words into the xorshift128+ 64-bit state.
    unsigned long rand[2];
    rand[0] = (unsigned long)prd->mc_seed[0] << 32 | prd->mc_seed[1];
    rand[1] = (unsigned long)prd->mc_seed[2] << 32 | prd->mc_seed[3];
    if (prd->dist_so_far >= (prd->slen / params.mu_s[medium_id]))
    {
        // Scatter event: jump to the sampled scatter point and draw a new
        // direction and scattering length from the phase function.
        prd->origin = prd->origin + (prd->slen * prd->direction);
        prd->dist_so_far = 0;
        const float z1 = rnd(seed); // NOTE(review): z1/z2 drawn but unused here;
        const float z2 = rnd(seed); // only their side effect on `seed` remains.
        prd->slen = mc_next_scatter(params.g[medium_id], rand, &prd->direction);
    }
    // Ray has not reached scatter length
    else
    {
        prd->dist_so_far += dist_travelled * params.mu_s[medium_id]; // multiply by mu_s
        prd->origin = inters_point;
        //float slen = mc_next_scatter(params.g[medium_id], rand, &prd->direction);
    }
    //printf("changed origin: %f, %f, %f ; direction: %f, %f, %f \n", prd->origin.x, prd->origin.y, prd->origin.z,
    //    prd->direction.x, prd->direction.y, prd->direction.z);
    // Compute the ray attenuation
    //float distance2 = (prev_origin.x - prd->origin.x) * (prev_origin.x - prd->origin.x) + (prev_origin.y - prd->origin.y) * (prev_origin.y - prd->origin.y) + (prev_origin.z - prd->origin.z) * (prev_origin.z - prd->origin.z);
    //float distance = sqrt(distance2);
    // Segment length via component-wise division by the direction, averaged
    // over the three axes.  NOTE(review): any near-zero direction component
    // makes t3 inf/NaN — confirm this is never hit in practice.
    float3 t3 = (prd->origin - prev_origin) / (prev_dir);
    float distance = (t3.x + t3.y + t3.z) / 3;
    uint3 prev_index;
    int change_color = 1;
    // March the segment in 10-unit steps, depositing absorbed weight into
    // the voxel covering the current sample point.
    for (float i = 0; i < distance; i += 10)
    {
        float3 curr_location = prev_origin + i * prev_dir;
        printf("i: %f, curr_location: %f, %f, %f; weight: %f \n", i, curr_location.x, curr_location.y, curr_location.z, prd->weight);
        // Voxel index relative to the box corner (100, 0, 300).
        uint3 index = make_uint3(curr_location.x-100, curr_location.y, curr_location.z-300);
        // Skip duplicate deposits when consecutive samples land in the same
        // voxel (prev_index is uninitialized on the first pass; the i > 0
        // guard keeps that read from mattering).
        if (i > 0 && prev_index == index)
            continue;
        prev_index = index;
        // NOTE(review): components are unsigned, so negative coordinates wrap
        // to huge values; this check only reports, it does not skip them.
        if (index.x > WIDTH || index.y > HEIGHT || index.z > DEPTH)
        {
            //printf("inters: %f, origin: %f, slen: %f \n", inters_point, prd->origin, prd->slen);
            printf("X: %d, Y: %d, Z: %d \n", index.x, index.y, index.z);
        }
        //printf("mu_a: %f\n", params.mu_a[medium_id]);
        // Fraction of the packet absorbed in one step (Beer-Lambert style).
        float weight_change = prd->weight * (1 - exp(-params.mu_a[medium_id]));
        params.atten_buffer[index.x + ((index.y + (index.z * HEIGHT)) * WIDTH)] += weight_change;
        //printf("weight: %f; index: %d, %d, %d; bufferVal: %f\n", prd->weight, index.x, index.y, index.z, params.atten_buffer[index.x + ((index.y + (index.z * HEIGHT)) * WIDTH)]);
        //printf("weight: %f \n", prd->weight);
        prd->weight -= weight_change;
    }
    printf("======= Final weight value ========== : %f \n", prd->weight);
    {
        {
            prd->attenuation *= rt_data->diffuse_color;
        }
        prd->countEmitted = false;
    }
    // Sample a point on the parallelogram area light.
    const float z1 = rnd(seed);
    const float z2 = rnd(seed);
    prd->seed = seed;
    ParallelogramLight light = params.light;
    const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2;
    // Calculate properties of light sample (for area based pdf)
    const float Ldist = length(light_pos - inters_point);
    const float3 L = normalize(light_pos - inters_point);
    const float nDl = dot(N, L);
    const float LnDl = -dot(light.normal, L);
    float weight = 0.0f;
    if (nDl > 0.0f && LnDl > 0.0f)
    {
        {
            const float A = length(cross(light.v1, light.v2));
            // Area-to-solid-angle conversion.  NOTE(review): no shadow ray is
            // traced (traceOcclusion is unused), so occlusion is ignored here.
            weight = 3 * nDl * LnDl * A / (M_PIf * Ldist * Ldist);
        }
    }
    prd->radiance += light.emission * weight;
    /*traceRadiance(
        params.handle,
        prd->origin,
        prd->direction,
        0.01f, // tmin // TODO: smarter offset
        1e16f, // tmax
        prd);*/
}
| db0872781c7f5a3c4d60ab07668a08f1b1cc9833.cu | //
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixPathTracer.h"
#include "random.h"
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
#include <cuda_runtime.h>
//#include <stdio.h>
#define EPS 1.19209290E-07F
#define TWO_PI 6.28318530717959f //2*pi
/*const unsigned int WIDTH = 600;
const unsigned int HEIGHT = 600;
const unsigned int DEPTH = 1100;*/
const unsigned int WIDTH = 400;
const unsigned int HEIGHT = 400;
const unsigned int DEPTH = 500;
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
struct RadiancePRD
{
// TODO: move some state directly into payload registers?
float3 emitted;
float3 radiance;
float3 attenuation;
float3 origin;
float3 direction;
unsigned int seed;
int countEmitted;
int done;
float slen;
float dist_so_far;
unsigned int mc_seed[4];
float weight;
int print;
int depth;
//int pad;
};
struct Onb
{
__forceinline__ __device__ Onb(const float3& normal)
{
m_normal = normal;
if (fabs(m_normal.x) > fabs(m_normal.z))
{
m_binormal.x = -m_normal.y;
m_binormal.y = m_normal.x;
m_binormal.z = 0;
}
else
{
m_binormal.x = 0;
m_binormal.y = -m_normal.z;
m_binormal.z = m_normal.y;
}
m_binormal = normalize(m_binormal);
m_tangent = cross(m_binormal, m_normal);
}
__forceinline__ __device__ void inverse_transform(float3& p) const
{
p = p.x * m_tangent + p.y * m_binormal + p.z * m_normal;
}
float3 m_tangent;
float3 m_binormal;
float3 m_normal;
};
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
static __forceinline__ __device__ float xorshift128p_nextf(unsigned long t[2]) {
    // Advance an xorshift128+ RNG state `t` in place and map the 64-bit
    // result to a float in [0, 1).  The union lets the top random bits be
    // reinterpreted as a float mantissa without extra arithmetic.
    // NOTE(review): relies on `unsigned long` being 64 bits — true on LP64
    // Linux, not on LLP64 Windows; confirm target platforms.
    union {
        unsigned long i;
        float f[2];
        unsigned int u[2];
    } s1;
    const unsigned long s0 = t[1];
    s1.i = t[0];
    t[0] = s0;
    s1.i ^= s1.i << 23; // a
    t[1] = s1.i ^ s0 ^ (s1.i >> 18) ^ (s0 >> 5); // b, c
    s1.i = t[1] + s0;
    // Force the exponent to the [1, 2) range, keep 23 random mantissa bits,
    // then subtract 1.0f to land in [0, 1).
    s1.u[0] = 0x3F800000U | (s1.u[0] >> 9);
    return s1.f[0] - 1.0f;
}
static __forceinline__ __device__ float mc_next_scatter(float g, unsigned long ran[2], float3* dir) {
float nextslen;
float sphi, cphi, tmp0, theta, stheta, ctheta, tmp1;
float3 p;
//random scattering length (normalized)
nextslen = -log(xorshift128p_nextf(ran) + EPS);
tmp0 = TWO_PI * xorshift128p_nextf(ran); //next arimuth angle
sphi = sin(tmp0);
cphi = cos(tmp0);
if (g > EPS) { //if g is too small, the distribution of theta is bad
tmp0 = (1.f - g * g) / (1.f - g + 2.f * g * xorshift128p_nextf(ran));
tmp0 *= tmp0;
tmp0 = (1.f + g * g - tmp0) / (2.f * g);
tmp0 = clamp(tmp0, -1.f, 1.f);
theta = acos(tmp0);
stheta = sqrt(1.f - tmp0 * tmp0);
//stheta=MCX_MATHFUN(sin)(theta);
ctheta = tmp0;
}
else {
theta = acos(2.f * xorshift128p_nextf(ran) - 1.f);
stheta = sin(theta);
ctheta = cos(theta);
}
if (dir->z > -1.f + EPS && dir->z < 1.f - EPS) {
tmp0 = 1.f - dir->z * dir->z; //reuse tmp to minimize registers
tmp1 = 1 / sqrt(tmp0);
tmp1 = stheta * tmp1;
p.x = tmp1 * (dir->x * dir->z * cphi - dir->y * sphi) + dir->x * ctheta;
p.y = tmp1 * (dir->y * dir->z * cphi + dir->x * sphi) + dir->y * ctheta;
p.z = -tmp1 * tmp0 * cphi + dir->z * ctheta;
}
else {
p.x = stheta * cphi;
p.y = stheta * sphi;
p.z = (dir->z > 0.f) ? ctheta : -ctheta;
}
dir->x = p.x;
dir->y = p.y;
dir->z = p.z;
return nextslen;
}
static __forceinline__ __device__ void* unpackPointer(unsigned int i0, unsigned int i1)
{
    // Reassemble a 64-bit pointer from the two 32-bit OptiX payload registers
    // (i0 holds the high word, i1 the low word).
    const unsigned long long raw =
        (static_cast<unsigned long long>(i0) << 32) | static_cast<unsigned long long>(i1);
    return reinterpret_cast<void*>(raw);
}
static __forceinline__ __device__ void packPointer(void* ptr, unsigned int& i0, unsigned int& i1)
{
    // Split a 64-bit pointer across two 32-bit OptiX payload registers
    // (high word into i0, low word into i1); inverse of unpackPointer.
    const unsigned long long raw = reinterpret_cast<unsigned long long>(ptr);
    i0 = static_cast<unsigned int>(raw >> 32);
    i1 = static_cast<unsigned int>(raw & 0xffffffffull);
}
static __forceinline__ __device__ RadiancePRD* getPRD()
{
    // Recover the per-ray payload pointer that traceRadiance() packed into
    // payload registers 0/1 before calling optixTrace().
    const unsigned int u0 = optixGetPayload_0();
    const unsigned int u1 = optixGetPayload_1();
    return reinterpret_cast<RadiancePRD*>(unpackPointer(u0, u1));
}
static __forceinline__ __device__ void setPayloadOcclusion(bool occluded)
{
    // Occlusion rays carry a single boolean flag in payload register 0.
    optixSetPayload_0(static_cast<unsigned int>(occluded));
}
static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p)
{
    // Cosine-weighted hemisphere sample from two uniform variates:
    // sample the unit disk, then project up onto the hemisphere.
    const float radius = sqrtf(u1);
    const float azimuth = 2.0f * M_PIf * u2;
    p.x = radius * cosf(azimuth);
    p.y = radius * sinf(azimuth);
    // z follows from x^2 + y^2 + z^2 = 1; fmaxf guards tiny negatives.
    p.z = sqrtf(fmaxf(0.0f, 1.0f - p.x * p.x - p.y * p.y));
}
static __forceinline__ __device__ void traceRadiance(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax,
RadiancePRD* prd
)
{
// TODO: deduce stride from num ray-types passed in params
unsigned int u0, u1;
packPointer(prd, u0, u1);
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_RADIANCE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
RAY_TYPE_RADIANCE, // missSBTIndex
u0, u1);
}
static __forceinline__ __device__ bool traceOcclusion(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax
)
{
unsigned int occluded = 0u;
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
RAY_TYPE_OCCLUSION, // SBT offset
RAY_TYPE_COUNT, // SBT stride
RAY_TYPE_OCCLUSION, // missSBTIndex
occluded);
return occluded;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__rg()
{
const int w = params.width;
const int h = params.height;
const float3 eye = params.eye;
const float3 U = params.U;
const float3 V = params.V;
const float3 W = params.W;
const uint3 idx = optixGetLaunchIndex();
const int subframe_index = params.subframe_index;
unsigned int seed = tea<4>(idx.y * w + idx.x, subframe_index);
unsigned int seed1 = tea<4>((idx.y * w + idx.x) + 1, subframe_index);
unsigned int seed2 = tea<4>((idx.y * w + idx.x) + 2, subframe_index);
unsigned int seed3 = tea<4>((idx.y * w + idx.x) + 3, subframe_index);
float3 result = make_float3(0.0f);
int i = params.samples_per_launch;
//do
{
// The center of each pixel is at fraction (0.5,0.5)
const float2 subpixel_jitter = make_float2(rnd(seed), rnd(seed));
const float2 d = 2.0f * make_float2(
(static_cast<float>(idx.x) + subpixel_jitter.x) / static_cast<float>(w),
(static_cast<float>(idx.y) + subpixel_jitter.y) / static_cast<float>(h)
) - 1.0f;
float3 ray_direction = normalize(-0.3 * U + 0.5 * V + W); //normalize(d.x * U + d.y * V + W);
float3 ray_origin = eye;
RadiancePRD prd;
prd.emitted = make_float3(0.f);
prd.radiance = make_float3(0.f);
prd.attenuation = make_float3(1.f);
prd.countEmitted = true;
prd.done = false;
prd.mc_seed[0] = seed;
prd.mc_seed[1] = seed1;
prd.mc_seed[2] = seed2;
prd.mc_seed[3] = seed3;
prd.slen = rnd(seed) * 10;
prd.dist_so_far = 0.0f;
prd.weight = 5.0f;
prd.origin = ray_origin;
prd.direction = ray_direction;
int depth = 0;
for (;; )
{
printf("origin: %f, %f, %f; direction: %f, %f, %f \n", ray_origin.x, ray_origin.y, ray_origin.z,
ray_direction.x, ray_direction.y, ray_direction.z);
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&prd);
result += prd.emitted;
result += prd.radiance * prd.attenuation;
if (prd.done || depth >= 5) // TODO RR, variable for depth
break;
// Ray origin and direction are updated from the trace
ray_origin = prd.origin;
ray_direction = prd.direction;
++depth;
}
}
//while (--i);
const uint3 launch_index = optixGetLaunchIndex();
const unsigned int image_index = launch_index.y * params.width + launch_index.x;
float3 accum_color = result / static_cast<float>(params.samples_per_launch);
if (subframe_index > 0)
{
const float a = 1.0f / static_cast<float>(subframe_index + 1);
const float3 accum_color_prev = make_float3(params.accum_buffer[image_index]);
accum_color = lerp(accum_color_prev, accum_color, a);
}
params.accum_buffer[image_index] = make_float4(accum_color, 1.0f);
params.frame_buffer[image_index] = make_color(accum_color);
}
extern "C" __global__ void __miss__radiance()
{
    // Ray escaped the scene: report the background color and end the path.
    MissData* rt_data = reinterpret_cast<MissData*>(optixGetSbtDataPointer());
    RadiancePRD* prd = getPRD();
    prd->radiance = make_float3(rt_data->bg_color);
    prd->done = true;
}
extern "C" __global__ void __closesthit__occlusion()
{
    // Any hit along a shadow ray marks the light sample as occluded.
    setPayloadOcclusion(true);
}
extern "C" __global__ void __closesthit__radiance()
{
HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer();
const int prim_idx = optixGetPrimitiveIndex();
const float3 ray_dir = optixGetWorldRayDirection();
const int vert_idx_offset = prim_idx * 3;
const float3 v0 = make_float3(rt_data->vertices[vert_idx_offset + 0]);
const float3 v1 = make_float3(rt_data->vertices[vert_idx_offset + 1]);
const float3 v2 = make_float3(rt_data->vertices[vert_idx_offset + 2]);
const float3 N_0 = normalize(cross(v1 - v0, v2 - v0));
const float3 N = faceforward(N_0, -ray_dir, N_0);
const float dist_travelled = optixGetRayTmax();
const float3 inters_point = optixGetWorldRayOrigin() + dist_travelled * ray_dir;
RadiancePRD* prd = getPRD();
printf("hitPoint: %f, %f, %f \n", inters_point.x, inters_point.y, inters_point.z);
// Smaller scene
// x:100-500 ; y:0-400 ; z: 300-800
if ( (inters_point.x > 500 || inters_point.x < 100) || (inters_point.y > 400 || inters_point.y < 0) || (inters_point.z > 800 || inters_point.z < 300) )
{
prd->radiance = make_float3(0.0, 0.0, 0.0);
printf("out of bounds \n");
prd->done = true;
return;
}
if (prd->countEmitted)
prd->emitted = rt_data->emission_color;
else
prd->emitted = make_float3(0.0f);
unsigned int seed = prd->seed;
float3 prev_origin = prd->origin;
float3 prev_dir = prd->direction;
// CHECK g and medium ID
//printf("g: %f , ID: %f \n", rt_data->g, rt_data->medium_id);
int medium_id = rt_data->medium_id_down;
if(dot(-ray_dir, N_0) < 0)
medium_id = rt_data->medium_id_up;
// Update ray origin and direction
// Ray has travelled past its scattering length
unsigned long rand[2];
rand[0] = (unsigned long)prd->mc_seed[0] << 32 | prd->mc_seed[1];
rand[1] = (unsigned long)prd->mc_seed[2] << 32 | prd->mc_seed[3];
if (prd->dist_so_far >= (prd->slen / params.mu_s[medium_id]))
{
prd->origin = prd->origin + (prd->slen * prd->direction);
prd->dist_so_far = 0;
const float z1 = rnd(seed);
const float z2 = rnd(seed);
prd->slen = mc_next_scatter(params.g[medium_id], rand, &prd->direction);
}
// Ray has not reached scatter length
else
{
prd->dist_so_far += dist_travelled * params.mu_s[medium_id]; // multiply by mu_s
prd->origin = inters_point;
//float slen = mc_next_scatter(params.g[medium_id], rand, &prd->direction);
}
//printf("changed origin: %f, %f, %f ; direction: %f, %f, %f \n", prd->origin.x, prd->origin.y, prd->origin.z,
// prd->direction.x, prd->direction.y, prd->direction.z);
// Compute the ray attenuation
//float distance2 = (prev_origin.x - prd->origin.x) * (prev_origin.x - prd->origin.x) + (prev_origin.y - prd->origin.y) * (prev_origin.y - prd->origin.y) + (prev_origin.z - prd->origin.z) * (prev_origin.z - prd->origin.z);
//float distance = sqrt(distance2);
float3 t3 = (prd->origin - prev_origin) / (prev_dir);
float distance = (t3.x + t3.y + t3.z) / 3;
uint3 prev_index;
int change_color = 1;
for (float i = 0; i < distance; i += 10)
{
float3 curr_location = prev_origin + i * prev_dir;
printf("i: %f, curr_location: %f, %f, %f; weight: %f \n", i, curr_location.x, curr_location.y, curr_location.z, prd->weight);
uint3 index = make_uint3(curr_location.x-100, curr_location.y, curr_location.z-300);
if (i > 0 && prev_index == index)
continue;
prev_index = index;
if (index.x > WIDTH || index.y > HEIGHT || index.z > DEPTH)
{
//printf("inters: %f, origin: %f, slen: %f \n", inters_point, prd->origin, prd->slen);
printf("X: %d, Y: %d, Z: %d \n", index.x, index.y, index.z);
}
//printf("mu_a: %f\n", params.mu_a[medium_id]);
float weight_change = prd->weight * (1 - exp(-params.mu_a[medium_id]));
params.atten_buffer[index.x + ((index.y + (index.z * HEIGHT)) * WIDTH)] += weight_change;
//printf("weight: %f; index: %d, %d, %d; bufferVal: %f\n", prd->weight, index.x, index.y, index.z, params.atten_buffer[index.x + ((index.y + (index.z * HEIGHT)) * WIDTH)]);
//printf("weight: %f \n", prd->weight);
prd->weight -= weight_change;
}
printf("======= Final weight value ========== : %f \n", prd->weight);
{
{
prd->attenuation *= rt_data->diffuse_color;
}
prd->countEmitted = false;
}
const float z1 = rnd(seed);
const float z2 = rnd(seed);
prd->seed = seed;
ParallelogramLight light = params.light;
const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2;
// Calculate properties of light sample (for area based pdf)
const float Ldist = length(light_pos - inters_point);
const float3 L = normalize(light_pos - inters_point);
const float nDl = dot(N, L);
const float LnDl = -dot(light.normal, L);
float weight = 0.0f;
if (nDl > 0.0f && LnDl > 0.0f)
{
{
const float A = length(cross(light.v1, light.v2));
weight = 3 * nDl * LnDl * A / (M_PIf * Ldist * Ldist);
}
}
prd->radiance += light.emission * weight;
/*traceRadiance(
params.handle,
prd->origin,
prd->direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
prd);*/
}
|
c5858b32956a8c2e7ac20673f0081e165ac87b06.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define EPS 1e-8
#define N 10000000
#define MAX_ERR 1e-6
//#define nb 23814 //no. of n bodies
//#define nb 1350
//#define nb 294
//#define nb 5766
//#define p 31 //no of threads in each block
#define PI 3.14159265358979323846
#define PIx8 25.132741228718345
extern "C" __device__ double smoothfun2(double x)
{
    // Regularization blob for the (f.r)r kernel term: erf(x) minus a
    // polynomial-weighted Gaussian correction.  Tends to 1 as x -> inf,
    // so the kernel recovers the unregularized form far from the source.
    const double erfPart = erf(x);
    const double poly = (4.0*x*x*x*x - 14.0*x*x + 3.0);
    const double gaussPart = -(2.0/3.0)*x*poly*exp(-1.0*x*x)*(1.0/sqrt(M_PI));
    return erfPart + gaussPart;
}
extern "C" __device__ double smoothfun1(double x)
{
    // Regularization blob for the f/r kernel term: erf(x) minus a
    // polynomial-weighted Gaussian correction.  Tends to 1 as x -> inf,
    // so the kernel recovers the unregularized form far from the source.
    const double erfPart = erf(x);
    const double poly = (2.0*x*x - 5.0);
    const double gaussPart = -(2.0/3.0)*x*poly*exp(-1.0*x*x)*(1.0/sqrt(M_PI));
    return erfPart + gaussPart;
}
extern "C" __device__ double3 bodyBodyInteractionStokes(double3 bi, double3 bj, double3 fj, double3 ai, double delta)
{
    // Accumulate into `ai` the contribution at evaluation point `bi` from a
    // regularized Stokeslet-style source at `bj` with strength `fj`, using
    // regularization width `delta` (smoothfun1/2 remove the 1/r singularity).
    // Returns the updated accumulator.
    //TODO: add delta parameter as input reg parameter
    //33 FLOP ; change this to include regularization
    //fj in shared memory too, fj has SL density,pou, area element, mesh size already in it
    double3 r;
    // Historic hard-coded regularization widths, kept for reference:
    //double delta = 0.1114403363930094; //0.20795502088527543; //0.38805779048337563;
    //delta = 0.20795502088527543; //0.38805779048337563;
    //double delta = 0.38805779048337563;
    r.x = bj.x - bi.x;
    r.y = bj.y - bi.y;
    r.z = bj.z - bi.z;
    double distSqr_zc; //zero corrected
    double distSqr = r.x *r.x + r.y*r.y + r.z*r.z;
    distSqr_zc = distSqr;
    // Self-interaction guard: substitute 1.0 so the inverse square roots
    // below stay finite; the exact r -> 0 limits are patched in afterwards.
    if(distSqr <= 0.0) distSqr_zc = 1.0;
    double dist = sqrt(distSqr);
    double distSixth = distSqr_zc*distSqr_zc*distSqr_zc;
    double invDistCube = 1.0/sqrt(distSixth);
    double invDist = 1.0/sqrt(distSqr_zc);
    double fdotr = fj.x*r.x + fj.y*r.y + fj.z*r.z;
    double s1 = invDist*smoothfun1(dist/delta); //smoothing function 1: f/r term
    double s2 = fdotr*invDistCube*smoothfun2(dist/delta); //smoothing function 2: (f.r)r/r^3 term
    if(distSqr<=0.0)
    {
        // Analytic r -> 0 limits of the regularized kernels.
        s1 = 16.0/(3*sqrt(M_PI)*delta);
        s2 = fdotr*32.0/(3*sqrt(M_PI)*delta*delta*delta);
    }
    //ai.x += fj.x*s1;
    //ai.y += fj.y*s1;
    //ai.z += fj.z*s1;
    ai.x += fj.x*s1 + r.x*s2;
    ai.y += fj.y*s1 + r.y*s2;
    ai.z += fj.z*s1 + r.z*s2;
    return ai;
}
// Tiled N-body evaluation of the regularized Stokeslet sum.
// Each thread owns one target point (position from devX, per-point
// regularization width from delta) and loops over tiles of p_y source points
// (position devY, pre-weighted SL density devF), staging each tile in shared
// memory as interleaved {position, density} double3 pairs. The accumulated
// velocity is scaled by 1/(8*pi) and scattered to devA through the
// box_points_idx permutation (1-based; entries <= 0 are dropped).
// Box decomposition: blocks are grouped blk_per_box per box; in a box's last
// block, threads past ppbox[box_num] / last_box_threads are masked out of
// loads, compute, and stores by the repeated guard predicate.
// Tiles whose globalBoxNbr flag == 1 are skipped entirely — presumably those
// near-field box pairs are handled by another code path (TODO confirm).
// NOTE: the tile-skip branch depends only on blockIdx/tile, so it is uniform
// within a block and the __syncthreads() calls inside it are safe.
extern "C" __global__ void calculate_forces_stokes(void *devX, void *devY, void *devF, void *devA, void *delta, int nb_x, int p_x, int nb_y, int p_y, void *bn, int blk_per_box, int last_box_threads, int points_per_box, void *ppb, void *bpi)
{
//extern __shared__ float3 shPosition[];
//extern __shared__ float3 shDensity[];
// dynamic shared memory: 2 * p_y double3 entries (source pos + density)
extern __shared__ double3 shPosDen[];
double3 *globalX = (double3 *)devX;
double3 *globalY = (double3 *)devY;
double3 *globalF = (double3 *)devF; //SL density
double3 *globalA = (double3 *)devA;
double *globalDelta = (double *)delta; //reg parameter
double *globalBoxNbr = (double *)bn; //box neighbors k^2, 0 or 1 values
double *ppbox = (double *)ppb; //particles per box
double *box_points_idx = (double *)bpi; //idx of each box point in original,
double3 myPosition;
double myDelta;
int i, tile;
double3 acc = {0.0, 0.0, 0.0};
int box_pid,extra_blocks,gtid,pid,box_num; //pid is particle id
//gtid = blockIdx.x * blockDim.x + threadIdx.x;
// map (blockIdx, threadIdx) -> global particle id within the box layout
box_num = blockIdx.x/blk_per_box;
pid = box_num*points_per_box;
extra_blocks = blockIdx.x+1-box_num*blk_per_box;
pid += (extra_blocks-1)*blockDim.x;
pid += threadIdx.x;
gtid = pid;
box_pid = (extra_blocks-1)*blockDim.x + threadIdx.x;
// guard: inactive padding threads of a box's last block skip the load
if(!( (box_pid+1>ppbox[box_num]) && (blockIdx.x+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
myPosition = globalX[gtid];
myDelta = globalDelta[gtid];
}
// sweep all nb_y source points in tiles of p_y
for(i=0, tile=0; i< nb_y; i+=p_y, tile++)
{
if (globalBoxNbr[(blockIdx.x/blk_per_box)*(gridDim.x/blk_per_box)+tile] != 1){
if (threadIdx.x < p_y){
int idx = tile*p_y + threadIdx.x;
//shPosition[threadIdx.x] = globalX[idx];
//shDensity[threadIdx.x] = globalF[idx];
shPosDen[2*threadIdx.x+0] = globalY[idx];
shPosDen[2*threadIdx.x+1] = globalF[idx];
}
__syncthreads();
//acc = tile_calculation(myPosition, acc);
if(!((box_pid+1>ppbox[box_num]) && (blockIdx.x+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
#pragma unroll
for(unsigned int counter = 0; counter < p_y; counter++)
{
//acc.x += 1; acc.y += 1; acc.z += 1;
acc = bodyBodyInteractionStokes(myPosition, shPosDen[2*counter+0], shPosDen[2*counter+1], acc, myDelta);
}
}
}
// keep the tile's shared data live until every thread has consumed it
__syncthreads();
}
//save result in global memory
if(!( (box_pid+1>ppbox[box_num]) && (blockIdx.x+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
// 1/(8*pi) prefactor of the Stokeslet Green's function
double3 acc3 = {1.0/(8*M_PI)*acc.x, 1.0/(8*M_PI)*acc.y, 1.0/(8*M_PI)*acc.z};
//double3 acc3 = {1.0*acc.x, 1.0*acc.y, 1.0*acc.z};
// box_points_idx is 1-based; <= 0 entries are padding and are not written
int actual_gtid = box_points_idx[gtid]-1;
if (actual_gtid >= 0) { globalA[actual_gtid] = acc3;}
//globalA[gtid] = acc3;
}
}
| c5858b32956a8c2e7ac20673f0081e165ac87b06.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define EPS 1e-8
#define N 10000000
#define MAX_ERR 1e-6
//#define nb 23814 //no. of n bodies
//#define nb 1350
//#define nb 294
//#define nb 5766
//#define p 31 //no of threads in each block
#define PI 3.14159265358979323846
#define PIx8 25.132741228718345
// Blob/smoothing function H2 for the regularized Stokeslet dipole term.
// Evaluates erf(x) - (2/3)*x*(4x^4 - 14x^2 + 3)*exp(-x^2)/sqrt(pi).
// Tends to 1 for large x (recovering the singular kernel) and to 0 at x = 0.
extern "C" __device__ double smoothfun2(double x)
{
    double erf_term = erf(x);
    double poly     = (4.0*x*x*x*x - 14.0*x*x + 3.0);
    double gaussian = -(2.0/3.0)*x*poly*exp(-1.0*x*x)*(1.0/sqrt(M_PI));
    return erf_term + gaussian;
}
// Blob/smoothing function H1 for the regularized Stokeslet monopole term.
// Evaluates erf(x) - (2/3)*x*(2x^2 - 5)*exp(-x^2)/sqrt(pi).
// Tends to 1 for large x and to a finite value at x = 0.
extern "C" __device__ double smoothfun1(double x)
{
    double erf_term = erf(x);
    double poly     = (2.0*x*x - 5.0);
    double gaussian = -(2.0/3.0)*x*poly*exp(-1.0*x*x)*(1.0/sqrt(M_PI));
    return erf_term + gaussian;
}
// Accumulate into `ai` the regularized-Stokeslet velocity contribution of one
// source point `bj` (carrying a pre-weighted single-layer density `fj`) on the
// target point `bi`, with regularization width `delta`.
//   ai += fj * s1(|r|) + r * (fj . r) * s2(|r|),   r = bj - bi
// where s1/s2 use smoothfun1/smoothfun2. Coincident points (|r|^2 <= 0) take
// the analytic r -> 0 limits of s1 and s2 so self-interaction stays finite.
// Returns the updated accumulator (passed and returned by value).
extern "C" __device__ double3 bodyBodyInteractionStokes(double3 bi, double3 bj, double3 fj, double3 ai, double delta)
{
    // displacement from target to source
    double3 sep;
    sep.x = bj.x - bi.x;
    sep.y = bj.y - bi.y;
    sep.z = bj.z - bi.z;
    double r2 = sep.x *sep.x + sep.y*sep.y + sep.z*sep.z;
    // zero-corrected squared distance: avoids dividing by zero below;
    // the bogus value is overwritten by the exact limit when r2 <= 0
    double r2_safe = r2;
    if(r2 <= 0.0) r2_safe = 1.0;
    double r1      = sqrt(r2);
    double r6_safe = r2_safe*r2_safe*r2_safe;
    double inv_r3  = 1.0/sqrt(r6_safe);
    double inv_r1  = 1.0/sqrt(r2_safe);
    double f_dot_r = fj.x*sep.x + fj.y*sep.y + fj.z*sep.z;
    double term1 = inv_r1*smoothfun1(r1/delta);          // smoothed 1/r factor
    double term2 = f_dot_r*inv_r3*smoothfun2(r1/delta);  // smoothed (f.r)/r^3 factor
    if(r2<=0.0)
    {
        // analytic limits at zero separation
        term1 = 16.0/(3*sqrt(M_PI)*delta);
        term2 = f_dot_r*32.0/(3*sqrt(M_PI)*delta*delta*delta);
    }
    ai.x += fj.x*term1 + sep.x*term2;
    ai.y += fj.y*term1 + sep.y*term2;
    ai.z += fj.z*term1 + sep.z*term2;
    return ai;
}
// Tiled N-body evaluation of the regularized Stokeslet sum.
// Each thread owns one target point (position from devX, per-point
// regularization width from delta) and loops over tiles of p_y source points
// (position devY, pre-weighted SL density devF), staging each tile in shared
// memory as interleaved {position, density} double3 pairs. The accumulated
// velocity is scaled by 1/(8*pi) and scattered to devA through the
// box_points_idx permutation (1-based; entries <= 0 are dropped).
// Box decomposition: blocks are grouped blk_per_box per box; in a box's last
// block, threads past ppbox[box_num] / last_box_threads are masked out of
// loads, compute, and stores by the repeated guard predicate.
// Tiles whose globalBoxNbr flag == 1 are skipped entirely — presumably those
// near-field box pairs are handled by another code path (TODO confirm).
// NOTE: the tile-skip branch depends only on blockIdx/tile, so it is uniform
// within a block and the __syncthreads() calls inside it are safe.
extern "C" __global__ void calculate_forces_stokes(void *devX, void *devY, void *devF, void *devA, void *delta, int nb_x, int p_x, int nb_y, int p_y, void *bn, int blk_per_box, int last_box_threads, int points_per_box, void *ppb, void *bpi)
{
//extern __shared__ float3 shPosition[];
//extern __shared__ float3 shDensity[];
// dynamic shared memory: 2 * p_y double3 entries (source pos + density)
extern __shared__ double3 shPosDen[];
double3 *globalX = (double3 *)devX;
double3 *globalY = (double3 *)devY;
double3 *globalF = (double3 *)devF; //SL density
double3 *globalA = (double3 *)devA;
double *globalDelta = (double *)delta; //reg parameter
double *globalBoxNbr = (double *)bn; //box neighbors k^2, 0 or 1 values
double *ppbox = (double *)ppb; //particles per box
double *box_points_idx = (double *)bpi; //idx of each box point in original,
double3 myPosition;
double myDelta;
int i, tile;
double3 acc = {0.0, 0.0, 0.0};
int box_pid,extra_blocks,gtid,pid,box_num; //pid is particle id
//gtid = blockIdx.x * blockDim.x + threadIdx.x;
// map (blockIdx, threadIdx) -> global particle id within the box layout
box_num = blockIdx.x/blk_per_box;
pid = box_num*points_per_box;
extra_blocks = blockIdx.x+1-box_num*blk_per_box;
pid += (extra_blocks-1)*blockDim.x;
pid += threadIdx.x;
gtid = pid;
box_pid = (extra_blocks-1)*blockDim.x + threadIdx.x;
// guard: inactive padding threads of a box's last block skip the load
if(!( (box_pid+1>ppbox[box_num]) && (blockIdx.x+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
myPosition = globalX[gtid];
myDelta = globalDelta[gtid];
}
// sweep all nb_y source points in tiles of p_y
for(i=0, tile=0; i< nb_y; i+=p_y, tile++)
{
if (globalBoxNbr[(blockIdx.x/blk_per_box)*(gridDim.x/blk_per_box)+tile] != 1){
if (threadIdx.x < p_y){
int idx = tile*p_y + threadIdx.x;
//shPosition[threadIdx.x] = globalX[idx];
//shDensity[threadIdx.x] = globalF[idx];
shPosDen[2*threadIdx.x+0] = globalY[idx];
shPosDen[2*threadIdx.x+1] = globalF[idx];
}
__syncthreads();
//acc = tile_calculation(myPosition, acc);
if(!((box_pid+1>ppbox[box_num]) && (blockIdx.x+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
#pragma unroll
for(unsigned int counter = 0; counter < p_y; counter++)
{
//acc.x += 1; acc.y += 1; acc.z += 1;
acc = bodyBodyInteractionStokes(myPosition, shPosDen[2*counter+0], shPosDen[2*counter+1], acc, myDelta);
}
}
}
// keep the tile's shared data live until every thread has consumed it
__syncthreads();
}
//save result in global memory
if(!( (box_pid+1>ppbox[box_num]) && (blockIdx.x+1)%blk_per_box==0 && (threadIdx.x+1)>last_box_threads)){
// 1/(8*pi) prefactor of the Stokeslet Green's function
double3 acc3 = {1.0/(8*M_PI)*acc.x, 1.0/(8*M_PI)*acc.y, 1.0/(8*M_PI)*acc.z};
//double3 acc3 = {1.0*acc.x, 1.0*acc.y, 1.0*acc.z};
// box_points_idx is 1-based; <= 0 entries are padding and are not written
int actual_gtid = box_points_idx[gtid]-1;
if (actual_gtid >= 0) { globalA[actual_gtid] = acc3;}
//globalA[gtid] = acc3;
}
}
|
c41a276f7d6f3ea8971159f79975ae63f7d2232e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
// Magic-number division: numerator / d, where (magic, shift) encode the
// divisor d (constants presumably precomputed on the host — TODO confirm
// encoding). The PTX vmad multiplies the LOW 16-bit halves (.h0) of both
// operands, so this is only valid while numerator and magic fit in 16 bits.
// NOTE(review): inline PTX — only compiles for the NVIDIA backend.
__device__ __forceinline__ int div16(int numerator, int magic, int shift)
{
int res;
asm("vmad.s32.u32.u32 %0, %1.h0, %2.h0, 0;" : "=r"(res) : "r"(numerator), "r"(magic));
return res >> shift;
}
// Remainder companion to div16: given div = numerator / maxdiv, computes
// numerator - div * maxdiv (== numerator % maxdiv) in one vmad with a
// negated first operand. Same 16-bit (.h0) operand restriction as div16.
__device__ __forceinline__ int mod16(int numerator, int div, int maxdiv)
{
int res;
asm("vmad.s32.u32.u32 %0, -%1.h0, %2.h0, %3;" : "=r"(res) : "r"(div), "r"(maxdiv), "r"(numerator));
return res;
}
// Fused multiply-add a*b + c via vmad on the low 16-bit halves (.h0) of
// a and b; c is a full 32-bit addend. Used for LUT offset arithmetic.
__device__ __forceinline__ int mad16(int a, int b, int c)
{
int res;
asm("vmad.s32.u32.u32 %0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(c));
return res;
}
// y = gain * x / sqrt(max(sum(x**2), epsilon))
// L2-normalize one output feature k per 32-thread block (a single warp),
// KCTRS layout: Lut[k] = {start offset, CTRS extent} of k's contiguous slab.
//   y = gain * x / sqrt(max(sum(x^2), epsilon))
// The raw sum of squares is also written to S[k] for the gradient pass.
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_KCTRS(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int2* __restrict__ Lut,
float epsilon, int apply_gain)
{
int tid = threadIdx.x;
int k = blockIdx.x;
int2 block_data = Lut[k];
float gain = 1.0f;
if (apply_gain) gain = G[k];
int offset = block_data.x + tid; // block_F + idx_k * CTRS + tid
int CTRS = block_data.y; // block_C * TRS
const TX* X1 = X + offset;
const TX* X2 = X + offset;
Y += offset;
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
for (int i = tid; i < CTRS; i += 32)
{
float x = load(X1);
X1 += 32;
sum_sqr_x += x * x;
}
// butterfly reduction across the warp (shfl_xor from ew_op_gpu.h)
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
sum_sqr_x += shfl_xor(sum_sqr_x, i);
// store reduction for gradient pass
if (tid == 0)
store(S, sum_sqr_x, k);
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
for (int i = tid; i < CTRS; i += 32)
{
float x = load(X2);
store(Y, x * rnorm);
X2 += 32;
Y += 32;
}
}
// y = gain * x / sqrt(max(sum(x**2), epsilon))
// L2-normalize one output feature k per 32-thread block, CKTRS layout:
// k's elements are strided, so the linear index ctrs is decomposed as
// c = ctrs / TRS (via magic-number div16) and trs = ctrs % TRS, giving the
// storage offset c*KTRS + trs. Same math as l2_normalize_KCTRS otherwise;
// S[k] receives the raw sum of squares for the gradient pass.
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_CKTRS(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int4* __restrict__ Lut,
float epsilon, int apply_gain, int TRS, int magic_TRS, int shift_TRS)
{
int tid = threadIdx.x;
int k = blockIdx.x;
int4 block_data = Lut[k];
float gain = 1.0f;
if (apply_gain) gain = G[k];
int idx_k = block_data.x;
int CTRS = block_data.y;
int KTRS = block_data.z;
int block_F = block_data.w;
int offset_F = block_F + idx_k * TRS;
const TX* X1 = X + offset_F;
const TX* X2 = X + offset_F;
Y += offset_F;
// y_val = sum(x**2)
float sum_sqr_x = 0.0f;
for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
{
// c = i / TRS;
// trs = i % TRS;
// offset = c * KTRS + trs
int c = div16(ctrs, magic_TRS, shift_TRS);
int trs = mod16(ctrs, c, TRS);
int offset = mad16(c, KTRS, trs);
float x = load(X1, offset);
sum_sqr_x += x * x;
}
// butterfly reduction across the warp
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
sum_sqr_x += shfl_xor(sum_sqr_x, i);
// store reduction for gradient pass
if (tid == 0)
store(S, sum_sqr_x, k);
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
{
int c = div16(ctrs, magic_TRS, shift_TRS);
int trs = mod16(ctrs, c, TRS);
int offset = mad16(c, KTRS, trs);
float x = load(X2, offset);
store(Y, x * rnorm, offset);
}
}
// y = gain * x / sqrt(max(sum(x**2), epsilon))
// L2-normalize for blocksparse CK layout with 32x32 blocks: 128 threads
// handle 32 output features (one 32-wide row group per blockIdx). The LUT
// header gives {offset, size, idx_K}; the LUT body lists the 32x32 blocks
// belonging to this row group. Shared memory is aliased: fShare[0..96) is
// the cross-warp reduction scratch, iShare[96..) caches the scaled LUT.
template <typename TY, typename TX>
__global__ void __launch_bounds__(128) l2_normalize_CK_32(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int iShare[]; // 96 + max(lut_size)
extern __shared__ float fShare[]; // 96 + max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*32 + (tid & 31);
float gain = 1.0f;
if (apply_gain) gain = G[k];
Lut += lut_offset;
// cache LUT entries, pre-scaled to element offsets (32*32 per block)
#pragma unroll 1
for (int i = tid; i < lut_size; i += 128)
iShare[i + 96] = Lut[i] * 32 * 32;
__syncthreads();
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
const TX* X1 = X + iShare[i + 96] + tid;
// 8 * 128 threads covers the 1024 elements of a 32x32 block
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load(X1, j*128);
sum_sqr_x += x * x;
}
}
// reduce sum_sqr_x across the 4 warps
if (tid >= 32)
fShare[tid-32] = sum_sqr_x;
__syncthreads();
if (tid < 32)
{
sum_sqr_x += fShare[tid] + fShare[tid + 32] + fShare[tid + 64];
fShare[tid] = sum_sqr_x;
// store reduction for gradient pass
store(S, sum_sqr_x, k);
}
__syncthreads();
// get the final reduced value for all warps:
sum_sqr_x = fShare[tid & 31];
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int block_offset = iShare[i + 96];
const TX* X2 = X + block_offset + tid;
TY* Y2 = Y + block_offset + tid;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load(X2, j*128);
store(Y2, x * rnorm, j*128);
}
}
}
// y = gain * x / sqrt(max(sum(x**2), epsilon))
// L2-normalize for CK layout with 16x16 blocks: one 32-thread block (one
// warp) covers 16 output features; lanes 0-15 and 16-31 each handle half of
// every 256-element block, reduced by a single shfl_xor(16).
// NOTE(review): the shared `lut` is written and then read with no
// __syncthreads/__syncwarp between — this relies on implicit single-warp
// synchrony, which is not guaranteed under independent thread scheduling
// (Volta+); confirm target architectures or add a barrier.
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_CK_16(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int lut[]; // max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*16 + (tid & 15);
float gain = 1.0f;
if (apply_gain) gain = G[k];
Lut += lut_offset;
// cache LUT entries, pre-scaled to element offsets (16*16 per block)
#pragma unroll 1
for (int i = tid; i < lut_size; i += 32)
lut[i] = Lut[i] * 16 * 16;
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
const TX* X0 = X + lut[i] + tid;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load(X0, j*32);
sum_sqr_x += x * x;
}
}
// reduce sum_sqr_x across the 4 rows of the warp
sum_sqr_x += shfl_xor(sum_sqr_x, 16);
store(S, sum_sqr_x, k, tid < 16);
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int block_offset = lut[i];
const TX* X0 = X + block_offset + tid;
TY* Y0 = Y + block_offset + tid;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load(X0, j*32);
store(Y0, x * rnorm, j*32);
}
}
}
// y = gain * x / sqrt(max(sum(x**2), epsilon))
// L2-normalize for CK layout with 8x8 blocks: one warp covers 8 output
// features; each group of 8 lanes owns one feature, reduced with two
// shfl_xor steps (16 then 8). 64 elements per block = 2 x 32 lanes.
// NOTE(review): same warp-synchronous shared-lut access as the 16x16
// variant — no barrier between write and read; verify on Volta+.
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_CK_8(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int lut[]; // max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*8 + (tid & 7);
float gain = 1.0f;
if (apply_gain) gain = G[k];
Lut += lut_offset;
// cache LUT entries, pre-scaled to element offsets (8*8 per block)
#pragma unroll 1
for (int i = tid; i < lut_size; i += 32)
lut[i] = Lut[i] * 8 * 8;
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
const TX* X0 = X + lut[i] + tid;
float x0 = load(X0, 0*32);
float x1 = load(X0, 1*32);
sum_sqr_x += x0 * x0 + x1 * x1;
}
// reduce sum_sqr_x across the 4 rows of the warp
sum_sqr_x += shfl_xor(sum_sqr_x, 16);
sum_sqr_x += shfl_xor(sum_sqr_x, 8);
store(S, sum_sqr_x, k, tid < 8);
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int block_offset = lut[i];
const TX* X0 = X + block_offset + tid;
TY* Y0 = Y + block_offset + tid;
float x0 = load(X0, 0*32);
float x1 = load(X0, 1*32);
store(Y0, x0 * rnorm, 0*32);
store(Y0, x1 * rnorm, 1*32);
}
}
// Host launcher for l2_normalize_KCTRS: one 32-thread block (one warp) per
// output feature k. `g == nullptr` disables the gain multiply.
// Returns false if the kernel launch was rejected (previously always true
// with a TODO; this implements the missing error check).
template <typename TY, typename TX>
bool L2NormalizeKCTRS(hipStream_t stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K)
{
    dim3 grid(K, 1, 1);
    dim3 block(32, 1, 1);
    hipLaunchKernelGGL(( l2_normalize_KCTRS<TY,TX>), dim3(grid), dim3(block), 0, stream, y, sum_sqr_x, x, g, (const int2*)lut, epsilon, g != 0);
    // surface launch-configuration errors instead of ignoring them
    return hipGetLastError() == hipSuccess;
}
// Host launcher for l2_normalize_CKTRS: one warp per output feature k;
// (TRS, magic_TRS, shift_TRS) drive the in-kernel magic-number div/mod.
// Returns false if the kernel launch was rejected (implements the old TODO).
template <typename TY, typename TX>
bool L2NormalizeCKTRS(hipStream_t stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS)
{
    dim3 grid(K, 1, 1);
    dim3 block(32, 1, 1);
    hipLaunchKernelGGL(( l2_normalize_CKTRS<TY,TX>), dim3(grid), dim3(block), 0, stream, y, sum_sqr_x, x, g, (const int4*)lut, epsilon, g != 0, TRS, magic_TRS, shift_TRS);
    // surface launch-configuration errors instead of ignoring them
    return hipGetLastError() == hipSuccess;
}
// Host launcher for the blocksparse CK L2-normalize kernels, dispatching on
// sparsity block size (32/16/8). `shared` is the dynamic shared-memory byte
// count for the cached LUT; the 32x32 variant adds 96 ints of reduction
// scratch. K must be a multiple of bsize (grid is K/bsize row groups).
// Returns false if the kernel launch was rejected (implements the old TODO).
template <typename TY, typename TX>
bool L2NormalizeCK(hipStream_t stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize)
{
    if (bsize == 32)
    {
        dim3 grid(K>>5, 1, 1);
        dim3 block(128, 1, 1);
        hipLaunchKernelGGL(( l2_normalize_CK_32<TY,TX>), dim3(grid), dim3(block), shared+96*4, stream, y, sum_sqr_x, x, g, lut, epsilon, g != 0);
    }
    else if (bsize == 16)
    {
        dim3 grid(K>>4, 1, 1);
        dim3 block(32, 1, 1);
        hipLaunchKernelGGL(( l2_normalize_CK_16<TY,TX>), dim3(grid), dim3(block), shared, stream, y, sum_sqr_x, x, g, lut, epsilon, g != 0);
    }
    else // if (bsize == 8)
    {
        dim3 grid(K>>3, 1, 1);
        dim3 block(32, 1, 1);
        hipLaunchKernelGGL(( l2_normalize_CK_8<TY,TX>), dim3(grid), dim3(block), shared, stream, y, sum_sqr_x, x, g, lut, epsilon, g != 0);
    }
    // surface launch-configuration errors instead of ignoring them
    return hipGetLastError() == hipSuccess;
}
/////////////////////////////////////// Gradients ///////////////////////////////////////////
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
// Gradient of l2_normalize_KCTRS (formulas in the comment above): one warp
// per feature k. Pass 1 accumulates the warp-reduced correction term and the
// gain gradient; pass 2 rescales grad_y into DX. S holds the forward pass's
// raw sum of squares; the (sum_sqr_x >= epsilon) factor zeroes the
// correction where the epsilon clamp was active.
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_KCTRS(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int2* __restrict__ Lut,
float epsilon, int apply_gain)
{
int tid = threadIdx.x;
int k = blockIdx.x;
int2 block_data = Lut[k];
float gain = 1.0f;
if (apply_gain) gain = G[k];
int offset = block_data.x + tid; // block_F + idx_k * CTRS + tid
int CTRS = block_data.y; // block_C * TRS
const TX* X1 = X + offset;
const TX* X2 = X + offset;
const TY* DY1 = DY + offset;
const TY* DY2 = DY + offset;
DX += offset;
float sum_sqr_x = S[k];
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
// sum(-d * x / norm_x**2)
float red_val = 0.0f;
float dg = 0.0f;
for (int i = tid; i < CTRS; i += 32)
{
float dy = load(DY1);
float x = load(X1);
DY1 += 32;
X1 += 32;
dg += dy * x * norm_xi;
red_val += (-dy * x * gain) * norm_x2i;
}
// butterfly-reduce both accumulators across the warp
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
red_val += shfl_xor(red_val, i);
dg += shfl_xor(dg, i);
}
if (apply_gain && tid == 0)
DG[k] = dg;
// zero the correction when the forward pass was epsilon-clamped
red_val *= sum_sqr_x >= epsilon;
for (int i = tid; i < CTRS; i += 32)
{
float dy = load(DY2);
float x = load(X2);
float dx = dy * gain + x * red_val;
store(DX, dx * norm_xi, 0);
DY2 += 32;
X2 += 32;
DX += 32;
}
}
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
// Gradient of l2_normalize_CKTRS: same math as the KCTRS gradient, but the
// element offsets are recomputed per iteration via the magic-number
// div16/mod16/mad16 helpers (c = ctrs/TRS, trs = ctrs%TRS, offset = c*KTRS+trs).
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_CKTRS(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int4* __restrict__ Lut,
float epsilon, int apply_gain, int TRS, int magic_TRS, int shift_TRS)
{
int tid = threadIdx.x;
int k = blockIdx.x;
int4 block_data = Lut[k];
float gain = 1.0f;
if (apply_gain) gain = G[k];
int idx_k = block_data.x;
int CTRS = block_data.y;
int KTRS = block_data.z;
int block_F = block_data.w;
int offset_F = block_F + idx_k * TRS;
const TX* X1 = X + offset_F;
const TX* X2 = X + offset_F;
const TY* DY1 = DY + offset_F;
const TY* DY2 = DY + offset_F;
DX += offset_F;
float sum_sqr_x = S[k];
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
// sum(-d * x / norm_x**2)
float red_val = 0.0f;
float dg = 0.0f;
for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
{
// c = i / TRS;
// trs = i % TRS;
// offset = c * KTRS + trs
int c = div16(ctrs, magic_TRS, shift_TRS);
int trs = mod16(ctrs, c, TRS);
int offset = mad16(c, KTRS, trs);
float x = load( X1, offset);
float dy = load(DY1, offset);
dg += dy * x * norm_xi;
red_val += (-dy * x * gain) * norm_x2i;
}
// butterfly-reduce both accumulators across the warp
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
red_val += shfl_xor(red_val, i);
dg += shfl_xor(dg, i);
}
if (apply_gain && tid == 0)
DG[k] = dg;
// zero the correction when the forward pass was epsilon-clamped
red_val *= sum_sqr_x >= epsilon;
for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
{
int c = div16(ctrs, magic_TRS, shift_TRS);
int trs = mod16(ctrs, c, TRS);
int offset = mad16(c, KTRS, trs);
float x = load( X2, offset);
float dy = load(DY2, offset);
float dx = dy * gain + x * red_val;
store(DX, dx * norm_xi, offset);
}
}
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
// Gradient of l2_normalize_CK_32: 128 threads per 32-feature row group.
// Shared memory is partitioned: fShare[0..96) and fShare[96..192) are the
// cross-warp reduction scratch for red_val and dg, iShare[192..) caches
// the pre-scaled LUT. The launcher reserves shared + 96*2*4 bytes.
template <typename TY, typename TX>
__global__ void __launch_bounds__(128) l2_normalize_grad_CK_32(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ float fShare[]; // 96*2 + max(lut_size)
extern __shared__ int iShare[]; // 96*2 + max(lut_size)
float* redShare1 = &fShare[96*0];
float* redShare2 = &fShare[96*1];
int* lutShare = &iShare[96*2];
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*32 + (tid & 31);
float gain = 1.0f;
if (apply_gain) gain = G[k];
float sum_sqr_x = S[k];
Lut += lut_offset;
// cache LUT entries, pre-scaled to element offsets (32*32 per block)
#pragma unroll 1
for (int i = tid; i < lut_size; i += 128)
lutShare[i] = Lut[i] * 32 * 32;
__syncthreads();
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
float red_val = 0.0f;
float dg = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lutShare[i] + tid;
const TY* DY1 = DY + offset;
const TX* X1 = X + offset;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load( X1, j*128);
float dy = load(DY1, j*128);
red_val += (-dy * gain * x) * norm_x2i;
dg += dy * x * norm_xi;
}
}
// reduce red_val across the 4 warps
if (tid >= 32)
{
redShare1[tid-32] = red_val;
redShare2[tid-32] = dg;
}
__syncthreads();
if (tid < 32)
{
red_val += redShare1[tid] + redShare1[tid + 32] + redShare1[tid + 64];
dg += redShare2[tid] + redShare2[tid + 32] + redShare2[tid + 64];
redShare1[tid] = red_val;
if (apply_gain)
DG[k] = dg;
}
__syncthreads();
// get the final reduced value for all warps:
red_val = redShare1[tid & 31];
// zero the correction when the forward pass was epsilon-clamped
red_val *= sum_sqr_x >= epsilon;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lutShare[i] + tid;
TX* DX2 = DX + offset;
const TY* DY2 = DY + offset;
const TX* X2 = X + offset;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load( X2, j*128);
float dy = load(DY2, j*128);
float dx = dy * gain + x * red_val;
store(DX2, dx * norm_xi, j*128);
}
}
}
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
// Gradient of l2_normalize_CK_16: one warp covers 16 features; lanes pair
// up across the half-warp boundary via shfl_xor(16) to reduce red_val/dg.
// NOTE(review): shared `lut` write then read without a barrier — relies on
// single-warp synchrony; verify on architectures with independent thread
// scheduling (Volta+).
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_CK_16(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int lut[]; // max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*16 + (tid & 15);
float gain = 1.0f;
if (apply_gain) gain = G[k];
float sum_sqr_x = S[k];
Lut += lut_offset;
// cache LUT entries, pre-scaled to element offsets (16*16 per block)
#pragma unroll 1
for (int i = tid; i < lut_size; i += 32)
lut[i] = Lut[i] * 16 * 16;
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
float red_val = 0.0f;
float dg = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lut[i] + tid;
const TY* DY1 = DY + offset;
const TX* X1 = X + offset;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load( X1, j*32);
float dy = load(DY1, j*32);
red_val += (-dy * gain * x) * norm_x2i;
dg += dy * x * norm_xi;
}
}
// reduce red_val,dg across the 4 rows of the warp
red_val += shfl_xor(red_val, 16);
dg += shfl_xor(dg, 16);
store(DG, dg, k, apply_gain && tid < 16);
// zero the correction when the forward pass was epsilon-clamped
red_val *= sum_sqr_x >= epsilon;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lut[i] + tid;
TX* DX2 = DX + offset;
const TY* DY2 = DY + offset;
const TX* X2 = X + offset;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load( X2, j*32);
float dy = load(DY2, j*32);
float dx = dy * gain + x * red_val;
store(DX2, dx * norm_xi, j*32);
}
}
}
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
// Gradient of l2_normalize_CK_8: one warp covers 8 features; each 8-lane
// group owns one feature, reduced with two shfl_xor steps (16 then 8).
// NOTE(review): same barrier-free shared `lut` access as the 16x16 variant —
// relies on single-warp synchrony; verify on Volta+.
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_CK_8(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int lut[]; // max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*8 + (tid & 7);
float gain = 1.0f;
if (apply_gain) gain = G[k];
float sum_sqr_x = S[k];
Lut += lut_offset;
// cache LUT entries, pre-scaled to element offsets (8*8 per block)
#pragma unroll 1
for (int i = tid; i < lut_size; i += 32)
lut[i] = Lut[i] * 8 * 8;
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
float red_val = 0.0f;
float dg = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lut[i] + tid;
const TY* DY1 = DY + offset;
const TX* X1 = X + offset;
#pragma unroll
for (int j = 0; j < 2; j++)
{
float x = load( X1, j*32);
float dy = load(DY1, j*32);
red_val += (-dy * gain * x) * norm_x2i;
dg += dy * x * norm_xi;
}
}
// reduce red_val,dg across the 4 rows of the warp
red_val += shfl_xor(red_val, 16);
dg += shfl_xor(dg, 16);
red_val += shfl_xor(red_val, 8);
dg += shfl_xor(dg, 8);
store(DG, dg, k, apply_gain && tid < 8);
// zero the correction when the forward pass was epsilon-clamped
red_val *= sum_sqr_x >= epsilon;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lut[i] + tid;
TX* DX2 = DX + offset;
const TY* DY2 = DY + offset;
const TX* X2 = X + offset;
#pragma unroll
for (int j = 0; j < 2; j++)
{
float x = load( X2, j*32);
float dy = load(DY2, j*32);
float dx = dy * gain + x * red_val;
store(DX2, dx * norm_xi, j*32);
}
}
}
// Host launcher for l2_normalize_grad_KCTRS: one warp per feature k.
// sum_sqr_x_p is the reduction stored by the forward pass; grad_g is only
// written when g != nullptr.
// Returns false if the kernel launch was rejected (implements the old TODO).
template <typename TY, typename TX>
bool L2NormalizeGradKCTRS(hipStream_t stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K)
{
    dim3 grid(K, 1, 1);
    dim3 block(32, 1, 1);
    hipLaunchKernelGGL(( l2_normalize_grad_KCTRS<TY,TX>), dim3(grid), dim3(block), 0, stream, grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, (const int2*)lut, epsilon, g != 0);
    // surface launch-configuration errors instead of ignoring them
    return hipGetLastError() == hipSuccess;
}
// Host launcher for l2_normalize_grad_CKTRS: one warp per feature k, with
// magic-number division constants for the strided CKTRS layout.
// Returns false if the kernel launch was rejected (implements the old TODO).
template <typename TY, typename TX>
bool L2NormalizeGradCKTRS(hipStream_t stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS)
{
    dim3 grid(K, 1, 1);
    dim3 block(32, 1, 1);
    hipLaunchKernelGGL(( l2_normalize_grad_CKTRS<TY,TX>), dim3(grid), dim3(block), 0, stream, grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, (const int4*)lut, epsilon, g != 0, TRS, magic_TRS, shift_TRS);
    // surface launch-configuration errors instead of ignoring them
    return hipGetLastError() == hipSuccess;
}
// Host launcher for the blocksparse CK L2-normalize gradient kernels,
// dispatching on sparsity block size (32/16/8). `shared` is the LUT cache
// size in bytes; the 32x32 variant adds 2*96 floats of reduction scratch.
// Returns false if the kernel launch was rejected (implements the old TODO).
template <typename TY, typename TX>
bool L2NormalizeGradCK (hipStream_t stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize)
{
    if (bsize == 32)
    {
        dim3 grid(K>>5, 1, 1);
        dim3 block(128, 1, 1);
        hipLaunchKernelGGL(( l2_normalize_grad_CK_32<TY,TX>), dim3(grid), dim3(block), shared+96*2*4, stream, grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0);
    }
    else if (bsize == 16)
    {
        dim3 grid(K>>4, 1, 1);
        dim3 block(32, 1, 1);
        hipLaunchKernelGGL(( l2_normalize_grad_CK_16<TY,TX>), dim3(grid), dim3(block), shared, stream, grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0);
    }
    else // if (bsize == 8)
    {
        dim3 grid(K>>3, 1, 1);
        dim3 block(32, 1, 1);
        hipLaunchKernelGGL(( l2_normalize_grad_CK_8<TY,TX>), dim3(grid), dim3(block), shared, stream, grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0);
    }
    // surface launch-configuration errors instead of ignoring them
    return hipGetLastError() == hipSuccess;
}
// Explicit template instantiations for each supported (TY, TX) pair.
// float / float
template bool L2NormalizeKCTRS<float, float>(hipStream_t stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<float, float>(hipStream_t stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <float, float>(hipStream_t stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<float, float>(hipStream_t stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<float, float>(hipStream_t stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <float, float>(hipStream_t stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
// ehalf / ehalf
template bool L2NormalizeKCTRS<ehalf, ehalf>(hipStream_t stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<ehalf, ehalf>(hipStream_t stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <ehalf, ehalf>(hipStream_t stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<ehalf, ehalf>(hipStream_t stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<ehalf, ehalf>(hipStream_t stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <ehalf, ehalf>(hipStream_t stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
// ehalf / float (mixed precision)
template bool L2NormalizeKCTRS<ehalf, float>(hipStream_t stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<ehalf, float>(hipStream_t stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <ehalf, float>(hipStream_t stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<ehalf, float>(hipStream_t stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<ehalf, float>(hipStream_t stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <ehalf, float>(hipStream_t stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
// bhalf / bhalf
template bool L2NormalizeKCTRS<bhalf, bhalf>(hipStream_t stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<bhalf, bhalf>(hipStream_t stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <bhalf, bhalf>(hipStream_t stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<bhalf, bhalf>(hipStream_t stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<bhalf, bhalf>(hipStream_t stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <bhalf, bhalf>(hipStream_t stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
// bhalf / float (mixed precision)
template bool L2NormalizeKCTRS<bhalf, float>(hipStream_t stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<bhalf, float>(hipStream_t stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <bhalf, float>(hipStream_t stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<bhalf, float>(hipStream_t stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<bhalf, float>(hipStream_t stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <bhalf, float>(hipStream_t stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
#endif // GOOGLE_CUDA
| c41a276f7d6f3ea8971159f79975ae63f7d2232e.cu |
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
// Fast integer division by a (runtime) constant via magic-number multiply:
// returns numerator / d, where (magic, shift) encode the divisor d as
// precomputed on the host.  Uses the vmad video instruction on the low
// 16-bit halves of the operands, so numerator and magic are assumed to fit
// in 16 bits -- presumably guaranteed by the host-side LUT builder; verify.
__device__ __forceinline__ int div16(int numerator, int magic, int shift)
{
    int res;
    asm("vmad.s32.u32.u32 %0, %1.h0, %2.h0, 0;" : "=r"(res) : "r"(numerator), "r"(magic));
    return res >> shift;
}
// Fast modulo companion to div16: given div = numerator / maxdiv, returns
// numerator - div * maxdiv (i.e. numerator % maxdiv) in a single vmad,
// again operating on the low 16-bit halves of div and maxdiv.
__device__ __forceinline__ int mod16(int numerator, int div, int maxdiv)
{
    int res;
    asm("vmad.s32.u32.u32 %0, -%1.h0, %2.h0, %3;" : "=r"(res) : "r"(div), "r"(maxdiv), "r"(numerator));
    return res;
}
// Fast multiply-add: returns a * b + c, multiplying the low 16-bit halves
// of a and b with a single vmad instruction (a and b must fit in 16 bits).
__device__ __forceinline__ int mad16(int a, int b, int c)
{
    int res;
    asm("vmad.s32.u32.u32 %0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
}
// Forward L2-normalize over K-major (KCTRS) weight layout:
//   y = gain * x / sqrt(max(sum(x**2), epsilon))
// One 32-thread warp per output feature k (grid.x = K).  Lut[k].x holds the
// base offset of feature k's elements, Lut[k].y the element count (C*TRS).
// The per-feature sum of squares is also written to S[k] for the backward pass.
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_KCTRS(
              TY*              Y,
           float*              S,
    const     TX* __restrict__ X,
    const  float* __restrict__ G,
    const   int2* __restrict__ Lut,
    float epsilon, int apply_gain)
{
    const int lane = threadIdx.x;
    const int k    = blockIdx.x;

    int2 entry = Lut[k];
    int  base  = entry.x + lane; // block_F + idx_k * CTRS + lane
    int  CTRS  = entry.y;        // block_C * TRS

    float gain = apply_gain ? G[k] : 1.0f;

    const TX* Xr = X + base; // read pointer, pass 1
    const TX* Xw = X + base; // read pointer, pass 2
    TY*       Yw = Y + base;

    // Pass 1: warp-strided sum of squares over this feature's elements.
    float ssx = 0.0f;
    for (int i = lane; i < CTRS; i += 32, Xr += 32)
    {
        float x = load(Xr);
        ssx += x * x;
    }
    // Butterfly reduction across the warp.
    #pragma unroll
    for (int j = 16; j > 0; j >>= 1)
        ssx += shfl_xor(ssx, j);

    // Persist the reduction for the gradient pass.
    if (lane == 0)
        store(S, ssx, k);

    // rnorm = gain / sqrt(max(ssx, epsilon))
    float rnorm = rsqrtf(fmaxf(ssx, epsilon)) * gain;

    // Pass 2: y = x * rnorm
    for (int i = lane; i < CTRS; i += 32, Xw += 32, Yw += 32)
        store(Yw, load(Xw) * rnorm);
}
// Forward L2-normalize over C-major (CKTRS) weight layout:
//   y = gain * x / sqrt(max(sum(x**2), epsilon))
// One 32-thread warp per output feature k.  Elements of feature k are strided
// by KTRS in memory, so the flat loop index is decomposed with the fast
// div16/mod16/mad16 helpers using the host-precomputed (magic_TRS, shift_TRS).
// The per-feature sum of squares is also written to S[k] for the backward pass.
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_CKTRS(
              TY*              Y,
           float*              S,
    const     TX* __restrict__ X,
    const  float* __restrict__ G,
    const   int4* __restrict__ Lut,
    float epsilon, int apply_gain, int TRS, int magic_TRS, int shift_TRS)
{
    int tid = threadIdx.x;
    int k = blockIdx.x;
    // Lut entry: (idx_k, C*TRS element count, K*TRS row stride, base offset)
    int4 block_data = Lut[k];
    float gain = 1.0f;
    if (apply_gain) gain = G[k];
    int idx_k = block_data.x;
    int CTRS = block_data.y;
    int KTRS = block_data.z;
    int block_F = block_data.w;
    int offset_F = block_F + idx_k * TRS;
    const TX* X1 = X + offset_F;
    const TX* X2 = X + offset_F;
    Y += offset_F;
    // Pass 1: sum_sqr_x = sum(x**2), warp-strided over the CTRS elements.
    float sum_sqr_x = 0.0f;
    for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
    {
        // c = i / TRS;
        // trs = i % TRS;
        // offset = c * KTRS + trs
        int c = div16(ctrs, magic_TRS, shift_TRS);
        int trs = mod16(ctrs, c, TRS);
        int offset = mad16(c, KTRS, trs);
        float x = load(X1, offset);
        sum_sqr_x += x * x;
    }
    // Butterfly reduction across the warp.
    #pragma unroll
    for (int i = 16; i > 0; i >>= 1)
        sum_sqr_x += shfl_xor(sum_sqr_x, i);
    // store reduction for gradient pass
    if (tid == 0)
        store(S, sum_sqr_x, k);
    // rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
    float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
    // Pass 2: y = x * rnorm, recomputing the same strided offsets.
    for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
    {
        int c = div16(ctrs, magic_TRS, shift_TRS);
        int trs = mod16(ctrs, c, TRS);
        int offset = mad16(c, KTRS, trs);
        float x = load(X2, offset);
        store(Y, x * rnorm, offset);
    }
}
// Forward L2-normalize for blocksparse CK layout with 32x32 blocks:
//   y = gain * x / sqrt(max(sum(x**2), epsilon))
// One 128-thread block handles 32 consecutive output features (one per lane
// of each warp); each sparse 32x32 block (1024 elements) is covered by the
// 128 threads in 8 strided steps.  Requires dynamic shared memory of
// 96*4 bytes (inter-warp reduction scratch) + lut_size*4 bytes.
template <typename TY, typename TX>
__global__ void __launch_bounds__(128) l2_normalize_CK_32(
              TY*              Y,
           float*              S,
    const     TX* __restrict__ X,
    const  float* __restrict__ G,
    const    int* __restrict__ Lut,
    float epsilon, int apply_gain)
{
    // iShare and fShare alias the SAME extern shared buffer: the first 96
    // floats are reduction scratch for warps 1..3, entries [96..) hold the
    // block offsets loaded from the LUT.
    extern __shared__ int iShare[]; // 96 + max(lut_size)
    extern __shared__ float fShare[]; // 96 + max(lut_size)
    int tid = threadIdx.x;
    int idx_L = blockIdx.x;
    int4 lut_head = ((const int4*)Lut)[idx_L];
    // unpack lut header
    int lut_offset = lut_head.x;
    int lut_size = lut_head.y;
    int idx_K = lut_head.z;
    int k = idx_K*32 + (tid & 31); // feature index handled by this lane
    float gain = 1.0f;
    if (apply_gain) gain = G[k];
    Lut += lut_offset;
    // Cache block offsets (scaled to element offsets) in shared memory.
    #pragma unroll 1
    for (int i = tid; i < lut_size; i += 128)
        iShare[i + 96] = Lut[i] * 32 * 32;
    __syncthreads();
    // sum_sqr_x = sum(x**2) over all blocks in this feature row.
    float sum_sqr_x = 0.0f;
    #pragma unroll 1
    for (int i = 0; i < lut_size; i++)
    {
        const TX* X1 = X + iShare[i + 96] + tid;
        #pragma unroll
        for (int j = 0; j < 8; j++)
        {
            float x = load(X1, j*128);
            sum_sqr_x += x * x;
        }
    }
    // reduce sum_sqr_x across the 4 warps (warps 1..3 spill to shared,
    // warp 0 accumulates and publishes)
    if (tid >= 32)
        fShare[tid-32] = sum_sqr_x;
    __syncthreads();
    if (tid < 32)
    {
        sum_sqr_x += fShare[tid] + fShare[tid + 32] + fShare[tid + 64];
        fShare[tid] = sum_sqr_x;
        // store reduction for gradient pass
        store(S, sum_sqr_x, k);
    }
    __syncthreads();
    // get the final reduced value for all warps:
    sum_sqr_x = fShare[tid & 31];
    // rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
    float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
    // y = x * rnorm
    #pragma unroll 1
    for (int i = 0; i < lut_size; i++)
    {
        int block_offset = iShare[i + 96];
        const TX* X2 = X + block_offset + tid;
        TY* Y2 = Y + block_offset + tid;
        #pragma unroll
        for (int j = 0; j < 8; j++)
        {
            float x = load(X2, j*128);
            store(Y2, x * rnorm, j*128);
        }
    }
}
// Forward L2-normalize for blocksparse CK layout with 16x16 blocks:
//   y = gain * x / sqrt(max(sum(x**2), epsilon))
// One 32-thread warp covers 16 features (two rows of each 16x16 block per
// lane pair); each 256-element block is read in 8 strided steps of 32.
// Dynamic shared memory: lut_size*4 bytes for the cached block offsets.
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_CK_16(
              TY*              Y,
           float*              S,
    const     TX* __restrict__ X,
    const  float* __restrict__ G,
    const    int* __restrict__ Lut,
    float epsilon, int apply_gain)
{
    extern __shared__ int lut[]; // max(lut_size)
    int tid = threadIdx.x;
    int idx_L = blockIdx.x;
    int4 lut_head = ((const int4*)Lut)[idx_L];
    // unpack lut header
    int lut_offset = lut_head.x;
    int lut_size = lut_head.y;
    int idx_K = lut_head.z;
    int k = idx_K*16 + (tid & 15);
    float gain = 1.0f;
    if (apply_gain) gain = G[k];
    Lut += lut_offset;
    #pragma unroll 1
    for (int i = tid; i < lut_size; i += 32)
        lut[i] = Lut[i] * 16 * 16;
    // BUG FIX: the original relied on implicit warp-synchronous execution
    // between writing lut[] above and reading it below.  That is not
    // guaranteed under Volta+ independent thread scheduling; a barrier over
    // this single-warp block is required (and essentially free).
    __syncthreads();
    // sum_sqr_x = sum(x**2)
    float sum_sqr_x = 0.0f;
    #pragma unroll 1
    for (int i = 0; i < lut_size; i++)
    {
        const TX* X0 = X + lut[i] + tid;
        #pragma unroll
        for (int j = 0; j < 8; j++)
        {
            float x = load(X0, j*32);
            sum_sqr_x += x * x;
        }
    }
    // reduce sum_sqr_x across the 2 half-warps; lanes < 16 publish to S[k]
    sum_sqr_x += shfl_xor(sum_sqr_x, 16);
    store(S, sum_sqr_x, k, tid < 16);
    // rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
    float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
    // y = x * rnorm
    #pragma unroll 1
    for (int i = 0; i < lut_size; i++)
    {
        int block_offset = lut[i];
        const TX* X0 = X + block_offset + tid;
        TY* Y0 = Y + block_offset + tid;
        #pragma unroll
        for (int j = 0; j < 8; j++)
        {
            float x = load(X0, j*32);
            store(Y0, x * rnorm, j*32);
        }
    }
}
// Forward L2-normalize for blocksparse CK layout with 8x8 blocks:
//   y = gain * x / sqrt(max(sum(x**2), epsilon))
// One 32-thread warp covers 8 features; each 64-element block is read in
// two strided steps of 32.  Dynamic shared memory: lut_size*4 bytes.
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_CK_8(
              TY*              Y,
           float*              S,
    const     TX* __restrict__ X,
    const  float* __restrict__ G,
    const    int* __restrict__ Lut,
    float epsilon, int apply_gain)
{
    extern __shared__ int lut[]; // max(lut_size)
    int tid = threadIdx.x;
    int idx_L = blockIdx.x;
    int4 lut_head = ((const int4*)Lut)[idx_L];
    // unpack lut header
    int lut_offset = lut_head.x;
    int lut_size = lut_head.y;
    int idx_K = lut_head.z;
    int k = idx_K*8 + (tid & 7);
    float gain = 1.0f;
    if (apply_gain) gain = G[k];
    Lut += lut_offset;
    #pragma unroll 1
    for (int i = tid; i < lut_size; i += 32)
        lut[i] = Lut[i] * 8 * 8;
    // BUG FIX: barrier between the shared-memory lut[] fill and its readers
    // below.  The original assumed warp-synchronous execution, which is not
    // guaranteed on Volta+ (independent thread scheduling); with a 32-thread
    // block this __syncthreads() is essentially free.
    __syncthreads();
    // sum_sqr_x = sum(x**2)
    float sum_sqr_x = 0.0f;
    #pragma unroll 1
    for (int i = 0; i < lut_size; i++)
    {
        const TX* X0 = X + lut[i] + tid;
        float x0 = load(X0, 0*32);
        float x1 = load(X0, 1*32);
        sum_sqr_x += x0 * x0 + x1 * x1;
    }
    // reduce sum_sqr_x across the 4 groups of 8 lanes; lanes < 8 publish S[k]
    sum_sqr_x += shfl_xor(sum_sqr_x, 16);
    sum_sqr_x += shfl_xor(sum_sqr_x, 8);
    store(S, sum_sqr_x, k, tid < 8);
    // rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
    float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
    // y = x * rnorm
    #pragma unroll 1
    for (int i = 0; i < lut_size; i++)
    {
        int block_offset = lut[i];
        const TX* X0 = X + block_offset + tid;
        TY* Y0 = Y + block_offset + tid;
        float x0 = load(X0, 0*32);
        float x1 = load(X0, 1*32);
        store(Y0, x0 * rnorm, 0*32);
        store(Y0, x1 * rnorm, 1*32);
    }
}
// Host launcher for the K-major (KCTRS) forward kernel: one 32-thread block
// per output feature k.  g == 0 disables the learned gain.
// Returns false if the launch was rejected (bad configuration / device error).
template <typename TY, typename TX>
bool L2NormalizeKCTRS(CUstream stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K)
{
    dim3 grid(K, 1, 1);
    dim3 block(32, 1, 1);
    l2_normalize_KCTRS<TY,TX><<<grid, block, 0, stream>>>(y, sum_sqr_x, x, g, (const int2*)lut, epsilon, g != 0);
    // Surface launch-configuration errors instead of the old unconditional
    // "return true; // TODO" (peek does not clear the sticky error state).
    return cudaPeekAtLastError() == cudaSuccess;
}
// Host launcher for the C-major (CKTRS) forward kernel: one 32-thread block
// per output feature k.  (magic_TRS, shift_TRS) are the host-precomputed
// magic-division constants for TRS.  g == 0 disables the learned gain.
// Returns false if the launch was rejected (bad configuration / device error).
template <typename TY, typename TX>
bool L2NormalizeCKTRS(CUstream stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS)
{
    dim3 grid(K, 1, 1);
    dim3 block(32, 1, 1);
    l2_normalize_CKTRS<TY,TX><<<grid, block, 0, stream>>>(y, sum_sqr_x, x, g, (const int4*)lut, epsilon, g != 0, TRS, magic_TRS, shift_TRS);
    // Report launch errors instead of the old unconditional "return true".
    return cudaPeekAtLastError() == cudaSuccess;
}
// Host launcher for the blocksparse CK forward kernels, dispatching on the
// sparse block size (32/16/8).  `shared` is the LUT portion of dynamic shared
// memory in bytes; the 32x32 variant additionally needs 96 ints of reduction
// scratch, hence the +96*4.  K must be a multiple of bsize.
// Returns false if the launch was rejected (bad configuration / device error).
template <typename TY, typename TX>
bool L2NormalizeCK(CUstream stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize)
{
    if (bsize == 32)
    {
        dim3 grid(K>>5, 1, 1);
        dim3 block(128, 1, 1);
        l2_normalize_CK_32<TY,TX><<<grid, block, shared+96*4, stream>>>(y, sum_sqr_x, x, g, lut, epsilon, g != 0);
    }
    else if (bsize == 16)
    {
        dim3 grid(K>>4, 1, 1);
        dim3 block(32, 1, 1);
        l2_normalize_CK_16<TY,TX><<<grid, block, shared, stream>>>(y, sum_sqr_x, x, g, lut, epsilon, g != 0);
    }
    else // if (bsize == 8)
    {
        dim3 grid(K>>3, 1, 1);
        dim3 block(32, 1, 1);
        l2_normalize_CK_8<TY,TX><<<grid, block, shared, stream>>>(y, sum_sqr_x, x, g, lut, epsilon, g != 0);
    }
    // Report launch errors instead of the old unconditional "return true".
    return cudaPeekAtLastError() == cudaSuccess;
}
/////////////////////////////////////// Gradients ///////////////////////////////////////////
// sum_sqr_x = sum(x**2)   (read back from the forward pass via S)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
// One 32-thread warp per output feature k (K-major KCTRS layout).
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_KCTRS(
              TX*              DX,
           float*              DG,
    const     TY* __restrict__ DY,
    const     TX* __restrict__ X,
    const  float* __restrict__ G,
    const  float* __restrict__ S,
    const   int2* __restrict__ Lut,
    float epsilon, int apply_gain)
{
    int tid = threadIdx.x;
    int k = blockIdx.x;
    int2 block_data = Lut[k];
    float gain = 1.0f;
    if (apply_gain) gain = G[k];
    int offset = block_data.x + tid; // block_F + idx_k * CTRS + tid
    int CTRS = block_data.y; // block_C * TRS
    const TX* X1 = X + offset;
    const TX* X2 = X + offset;
    const TY* DY1 = DY + offset;
    const TY* DY2 = DY + offset;
    DX += offset;
    // Forward-pass sum of squares and derived reciprocal norms.
    float sum_sqr_x = S[k];
    float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
    float norm_xi = rsqrtf(max_sum_sqr_x);   // 1 / norm_x
    float norm_x2i = 1.0f / max_sum_sqr_x;   // 1 / norm_x**2
    // Pass 1: accumulate sum(-d * x / norm_x**2) and the gain gradient.
    float red_val = 0.0f;
    float dg = 0.0f;
    for (int i = tid; i < CTRS; i += 32)
    {
        float dy = load(DY1);
        float x = load(X1);
        DY1 += 32;
        X1 += 32;
        dg += dy * x * norm_xi;
        red_val += (-dy * x * gain) * norm_x2i;
    }
    // Butterfly-reduce both accumulators across the warp.
    #pragma unroll
    for (int i = 16; i > 0; i >>= 1)
    {
        red_val += shfl_xor(red_val, i);
        dg += shfl_xor(dg, i);
    }
    if (apply_gain && tid == 0)
        DG[k] = dg;
    // Zero the correction term when the forward pass was epsilon-clamped.
    red_val *= sum_sqr_x >= epsilon;
    // Pass 2: grad_x = (dy*gain + x*red_val) / norm_x
    for (int i = tid; i < CTRS; i += 32)
    {
        float dy = load(DY2);
        float x = load(X2);
        float dx = dy * gain + x * red_val;
        store(DX, dx * norm_xi, 0);
        DY2 += 32;
        X2 += 32;
        DX += 32;
    }
}
// sum_sqr_x = sum(x**2)   (read back from the forward pass via S)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
// C-major (CKTRS) layout: one 32-thread warp per feature k, using the fast
// div16/mod16/mad16 magic-division helpers to walk the KTRS-strided elements.
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_CKTRS(
              TX*              DX,
           float*              DG,
    const     TY* __restrict__ DY,
    const     TX* __restrict__ X,
    const  float* __restrict__ G,
    const  float* __restrict__ S,
    const   int4* __restrict__ Lut,
    float epsilon, int apply_gain, int TRS, int magic_TRS, int shift_TRS)
{
    int tid = threadIdx.x;
    int k = blockIdx.x;
    // Lut entry: (idx_k, C*TRS element count, K*TRS row stride, base offset)
    int4 block_data = Lut[k];
    float gain = 1.0f;
    if (apply_gain) gain = G[k];
    int idx_k = block_data.x;
    int CTRS = block_data.y;
    int KTRS = block_data.z;
    int block_F = block_data.w;
    int offset_F = block_F + idx_k * TRS;
    const TX* X1 = X + offset_F;
    const TX* X2 = X + offset_F;
    const TY* DY1 = DY + offset_F;
    const TY* DY2 = DY + offset_F;
    DX += offset_F;
    // Forward-pass sum of squares and derived reciprocal norms.
    float sum_sqr_x = S[k];
    float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
    float norm_xi = rsqrtf(max_sum_sqr_x);   // 1 / norm_x
    float norm_x2i = 1.0f / max_sum_sqr_x;   // 1 / norm_x**2
    // Pass 1: accumulate sum(-d * x / norm_x**2) and the gain gradient.
    float red_val = 0.0f;
    float dg = 0.0f;
    for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
    {
        // c = i / TRS;
        // trs = i % TRS;
        // offset = c * KTRS + trs
        int c = div16(ctrs, magic_TRS, shift_TRS);
        int trs = mod16(ctrs, c, TRS);
        int offset = mad16(c, KTRS, trs);
        float x = load( X1, offset);
        float dy = load(DY1, offset);
        dg += dy * x * norm_xi;
        red_val += (-dy * x * gain) * norm_x2i;
    }
    // Butterfly-reduce both accumulators across the warp.
    #pragma unroll
    for (int i = 16; i > 0; i >>= 1)
    {
        red_val += shfl_xor(red_val, i);
        dg += shfl_xor(dg, i);
    }
    if (apply_gain && tid == 0)
        DG[k] = dg;
    // Zero the correction term when the forward pass was epsilon-clamped.
    red_val *= sum_sqr_x >= epsilon;
    // Pass 2: grad_x = (dy*gain + x*red_val) / norm_x
    for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
    {
        int c = div16(ctrs, magic_TRS, shift_TRS);
        int trs = mod16(ctrs, c, TRS);
        int offset = mad16(c, KTRS, trs);
        float x = load( X2, offset);
        float dy = load(DY2, offset);
        float dx = dy * gain + x * red_val;
        store(DX, dx * norm_xi, offset);
    }
}
// sum_sqr_x = sum(x**2)   (read back from the forward pass via S)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
// Blocksparse CK layout, 32x32 blocks: one 128-thread block per group of 32
// features.  Dynamic shared memory: 96*2 floats of reduction scratch followed
// by lut_size ints of cached block offsets.
template <typename TY, typename TX>
__global__ void __launch_bounds__(128) l2_normalize_grad_CK_32(
              TX*              DX,
           float*              DG,
    const     TY* __restrict__ DY,
    const     TX* __restrict__ X,
    const  float* __restrict__ G,
    const  float* __restrict__ S,
    const    int* __restrict__ Lut,
    float epsilon, int apply_gain)
{
    // fShare and iShare alias the SAME extern shared buffer; carve it into
    // two 96-float reduction areas plus the cached LUT.
    extern __shared__ float fShare[]; // 96*2 + max(lut_size)
    extern __shared__ int iShare[]; // 96*2 + max(lut_size)
    float* redShare1 = &fShare[96*0];
    float* redShare2 = &fShare[96*1];
    int* lutShare = &iShare[96*2];
    int tid = threadIdx.x;
    int idx_L = blockIdx.x;
    int4 lut_head = ((const int4*)Lut)[idx_L];
    // unpack lut header
    int lut_offset = lut_head.x;
    int lut_size = lut_head.y;
    int idx_K = lut_head.z;
    int k = idx_K*32 + (tid & 31); // feature index handled by this lane
    float gain = 1.0f;
    if (apply_gain) gain = G[k];
    float sum_sqr_x = S[k];
    Lut += lut_offset;
    // Cache block offsets (scaled to element offsets) in shared memory.
    #pragma unroll 1
    for (int i = tid; i < lut_size; i += 128)
        lutShare[i] = Lut[i] * 32 * 32;
    __syncthreads();
    float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
    float norm_xi = rsqrtf(max_sum_sqr_x);   // 1 / norm_x
    float norm_x2i = 1.0f / max_sum_sqr_x;   // 1 / norm_x**2
    // Pass 1: accumulate sum(-d * x / norm_x**2) and the gain gradient.
    float red_val = 0.0f;
    float dg = 0.0f;
    #pragma unroll 1
    for (int i = 0; i < lut_size; i++)
    {
        int offset = lutShare[i] + tid;
        const TY* DY1 = DY + offset;
        const TX* X1 = X + offset;
        #pragma unroll
        for (int j = 0; j < 8; j++)
        {
            float x = load( X1, j*128);
            float dy = load(DY1, j*128);
            red_val += (-dy * gain * x) * norm_x2i;
            dg += dy * x * norm_xi;
        }
    }
    // reduce red_val across the 4 warps (warps 1..3 spill, warp 0 combines)
    if (tid >= 32)
    {
        redShare1[tid-32] = red_val;
        redShare2[tid-32] = dg;
    }
    __syncthreads();
    if (tid < 32)
    {
        red_val += redShare1[tid] + redShare1[tid + 32] + redShare1[tid + 64];
        dg += redShare2[tid] + redShare2[tid + 32] + redShare2[tid + 64];
        redShare1[tid] = red_val;
        if (apply_gain)
            DG[k] = dg;
    }
    __syncthreads();
    // get the final reduced value for all warps:
    red_val = redShare1[tid & 31];
    // Zero the correction term when the forward pass was epsilon-clamped.
    red_val *= sum_sqr_x >= epsilon;
    // Pass 2: grad_x = (dy*gain + x*red_val) / norm_x
    #pragma unroll 1
    for (int i = 0; i < lut_size; i++)
    {
        int offset = lutShare[i] + tid;
        TX* DX2 = DX + offset;
        const TY* DY2 = DY + offset;
        const TX* X2 = X + offset;
        #pragma unroll
        for (int j = 0; j < 8; j++)
        {
            float x = load( X2, j*128);
            float dy = load(DY2, j*128);
            float dx = dy * gain + x * red_val;
            store(DX2, dx * norm_xi, j*128);
        }
    }
}
// sum_sqr_x = sum(x**2)   (read back from the forward pass via S)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
// Blocksparse CK layout, 16x16 blocks: one 32-thread warp per 16 features.
// Dynamic shared memory: lut_size ints of cached block offsets.
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_CK_16(
              TX*              DX,
           float*              DG,
    const     TY* __restrict__ DY,
    const     TX* __restrict__ X,
    const  float* __restrict__ G,
    const  float* __restrict__ S,
    const    int* __restrict__ Lut,
    float epsilon, int apply_gain)
{
    extern __shared__ int lut[]; // max(lut_size)
    int tid = threadIdx.x;
    int idx_L = blockIdx.x;
    int4 lut_head = ((const int4*)Lut)[idx_L];
    // unpack lut header
    int lut_offset = lut_head.x;
    int lut_size = lut_head.y;
    int idx_K = lut_head.z;
    int k = idx_K*16 + (tid & 15);
    float gain = 1.0f;
    if (apply_gain) gain = G[k];
    float sum_sqr_x = S[k];
    Lut += lut_offset;
    #pragma unroll 1
    for (int i = tid; i < lut_size; i += 32)
        lut[i] = Lut[i] * 16 * 16;
    // BUG FIX: barrier between the shared-memory lut[] fill and its readers.
    // The original relied on implicit warp synchrony, which is not guaranteed
    // on Volta+ (independent thread scheduling); with a 32-thread block this
    // __syncthreads() is essentially free.
    __syncthreads();
    float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
    float norm_xi = rsqrtf(max_sum_sqr_x);   // 1 / norm_x
    float norm_x2i = 1.0f / max_sum_sqr_x;   // 1 / norm_x**2
    // Pass 1: accumulate sum(-d * x / norm_x**2) and the gain gradient.
    float red_val = 0.0f;
    float dg = 0.0f;
    #pragma unroll 1
    for (int i = 0; i < lut_size; i++)
    {
        int offset = lut[i] + tid;
        const TY* DY1 = DY + offset;
        const TX* X1 = X + offset;
        #pragma unroll
        for (int j = 0; j < 8; j++)
        {
            float x = load( X1, j*32);
            float dy = load(DY1, j*32);
            red_val += (-dy * gain * x) * norm_x2i;
            dg += dy * x * norm_xi;
        }
    }
    // reduce red_val,dg across the 2 half-warps; lanes < 16 publish DG[k]
    red_val += shfl_xor(red_val, 16);
    dg += shfl_xor(dg, 16);
    store(DG, dg, k, apply_gain && tid < 16);
    // Zero the correction term when the forward pass was epsilon-clamped.
    red_val *= sum_sqr_x >= epsilon;
    // Pass 2: grad_x = (dy*gain + x*red_val) / norm_x
    #pragma unroll 1
    for (int i = 0; i < lut_size; i++)
    {
        int offset = lut[i] + tid;
        TX* DX2 = DX + offset;
        const TY* DY2 = DY + offset;
        const TX* X2 = X + offset;
        #pragma unroll
        for (int j = 0; j < 8; j++)
        {
            float x = load( X2, j*32);
            float dy = load(DY2, j*32);
            float dx = dy * gain + x * red_val;
            store(DX2, dx * norm_xi, j*32);
        }
    }
}
// sum_sqr_x = sum(x**2)   (read back from the forward pass via S)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
// Blocksparse CK layout, 8x8 blocks: one 32-thread warp per 8 features.
// Dynamic shared memory: lut_size ints of cached block offsets.
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_CK_8(
              TX*              DX,
           float*              DG,
    const     TY* __restrict__ DY,
    const     TX* __restrict__ X,
    const  float* __restrict__ G,
    const  float* __restrict__ S,
    const    int* __restrict__ Lut,
    float epsilon, int apply_gain)
{
    extern __shared__ int lut[]; // max(lut_size)
    int tid = threadIdx.x;
    int idx_L = blockIdx.x;
    int4 lut_head = ((const int4*)Lut)[idx_L];
    // unpack lut header
    int lut_offset = lut_head.x;
    int lut_size = lut_head.y;
    int idx_K = lut_head.z;
    int k = idx_K*8 + (tid & 7);
    float gain = 1.0f;
    if (apply_gain) gain = G[k];
    float sum_sqr_x = S[k];
    Lut += lut_offset;
    #pragma unroll 1
    for (int i = tid; i < lut_size; i += 32)
        lut[i] = Lut[i] * 8 * 8;
    // BUG FIX: barrier between the shared-memory lut[] fill and its readers.
    // The original relied on implicit warp synchrony, which is not guaranteed
    // on Volta+ (independent thread scheduling); with a 32-thread block this
    // __syncthreads() is essentially free.
    __syncthreads();
    float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
    float norm_xi = rsqrtf(max_sum_sqr_x);   // 1 / norm_x
    float norm_x2i = 1.0f / max_sum_sqr_x;   // 1 / norm_x**2
    // Pass 1: accumulate sum(-d * x / norm_x**2) and the gain gradient.
    float red_val = 0.0f;
    float dg = 0.0f;
    #pragma unroll 1
    for (int i = 0; i < lut_size; i++)
    {
        int offset = lut[i] + tid;
        const TY* DY1 = DY + offset;
        const TX* X1 = X + offset;
        #pragma unroll
        for (int j = 0; j < 2; j++)
        {
            float x = load( X1, j*32);
            float dy = load(DY1, j*32);
            red_val += (-dy * gain * x) * norm_x2i;
            dg += dy * x * norm_xi;
        }
    }
    // reduce red_val,dg across the 4 groups of 8 lanes; lanes < 8 publish DG[k]
    red_val += shfl_xor(red_val, 16);
    dg += shfl_xor(dg, 16);
    red_val += shfl_xor(red_val, 8);
    dg += shfl_xor(dg, 8);
    store(DG, dg, k, apply_gain && tid < 8);
    // Zero the correction term when the forward pass was epsilon-clamped.
    red_val *= sum_sqr_x >= epsilon;
    // Pass 2: grad_x = (dy*gain + x*red_val) / norm_x
    #pragma unroll 1
    for (int i = 0; i < lut_size; i++)
    {
        int offset = lut[i] + tid;
        TX* DX2 = DX + offset;
        const TY* DY2 = DY + offset;
        const TX* X2 = X + offset;
        #pragma unroll
        for (int j = 0; j < 2; j++)
        {
            float x = load( X2, j*32);
            float dy = load(DY2, j*32);
            float dx = dy * gain + x * red_val;
            store(DX2, dx * norm_xi, j*32);
        }
    }
}
// Host launcher for the K-major (KCTRS) gradient kernel: one 32-thread block
// per output feature k.  g == 0 disables the gain (and the DG write).
// Returns false if the launch was rejected (bad configuration / device error).
template <typename TY, typename TX>
bool L2NormalizeGradKCTRS(CUstream stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K)
{
    dim3 grid(K, 1, 1);
    dim3 block(32, 1, 1);
    l2_normalize_grad_KCTRS<TY,TX><<<grid, block, 0, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, (const int2*)lut, epsilon, g != 0);
    // Report launch errors instead of the old unconditional "return true".
    return cudaPeekAtLastError() == cudaSuccess;
}
// Host launcher for the C-major (CKTRS) gradient kernel: one 32-thread block
// per output feature k.  (magic_TRS, shift_TRS) are the host-precomputed
// magic-division constants for TRS.  g == 0 disables the gain.
// Returns false if the launch was rejected (bad configuration / device error).
template <typename TY, typename TX>
bool L2NormalizeGradCKTRS(CUstream stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS)
{
    dim3 grid(K, 1, 1);
    dim3 block(32, 1, 1);
    l2_normalize_grad_CKTRS<TY,TX><<<grid, block, 0, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, (const int4*)lut, epsilon, g != 0, TRS, magic_TRS, shift_TRS);
    // Report launch errors instead of the old unconditional "return true".
    return cudaPeekAtLastError() == cudaSuccess;
}
// Host launcher for the blocksparse CK gradient kernels, dispatching on the
// sparse block size (32/16/8).  `shared` is the LUT portion of dynamic shared
// memory in bytes; the 32x32 variant additionally needs 96*2 floats of
// reduction scratch, hence the +96*2*4.  K must be a multiple of bsize.
// Returns false if the launch was rejected (bad configuration / device error).
template <typename TY, typename TX>
bool L2NormalizeGradCK (CUstream stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize)
{
    if (bsize == 32)
    {
        dim3 grid(K>>5, 1, 1);
        dim3 block(128, 1, 1);
        l2_normalize_grad_CK_32<TY,TX><<<grid, block, shared+96*2*4, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0);
    }
    else if (bsize == 16)
    {
        dim3 grid(K>>4, 1, 1);
        dim3 block(32, 1, 1);
        l2_normalize_grad_CK_16<TY,TX><<<grid, block, shared, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0);
    }
    else // if (bsize == 8)
    {
        dim3 grid(K>>3, 1, 1);
        dim3 block(32, 1, 1);
        l2_normalize_grad_CK_8<TY,TX><<<grid, block, shared, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0);
    }
    // Report launch errors instead of the old unconditional "return true".
    return cudaPeekAtLastError() == cudaSuccess;
}
template bool L2NormalizeKCTRS<float, float>(CUstream stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<float, float>(CUstream stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <float, float>(CUstream stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<float, float>(CUstream stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<float, float>(CUstream stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <float, float>(CUstream stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeKCTRS<ehalf, ehalf>(CUstream stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<ehalf, ehalf>(CUstream stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <ehalf, ehalf>(CUstream stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<ehalf, ehalf>(CUstream stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<ehalf, ehalf>(CUstream stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <ehalf, ehalf>(CUstream stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeKCTRS<ehalf, float>(CUstream stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<ehalf, float>(CUstream stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <ehalf, float>(CUstream stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<ehalf, float>(CUstream stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<ehalf, float>(CUstream stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <ehalf, float>(CUstream stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeKCTRS<bhalf, bhalf>(CUstream stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<bhalf, bhalf>(CUstream stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <bhalf, bhalf>(CUstream stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<bhalf, bhalf>(CUstream stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<bhalf, bhalf>(CUstream stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <bhalf, bhalf>(CUstream stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeKCTRS<bhalf, float>(CUstream stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<bhalf, float>(CUstream stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <bhalf, float>(CUstream stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<bhalf, float>(CUstream stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<bhalf, float>(CUstream stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <bhalf, float>(CUstream stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
#endif // GOOGLE_CUDA
|
241c35ae89fe0d44b0366abd077954dde6fac5c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> s d c
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
/******************************************************************************/
// Apply the elementary reflector H = I - tau*v*v**H to one column of C.
// One thread block per column (blockIdx.x selects the column); BLOCK_SIZE
// threads cooperatively form w = v**H * c, reduce it in shared memory, then
// update c := c - tau*v*w.  v[0] is implicitly 1 and is never read.
__global__
void magma_zlarf_kernel(
    int m, const magmaDoubleComplex *dv, const magmaDoubleComplex *dtau,
    magmaDoubleComplex *dc, int lddc )
{
    if ( !MAGMA_Z_EQUAL(*dtau, MAGMA_Z_ZERO) ) {
        const int tx = threadIdx.x;
        dc = dc + blockIdx.x * lddc;
        __shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
        magmaDoubleComplex tmp;
        /* perform w := v**H * C */
        if (tx == 0)
            tmp = dc[0]; //since V[0] should be one
        else
            tmp = MAGMA_Z_ZERO;
        for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
            tmp += MAGMA_Z_MUL( MAGMA_Z_CONJ( dv[j] ), dc[j] );
        }
        sum[tx] = tmp;
        magma_sum_reduce< BLOCK_SIZE >( tx, sum );
        /* C := C - v * w */
        __syncthreads();
        tmp = - MAGMA_Z_CONJ(*dtau) * sum[0];
        for( int j = m-tx-1; j > 0; j -= BLOCK_SIZE )
            dc[j] += tmp * dv[j];
        if (tx == 0) dc[0] += tmp;
    }
}
/******************************************************************************/
// Single-block variant: apply H = I - tau*v*v**H to all n columns of the
// m-by-n matrix C using one BLOCK_SIZEx x BLOCK_SIZEy thread block.  Each
// thread column `col` walks columns col, col+BLOCK_SIZEy, ... of C; the x
// dimension reduces w = v**H * c per column.  v[0] is implicitly 1.
__global__
void magma_zlarf_smkernel(
    int m, int n, magmaDoubleComplex *dv, magmaDoubleComplex *dtau,
    magmaDoubleComplex *dc, int lddc )
{
    if ( ! MAGMA_Z_EQUAL(*dtau, MAGMA_Z_ZERO) ) {
        const int i = threadIdx.x, col= threadIdx.y;
        __shared__ magmaDoubleComplex sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];

        for( int k = col; k < n; k += BLOCK_SIZEy ) {
            // BUG FIX: the original advanced the base pointer cumulatively
            // (dc = dc + k*lddc), so from the second k-iteration onward it
            // addressed the wrong column.  Compute each column from the
            // unmodified base instead.
            magmaDoubleComplex *dck = dc + k * lddc;
            magmaDoubleComplex lsum;

            /* w := v**H * C */
            lsum = MAGMA_Z_ZERO;
            for( int j = i; j < m; j += BLOCK_SIZEx ) {
                if (j == 0)
                    lsum += MAGMA_Z_MUL( MAGMA_Z_ONE, dck[j] );
                else
                    lsum += MAGMA_Z_MUL( MAGMA_Z_CONJ( dv[j] ), dck[j] );
            }
            sum[i][col] = lsum;
            magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum );

            /* C := C - v * w */
            __syncthreads();
            magmaDoubleComplex z__1 = - MAGMA_Z_CONJ(*dtau) * sum[0][col];
            for( int j = m-i-1; j >= 0; j -= BLOCK_SIZEx ) {
                if (j == 0)
                    dck[j] += z__1;
                else
                    dck[j] += z__1 * dv[j];
            }
        }
    }
}
/******************************************************************************/
/*
Apply a complex elementary reflector H to a complex M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a complex scalar and v is a complex vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
This routine uses only one SM (block).
*/
extern "C" void
magma_zlarf_sm(
magma_int_t m, magma_int_t n,
magmaDoubleComplex *dv, magmaDoubleComplex *dtau,
magmaDoubleComplex *dc, magma_int_t lddc,
magma_queue_t queue )
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
hipLaunchKernelGGL(( magma_zlarf_smkernel)
, dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
m, n, dv, dtau, dc, lddc );
}
/***************************************************************************//**
Apply a complex elementary reflector H to a complex M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a complex scalar and v is a complex vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
*******************************************************************************/
extern "C" magma_int_t
magma_zlarf_gpu(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr dv,
magmaDoubleComplex_const_ptr dtau,
magmaDoubleComplex_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
dim3 grid( n, 1, 1 );
dim3 threads( BLOCK_SIZE );
if ( n > 0 ) {
hipLaunchKernelGGL(( magma_zlarf_kernel)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, dv, dtau, dC, lddc);
}
// The computation can be done on 1 SM with the following routine.
// magma_zlarf_sm(m, n, dv, dtau, dc, lddc);
return MAGMA_SUCCESS;
}
| 241c35ae89fe0d44b0366abd077954dde6fac5c6.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> s d c
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
/******************************************************************************/
/* Apply the Householder reflector H = I - tau * v * v**H to one column of C.
   Launch: grid.x = number of columns n, blockDim.x = BLOCK_SIZE threads.
   Assumes dv[0] == 1 (standard LAPACK Householder storage), so dc[0] stands
   in for conj(dv[0]) * dc[0].  Entire kernel is a no-op when *dtau == 0. */
__global__
void magma_zlarf_kernel(
int m, const magmaDoubleComplex *dv, const magmaDoubleComplex *dtau,
magmaDoubleComplex *dc, int lddc )
{
if ( !MAGMA_Z_EQUAL(*dtau, MAGMA_Z_ZERO) ) {
const int tx = threadIdx.x;
// each block handles one column of C
dc = dc + blockIdx.x * lddc;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
magmaDoubleComplex tmp;
/* perform w := v**H * C (strided partial sums, one per thread) */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_Z_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_Z_MUL( MAGMA_Z_CONJ( dv[j] ), dc[j] );
}
sum[tx] = tmp;
// block-wide reduction; afterwards sum[0] holds w = v**H * c
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
tmp = - MAGMA_Z_CONJ(*dtau) * sum[0];
for( int j = m-tx-1; j > 0; j -= BLOCK_SIZE )
dc[j] += tmp * dv[j];
if (tx == 0) dc[0] += tmp;   // v[0] == 1, so no dv factor here
}
}
/******************************************************************************/
/* Single-block variant: apply H = I - tau * v * v**H to all n columns of C.
   Launch: one block of BLOCK_SIZEx x BLOCK_SIZEy threads; each thread column
   (threadIdx.y) walks matrix columns col, col+BLOCK_SIZEy, ...
   Assumes dv[0] == 1.  No-op when *dtau == 0.
   Fix vs. previous version: the column offset was accumulated into the
   shared base pointer (dc = dc + k*lddc inside the loop), so every pass
   after the first addressed the wrong column; a per-iteration local
   pointer is used instead and the base pointer is left untouched.
   NOTE(review): the barrier inside the k-loop is only reached uniformly
   when all threadIdx.y lanes run the same trip count (n a multiple of
   BLOCK_SIZEy) — confirm callers satisfy this before relying on it. */
__global__
void magma_zlarf_smkernel(
int m, int n, magmaDoubleComplex *dv, magmaDoubleComplex *dtau,
magmaDoubleComplex *dc, int lddc )
{
if ( ! MAGMA_Z_EQUAL(*dtau, MAGMA_Z_ZERO) ) {
const int i = threadIdx.x, col= threadIdx.y;
for( int k = col; k < n; k += BLOCK_SIZEy ) {
magmaDoubleComplex *dck = dc + k * lddc;   // column k of C
__shared__ magmaDoubleComplex sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
magmaDoubleComplex lsum;
/* w := v**H * C */
lsum = MAGMA_Z_ZERO;
for( int j = i; j < m; j += BLOCK_SIZEx ) {
if (j == 0)
lsum += MAGMA_Z_MUL( MAGMA_Z_ONE, dck[j] );   // v[0] == 1
else
lsum += MAGMA_Z_MUL( MAGMA_Z_CONJ( dv[j] ), dck[j] );
}
sum[i][col] = lsum;
magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum );
/* C := C - v * w */
__syncthreads();
magmaDoubleComplex z__1 = - MAGMA_Z_CONJ(*dtau) * sum[0][col];
for( int j = m-i-1; j >= 0; j -= BLOCK_SIZEx ) {
if (j == 0)
dck[j] += z__1;
else
dck[j] += z__1 * dv[j];
}
}
}
}
/******************************************************************************/
/*
Apply a complex elementary reflector H to a complex M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a complex scalar and v is a complex vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
This routine uses only one SM (block).
*/
extern "C" void
magma_zlarf_sm(
magma_int_t m, magma_int_t n,
magmaDoubleComplex *dv, magmaDoubleComplex *dtau,
magmaDoubleComplex *dc, magma_int_t lddc,
magma_queue_t queue )
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
magma_zlarf_smkernel
<<< blocks, threads, 0, queue->cuda_stream() >>>
( m, n, dv, dtau, dc, lddc );
}
/***************************************************************************//**
Apply a complex elementary reflector H to a complex M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a complex scalar and v is a complex vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
*******************************************************************************/
extern "C" magma_int_t
magma_zlarf_gpu(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr dv,
magmaDoubleComplex_const_ptr dtau,
magmaDoubleComplex_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
dim3 grid( n, 1, 1 );
dim3 threads( BLOCK_SIZE );
if ( n > 0 ) {
magma_zlarf_kernel
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, dv, dtau, dC, lddc);
}
// The computation can be done on 1 SM with the following routine.
// magma_zlarf_sm(m, n, dv, dtau, dc, lddc);
return MAGMA_SUCCESS;
}
|
22dad4c4dd4cb34c9b4b3ad8f81904d98ca4c5f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// Doubles each of the n input elements: output[i] = 2 * data[i].
// Grid-stride loop: correct for any launch configuration, and the index is
// computed in 64 bits (the previous 32-bit int index overflowed for
// n > INT_MAX, silently skipping/corrupting the tail of the buffers).
__global__ void double_kernel(const float* data, float* output, size_t n) {
    const size_t stride = (size_t)gridDim.x * blockDim.x;
    for (size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        output[idx] = data[idx] * 2.0f;
    }
}
// Launches double_kernel on the default stream with a ceil(n/256) grid.
// Fixes: n == 0 previously produced a zero-block launch (an invalid launch
// configuration), and the ceil-division was done after narrowing to int;
// it is now computed in size_t.
__declspec(dllexport) void launch_double_ext_cuda_kernel(const float* data, float* output, size_t n) {
    if (n == 0) return;  // nothing to do; a 0-block launch is an error
    const int threads = 256;
    const size_t blocks = (n + threads - 1) / (size_t)threads;  // ceil-div, no int overflow
    hipLaunchKernelGGL(( double_kernel) , dim3((unsigned)blocks), dim3(threads) , 0, 0, data, output, n);
}
| 22dad4c4dd4cb34c9b4b3ad8f81904d98ca4c5f7.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Doubles each of the n input elements: output[i] = 2 * data[i].
// Grid-stride loop: correct for any launch configuration, and the index is
// computed in 64 bits (the previous 32-bit int index overflowed for
// n > INT_MAX, silently skipping/corrupting the tail of the buffers).
__global__ void double_kernel(const float* data, float* output, size_t n) {
    const size_t stride = (size_t)gridDim.x * blockDim.x;
    for (size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        output[idx] = data[idx] * 2.0f;
    }
}
// Launches double_kernel on the default stream with a ceil(n/256) grid.
// Fixes: n == 0 previously produced a zero-block launch (an invalid launch
// configuration), and the ceil-division was done after narrowing to int;
// it is now computed in size_t.
__declspec(dllexport) void launch_double_ext_cuda_kernel(const float* data, float* output, size_t n) {
    if (n == 0) return;  // nothing to do; a 0-block launch is an error
    const int threads = 256;
    const size_t blocks = (n + threads - 1) / (size_t)threads;  // ceil-div, no int overflow
    double_kernel <<<(unsigned)blocks, threads >>> (data, output, n);
}
|
7fcc145f55a3ea89bc7168edd6083f6c9629ed8e.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2013--2018 James E. McClure, Virginia Polytechnic & State University
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <math.h>
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#define NBLOCKS 8192
#define NTHREADS 256
// Initialize the two component densities and the phase-indicator field from
// the segmented image ID over the full Nx*Ny*Nz grid:
//   id == 1 -> pure component A, id == 2 -> pure component B,
//   anything else -> wetting densities (das, dbs).
// Den holds component A in [0,N) and component B in [N,2N);
// Phi[n] = (nA-nB)/(nA+nB).
__global__ void dvc_ScaLBL_Color_Init(char *ID, double *Den, double *Phi, double das, double dbs, int Nx, int Ny, int Nz)
{
const int N = Nx*Ny*Nz;
// chunked strided loop so a fixed NBLOCKS x NTHREADS launch covers N sites
const int S = N/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
const int n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x;
if (n >= N) continue;
const char id = ID[n];
double na, nb;   // component number densities at site n
if (id == 1){
na = 1.0;
nb = 0.0;
}
else if (id == 2){
na = 0.0;
nb = 1.0;
}
else {
na = das;
nb = dbs;
}
Den[n]   = na;
Den[N+n] = nb;
Phi[n]   = (na-nb)/(na+nb);
}
}
// Impose uniform boundary densities (vA, vB) on the 'count' sites in 'list',
// and write the matching phase value at the mapped regular-grid location.
__global__ void dvc_ScaLBL_Color_BC(int *list, int *Map, double *Phi, double *Den, double vA, double vB, int count, int Np)
{
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= count) return;
const int n = list[idx];       // compact-layout site index
Den[n]    = vA;
Den[Np+n] = vB;
// phase indicator at the regular-layout index of this site
Phi[Map[n]] = (vA-vB)/(vA+vB);
}
// Impose per-site boundary densities (vAv[idx], vBv[idx]) on the 'count'
// sites in 'list', and the matching phase value at the mapped location.
__global__ void dvc_ScaLBL_Color_BC_YDW(int *list, int *Map, double *Phi, double *Den, double *vAv, double *vBv, int count, int Np)
{
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= count) return;
const double a = vAv[idx];
const double b = vBv[idx];
const int n = list[idx];       // compact-layout site index
Den[n]    = a;
Den[Np+n] = b;
Phi[Map[n]] = (a-b)/(a+b);
}
//*************************************************************************
// Fill z-slice 'Slice' of Phi (Nx*Ny contiguous values) with 'value'.
// Launch with at least Nx*Ny threads total.
__global__ void dvc_ScaLBL_SetSlice_z(double *Phi, double value, int Nx, int Ny, int Nz, int Slice)
{
const int area = Nx*Ny;        // sites per z-slice
const int tid  = blockIdx.x*blockDim.x + threadIdx.x;
if (tid < area)
Phi[Slice*area + tid] = value;
}
// Copy z-slice 'Source' of Phi onto z-slice 'Dest' (Nx*Ny values each).
// Launch with at least Nx*Ny threads total.
__global__ void dvc_ScaLBL_CopySlice_z(double *Phi, int Nx, int Ny, int Nz, int Source, int Dest){
const int area = Nx*Ny;        // sites per z-slice
const int tid  = blockIdx.x*blockDim.x + threadIdx.x;
if (tid < area)
Phi[Dest*area + tid] = Phi[Source*area + tid];
}
// D3Q19 two-phase ("color") lattice-Boltzmann collision step, even timestep
// of the AA (in-place) update pattern.  For each compact site n in
// [start, finish):
//   1. read component densities nA, nB; form the phase field phi and the
//      phase-interpolated density rho0 and relaxation time tau
//   2. evaluate the color gradient from the 18 Phi neighbors in the regular
//      layout (unit stride in x, strideY in y, strideZ in z) and normalize
//   3. transform the 19 distributions into MRT moment space, relax with
//      rates rlx_setA/rlx_setB including the surface-tension terms (alpha)
//      and body force (Fx, Fy, Fz), and transform back into dist[]
//   4. write the velocity and rebuild the 7 mass-transport distributions
//      Aq/Bq per component, with recoloring (anti-diffusion) strength beta.
// Even step reads and writes the same dist[] slots (no neighbor
// indirection); the odd-step kernel uses neighborList for streaming.
__global__ void dvc_ScaLBL_D3Q19_AAeven_Color(int *Map, double *dist, double *Aq, double *Bq, double *Den, double *Phi,
double *Velocity, double rhoA, double rhoB, double tauA, double tauB, double alpha, double beta,
double Fx, double Fy, double Fz, int strideY, int strideZ, int start, int finish, int Np){
int ijk,nn,n;
double fq;
// conserved moments
double rho,jx,jy,jz;
// non-conserved moments
double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
double m3,m5,m7;
double nA,nB; // number density
double a1,b1,a2,b2,nAB,delta;
double C,nx,ny,nz; //color gradient magnitude and direction
double ux,uy,uz;
double phi,tau,rho0,rlx_setA,rlx_setB;
// precomputed rational coefficients of the D3Q19 MRT transform
const double mrt_V1=0.05263157894736842;
const double mrt_V2=0.012531328320802;
const double mrt_V3=0.04761904761904762;
const double mrt_V4=0.004594820384294068;
const double mrt_V5=0.01587301587301587;
const double mrt_V6=0.0555555555555555555555555;
const double mrt_V7=0.02777777777777778;
const double mrt_V8=0.08333333333333333;
const double mrt_V9=0.003341687552213868;
const double mrt_V10=0.003968253968253968;
const double mrt_V11=0.01388888888888889;
const double mrt_V12=0.04166666666666666;
// chunked strided loop so a fixed NBLOCKS x NTHREADS launch covers the range
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if (n<finish) {
// read the component number densities
nA = Den[n];
nB = Den[Np + n];
// compute phase indicator field
phi=(nA-nB)/(nA+nB);
// local density
rho0=rhoA + 0.5*(1.0-phi)*(rhoB-rhoA);
// local relaxation time
tau=tauA + 0.5*(1.0-phi)*(tauB-tauA);
rlx_setA = 1.f/tau;
rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
// Get the 1D index based on regular data layout
ijk = Map[n];
// COMPUTE THE COLOR GRADIENT
//........................................................................
//.................Read Phase Indicator Values............................
// (m1..m18 are reused here as Phi neighbor temporaries before they become
//  MRT moments below)
//........................................................................
nn = ijk-1; // neighbor index (get convention)
m1 = Phi[nn]; // get neighbor for phi - 1
//........................................................................
nn = ijk+1; // neighbor index (get convention)
m2 = Phi[nn]; // get neighbor for phi - 2
//........................................................................
nn = ijk-strideY; // neighbor index (get convention)
m3 = Phi[nn]; // get neighbor for phi - 3
//........................................................................
nn = ijk+strideY; // neighbor index (get convention)
m4 = Phi[nn]; // get neighbor for phi - 4
//........................................................................
nn = ijk-strideZ; // neighbor index (get convention)
m5 = Phi[nn]; // get neighbor for phi - 5
//........................................................................
nn = ijk+strideZ; // neighbor index (get convention)
m6 = Phi[nn]; // get neighbor for phi - 6
//........................................................................
nn = ijk-strideY-1; // neighbor index (get convention)
m7 = Phi[nn]; // get neighbor for phi - 7
//........................................................................
nn = ijk+strideY+1; // neighbor index (get convention)
m8 = Phi[nn]; // get neighbor for phi - 8
//........................................................................
nn = ijk+strideY-1; // neighbor index (get convention)
m9 = Phi[nn]; // get neighbor for phi - 9
//........................................................................
nn = ijk-strideY+1; // neighbor index (get convention)
m10 = Phi[nn]; // get neighbor for phi - 10
//........................................................................
nn = ijk-strideZ-1; // neighbor index (get convention)
m11 = Phi[nn]; // get neighbor for phi - 11
//........................................................................
nn = ijk+strideZ+1; // neighbor index (get convention)
m12 = Phi[nn]; // get neighbor for phi - 12
//........................................................................
nn = ijk+strideZ-1; // neighbor index (get convention)
m13 = Phi[nn]; // get neighbor for phi - 13
//........................................................................
nn = ijk-strideZ+1; // neighbor index (get convention)
m14 = Phi[nn]; // get neighbor for phi - 14
//........................................................................
nn = ijk-strideZ-strideY; // neighbor index (get convention)
m15 = Phi[nn]; // get neighbor for phi - 15
//........................................................................
nn = ijk+strideZ+strideY; // neighbor index (get convention)
m16 = Phi[nn]; // get neighbor for phi - 16
//........................................................................
nn = ijk+strideZ-strideY; // neighbor index (get convention)
m17 = Phi[nn]; // get neighbor for phi - 17
//........................................................................
nn = ijk-strideZ+strideY; // neighbor index (get convention)
m18 = Phi[nn]; // get neighbor for phi - 18
//............Compute the Color Gradient...................................
nx = -(m1-m2+0.5*(m7-m8+m9-m10+m11-m12+m13-m14));
ny = -(m3-m4+0.5*(m7-m8-m9+m10+m15-m16+m17-m18));
nz = -(m5-m6+0.5*(m11-m12-m13+m14+m15-m16-m17+m18));
//...........Normalize the Color Gradient.................................
C = sqrt(nx*nx+ny*ny+nz*nz);
double ColorMag = C;
if (C==0.0) ColorMag=1.0;   // avoid 0/0; direction zeroed again below
nx = nx/ColorMag;
ny = ny/ColorMag;
nz = nz/ColorMag;
// ---- forward MRT transform: accumulate rho, j, and moments m1..m18 ----
// q=0
fq = dist[n];
rho = fq;
m1 = -30.0*fq;
m2 = 12.0*fq;
// q=1
fq = dist[2*Np+n];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jx = fq;
m4 = -4.0*fq;
m9 = 2.0*fq;
m10 = -4.0*fq;
// f2 = dist[10*Np+n];
fq = dist[1*Np+n];
rho += fq;
m1 -= 11.0*(fq);
m2 -= 4.0*(fq);
jx -= fq;
m4 += 4.0*(fq);
m9 += 2.0*(fq);
m10 -= 4.0*(fq);
// q=3
fq = dist[4*Np+n];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy = fq;
m6 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 = fq;
m12 = -2.0*fq;
// q = 4
fq = dist[3*Np+n];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy -= fq;
m6 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 += fq;
m12 -= 2.0*fq;
// q=5
fq = dist[6*Np+n];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz = fq;
m8 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q = 6
fq = dist[5*Np+n];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz -= fq;
m8 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q=7
fq = dist[8*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 = fq;
m16 = fq;
m17 = -fq;
// q = 8
fq = dist[7*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 += fq;
m16 -= fq;
m17 += fq;
// q=9
fq = dist[10*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 += fq;
m17 += fq;
// q = 10
fq = dist[9*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 -= fq;
m17 -= fq;
// q=11
fq = dist[12*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 = fq;
m16 -= fq;
m18 = fq;
// q=12
fq = dist[11*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 += fq;
m16 += fq;
m18 -= fq;
// q=13
fq = dist[14*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 -= fq;
m18 -= fq;
// q=14
fq = dist[13*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 += fq;
m18 += fq;
// q=15
fq = dist[16*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 = fq;
m17 += fq;
m18 -= fq;
// q=16
fq = dist[15*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 += fq;
m17 -= fq;
m18 += fq;
// q=17
fq = dist[18*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 += fq;
m18 += fq;
// q=18
fq = dist[17*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 -= fq;
m18 -= fq;
//........................................................................
//..............carry out relaxation process..............................
//..........Toelke, Fruediger et. al. 2006................................
if (C == 0.0) nx = ny = nz = 0.0;   // no interface: drop surface-tension terms
m1 = m1 + rlx_setA*((19*(jx*jx+jy*jy+jz*jz)/rho0 - 11*rho) -19*alpha*C - m1);
m2 = m2 + rlx_setA*((3*rho - 5.5*(jx*jx+jy*jy+jz*jz)/rho0)- m2);
m4 = m4 + rlx_setB*((-0.6666666666666666*jx)- m4);
m6 = m6 + rlx_setB*((-0.6666666666666666*jy)- m6);
m8 = m8 + rlx_setB*((-0.6666666666666666*jz)- m8);
m9 = m9 + rlx_setA*(((2*jx*jx-jy*jy-jz*jz)/rho0) + 0.5*alpha*C*(2*nx*nx-ny*ny-nz*nz) - m9);
m10 = m10 + rlx_setA*( - m10);
m11 = m11 + rlx_setA*(((jy*jy-jz*jz)/rho0) + 0.5*alpha*C*(ny*ny-nz*nz)- m11);
m12 = m12 + rlx_setA*( - m12);
m13 = m13 + rlx_setA*( (jx*jy/rho0) + 0.5*alpha*C*nx*ny - m13);
m14 = m14 + rlx_setA*( (jy*jz/rho0) + 0.5*alpha*C*ny*nz - m14);
m15 = m15 + rlx_setA*( (jx*jz/rho0) + 0.5*alpha*C*nx*nz - m15);
m16 = m16 + rlx_setB*( - m16);
m17 = m17 + rlx_setB*( - m17);
m18 = m18 + rlx_setB*( - m18);
//.......................................................................................................
//.................inverse transformation......................................................
// q=0
fq = mrt_V1*rho-mrt_V2*m1+mrt_V3*m2;
dist[n] = fq;
// q = 1
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10) + 0.16666666*Fx;
dist[1*Np+n] = fq;
// q=2
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10) - 0.16666666*Fx;
dist[2*Np+n] = fq;
// q = 3
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12) + 0.16666666*Fy;
dist[3*Np+n] = fq;
// q = 4
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12) - 0.16666666*Fy;
dist[4*Np+n] = fq;
// q = 5
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11) + 0.16666666*Fz;
dist[5*Np+n] = fq;
// q = 6
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11) - 0.16666666*Fz;
dist[6*Np+n] = fq;
// q = 7
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17) + 0.08333333333*(Fx+Fy);
dist[7*Np+n] = fq;
// q = 8
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11
+mrt_V12*m12+0.25*m13+0.125*(m17-m16) - 0.08333333333*(Fx+Fy);
dist[8*Np+n] = fq;
// q = 9
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17) + 0.08333333333*(Fx-Fy);
dist[9*Np+n] = fq;
// q = 10
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17)- 0.08333333333*(Fx-Fy);
dist[10*Np+n] = fq;
// q = 11
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12+0.25*m15+0.125*(m18-m16) + 0.08333333333*(Fx+Fz);
dist[11*Np+n] = fq;
// q = 12
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+
mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18)-0.08333333333*(Fx+Fz);
dist[12*Np+n] = fq;
// q = 13
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15-0.125*(m16+m18) + 0.08333333333*(Fx-Fz);
dist[13*Np+n] = fq;
// q= 14
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15+0.125*(m16+m18) - 0.08333333333*(Fx-Fz);
dist[14*Np+n] = fq;
// q = 15
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18) + 0.08333333333*(Fy+Fz);
dist[15*Np+n] = fq;
// q = 16
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17)- 0.08333333333*(Fy+Fz);
dist[16*Np+n] = fq;
// q = 17
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)
-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18) + 0.08333333333*(Fy-Fz);
dist[17*Np+n] = fq;
// q = 18
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)
-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18) - 0.08333333333*(Fy-Fz);
dist[18*Np+n] = fq;
//........................................................................
// write the velocity
ux = jx / rho0;
uy = jy / rho0;
uz = jz / rho0;
Velocity[n] = ux;
Velocity[Np+n] = uy;
Velocity[2*Np+n] = uz;
// Instantiate mass transport distributions
// Stationary value - distribution 0
nAB = 1.0/(nA+nB);
Aq[n] = 0.3333333333333333*nA;
Bq[n] = 0.3333333333333333*nB;
//...............................................
// q = 1,2: Cq = {+1,0,0} / {-1,0,0}
// delta is the recoloring (anti-diffusion) flux along the color gradient
delta = beta*nA*nB*nAB*0.1111111111111111*nx;
if (!(nA*nB*nAB>0)) delta=0;   // also guards NaN from a 0/0 interface
a1 = nA*(0.1111111111111111*(1+4.5*ux))+delta;
b1 = nB*(0.1111111111111111*(1+4.5*ux))-delta;
a2 = nA*(0.1111111111111111*(1-4.5*ux))-delta;
b2 = nB*(0.1111111111111111*(1-4.5*ux))+delta;
Aq[1*Np+n] = a1;
Bq[1*Np+n] = b1;
Aq[2*Np+n] = a2;
Bq[2*Np+n] = b2;
//...............................................
// q = 3,4: Cq = {0,+1,0} / {0,-1,0}
delta = beta*nA*nB*nAB*0.1111111111111111*ny;
if (!(nA*nB*nAB>0)) delta=0;
a1 = nA*(0.1111111111111111*(1+4.5*uy))+delta;
b1 = nB*(0.1111111111111111*(1+4.5*uy))-delta;
a2 = nA*(0.1111111111111111*(1-4.5*uy))-delta;
b2 = nB*(0.1111111111111111*(1-4.5*uy))+delta;
Aq[3*Np+n] = a1;
Bq[3*Np+n] = b1;
Aq[4*Np+n] = a2;
Bq[4*Np+n] = b2;
//...............................................
// q = 5,6: Cq = {0,0,+1} / {0,0,-1}
delta = beta*nA*nB*nAB*0.1111111111111111*nz;
if (!(nA*nB*nAB>0)) delta=0;
a1 = nA*(0.1111111111111111*(1+4.5*uz))+delta;
b1 = nB*(0.1111111111111111*(1+4.5*uz))-delta;
a2 = nA*(0.1111111111111111*(1-4.5*uz))-delta;
b2 = nB*(0.1111111111111111*(1-4.5*uz))+delta;
Aq[5*Np+n] = a1;
Bq[5*Np+n] = b1;
Aq[6*Np+n] = a2;
Bq[6*Np+n] = b2;
//...............................................
}
}
}
__global__ void dvc_ScaLBL_D3Q19_AAodd_Color(int *neighborList, int *Map, double *dist, double *Aq, double *Bq, double *Den,
double *Phi, double *Velocity, double rhoA, double rhoB, double tauA, double tauB, double alpha, double beta,
double Fx, double Fy, double Fz, int strideY, int strideZ, int start, int finish, int Np){
int n,nn,ijk,nread;
int nr1,nr2,nr3,nr4,nr5,nr6;
int nr7,nr8,nr9,nr10;
int nr11,nr12,nr13,nr14;
//int nr15,nr16,nr17,nr18;
double fq;
// conserved momemnts
double rho,jx,jy,jz;
// non-conserved moments
double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
double m3,m5,m7;
double nA,nB; // number density
double a1,b1,a2,b2,nAB,delta;
double C,nx,ny,nz; //color gradient magnitude and direction
double ux,uy,uz;
double phi,tau,rho0,rlx_setA,rlx_setB;
const double mrt_V1=0.05263157894736842;
const double mrt_V2=0.012531328320802;
const double mrt_V3=0.04761904761904762;
const double mrt_V4=0.004594820384294068;
const double mrt_V5=0.01587301587301587;
const double mrt_V6=0.0555555555555555555555555;
const double mrt_V7=0.02777777777777778;
const double mrt_V8=0.08333333333333333;
const double mrt_V9=0.003341687552213868;
const double mrt_V10=0.003968253968253968;
const double mrt_V11=0.01388888888888889;
const double mrt_V12=0.04166666666666666;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if (n<finish) {
// read the component number densities
nA = Den[n];
nB = Den[Np + n];
// compute phase indicator field
phi=(nA-nB)/(nA+nB);
// local density
rho0=rhoA + 0.5*(1.0-phi)*(rhoB-rhoA);
// local relaxation time
tau=tauA + 0.5*(1.0-phi)*(tauB-tauA);
rlx_setA = 1.f/tau;
rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
// Get the 1D index based on regular data layout
ijk = Map[n];
// COMPUTE THE COLOR GRADIENT
//........................................................................
//.................Read Phase Indicator Values............................
//........................................................................
nn = ijk-1; // neighbor index (get convention)
m1 = Phi[nn]; // get neighbor for phi - 1
//........................................................................
nn = ijk+1; // neighbor index (get convention)
m2 = Phi[nn]; // get neighbor for phi - 2
//........................................................................
nn = ijk-strideY; // neighbor index (get convention)
m3 = Phi[nn]; // get neighbor for phi - 3
//........................................................................
nn = ijk+strideY; // neighbor index (get convention)
m4 = Phi[nn]; // get neighbor for phi - 4
//........................................................................
nn = ijk-strideZ; // neighbor index (get convention)
m5 = Phi[nn]; // get neighbor for phi - 5
//........................................................................
nn = ijk+strideZ; // neighbor index (get convention)
m6 = Phi[nn]; // get neighbor for phi - 6
//........................................................................
nn = ijk-strideY-1; // neighbor index (get convention)
m7 = Phi[nn]; // get neighbor for phi - 7
//........................................................................
nn = ijk+strideY+1; // neighbor index (get convention)
m8 = Phi[nn]; // get neighbor for phi - 8
//........................................................................
nn = ijk+strideY-1; // neighbor index (get convention)
m9 = Phi[nn]; // get neighbor for phi - 9
//........................................................................
nn = ijk-strideY+1; // neighbor index (get convention)
m10 = Phi[nn]; // get neighbor for phi - 10
//........................................................................
nn = ijk-strideZ-1; // neighbor index (get convention)
m11 = Phi[nn]; // get neighbor for phi - 11
//........................................................................
nn = ijk+strideZ+1; // neighbor index (get convention)
m12 = Phi[nn]; // get neighbor for phi - 12
//........................................................................
nn = ijk+strideZ-1; // neighbor index (get convention)
m13 = Phi[nn]; // get neighbor for phi - 13
//........................................................................
nn = ijk-strideZ+1; // neighbor index (get convention)
m14 = Phi[nn]; // get neighbor for phi - 14
//........................................................................
nn = ijk-strideZ-strideY; // neighbor index (get convention)
m15 = Phi[nn]; // get neighbor for phi - 15
//........................................................................
nn = ijk+strideZ+strideY; // neighbor index (get convention)
m16 = Phi[nn]; // get neighbor for phi - 16
//........................................................................
nn = ijk+strideZ-strideY; // neighbor index (get convention)
m17 = Phi[nn]; // get neighbor for phi - 17
//........................................................................
nn = ijk-strideZ+strideY; // neighbor index (get convention)
m18 = Phi[nn]; // get neighbor for phi - 18
//............Compute the Color Gradient...................................
nx = -(m1-m2+0.5*(m7-m8+m9-m10+m11-m12+m13-m14));
ny = -(m3-m4+0.5*(m7-m8-m9+m10+m15-m16+m17-m18));
nz = -(m5-m6+0.5*(m11-m12-m13+m14+m15-m16-m17+m18));
//...........Normalize the Color Gradient.................................
C = sqrt(nx*nx+ny*ny+nz*nz);
if (C==0.0) C=1.0;
nx = nx/C;
ny = ny/C;
nz = nz/C;
// q=0
fq = dist[n];
rho = fq;
m1 = -30.0*fq;
m2 = 12.0*fq;
// q=1
//nread = neighborList[n]; // neighbor 2
//fq = dist[nread]; // reading the f1 data into register fq
nr1 = neighborList[n];
fq = dist[nr1]; // reading the f1 data into register fq
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jx = fq;
m4 = -4.0*fq;
m9 = 2.0*fq;
m10 = -4.0*fq;
// f2 = dist[10*Np+n];
//nread = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
//fq = dist[nread]; // reading the f2 data into register fq
nr2 = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
fq = dist[nr2]; // reading the f2 data into register fq
rho += fq;
m1 -= 11.0*(fq);
m2 -= 4.0*(fq);
jx -= fq;
m4 += 4.0*(fq);
m9 += 2.0*(fq);
m10 -= 4.0*(fq);
// q=3
//nread = neighborList[n+2*Np]; // neighbor 4
//fq = dist[nread];
nr3 = neighborList[n+2*Np]; // neighbor 4
fq = dist[nr3];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy = fq;
m6 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 = fq;
m12 = -2.0*fq;
// q = 4
//nread = neighborList[n+3*Np]; // neighbor 3
//fq = dist[nread];
nr4 = neighborList[n+3*Np]; // neighbor 3
fq = dist[nr4];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy -= fq;
m6 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 += fq;
m12 -= 2.0*fq;
// q=5
//nread = neighborList[n+4*Np];
//fq = dist[nread];
nr5 = neighborList[n+4*Np];
fq = dist[nr5];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz = fq;
m8 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q = 6
//nread = neighborList[n+5*Np];
//fq = dist[nread];
nr6 = neighborList[n+5*Np];
fq = dist[nr6];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz -= fq;
m8 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q=7
//nread = neighborList[n+6*Np];
//fq = dist[nread];
nr7 = neighborList[n+6*Np];
fq = dist[nr7];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 = fq;
m16 = fq;
m17 = -fq;
// q = 8
//nread = neighborList[n+7*Np];
//fq = dist[nread];
nr8 = neighborList[n+7*Np];
fq = dist[nr8];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 += fq;
m16 -= fq;
m17 += fq;
// q=9
//nread = neighborList[n+8*Np];
//fq = dist[nread];
nr9 = neighborList[n+8*Np];
fq = dist[nr9];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 += fq;
m17 += fq;
// q = 10
//nread = neighborList[n+9*Np];
//fq = dist[nread];
nr10 = neighborList[n+9*Np];
fq = dist[nr10];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 -= fq;
m17 -= fq;
// q=11
//nread = neighborList[n+10*Np];
//fq = dist[nread];
nr11 = neighborList[n+10*Np];
fq = dist[nr11];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 = fq;
m16 -= fq;
m18 = fq;
// q=12
//nread = neighborList[n+11*Np];
//fq = dist[nread];
nr12 = neighborList[n+11*Np];
fq = dist[nr12];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 += fq;
m16 += fq;
m18 -= fq;
// q=13
//nread = neighborList[n+12*Np];
//fq = dist[nread];
nr13 = neighborList[n+12*Np];
fq = dist[nr13];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 -= fq;
m18 -= fq;
// q=14
//nread = neighborList[n+13*Np];
//fq = dist[nread];
nr14 = neighborList[n+13*Np];
fq = dist[nr14];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 += fq;
m18 += fq;
// q=15
nread = neighborList[n+14*Np];
fq = dist[nread];
//fq = dist[17*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 = fq;
m17 += fq;
m18 -= fq;
// q=16
nread = neighborList[n+15*Np];
fq = dist[nread];
//fq = dist[8*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 += fq;
m17 -= fq;
m18 += fq;
// q=17
//fq = dist[18*Np+n];
nread = neighborList[n+16*Np];
fq = dist[nread];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 += fq;
m18 += fq;
// q=18
nread = neighborList[n+17*Np];
fq = dist[nread];
//fq = dist[9*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 -= fq;
m18 -= fq;
//........................................................................
//..............carry out relaxation process..............................
//..........Toelke, Fruediger et. al. 2006................................
if (C == 0.0) nx = ny = nz = 0.0;
m1 = m1 + rlx_setA*((19*(jx*jx+jy*jy+jz*jz)/rho0 - 11*rho) -19*alpha*C - m1);
m2 = m2 + rlx_setA*((3*rho - 5.5*(jx*jx+jy*jy+jz*jz)/rho0)- m2);
m4 = m4 + rlx_setB*((-0.6666666666666666*jx)- m4);
m6 = m6 + rlx_setB*((-0.6666666666666666*jy)- m6);
m8 = m8 + rlx_setB*((-0.6666666666666666*jz)- m8);
m9 = m9 + rlx_setA*(((2*jx*jx-jy*jy-jz*jz)/rho0) + 0.5*alpha*C*(2*nx*nx-ny*ny-nz*nz) - m9);
m10 = m10 + rlx_setA*( - m10);
m11 = m11 + rlx_setA*(((jy*jy-jz*jz)/rho0) + 0.5*alpha*C*(ny*ny-nz*nz)- m11);
m12 = m12 + rlx_setA*( - m12);
m13 = m13 + rlx_setA*( (jx*jy/rho0) + 0.5*alpha*C*nx*ny - m13);
m14 = m14 + rlx_setA*( (jy*jz/rho0) + 0.5*alpha*C*ny*nz - m14);
m15 = m15 + rlx_setA*( (jx*jz/rho0) + 0.5*alpha*C*nx*nz - m15);
m16 = m16 + rlx_setB*( - m16);
m17 = m17 + rlx_setB*( - m17);
m18 = m18 + rlx_setB*( - m18);
//.................inverse transformation......................................................
// q=0
fq = mrt_V1*rho-mrt_V2*m1+mrt_V3*m2;
dist[n] = fq;
// q = 1
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10)+0.16666666*Fx;
//nread = neighborList[n+Np];
dist[nr2] = fq;
// q=2
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10) - 0.16666666*Fx;
//nread = neighborList[n];
dist[nr1] = fq;
// q = 3
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12) + 0.16666666*Fy;
//nread = neighborList[n+3*Np];
dist[nr4] = fq;
// q = 4
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12) - 0.16666666*Fy;
//nread = neighborList[n+2*Np];
dist[nr3] = fq;
// q = 5
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11) + 0.16666666*Fz;
//nread = neighborList[n+5*Np];
dist[nr6] = fq;
// q = 6
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11) - 0.16666666*Fz;
//nread = neighborList[n+4*Np];
dist[nr5] = fq;
// q = 7
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17) + 0.08333333333*(Fx+Fy);
//nread = neighborList[n+7*Np];
dist[nr8] = fq;
// q = 8
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11
+mrt_V12*m12+0.25*m13+0.125*(m17-m16) - 0.08333333333*(Fx+Fy);
//nread = neighborList[n+6*Np];
dist[nr7] = fq;
// q = 9
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17) + 0.08333333333*(Fx-Fy);
//nread = neighborList[n+9*Np];
dist[nr10] = fq;
// q = 10
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17)- 0.08333333333*(Fx-Fy);
//nread = neighborList[n+8*Np];
dist[nr9] = fq;
// q = 11
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12+0.25*m15+0.125*(m18-m16) + 0.08333333333*(Fx+Fz);
//nread = neighborList[n+11*Np];
dist[nr12] = fq;
// q = 12
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+
mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18) - 0.08333333333*(Fx+Fz);
//nread = neighborList[n+10*Np];
dist[nr11]= fq;
// q = 13
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15-0.125*(m16+m18) + 0.08333333333*(Fx-Fz);
//nread = neighborList[n+13*Np];
dist[nr14] = fq;
// q= 14
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15+0.125*(m16+m18) - 0.08333333333*(Fx-Fz);
//nread = neighborList[n+12*Np];
dist[nr13] = fq;
// q = 15
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18) + 0.08333333333*(Fy+Fz);
nread = neighborList[n+15*Np];
dist[nread] = fq;
// q = 16
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17)- 0.08333333333*(Fy+Fz);
nread = neighborList[n+14*Np];
dist[nread] = fq;
// q = 17
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)
-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18) + 0.08333333333*(Fy-Fz);
nread = neighborList[n+17*Np];
dist[nread] = fq;
// q = 18
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)
-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18) - 0.08333333333*(Fy-Fz);
nread = neighborList[n+16*Np];
dist[nread] = fq;
// write the velocity
ux = jx / rho0;
uy = jy / rho0;
uz = jz / rho0;
Velocity[n] = ux;
Velocity[Np+n] = uy;
Velocity[2*Np+n] = uz;
// Instantiate mass transport distributions
// Stationary value - distribution 0
nAB = 1.0/(nA+nB);
Aq[n] = 0.3333333333333333*nA;
Bq[n] = 0.3333333333333333*nB;
//...............................................
// q = 0,2,4
// Cq = {1,0,0}, {0,1,0}, {0,0,1}
delta = beta*nA*nB*nAB*0.1111111111111111*nx;
if (!(nA*nB*nAB>0)) delta=0;
a1 = nA*(0.1111111111111111*(1+4.5*ux))+delta;
b1 = nB*(0.1111111111111111*(1+4.5*ux))-delta;
a2 = nA*(0.1111111111111111*(1-4.5*ux))-delta;
b2 = nB*(0.1111111111111111*(1-4.5*ux))+delta;
// q = 1
//nread = neighborList[n+Np];
Aq[nr2] = a1;
Bq[nr2] = b1;
// q=2
//nread = neighborList[n];
Aq[nr1] = a2;
Bq[nr1] = b2;
//...............................................
// Cq = {0,1,0}
delta = beta*nA*nB*nAB*0.1111111111111111*ny;
if (!(nA*nB*nAB>0)) delta=0;
a1 = nA*(0.1111111111111111*(1+4.5*uy))+delta;
b1 = nB*(0.1111111111111111*(1+4.5*uy))-delta;
a2 = nA*(0.1111111111111111*(1-4.5*uy))-delta;
b2 = nB*(0.1111111111111111*(1-4.5*uy))+delta;
// q = 3
//nread = neighborList[n+3*Np];
Aq[nr4] = a1;
Bq[nr4] = b1;
// q = 4
//nread = neighborList[n+2*Np];
Aq[nr3] = a2;
Bq[nr3] = b2;
//...............................................
// q = 4
// Cq = {0,0,1}
delta = beta*nA*nB*nAB*0.1111111111111111*nz;
if (!(nA*nB*nAB>0)) delta=0;
a1 = nA*(0.1111111111111111*(1+4.5*uz))+delta;
b1 = nB*(0.1111111111111111*(1+4.5*uz))-delta;
a2 = nA*(0.1111111111111111*(1-4.5*uz))-delta;
b2 = nB*(0.1111111111111111*(1-4.5*uz))+delta;
// q = 5
//nread = neighborList[n+5*Np];
Aq[nr6] = a1;
Bq[nr6] = b1;
// q = 6
//nread = neighborList[n+4*Np];
Aq[nr5] = a2;
Bq[nr5] = b2;
//...............................................
}
}
}
__global__ void dvc_ScaLBL_D3Q7_AAodd_PhaseField(int *neighborList, int *Map, double *Aq, double *Bq,
		double *Den, double *Phi, int start, int finish, int Np){
	// Odd-timestep D3Q7 phase-field update: pull the mass-transport
	// distributions through the neighbor list, recover the component
	// number densities nA/nB, and store Den and the phase indicator
	// Phi = (nA-nB)/(nA+nB) at the regular-layout index Map[n].
	int S = Np/NBLOCKS/NTHREADS + 1;
	for (int s=0; s<S; s++){
		// global 1-D index for this thread on pass s
		int n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
		if (n<finish) {
			// q=0 contribution is stored at the site itself
			double nA = Aq[n];
			double nB = Bq[n];
			// q=1..6 are pulled via the neighbor list; the loop visits the
			// same q*Np offsets, in the same order, as the unrolled original
			for (int q=0; q<6; q++){
				int nread = neighborList[n+q*Np];
				nA += Aq[nread];
				nB += Bq[nread];
			}
			// save the number densities
			Den[n] = nA;
			Den[Np+n] = nB;
			// save the phase indicator field
			int idx = Map[n];
			Phi[idx] = (nA-nB)/(nA+nB);
		}
	}
}
__global__ void dvc_ScaLBL_D3Q7_AAeven_PhaseField(int *Map, double *Aq, double *Bq, double *Den, double *Phi,
		int start, int finish, int Np){
	// Even-timestep D3Q7 phase-field update: the AA pattern stores each
	// distribution in the slot of its opposite direction, so reads come
	// from the swapped offsets 2,1, 4,3, 6,5 (times Np). Recovers nA/nB
	// and stores Den and Phi = (nA-nB)/(nA+nB) at Map[n].
	int S = Np/NBLOCKS/NTHREADS + 1;
	for (int s=0; s<S; s++){
		// global 1-D index for this thread on pass s
		int n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
		if (n<finish) {
			// q=0 contribution lives at the site itself
			double nA = Aq[n];
			double nB = Bq[n];
			// accumulate the three +/- direction pairs in the same order
			// as the unrolled original: offsets 2,1 then 4,3 then 6,5
			for (int p=1; p<=3; p++){
				nA += Aq[(2*p)*Np+n];
				nB += Bq[(2*p)*Np+n];
				nA += Aq[(2*p-1)*Np+n];
				nB += Bq[(2*p-1)*Np+n];
			}
			// save the number densities
			Den[n] = nA;
			Den[Np+n] = nB;
			// save the phase indicator field
			int idx = Map[n];
			Phi[idx] = (nA-nB)/(nA+nB);
		}
	}
}
__global__ void dvc_ScaLBL_PhaseField_Init(int *Map, double *Phi, double *Den, double *Aq, double *Bq, int start, int finish, int Np){
	// Initialize the component densities Den and the equilibrium D3Q7
	// mass-transport distributions Aq/Bq from the phase indicator Phi,
	// clamping phi outside [-1,1] to the pure-component values.
	int S = Np/NBLOCKS/NTHREADS + 1;
	for (int s=0; s<S; s++){
		// global 1-D index (compact layout) for this thread on pass s
		int idx = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
		if (idx<finish) {
			// phase indicator is stored in the regular layout
			int n = Map[idx];
			double phi = Phi[n];
			double nA, nB;
			if (phi > 1.f){
				// pure component A
				nA = 1.0; nB = 0.f;
			}
			else if (phi < -1.f){
				// pure component B
				nB = 1.0; nA = 0.f;
			}
			else{
				// mixed region: linear map from phi to volume fractions
				nA = 0.5*(phi+1.f);
				nB = 0.5*(1.f-phi);
			}
			Den[idx] = nA;
			Den[Np+idx] = nB;
			// equilibrium weights: w0 = 1/3, w1..w6 = 1/9
			Aq[idx] = 0.3333333333333333*nA;
			Bq[idx] = 0.3333333333333333*nB;
			for (int q=1; q<7; q++){
				Aq[q*Np+idx] = 0.1111111111111111*nA;
				Bq[q*Np+idx] = 0.1111111111111111*nB;
			}
		}
	}
}
extern "C" void ScaLBL_SetSlice_z(double *Phi, double value, int Nx, int Ny, int Nz, int Slice){
	// Host wrapper: set every site of the x-y plane at height Slice to value.
	// One thread per site, 512 threads per block.
	int GRID = Nx*Ny / 512 + 1;
	hipLaunchKernelGGL(( dvc_ScaLBL_SetSlice_z), dim3(GRID),dim3(512), 0, 0, Phi,value,Nx,Ny,Nz,Slice);
	// check for launch errors, consistent with the other host wrappers
	hipError_t err = hipGetLastError();
	if (hipSuccess != err){
		printf("CUDA error in ScaLBL_SetSlice_z: %s \n",hipGetErrorString(err));
	}
}
extern "C" void ScaLBL_CopySlice_z(double *Phi, int Nx, int Ny, int Nz, int Source, int Dest){
	// Host wrapper: copy the x-y plane at height Source onto the plane at Dest.
	// One thread per site, 512 threads per block.
	int GRID = Nx*Ny / 512 + 1;
	hipLaunchKernelGGL(( dvc_ScaLBL_CopySlice_z), dim3(GRID),dim3(512), 0, 0, Phi,Nx,Ny,Nz,Source,Dest);
	// check for launch errors, consistent with the other host wrappers
	hipError_t err = hipGetLastError();
	if (hipSuccess != err){
		printf("CUDA error in ScaLBL_CopySlice_z: %s \n",hipGetErrorString(err));
	}
}
extern "C" void ScaLBL_Color_BC(int *list, int *Map, double *Phi, double *Den, double vA, double vB, int count, int Np){
	// Host wrapper: impose uniform component densities (vA, vB) on the
	// 'count' boundary sites listed in 'list'.
	int GRID = count / 512 + 1;
	hipLaunchKernelGGL(( dvc_ScaLBL_Color_BC), dim3(GRID),dim3(512), 0, 0, list, Map, Phi, Den, vA, vB, count, Np);
	// report any launch failure
	hipError_t err = hipGetLastError();
	if (err != hipSuccess){
		printf("CUDA error in ScaLBL_Color_BC: %s \n",hipGetErrorString(err));
	}
}
extern "C" void ScaLBL_Color_BC_YDW(int *list, int *Map, double *Phi, double *Den, double *vAv, double *vBv, int count, int Np){
	// Host wrapper: impose per-site component densities (vAv[i], vBv[i]) on
	// the 'count' boundary sites listed in 'list'.
	int GRID = count / 512 + 1;
	hipLaunchKernelGGL(( dvc_ScaLBL_Color_BC_YDW), dim3(GRID),dim3(512), 0, 0, list, Map, Phi, Den, vAv, vBv, count, Np);
	hipError_t err = hipGetLastError();
	if (hipSuccess != err){
		// fixed: the message previously reported "ScaLBL_Color_BC"
		printf("CUDA error in ScaLBL_Color_BC_YDW: %s \n",hipGetErrorString(err));
	}
}
// Pressure Boundary Conditions Functions
extern "C" void ScaLBL_D3Q19_AAeven_Color(int *Map, double *dist, double *Aq, double *Bq, double *Den, double *Phi,
		double *Vel, double rhoA, double rhoB, double tauA, double tauB, double alpha, double beta,
		double Fx, double Fy, double Fz, int strideY, int strideZ, int start, int finish, int Np){
	// Host wrapper for the even-timestep D3Q19 color-model collision kernel.
	hipProfilerStart();
	// prefer L1 cache for this kernel
	hipFuncSetCacheConfig(dvc_ScaLBL_D3Q19_AAeven_Color, hipFuncCachePreferL1);
	hipLaunchKernelGGL(( dvc_ScaLBL_D3Q19_AAeven_Color), dim3(NBLOCKS),dim3(NTHREADS) , 0, 0, Map, dist, Aq, Bq, Den, Phi, Vel, rhoA, rhoB, tauA, tauB,
			alpha, beta, Fx, Fy, Fz, strideY, strideZ, start, finish, Np);
	// report any launch failure
	hipError_t err = hipGetLastError();
	if (err != hipSuccess){
		printf("CUDA error in ScaLBL_D3Q19_AAeven_Color: %s \n",hipGetErrorString(err));
	}
	hipProfilerStop();
}
extern "C" void ScaLBL_D3Q19_AAodd_Color(int *d_neighborList, int *Map, double *dist, double *Aq, double *Bq, double *Den,
		double *Phi, double *Vel, double rhoA, double rhoB, double tauA, double tauB, double alpha, double beta,
		double Fx, double Fy, double Fz, int strideY, int strideZ, int start, int finish, int Np){
	// Host wrapper for the odd-timestep D3Q19 color-model collision kernel.
	hipProfilerStart();
	// prefer L1 cache for this kernel
	hipFuncSetCacheConfig(dvc_ScaLBL_D3Q19_AAodd_Color, hipFuncCachePreferL1);
	hipLaunchKernelGGL(( dvc_ScaLBL_D3Q19_AAodd_Color), dim3(NBLOCKS),dim3(NTHREADS) , 0, 0, d_neighborList, Map, dist, Aq, Bq, Den, Phi, Vel,
			rhoA, rhoB, tauA, tauB, alpha, beta, Fx, Fy, Fz, strideY, strideZ, start, finish, Np);
	// report any launch failure
	hipError_t err = hipGetLastError();
	if (err != hipSuccess){
		printf("CUDA error in ScaLBL_D3Q19_AAodd_Color: %s \n",hipGetErrorString(err));
	}
	hipProfilerStop();
}
extern "C" void ScaLBL_D3Q7_AAodd_PhaseField(int *NeighborList, int *Map, double *Aq, double *Bq,
		double *Den, double *Phi, int start, int finish, int Np){
	// Host wrapper for the odd-timestep D3Q7 phase-field update kernel.
	hipProfilerStart();
	hipLaunchKernelGGL(( dvc_ScaLBL_D3Q7_AAodd_PhaseField), dim3(NBLOCKS),dim3(NTHREADS) , 0, 0, NeighborList, Map, Aq, Bq, Den, Phi, start, finish, Np);
	// report any launch failure
	hipError_t err = hipGetLastError();
	if (err != hipSuccess){
		printf("CUDA error in ScaLBL_D3Q7_AAodd_PhaseField: %s \n",hipGetErrorString(err));
	}
	hipProfilerStop();
}
extern "C" void ScaLBL_D3Q7_AAeven_PhaseField(int *Map, double *Aq, double *Bq, double *Den, double *Phi,
		int start, int finish, int Np){
	// Host wrapper for the even-timestep D3Q7 phase-field update kernel.
	hipProfilerStart();
	hipLaunchKernelGGL(( dvc_ScaLBL_D3Q7_AAeven_PhaseField), dim3(NBLOCKS),dim3(NTHREADS) , 0, 0, Map, Aq, Bq, Den, Phi, start, finish, Np);
	// report any launch failure
	hipError_t err = hipGetLastError();
	if (err != hipSuccess){
		printf("CUDA error in ScaLBL_D3Q7_AAeven_PhaseField: %s \n",hipGetErrorString(err));
	}
	hipProfilerStop();
}
extern "C" void ScaLBL_PhaseField_Init(int *Map, double *Phi, double *Den, double *Aq, double *Bq, int start, int finish, int Np){
	// Host wrapper: initialize Den and the D3Q7 distributions Aq/Bq from Phi.
	hipLaunchKernelGGL(( dvc_ScaLBL_PhaseField_Init), dim3(NBLOCKS),dim3(NTHREADS) , 0, 0, Map, Phi, Den, Aq, Bq, start, finish, Np);
	// report any launch failure
	hipError_t err = hipGetLastError();
	if (err != hipSuccess){
		printf("CUDA error in ScaLBL_PhaseField_Init: %s \n",hipGetErrorString(err));
	}
}
| 7fcc145f55a3ea89bc7168edd6083f6c9629ed8e.cu | /*
Copyright 2013--2018 James E. McClure, Virginia Polytechnic & State University
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <math.h>
#include <stdio.h>
#include <cuda_profiler_api.h>
#define NBLOCKS 8192
#define NTHREADS 256
__global__ void dvc_ScaLBL_Color_Init(char *ID, double *Den, double *Phi, double das, double dbs, int Nx, int Ny, int Nz)
{
	// Initialize the two component densities and the phase indicator from
	// the domain ID map: id==1 -> pure fluid A, id==2 -> pure fluid B,
	// anything else -> solid wetting values (das, dbs).
	int N = Nx*Ny*Nz;
	int S = N/NBLOCKS/NTHREADS + 1;
	for (int s=0; s<S; s++){
		// global 1-D site index for this pass
		int n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x;
		if (n<N){
			char id = ID[n];
			double a, b;
			if (id == 1){
				a = 1.0; b = 0.0;   // pure fluid A
			}
			else if (id == 2){
				a = 0.0; b = 1.0;   // pure fluid B
			}
			else{
				a = das; b = dbs;   // solid wetting condition
			}
			Den[n] = a;
			Den[N+n] = b;
			// (a-b)/(a+b) reproduces +1 / -1 exactly for the pure cases
			Phi[n] = (a-b)/(a+b);
		}
	}
}
__global__ void dvc_ScaLBL_Color_BC(int *list, int *Map, double *Phi, double *Den, double vA, double vB, int count, int Np)
{
	// Impose uniform component densities (vA, vB) on the boundary sites in
	// 'list' and update the phase indicator accordingly.
	int idx = blockIdx.x*blockDim.x + threadIdx.x;
	if (idx < count){
		int n = list[idx];          // compact-layout site index
		Den[n] = vA;
		Den[Np+n] = vB;
		int nm = Map[n];            // regular-layout index for Phi
		Phi[nm] = (vA-vB)/(vA+vB);
	}
}
__global__ void dvc_ScaLBL_Color_BC_YDW(int *list, int *Map, double *Phi, double *Den, double *vAv, double *vBv, int count, int Np)
{
	// Impose per-site component densities (vAv[idx], vBv[idx]) on the
	// boundary sites in 'list' and update the phase indicator accordingly.
	int idx = blockIdx.x*blockDim.x + threadIdx.x;
	if (idx < count){
		int n = list[idx];           // compact-layout site index
		double a = vAv[idx];
		double b = vBv[idx];
		Den[n] = a;
		Den[Np+n] = b;
		int nm = Map[n];             // regular-layout index for Phi
		Phi[nm] = (a-b)/(a+b);
	}
}
//*************************************************************************
__global__ void dvc_ScaLBL_SetSlice_z(double *Phi, double value, int Nx, int Ny, int Nz, int Slice)
{
	// Set every site of the x-y plane at height 'Slice' to 'value'.
	int idx = blockIdx.x*blockDim.x + threadIdx.x;   // position within the slice
	if (idx < Nx*Ny){
		Phi[Slice*Nx*Ny + idx] = value;
	}
}
__global__ void dvc_ScaLBL_CopySlice_z(double *Phi, int Nx, int Ny, int Nz, int Source, int Dest){
	// Copy the x-y plane at height 'Source' onto the plane at 'Dest'.
	int idx = blockIdx.x*blockDim.x + threadIdx.x;   // position within the slice
	if (idx < Nx*Ny){
		Phi[Dest*Nx*Ny+idx] = Phi[Source*Nx*Ny+idx];
	}
}
__global__ void dvc_ScaLBL_D3Q19_AAeven_Color(int *Map, double *dist, double *Aq, double *Bq, double *Den, double *Phi,
		double *Velocity, double rhoA, double rhoB, double tauA, double tauB, double alpha, double beta,
		double Fx, double Fy, double Fz, int strideY, int strideZ, int start, int finish, int Np){
	// Even-timestep D3Q19 MRT color-model collision (AA access pattern: the
	// even step reads/writes the regular q*Np layout directly, no neighbor
	// list). For each site n in [start,finish):
	//   1. read the component densities nA,nB; form the phase indicator phi,
	//      the phase-interpolated density rho0 and relaxation rates;
	//   2. compute the color gradient (nx,ny,nz) from the 18 Phi neighbors,
	//      addressed with strideY/strideZ in the regular layout;
	//   3. transform the 19 distributions to moments, relax them (MRT of
	//      Toelke et al. 2006) adding interfacial tension (alpha) and the
	//      body force (Fx,Fy,Fz), then transform back;
	//   4. store the velocity and re-initialize the D3Q7 mass-transport
	//      distributions Aq/Bq with the recoloring term (beta).
	// NOTE(review): neighbor addressing assumes the caller provides valid
	// halo/periodic images for ijk +/- 1, strideY, strideZ -- confirm.
	int ijk,nn,n;
	double fq;
	// conserved moments
	double rho,jx,jy,jz;
	// non-conserved moments
	double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
	double m3,m5,m7;
	double nA,nB; // number density
	double a1,b1,a2,b2,nAB,delta;
	double C,nx,ny,nz; //color gradient magnitude and direction
	double ux,uy,uz;
	double phi,tau,rho0,rlx_setA,rlx_setB;
	// constants of the D3Q19 moment/inverse-moment transform
	const double mrt_V1=0.05263157894736842;
	const double mrt_V2=0.012531328320802;
	const double mrt_V3=0.04761904761904762;
	const double mrt_V4=0.004594820384294068;
	const double mrt_V5=0.01587301587301587;
	const double mrt_V6=0.0555555555555555555555555;
	const double mrt_V7=0.02777777777777778;
	const double mrt_V8=0.08333333333333333;
	const double mrt_V9=0.003341687552213868;
	const double mrt_V10=0.003968253968253968;
	const double mrt_V11=0.01388888888888889;
	const double mrt_V12=0.04166666666666666;
	int S = Np/NBLOCKS/NTHREADS + 1;
	for (int s=0; s<S; s++){
		//........Get 1-D index for this thread....................
		n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
		if (n<finish) {
			// read the component number densities
			nA = Den[n];
			nB = Den[Np + n];
			// compute phase indicator field
			phi=(nA-nB)/(nA+nB);
			// local density
			rho0=rhoA + 0.5*(1.0-phi)*(rhoB-rhoA);
			// local relaxation time
			tau=tauA + 0.5*(1.0-phi)*(tauB-tauA);
			rlx_setA = 1.f/tau;
			rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
			// Get the 1D index based on regular data layout
			ijk = Map[n];
			// COMPUTE THE COLOR GRADIENT
			//........................................................................
			//.................Read Phase Indicator Values............................
			//........................................................................
			nn = ijk-1;							// neighbor index (get convention)
			m1 = Phi[nn];						// get neighbor for phi - 1
			//........................................................................
			nn = ijk+1;							// neighbor index (get convention)
			m2 = Phi[nn];						// get neighbor for phi - 2
			//........................................................................
			nn = ijk-strideY;					// neighbor index (get convention)
			m3 = Phi[nn];						// get neighbor for phi - 3
			//........................................................................
			nn = ijk+strideY;					// neighbor index (get convention)
			m4 = Phi[nn];						// get neighbor for phi - 4
			//........................................................................
			nn = ijk-strideZ;					// neighbor index (get convention)
			m5 = Phi[nn];						// get neighbor for phi - 5
			//........................................................................
			nn = ijk+strideZ;					// neighbor index (get convention)
			m6 = Phi[nn];						// get neighbor for phi - 6
			//........................................................................
			nn = ijk-strideY-1;					// neighbor index (get convention)
			m7 = Phi[nn];						// get neighbor for phi - 7
			//........................................................................
			nn = ijk+strideY+1;					// neighbor index (get convention)
			m8 = Phi[nn];						// get neighbor for phi - 8
			//........................................................................
			nn = ijk+strideY-1;					// neighbor index (get convention)
			m9 = Phi[nn];						// get neighbor for phi - 9
			//........................................................................
			nn = ijk-strideY+1;					// neighbor index (get convention)
			m10 = Phi[nn];						// get neighbor for phi - 10
			//........................................................................
			nn = ijk-strideZ-1;					// neighbor index (get convention)
			m11 = Phi[nn];						// get neighbor for phi - 11
			//........................................................................
			nn = ijk+strideZ+1;					// neighbor index (get convention)
			m12 = Phi[nn];						// get neighbor for phi - 12
			//........................................................................
			nn = ijk+strideZ-1;					// neighbor index (get convention)
			m13 = Phi[nn];						// get neighbor for phi - 13
			//........................................................................
			nn = ijk-strideZ+1;					// neighbor index (get convention)
			m14 = Phi[nn];						// get neighbor for phi - 14
			//........................................................................
			nn = ijk-strideZ-strideY;			// neighbor index (get convention)
			m15 = Phi[nn];						// get neighbor for phi - 15
			//........................................................................
			nn = ijk+strideZ+strideY;			// neighbor index (get convention)
			m16 = Phi[nn];						// get neighbor for phi - 16
			//........................................................................
			nn = ijk+strideZ-strideY;			// neighbor index (get convention)
			m17 = Phi[nn];						// get neighbor for phi - 17
			//........................................................................
			nn = ijk-strideZ+strideY;			// neighbor index (get convention)
			m18 = Phi[nn];						// get neighbor for phi - 18
			//............Compute the Color Gradient...................................
			nx = -(m1-m2+0.5*(m7-m8+m9-m10+m11-m12+m13-m14));
			ny = -(m3-m4+0.5*(m7-m8-m9+m10+m15-m16+m17-m18));
			nz = -(m5-m6+0.5*(m11-m12-m13+m14+m15-m16-m17+m18));
			//...........Normalize the Color Gradient.................................
			C = sqrt(nx*nx+ny*ny+nz*nz);
			// avoid division by zero at sites with a vanishing gradient
			double ColorMag = C;
			if (C==0.0) ColorMag=1.0;
			nx = nx/ColorMag;
			ny = ny/ColorMag;
			nz = nz/ColorMag;
			// ---- transform distributions to moments (even layout, q=0..18) ----
			// q=0
			fq = dist[n];
			rho = fq;
			m1  = -30.0*fq;
			m2  = 12.0*fq;
			// q=1
			fq = dist[2*Np+n];
			rho += fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jx = fq;
			m4 = -4.0*fq;
			m9 = 2.0*fq;
			m10 = -4.0*fq;
			// f2 = dist[10*Np+n];
			fq = dist[1*Np+n];
			rho += fq;
			m1 -= 11.0*(fq);
			m2 -= 4.0*(fq);
			jx -= fq;
			m4 += 4.0*(fq);
			m9 += 2.0*(fq);
			m10 -= 4.0*(fq);
			// q=3
			fq = dist[4*Np+n];
			rho += fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jy = fq;
			m6 = -4.0*fq;
			m9 -= fq;
			m10 += 2.0*fq;
			m11 = fq;
			m12 = -2.0*fq;
			// q = 4
			fq = dist[3*Np+n];
			rho+= fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jy -= fq;
			m6 += 4.0*fq;
			m9 -= fq;
			m10 += 2.0*fq;
			m11 += fq;
			m12 -= 2.0*fq;
			// q=5
			fq = dist[6*Np+n];
			rho += fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jz = fq;
			m8 = -4.0*fq;
			m9 -= fq;
			m10 += 2.0*fq;
			m11 -= fq;
			m12 += 2.0*fq;
			// q = 6
			fq = dist[5*Np+n];
			rho+= fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jz -= fq;
			m8 += 4.0*fq;
			m9 -= fq;
			m10 += 2.0*fq;
			m11 -= fq;
			m12 += 2.0*fq;
			// q=7
			fq = dist[8*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx += fq;
			m4 += fq;
			jy += fq;
			m6 += fq;
			m9  += fq;
			m10 += fq;
			m11 += fq;
			m12 += fq;
			m13 = fq;
			m16 = fq;
			m17 = -fq;
			// q = 8
			fq = dist[7*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx -= fq;
			m4 -= fq;
			jy -= fq;
			m6 -= fq;
			m9 += fq;
			m10 += fq;
			m11 += fq;
			m12 += fq;
			m13 += fq;
			m16 -= fq;
			m17 += fq;
			// q=9
			fq = dist[10*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx += fq;
			m4 += fq;
			jy -= fq;
			m6 -= fq;
			m9 += fq;
			m10 += fq;
			m11 += fq;
			m12 += fq;
			m13 -= fq;
			m16 += fq;
			m17 += fq;
			// q = 10
			fq = dist[9*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx -= fq;
			m4 -= fq;
			jy += fq;
			m6 += fq;
			m9 += fq;
			m10 += fq;
			m11 += fq;
			m12 += fq;
			m13 -= fq;
			m16 -= fq;
			m17 -= fq;
			// q=11
			fq = dist[12*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx += fq;
			m4 += fq;
			jz += fq;
			m8 += fq;
			m9 += fq;
			m10 += fq;
			m11 -= fq;
			m12 -= fq;
			m15 = fq;
			m16 -= fq;
			m18 = fq;
			// q=12
			fq = dist[11*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx -= fq;
			m4 -= fq;
			jz -= fq;
			m8 -= fq;
			m9 += fq;
			m10 += fq;
			m11 -= fq;
			m12 -= fq;
			m15 += fq;
			m16 += fq;
			m18 -= fq;
			// q=13
			fq = dist[14*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx += fq;
			m4 += fq;
			jz -= fq;
			m8 -= fq;
			m9 += fq;
			m10 += fq;
			m11 -= fq;
			m12 -= fq;
			m15 -= fq;
			m16 -= fq;
			m18 -= fq;
			// q=14
			fq = dist[13*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx -= fq;
			m4 -= fq;
			jz += fq;
			m8 += fq;
			m9 += fq;
			m10 += fq;
			m11 -= fq;
			m12 -= fq;
			m15 -= fq;
			m16 += fq;
			m18 += fq;
			// q=15
			fq = dist[16*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jy += fq;
			m6 += fq;
			jz += fq;
			m8 += fq;
			m9 -= 2.0*fq;
			m10 -= 2.0*fq;
			m14 = fq;
			m17 += fq;
			m18 -= fq;
			// q=16
			fq = dist[15*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jy -= fq;
			m6 -= fq;
			jz -= fq;
			m8 -= fq;
			m9 -= 2.0*fq;
			m10 -= 2.0*fq;
			m14 += fq;
			m17 -= fq;
			m18 += fq;
			// q=17
			fq = dist[18*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jy += fq;
			m6 += fq;
			jz -= fq;
			m8 -= fq;
			m9 -= 2.0*fq;
			m10 -= 2.0*fq;
			m14 -= fq;
			m17 += fq;
			m18 += fq;
			// q=18
			fq = dist[17*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jy -= fq;
			m6 -= fq;
			jz += fq;
			m8 += fq;
			m9 -= 2.0*fq;
			m10 -= 2.0*fq;
			m14 -= fq;
			m17 -= fq;
			m18 -= fq;
			//........................................................................
			//..............carry out relaxation process..............................
			//..........Toelke, Fruediger et. al. 2006................................
			if (C == 0.0)	nx = ny = nz = 0.0;
			m1 = m1 + rlx_setA*((19*(jx*jx+jy*jy+jz*jz)/rho0 - 11*rho) -19*alpha*C - m1);
			m2 = m2 + rlx_setA*((3*rho - 5.5*(jx*jx+jy*jy+jz*jz)/rho0)- m2);
			m4 = m4 + rlx_setB*((-0.6666666666666666*jx)- m4);
			m6 = m6 + rlx_setB*((-0.6666666666666666*jy)- m6);
			m8 = m8 + rlx_setB*((-0.6666666666666666*jz)- m8);
			m9 = m9 + rlx_setA*(((2*jx*jx-jy*jy-jz*jz)/rho0) + 0.5*alpha*C*(2*nx*nx-ny*ny-nz*nz) - m9);
			m10 = m10 + rlx_setA*( - m10);
			m11 = m11 + rlx_setA*(((jy*jy-jz*jz)/rho0) + 0.5*alpha*C*(ny*ny-nz*nz)- m11);
			m12 = m12 + rlx_setA*( - m12);
			m13 = m13 + rlx_setA*( (jx*jy/rho0) + 0.5*alpha*C*nx*ny - m13);
			m14 = m14 + rlx_setA*( (jy*jz/rho0) + 0.5*alpha*C*ny*nz - m14);
			m15 = m15 + rlx_setA*( (jx*jz/rho0) + 0.5*alpha*C*nx*nz - m15);
			m16 = m16 + rlx_setB*( - m16);
			m17 = m17 + rlx_setB*( - m17);
			m18 = m18 + rlx_setB*( - m18);
			//.......................................................................................................
			//.................inverse transformation......................................................
			// q=0
			fq = mrt_V1*rho-mrt_V2*m1+mrt_V3*m2;
			dist[n] = fq;
			// q = 1
			fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10) + 0.16666666*Fx;
			dist[1*Np+n] = fq;
			// q=2
			fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10) - 0.16666666*Fx;
			dist[2*Np+n] = fq;
			// q = 3
			fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12) + 0.16666666*Fy;
			dist[3*Np+n] = fq;
			// q = 4
			fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12) - 0.16666666*Fy;
			dist[4*Np+n] = fq;
			// q = 5
			fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11) + 0.16666666*Fz;
			dist[5*Np+n] = fq;
			// q = 6
			fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11) - 0.16666666*Fz;
			dist[6*Np+n] = fq;
			// q = 7
			fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+
					mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17) + 0.08333333333*(Fx+Fy);
			dist[7*Np+n] = fq;
			// q = 8
			fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11
					+mrt_V12*m12+0.25*m13+0.125*(m17-m16) - 0.08333333333*(Fx+Fy);
			dist[8*Np+n] = fq;
			// q = 9
			fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+
					mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17) + 0.08333333333*(Fx-Fy);
			dist[9*Np+n] = fq;
			// q = 10
			fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+
					mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17)- 0.08333333333*(Fx-Fy);
			dist[10*Np+n] = fq;
			// q = 11
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)
					+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
					-mrt_V12*m12+0.25*m15+0.125*(m18-m16) + 0.08333333333*(Fx+Fz);
			dist[11*Np+n] = fq;
			// q = 12
			fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+
					mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18)-0.08333333333*(Fx+Fz);
			dist[12*Np+n] = fq;
			// q = 13
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)
					+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
					-mrt_V12*m12-0.25*m15-0.125*(m16+m18) + 0.08333333333*(Fx-Fz);
			dist[13*Np+n] = fq;
			// q= 14
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)
					+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
					-mrt_V12*m12-0.25*m15+0.125*(m16+m18) - 0.08333333333*(Fx-Fz);
			dist[14*Np+n] = fq;
			// q = 15
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)
					-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18) + 0.08333333333*(Fy+Fz);
			dist[15*Np+n] = fq;
			// q = 16
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)
					-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17)- 0.08333333333*(Fy+Fz);
			dist[16*Np+n] = fq;
			// q = 17
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)
					-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18) + 0.08333333333*(Fy-Fz);
			dist[17*Np+n] = fq;
			// q = 18
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)
					-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18) - 0.08333333333*(Fy-Fz);
			dist[18*Np+n] = fq;
			//........................................................................
			// write the velocity
			ux = jx / rho0;
			uy = jy / rho0;
			uz = jz / rho0;
			Velocity[n] = ux;
			Velocity[Np+n] = uy;
			Velocity[2*Np+n] = uz;
			// Instantiate mass transport distributions
			// Stationary value - distribution 0
			nAB = 1.0/(nA+nB);
			Aq[n] = 0.3333333333333333*nA;
			Bq[n] = 0.3333333333333333*nB;
			//...............................................
			// q = 1,2: Cq = {1,0,0} (recoloring along x)
			delta = beta*nA*nB*nAB*0.1111111111111111*nx;
			if (!(nA*nB*nAB>0)) delta=0;
			a1 = nA*(0.1111111111111111*(1+4.5*ux))+delta;
			b1 = nB*(0.1111111111111111*(1+4.5*ux))-delta;
			a2 = nA*(0.1111111111111111*(1-4.5*ux))-delta;
			b2 = nB*(0.1111111111111111*(1-4.5*ux))+delta;
			Aq[1*Np+n] = a1;
			Bq[1*Np+n] = b1;
			Aq[2*Np+n] = a2;
			Bq[2*Np+n] = b2;
			//...............................................
			// q = 3,4: Cq = {0,1,0} (recoloring along y)
			delta = beta*nA*nB*nAB*0.1111111111111111*ny;
			if (!(nA*nB*nAB>0)) delta=0;
			a1 = nA*(0.1111111111111111*(1+4.5*uy))+delta;
			b1 = nB*(0.1111111111111111*(1+4.5*uy))-delta;
			a2 = nA*(0.1111111111111111*(1-4.5*uy))-delta;
			b2 = nB*(0.1111111111111111*(1-4.5*uy))+delta;
			Aq[3*Np+n] = a1;
			Bq[3*Np+n] = b1;
			Aq[4*Np+n] = a2;
			Bq[4*Np+n] = b2;
			//...............................................
			// q = 5,6: Cq = {0,0,1} (recoloring along z)
			delta = beta*nA*nB*nAB*0.1111111111111111*nz;
			if (!(nA*nB*nAB>0)) delta=0;
			a1 = nA*(0.1111111111111111*(1+4.5*uz))+delta;
			b1 = nB*(0.1111111111111111*(1+4.5*uz))-delta;
			a2 = nA*(0.1111111111111111*(1-4.5*uz))-delta;
			b2 = nB*(0.1111111111111111*(1-4.5*uz))+delta;
			Aq[5*Np+n] = a1;
			Bq[5*Np+n] = b1;
			Aq[6*Np+n] = a2;
			Bq[6*Np+n] = b2;
			//...............................................
		}
	}
}
// D3Q19 MRT collision + streaming for the two-fluid color model, "AA odd"
// timestep (distributions are pulled/pushed through neighborList rather than
// read at fixed slot offsets). For each site n in [start,finish):
//   1. read component number densities nA,nB from Den; form the phase
//      indicator phi, the interpolated density rho0 and relaxation time tau;
//   2. compute the color gradient (nx,ny,nz) and its magnitude C from the 18
//      neighbors of Phi (dense index ijk = Map[n], strides strideY/strideZ);
//   3. transform the 19 distributions to moments m1..m18, relax them
//      (rlx_setA/rlx_setB) with body force (Fx,Fy,Fz) and interfacial
//      perturbation proportional to alpha*C;
//   4. transform back, write the post-collision distributions through the
//      cached neighbor links, store the velocity, and write the recolored
//      D3Q7 mass-transport distributions Aq/Bq (beta = recoloring strength).
// Launched with NBLOCKS x NTHREADS; each thread strides over S sites.
__global__ void dvc_ScaLBL_D3Q19_AAodd_Color(int *neighborList, int *Map, double *dist, double *Aq, double *Bq, double *Den,
double *Phi, double *Velocity, double rhoA, double rhoB, double tauA, double tauB, double alpha, double beta,
double Fx, double Fy, double Fz, int strideY, int strideZ, int start, int finish, int Np){
int n,nn,ijk,nread;
int nr1,nr2,nr3,nr4,nr5,nr6;
int nr7,nr8,nr9,nr10;
int nr11,nr12,nr13,nr14;
//int nr15,nr16,nr17,nr18;
double fq;
// conserved momemnts
double rho,jx,jy,jz;
// non-conserved moments
double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
double m3,m5,m7;
double nA,nB; // number density
double a1,b1,a2,b2,nAB,delta;
double C,nx,ny,nz; //color gradient magnitude and direction
double ux,uy,uz;
double phi,tau,rho0,rlx_setA,rlx_setB;
// precomputed entries of the inverse D3Q19 MRT transform matrix
const double mrt_V1=0.05263157894736842;
const double mrt_V2=0.012531328320802;
const double mrt_V3=0.04761904761904762;
const double mrt_V4=0.004594820384294068;
const double mrt_V5=0.01587301587301587;
const double mrt_V6=0.0555555555555555555555555;
const double mrt_V7=0.02777777777777778;
const double mrt_V8=0.08333333333333333;
const double mrt_V9=0.003341687552213868;
const double mrt_V10=0.003968253968253968;
const double mrt_V11=0.01388888888888889;
const double mrt_V12=0.04166666666666666;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if (n<finish) {
// read the component number densities
nA = Den[n];
nB = Den[Np + n];
// compute phase indicator field
phi=(nA-nB)/(nA+nB);
// local density
rho0=rhoA + 0.5*(1.0-phi)*(rhoB-rhoA);
// local relaxation time
tau=tauA + 0.5*(1.0-phi)*(tauB-tauA);
rlx_setA = 1.f/tau;
rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
// Get the 1D index based on regular data layout
ijk = Map[n];
// COMPUTE THE COLOR GRADIENT
//........................................................................
//.................Read Phase Indicator Values............................
//........................................................................
nn = ijk-1; // neighbor index (get convention)
m1 = Phi[nn]; // get neighbor for phi - 1
//........................................................................
nn = ijk+1; // neighbor index (get convention)
m2 = Phi[nn]; // get neighbor for phi - 2
//........................................................................
nn = ijk-strideY; // neighbor index (get convention)
m3 = Phi[nn]; // get neighbor for phi - 3
//........................................................................
nn = ijk+strideY; // neighbor index (get convention)
m4 = Phi[nn]; // get neighbor for phi - 4
//........................................................................
nn = ijk-strideZ; // neighbor index (get convention)
m5 = Phi[nn]; // get neighbor for phi - 5
//........................................................................
nn = ijk+strideZ; // neighbor index (get convention)
m6 = Phi[nn]; // get neighbor for phi - 6
//........................................................................
nn = ijk-strideY-1; // neighbor index (get convention)
m7 = Phi[nn]; // get neighbor for phi - 7
//........................................................................
nn = ijk+strideY+1; // neighbor index (get convention)
m8 = Phi[nn]; // get neighbor for phi - 8
//........................................................................
nn = ijk+strideY-1; // neighbor index (get convention)
m9 = Phi[nn]; // get neighbor for phi - 9
//........................................................................
nn = ijk-strideY+1; // neighbor index (get convention)
m10 = Phi[nn]; // get neighbor for phi - 10
//........................................................................
nn = ijk-strideZ-1; // neighbor index (get convention)
m11 = Phi[nn]; // get neighbor for phi - 11
//........................................................................
nn = ijk+strideZ+1; // neighbor index (get convention)
m12 = Phi[nn]; // get neighbor for phi - 12
//........................................................................
nn = ijk+strideZ-1; // neighbor index (get convention)
m13 = Phi[nn]; // get neighbor for phi - 13
//........................................................................
nn = ijk-strideZ+1; // neighbor index (get convention)
m14 = Phi[nn]; // get neighbor for phi - 14
//........................................................................
nn = ijk-strideZ-strideY; // neighbor index (get convention)
m15 = Phi[nn]; // get neighbor for phi - 15
//........................................................................
nn = ijk+strideZ+strideY; // neighbor index (get convention)
m16 = Phi[nn]; // get neighbor for phi - 16
//........................................................................
nn = ijk+strideZ-strideY; // neighbor index (get convention)
m17 = Phi[nn]; // get neighbor for phi - 17
//........................................................................
nn = ijk-strideZ+strideY; // neighbor index (get convention)
m18 = Phi[nn]; // get neighbor for phi - 18
//............Compute the Color Gradient...................................
nx = -(m1-m2+0.5*(m7-m8+m9-m10+m11-m12+m13-m14));
ny = -(m3-m4+0.5*(m7-m8-m9+m10+m15-m16+m17-m18));
nz = -(m5-m6+0.5*(m11-m12-m13+m14+m15-m16-m17+m18));
//...........Normalize the Color Gradient.................................
C = sqrt(nx*nx+ny*ny+nz*nz);
if (C==0.0) C=1.0;
nx = nx/C;
ny = ny/C;
nz = nz/C;
// ------------------------------------------------------------------
// Read the 19 distributions and accumulate the moments (m1..m18 are
// reused as moment accumulators from here on).  AA-odd pattern: each
// direction q=1..14 is pulled through neighborList and the link index
// nr1..nr14 is cached so the post-collision value can be written back
// to the same pulled location below (q=15..18 re-read neighborList
// instead of caching).
// ------------------------------------------------------------------
// q=0
fq = dist[n];
rho = fq;
m1 = -30.0*fq;
m2 = 12.0*fq;
// q=1
//nread = neighborList[n]; // neighbor 2
//fq = dist[nread]; // reading the f1 data into register fq
nr1 = neighborList[n];
fq = dist[nr1]; // reading the f1 data into register fq
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jx = fq;
m4 = -4.0*fq;
m9 = 2.0*fq;
m10 = -4.0*fq;
// f2 = dist[10*Np+n];
//nread = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
//fq = dist[nread]; // reading the f2 data into register fq
nr2 = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
fq = dist[nr2]; // reading the f2 data into register fq
rho += fq;
m1 -= 11.0*(fq);
m2 -= 4.0*(fq);
jx -= fq;
m4 += 4.0*(fq);
m9 += 2.0*(fq);
m10 -= 4.0*(fq);
// q=3
//nread = neighborList[n+2*Np]; // neighbor 4
//fq = dist[nread];
nr3 = neighborList[n+2*Np]; // neighbor 4
fq = dist[nr3];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy = fq;
m6 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 = fq;
m12 = -2.0*fq;
// q = 4
//nread = neighborList[n+3*Np]; // neighbor 3
//fq = dist[nread];
nr4 = neighborList[n+3*Np]; // neighbor 3
fq = dist[nr4];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy -= fq;
m6 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 += fq;
m12 -= 2.0*fq;
// q=5
//nread = neighborList[n+4*Np];
//fq = dist[nread];
nr5 = neighborList[n+4*Np];
fq = dist[nr5];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz = fq;
m8 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q = 6
//nread = neighborList[n+5*Np];
//fq = dist[nread];
nr6 = neighborList[n+5*Np];
fq = dist[nr6];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz -= fq;
m8 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q=7
//nread = neighborList[n+6*Np];
//fq = dist[nread];
nr7 = neighborList[n+6*Np];
fq = dist[nr7];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 = fq;
m16 = fq;
m17 = -fq;
// q = 8
//nread = neighborList[n+7*Np];
//fq = dist[nread];
nr8 = neighborList[n+7*Np];
fq = dist[nr8];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 += fq;
m16 -= fq;
m17 += fq;
// q=9
//nread = neighborList[n+8*Np];
//fq = dist[nread];
nr9 = neighborList[n+8*Np];
fq = dist[nr9];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 += fq;
m17 += fq;
// q = 10
//nread = neighborList[n+9*Np];
//fq = dist[nread];
nr10 = neighborList[n+9*Np];
fq = dist[nr10];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 -= fq;
m17 -= fq;
// q=11
//nread = neighborList[n+10*Np];
//fq = dist[nread];
nr11 = neighborList[n+10*Np];
fq = dist[nr11];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 = fq;
m16 -= fq;
m18 = fq;
// q=12
//nread = neighborList[n+11*Np];
//fq = dist[nread];
nr12 = neighborList[n+11*Np];
fq = dist[nr12];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 += fq;
m16 += fq;
m18 -= fq;
// q=13
//nread = neighborList[n+12*Np];
//fq = dist[nread];
nr13 = neighborList[n+12*Np];
fq = dist[nr13];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 -= fq;
m18 -= fq;
// q=14
//nread = neighborList[n+13*Np];
//fq = dist[nread];
nr14 = neighborList[n+13*Np];
fq = dist[nr14];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 += fq;
m18 += fq;
// q=15
nread = neighborList[n+14*Np];
fq = dist[nread];
//fq = dist[17*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 = fq;
m17 += fq;
m18 -= fq;
// q=16
nread = neighborList[n+15*Np];
fq = dist[nread];
//fq = dist[8*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 += fq;
m17 -= fq;
m18 += fq;
// q=17
//fq = dist[18*Np+n];
nread = neighborList[n+16*Np];
fq = dist[nread];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 += fq;
m18 += fq;
// q=18
nread = neighborList[n+17*Np];
fq = dist[nread];
//fq = dist[9*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 -= fq;
m18 -= fq;
//........................................................................
//..............carry out relaxation process..............................
//..........Toelke, Fruediger et. al. 2006................................
// (inert branch: C was replaced by 1.0 above when the gradient vanished,
// and nx,ny,nz are already 0 in that case)
if (C == 0.0) nx = ny = nz = 0.0;
m1 = m1 + rlx_setA*((19*(jx*jx+jy*jy+jz*jz)/rho0 - 11*rho) -19*alpha*C - m1);
m2 = m2 + rlx_setA*((3*rho - 5.5*(jx*jx+jy*jy+jz*jz)/rho0)- m2);
m4 = m4 + rlx_setB*((-0.6666666666666666*jx)- m4);
m6 = m6 + rlx_setB*((-0.6666666666666666*jy)- m6);
m8 = m8 + rlx_setB*((-0.6666666666666666*jz)- m8);
m9 = m9 + rlx_setA*(((2*jx*jx-jy*jy-jz*jz)/rho0) + 0.5*alpha*C*(2*nx*nx-ny*ny-nz*nz) - m9);
m10 = m10 + rlx_setA*( - m10);
m11 = m11 + rlx_setA*(((jy*jy-jz*jz)/rho0) + 0.5*alpha*C*(ny*ny-nz*nz)- m11);
m12 = m12 + rlx_setA*( - m12);
m13 = m13 + rlx_setA*( (jx*jy/rho0) + 0.5*alpha*C*nx*ny - m13);
m14 = m14 + rlx_setA*( (jy*jz/rho0) + 0.5*alpha*C*ny*nz - m14);
m15 = m15 + rlx_setA*( (jx*jz/rho0) + 0.5*alpha*C*nx*nz - m15);
m16 = m16 + rlx_setB*( - m16);
m17 = m17 + rlx_setB*( - m17);
m18 = m18 + rlx_setB*( - m18);
//.................inverse transformation......................................................
// Post-collision distributions are pushed back through the cached links
// (nr1..nr14) so that each q lands in the slot its opposite was pulled
// from — the AA-odd streaming step.
// q=0
fq = mrt_V1*rho-mrt_V2*m1+mrt_V3*m2;
dist[n] = fq;
// q = 1
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10)+0.16666666*Fx;
//nread = neighborList[n+Np];
dist[nr2] = fq;
// q=2
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10) - 0.16666666*Fx;
//nread = neighborList[n];
dist[nr1] = fq;
// q = 3
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12) + 0.16666666*Fy;
//nread = neighborList[n+3*Np];
dist[nr4] = fq;
// q = 4
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12) - 0.16666666*Fy;
//nread = neighborList[n+2*Np];
dist[nr3] = fq;
// q = 5
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11) + 0.16666666*Fz;
//nread = neighborList[n+5*Np];
dist[nr6] = fq;
// q = 6
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11) - 0.16666666*Fz;
//nread = neighborList[n+4*Np];
dist[nr5] = fq;
// q = 7
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17) + 0.08333333333*(Fx+Fy);
//nread = neighborList[n+7*Np];
dist[nr8] = fq;
// q = 8
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11
+mrt_V12*m12+0.25*m13+0.125*(m17-m16) - 0.08333333333*(Fx+Fy);
//nread = neighborList[n+6*Np];
dist[nr7] = fq;
// q = 9
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17) + 0.08333333333*(Fx-Fy);
//nread = neighborList[n+9*Np];
dist[nr10] = fq;
// q = 10
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17)- 0.08333333333*(Fx-Fy);
//nread = neighborList[n+8*Np];
dist[nr9] = fq;
// q = 11
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12+0.25*m15+0.125*(m18-m16) + 0.08333333333*(Fx+Fz);
//nread = neighborList[n+11*Np];
dist[nr12] = fq;
// q = 12
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+
mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18) - 0.08333333333*(Fx+Fz);
//nread = neighborList[n+10*Np];
dist[nr11]= fq;
// q = 13
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15-0.125*(m16+m18) + 0.08333333333*(Fx-Fz);
//nread = neighborList[n+13*Np];
dist[nr14] = fq;
// q= 14
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15+0.125*(m16+m18) - 0.08333333333*(Fx-Fz);
//nread = neighborList[n+12*Np];
dist[nr13] = fq;
// q = 15
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18) + 0.08333333333*(Fy+Fz);
nread = neighborList[n+15*Np];
dist[nread] = fq;
// q = 16
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17)- 0.08333333333*(Fy+Fz);
nread = neighborList[n+14*Np];
dist[nread] = fq;
// q = 17
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)
-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18) + 0.08333333333*(Fy-Fz);
nread = neighborList[n+17*Np];
dist[nread] = fq;
// q = 18
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)
-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18) - 0.08333333333*(Fy-Fz);
nread = neighborList[n+16*Np];
dist[nread] = fq;
// write the velocity
ux = jx / rho0;
uy = jy / rho0;
uz = jz / rho0;
Velocity[n] = ux;
Velocity[Np+n] = uy;
Velocity[2*Np+n] = uz;
// Instantiate mass transport distributions
// Stationary value - distribution 0
nAB = 1.0/(nA+nB);
Aq[n] = 0.3333333333333333*nA;
Bq[n] = 0.3333333333333333*nB;
//...............................................
// q = 0,2,4
// Cq = {1,0,0}, {0,1,0}, {0,0,1}
// delta is the recoloring (anti-diffusion) flux along the color gradient;
// it is suppressed when either component is absent at this site.
delta = beta*nA*nB*nAB*0.1111111111111111*nx;
if (!(nA*nB*nAB>0)) delta=0;
a1 = nA*(0.1111111111111111*(1+4.5*ux))+delta;
b1 = nB*(0.1111111111111111*(1+4.5*ux))-delta;
a2 = nA*(0.1111111111111111*(1-4.5*ux))-delta;
b2 = nB*(0.1111111111111111*(1-4.5*ux))+delta;
// q = 1
//nread = neighborList[n+Np];
Aq[nr2] = a1;
Bq[nr2] = b1;
// q=2
//nread = neighborList[n];
Aq[nr1] = a2;
Bq[nr1] = b2;
//...............................................
// Cq = {0,1,0}
delta = beta*nA*nB*nAB*0.1111111111111111*ny;
if (!(nA*nB*nAB>0)) delta=0;
a1 = nA*(0.1111111111111111*(1+4.5*uy))+delta;
b1 = nB*(0.1111111111111111*(1+4.5*uy))-delta;
a2 = nA*(0.1111111111111111*(1-4.5*uy))-delta;
b2 = nB*(0.1111111111111111*(1-4.5*uy))+delta;
// q = 3
//nread = neighborList[n+3*Np];
Aq[nr4] = a1;
Bq[nr4] = b1;
// q = 4
//nread = neighborList[n+2*Np];
Aq[nr3] = a2;
Bq[nr3] = b2;
//...............................................
// q = 4
// Cq = {0,0,1}
delta = beta*nA*nB*nAB*0.1111111111111111*nz;
if (!(nA*nB*nAB>0)) delta=0;
a1 = nA*(0.1111111111111111*(1+4.5*uz))+delta;
b1 = nB*(0.1111111111111111*(1+4.5*uz))-delta;
a2 = nA*(0.1111111111111111*(1-4.5*uz))-delta;
b2 = nB*(0.1111111111111111*(1-4.5*uz))+delta;
// q = 5
//nread = neighborList[n+5*Np];
Aq[nr6] = a1;
Bq[nr6] = b1;
// q = 6
//nread = neighborList[n+4*Np];
Aq[nr5] = a2;
Bq[nr5] = b2;
//...............................................
}
}
}
// Recover the component number densities (nA, nB) and the phase indicator
// from the D3Q7 mass-transport distributions on an AA-"odd" timestep:
// the rest population (q=0) lives at slot n, while directions q=1..6 are
// pulled through neighborList.  Results go to Den (two components) and to
// Phi at the dense index Map[n].
__global__ void dvc_ScaLBL_D3Q7_AAodd_PhaseField(int *neighborList, int *Map, double *Aq, double *Bq,
double *Den, double *Phi, int start, int finish, int Np){
int idx,n,nread;
double nA,nB;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if (n<finish) {
// q=0 (rest population) is stored directly at this site
nA = Aq[n];
nB = Bq[n];
// q=1..6: pull each moving population through its neighbor link,
// accumulating in the same order as the unrolled original
for (int q=0; q<6; q++){
nread = neighborList[n+q*Np];
nA += Aq[nread];
nB += Bq[nread];
}
// save the number densities
Den[n] = nA;
Den[Np+n] = nB;
// save the phase indicator field
idx = Map[n];
Phi[idx] = (nA-nB)/(nA+nB);
}
}
}
// Recover the component number densities (nA, nB) and the phase indicator
// from the D3Q7 mass-transport distributions on an AA-"even" timestep:
// each moving population q is stored in the slot of its opposite direction
// (1<->2, 3<->4, 5<->6), so the reads below swap paired slots.  Results go
// to Den (two components) and to Phi at the dense index Map[n].
__global__ void dvc_ScaLBL_D3Q7_AAeven_PhaseField(int *Map, double *Aq, double *Bq, double *Den, double *Phi,
int start, int finish, int Np){
int idx,n;
double nA,nB;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if (n<finish) {
// q=0 (rest population)
nA = Aq[n];
nB = Bq[n];
// q=1..6 read from the opposite direction's slot, in the same
// accumulation order (2,1,4,3,6,5) as the unrolled original
for (int q=1; q<7; q++){
int slot = (q%2==1) ? q+1 : q-1;
nA += Aq[slot*Np+n];
nB += Bq[slot*Np+n];
}
// save the number densities
Den[n] = nA;
Den[Np+n] = nB;
// save the phase indicator field
idx = Map[n];
Phi[idx] = (nA-nB)/(nA+nB);
}
}
}
// Initialize the two-component density field and the D3Q7 mass-transport
// distributions from the phase indicator Phi.  phi is clamped to [-1,1]
// and converted to component fractions nA, nB; Aq/Bq are filled with the
// rest-state weights (1/3 for q=0, 1/9 for q=1..6).
__global__ void dvc_ScaLBL_PhaseField_Init(int *Map, double *Phi, double *Den, double *Aq, double *Bq, int start, int finish, int Np){
int idx,n;
double phi,nA,nB;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
idx = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if (idx<finish) {
n = Map[idx];
phi = Phi[n];
// clamp phi and split into component fractions
if (phi > 1.f){
nA = 1.0; nB = 0.f;
}
else if (phi < -1.f){
nB = 1.0; nA = 0.f;
}
else{
nA=0.5*(phi+1.f);
nB=0.5*(1.f-phi);
}
Den[idx] = nA;
Den[Np+idx] = nB;
// rest population carries weight 1/3
Aq[idx]=0.3333333333333333*nA;
Bq[idx]=0.3333333333333333*nB;
// the six moving populations each carry weight 1/9
for (int q=1; q<7; q++){
Aq[q*Np+idx]=0.1111111111111111*nA;
Bq[q*Np+idx]=0.1111111111111111*nB;
}
}
}
}
// Host launcher: set one z-slice of Phi to a constant value (one thread per
// (x,y) site).  Added the launch-error check that every other wrapper in
// this file performs, so a bad configuration is no longer silent.
extern "C" void ScaLBL_SetSlice_z(double *Phi, double value, int Nx, int Ny, int Nz, int Slice){
int GRID = Nx*Ny / 512 + 1;
dvc_ScaLBL_SetSlice_z<<<GRID,512>>>(Phi,value,Nx,Ny,Nz,Slice);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_SetSlice_z: %s \n",cudaGetErrorString(err));
}
}
// Host launcher: copy the Phi values of z-slice `Source` into z-slice `Dest`
// (one thread per (x,y) site).  Added the launch-error check that every
// other wrapper in this file performs, so a bad configuration is no longer
// silent.
extern "C" void ScaLBL_CopySlice_z(double *Phi, int Nx, int Ny, int Nz, int Source, int Dest){
int GRID = Nx*Ny / 512 + 1;
dvc_ScaLBL_CopySlice_z<<<GRID,512>>>(Phi,Nx,Ny,Nz,Source,Dest);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_CopySlice_z: %s \n",cudaGetErrorString(err));
}
}
// Host launcher: apply the color (phase) boundary condition on the `count`
// sites listed in `list`, with scalar component values vA and vB.
// (Kernel body not in view — presumably assigns nA=vA, nB=vB on the listed
// sites and updates Phi; confirm against dvc_ScaLBL_Color_BC.)
// One thread per listed site; launch errors are reported to stdout.
extern "C" void ScaLBL_Color_BC(int *list, int *Map, double *Phi, double *Den, double vA, double vB, int count, int Np){
int GRID = count / 512 + 1;
dvc_ScaLBL_Color_BC<<<GRID,512>>>(list, Map, Phi, Den, vA, vB, count, Np);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_Color_BC: %s \n",cudaGetErrorString(err));
}
}
// Host launcher: variant of ScaLBL_Color_BC that takes pointer arguments
// vAv/vBv for the component values instead of scalars (kernel body not in
// view — confirm whether these are per-site arrays).  One thread per listed
// site.
// Fix: the error message previously reported "ScaLBL_Color_BC" (copy-paste),
// misattributing failures of this wrapper to the other one.
extern "C" void ScaLBL_Color_BC_YDW(int *list, int *Map, double *Phi, double *Den, double *vAv, double *vBv, int count, int Np){
int GRID = count / 512 + 1;
dvc_ScaLBL_Color_BC_YDW<<<GRID,512>>>(list, Map, Phi, Den, vAv, vBv, count, Np);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_Color_BC_YDW: %s \n",cudaGetErrorString(err));
}
}
// Pressure Boundary Conditions Functions
// Host launcher for the AA-even color collision/streaming kernel.
// Prefers L1 cache over shared memory for this kernel (it uses none),
// brackets the launch with profiler range markers, and reports launch
// errors to stdout.  Grid is fixed at NBLOCKS x NTHREADS; the kernel
// strides over sites internally.
extern "C" void ScaLBL_D3Q19_AAeven_Color(int *Map, double *dist, double *Aq, double *Bq, double *Den, double *Phi,
double *Vel, double rhoA, double rhoB, double tauA, double tauB, double alpha, double beta,
double Fx, double Fy, double Fz, int strideY, int strideZ, int start, int finish, int Np){
cudaProfilerStart();
cudaFuncSetCacheConfig(dvc_ScaLBL_D3Q19_AAeven_Color, cudaFuncCachePreferL1);
dvc_ScaLBL_D3Q19_AAeven_Color<<<NBLOCKS,NTHREADS >>>(Map, dist, Aq, Bq, Den, Phi, Vel, rhoA, rhoB, tauA, tauB,
alpha, beta, Fx, Fy, Fz, strideY, strideZ, start, finish, Np);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAeven_Color: %s \n",cudaGetErrorString(err));
}
cudaProfilerStop();
}
// Host launcher for the AA-odd color collision/streaming kernel (the odd
// timestep pulls/pushes distributions through d_neighborList).  Prefers L1
// cache, brackets the launch with profiler range markers, and reports
// launch errors to stdout.
extern "C" void ScaLBL_D3Q19_AAodd_Color(int *d_neighborList, int *Map, double *dist, double *Aq, double *Bq, double *Den,
double *Phi, double *Vel, double rhoA, double rhoB, double tauA, double tauB, double alpha, double beta,
double Fx, double Fy, double Fz, int strideY, int strideZ, int start, int finish, int Np){
cudaProfilerStart();
cudaFuncSetCacheConfig(dvc_ScaLBL_D3Q19_AAodd_Color, cudaFuncCachePreferL1);
dvc_ScaLBL_D3Q19_AAodd_Color<<<NBLOCKS,NTHREADS >>>(d_neighborList, Map, dist, Aq, Bq, Den, Phi, Vel,
rhoA, rhoB, tauA, tauB, alpha, beta, Fx, Fy, Fz, strideY, strideZ, start, finish, Np);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_D3Q19_AAodd_Color: %s \n",cudaGetErrorString(err));
}
cudaProfilerStop();
}
// Host launcher: recompute component densities and the phase indicator from
// the D3Q7 mass-transport distributions on the odd (neighbor-list) timestep.
// Reports launch errors to stdout.
extern "C" void ScaLBL_D3Q7_AAodd_PhaseField(int *NeighborList, int *Map, double *Aq, double *Bq,
double *Den, double *Phi, int start, int finish, int Np){
cudaProfilerStart();
dvc_ScaLBL_D3Q7_AAodd_PhaseField<<<NBLOCKS,NTHREADS >>>(NeighborList, Map, Aq, Bq, Den, Phi, start, finish, Np);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_D3Q7_AAodd_PhaseField: %s \n",cudaGetErrorString(err));
}
cudaProfilerStop();
}
// Host launcher: recompute component densities and the phase indicator from
// the D3Q7 mass-transport distributions on the even (swapped-slot) timestep.
// Reports launch errors to stdout.
extern "C" void ScaLBL_D3Q7_AAeven_PhaseField(int *Map, double *Aq, double *Bq, double *Den, double *Phi,
int start, int finish, int Np){
cudaProfilerStart();
dvc_ScaLBL_D3Q7_AAeven_PhaseField<<<NBLOCKS,NTHREADS >>>(Map, Aq, Bq, Den, Phi, start, finish, Np);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_D3Q7_AAeven_PhaseField: %s \n",cudaGetErrorString(err));
}
cudaProfilerStop();
}
// Host launcher: initialize Den, Aq and Bq from the phase indicator Phi
// (rest-state D3Q7 weights).  Reports launch errors to stdout.
extern "C" void ScaLBL_PhaseField_Init(int *Map, double *Phi, double *Den, double *Aq, double *Bq, int start, int finish, int Np){
dvc_ScaLBL_PhaseField_Init<<<NBLOCKS,NTHREADS >>>(Map, Phi, Den, Aq, Bq, start, finish, Np);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
printf("CUDA error in ScaLBL_PhaseField_Init: %s \n",cudaGetErrorString(err));
}
}
|
fedf5c85343db7ffff533cfba2b7537c286ff815.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// D2Q5 solute boundary conditions on a flat node array of param->N sites,
// indexed row-major with row length param->LX; f1 stores five populations
// per node as f1[idx*5 + a], a=0..4.
// Dirichlet-type sections reconstruct one unknown incoming population so the
// node density sums to rho1_bcs (west/south additionally offset by dRho);
// zero-gradient sections copy all five populations from the adjacent node.
// NOTE(review): SOUTH/NORTH fire on is_solid nodes in the lower/upper half
// of the domain (idx < N/2 / idx >= N/2) rather than on the first/last row —
// presumably immersed solid boundaries; confirm against the caller.
__global__ void bcs_solute(double *f1, int * is_solid, mystruct *param)
{
int idx=threadIdx.x + blockIdx.x * blockDim.x, a;
if(idx <param->N)
{
if(param->solute_bcs_w==1 && idx%param->LX==0 )//WEST
f1[idx*5 + 1] = param->rho1_bcs+ param->dRho - f1[ idx*5 + 0] - f1[ idx*5 + 2] - f1[idx*5 + 3] - f1[idx*5 + 4];
if(param->solute_bcs_e==1 && (idx+1)%param->LX==0)//EAST
f1[idx*5 + 3] = param->rho1_bcs - f1[ idx*5 + 0] - f1[ idx*5 + 1] - f1[idx*5 + 2] - f1[idx*5 + 4];
if(param->solute_bcs_s==1 && idx<param->N/2 && is_solid[idx] )//SOUTH
f1[idx*5 + 2] = param->rho1_bcs+ param->dRho - f1[ idx*5 + 0] - f1[ idx*5 + 1] - f1[idx*5 + 3] - f1[idx*5 + 4];
if(param->solute_bcs_n==1 && idx>=param->N/2 && is_solid[idx] )//NORTH
//if(param->solute_bcs_n==1 && idx>=param->N-param->LX && is_solid[idx] )//NORTH
f1[idx*5 + 4] = param->rho1_bcs - f1[ idx*5 + 0] - f1[ idx*5 + 1] - f1[idx*5 + 2] - f1[idx*5 + 3];
if(param->solute_bcs_zerograd_n ==1 && idx>=param->N-param->LX )// NORTH
{
for(a=0; a<5; a++)
f1[idx*5+a] = f1[(idx-param->LX)*5+a];
}
if(param->solute_bcs_zerograd_s ==1 && idx<param->LX )// SOUTH
{
for(a=0; a<5; a++)
f1[idx*5+a] = f1[(idx+param->LX)*5+a];
}
// NOTE(review): the two zero-gradient sections below use the opposite
// edge tests from the Dirichlet sections above — zerograd_e fires at
// idx%LX==0, which is labelled WEST there, and zerograd_w fires at
// (idx+1)%LX==0, labelled EAST.  Confirm whether the flags or the
// labels are swapped.
if(param->solute_bcs_zerograd_e ==1 && idx%param->LX ==0)// EAST
{
for(a=0; a<5; a++)
f1[idx*5+a] = f1[(idx+1)*5+a];
}
if(param->solute_bcs_zerograd_w ==1 && (idx+1)%param->LX ==0)// WEST
{
for(a=0; a<5; a++)
f1[idx*5+a] = f1[(idx-1)*5+a];
}
}
}
| fedf5c85343db7ffff533cfba2b7537c286ff815.cu | __global__ void bcs_solute(double *f1, int * is_solid, mystruct *param)
{
int idx=threadIdx.x + blockIdx.x * blockDim.x, a;
if(idx <param->N)
{
if(param->solute_bcs_w==1 && idx%param->LX==0 )//WEST
f1[idx*5 + 1] = param->rho1_bcs+ param->dRho - f1[ idx*5 + 0] - f1[ idx*5 + 2] - f1[idx*5 + 3] - f1[idx*5 + 4];
if(param->solute_bcs_e==1 && (idx+1)%param->LX==0)//EAST
f1[idx*5 + 3] = param->rho1_bcs - f1[ idx*5 + 0] - f1[ idx*5 + 1] - f1[idx*5 + 2] - f1[idx*5 + 4];
if(param->solute_bcs_s==1 && idx<param->N/2 && is_solid[idx] )//SOUTH
f1[idx*5 + 2] = param->rho1_bcs+ param->dRho - f1[ idx*5 + 0] - f1[ idx*5 + 1] - f1[idx*5 + 3] - f1[idx*5 + 4];
if(param->solute_bcs_n==1 && idx>=param->N/2 && is_solid[idx] )//NORTH
//if(param->solute_bcs_n==1 && idx>=param->N-param->LX && is_solid[idx] )//NORTH
f1[idx*5 + 4] = param->rho1_bcs - f1[ idx*5 + 0] - f1[ idx*5 + 1] - f1[idx*5 + 2] - f1[idx*5 + 3];
if(param->solute_bcs_zerograd_n ==1 && idx>=param->N-param->LX )// NORTH
{
for(a=0; a<5; a++)
f1[idx*5+a] = f1[(idx-param->LX)*5+a];
}
if(param->solute_bcs_zerograd_s ==1 && idx<param->LX )// SOUTH
{
for(a=0; a<5; a++)
f1[idx*5+a] = f1[(idx+param->LX)*5+a];
}
if(param->solute_bcs_zerograd_e ==1 && idx%param->LX ==0)// EAST
{
for(a=0; a<5; a++)
f1[idx*5+a] = f1[(idx+1)*5+a];
}
if(param->solute_bcs_zerograd_w ==1 && (idx+1)%param->LX ==0)// WEST
{
for(a=0; a<5; a++)
f1[idx*5+a] = f1[(idx-1)*5+a];
}
}
}
|
229efc0e5698c15e76b450b50be21cbe19707671.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Explicit finite-difference step on a 1-D lattice: interior nodes take the
// three-point stencil Pu*up + Pm*mid + Pd*down; the two boundary nodes take
// the value of their adjacent interior node.
// Fixes vs. original:
//  (1) the stencil no longer reads d_val_n[-1] / d_val_n[size]
//      (out-of-bounds) at the boundary threads;
//  (2) the boundary nodes no longer copy d_val_npo entries that other
//      threads may not have written yet (a data race) — they recompute the
//      adjacent interior node's stencil from d_val_n directly, producing
//      the same final values deterministically.
// Requires size >= 3 for the boundary treatment; degenerate grids fall back
// to Pm-only scaling.  (The function's closing brace follows this block.)
__global__ void EFD_1dBM( int size, float *d_val_n, float *d_val_npo, float Pu, float Pm, float Pd ){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
if (size < 3)
{
// no interior neighbors exist on a 1- or 2-node grid
d_val_npo[i] = Pm * d_val_n[i];
}
else if (i == 0)
{
// same value the node-1 stencil produces (was: racy copy of d_val_npo[1])
d_val_npo[i] = Pu * d_val_n[2] + Pm * d_val_n[1] + Pd * d_val_n[0];
}
else if (i == size - 1)
{
// same value the node-(size-2) stencil produces (was: racy copy of d_val_npo[i-1])
d_val_npo[i] = Pu * d_val_n[size - 1] + Pm * d_val_n[size - 2] + Pd * d_val_n[size - 3];
}
else
{
d_val_npo[i] = Pu * d_val_n[i + 1] + Pm * d_val_n[i] + Pd * d_val_n[i - 1];
}
}
} | 229efc0e5698c15e76b450b50be21cbe19707671.cu | #include "includes.h"
// Explicit finite-difference step on a 1-D lattice: interior nodes take the
// three-point stencil Pu*up + Pm*mid + Pd*down; the two boundary nodes take
// the value of their adjacent interior node.
// Fixes vs. original:
//  (1) the stencil no longer reads d_val_n[-1] / d_val_n[size]
//      (out-of-bounds) at the boundary threads;
//  (2) the boundary nodes no longer copy d_val_npo entries that other
//      threads may not have written yet (a data race) — they recompute the
//      adjacent interior node's stencil from d_val_n directly, producing
//      the same final values deterministically.
// Requires size >= 3 for the boundary treatment; degenerate grids fall back
// to Pm-only scaling.  (The function's closing brace follows this block.)
__global__ void EFD_1dBM( int size, float *d_val_n, float *d_val_npo, float Pu, float Pm, float Pd ){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
if (size < 3)
{
// no interior neighbors exist on a 1- or 2-node grid
d_val_npo[i] = Pm * d_val_n[i];
}
else if (i == 0)
{
// same value the node-1 stencil produces (was: racy copy of d_val_npo[1])
d_val_npo[i] = Pu * d_val_n[2] + Pm * d_val_n[1] + Pd * d_val_n[0];
}
else if (i == size - 1)
{
// same value the node-(size-2) stencil produces (was: racy copy of d_val_npo[i-1])
d_val_npo[i] = Pu * d_val_n[size - 1] + Pm * d_val_n[size - 2] + Pd * d_val_n[size - 3];
}
else
{
d_val_npo[i] = Pu * d_val_n[i + 1] + Pm * d_val_n[i] + Pd * d_val_n[i - 1];
}
}
} |
3f7ffb704dfe9eddde8ddc718271370776d5a4f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/operator/batch_scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Per-channel maximum over all (num, spatial) positions, one block per
// channel: each thread strides over the channel's elements keeping a running
// max in shared memory, then a tree reduction folds the block's partials and
// thread 0 writes out[channel].
// NOTE(review): the running max is seeded with -1e7, so the result is wrong
// if every element of a channel is below -10000000 — confirm inputs are
// bounded.  The tree reduction also assumes blockDim.x is a power of two
// (it is launched with CAFFE_CUDA_NUM_THREADS below — verify that constant).
template <typename Dtype>
static __global__ void compute_max(int num, int channels, int spatial_dim, const Dtype *in, Dtype *out)
{
__shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS];
buffer[threadIdx.x] = Dtype(-10000000.0);
for (int i = threadIdx.x; i < num * spatial_dim; i += blockDim.x)
{
// map the flat (sample, pixel) index i into NCHW for channel blockIdx.x
const int index = i / spatial_dim * channels * spatial_dim + blockIdx.x * spatial_dim + i % spatial_dim;
buffer[threadIdx.x] = max(buffer[threadIdx.x],in[index]);
}
__syncthreads();
// shared-memory tree reduction of the per-thread partial maxima
for (int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (threadIdx.x < s)
buffer[threadIdx.x] = max(buffer[threadIdx.x], buffer[threadIdx.x+s]);
__syncthreads();
}
if (threadIdx.x == 0)
out[blockIdx.x] = buffer[0];
}
// Per-channel minimum over all (num, spatial) positions, one block per
// channel; mirror image of compute_max above (running min seeded with +1e7,
// same indexing, same tree reduction, same power-of-two blockDim assumption).
// NOTE(review): wrong result if every element of a channel exceeds 10000000
// — confirm inputs are bounded.
template <typename Dtype>
static __global__ void compute_min(int num, int channels, int spatial_dim, const Dtype *in, Dtype *out)
{
__shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS];
buffer[threadIdx.x] = Dtype(10000000.0);
for (int i = threadIdx.x; i < num * spatial_dim; i += blockDim.x)
{
// map the flat (sample, pixel) index i into NCHW for channel blockIdx.x
const int index = i / spatial_dim * channels * spatial_dim + blockIdx.x * spatial_dim + i % spatial_dim;
buffer[threadIdx.x] = min(buffer[threadIdx.x],in[index]);
}
__syncthreads();
// shared-memory tree reduction of the per-thread partial minima
for (int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (threadIdx.x < s)
buffer[threadIdx.x] = min(buffer[threadIdx.x], buffer[threadIdx.x+s]);
__syncthreads();
}
if (threadIdx.x == 0)
out[blockIdx.x] = buffer[0];
}
// Per-channel pair of reductions, one block per channel: accumulates both
// sum(diff_out) and sum(diff_out * in) over all (num, spatial) positions of
// the channel, writing them to out[channel] and out_x[channel] respectively.
// Same striding, indexing and power-of-two tree-reduction pattern as
// compute_max/compute_min above.
template <typename Dtype>
static __global__ void compute_sum_diff(int num, int channels, int spatial_dim, const Dtype *diff_out, const Dtype *in, Dtype *out, Dtype *out_x)
{
__shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS];
__shared__ Dtype buffer_x[CAFFE_CUDA_NUM_THREADS];
buffer[threadIdx.x] = 0;
buffer_x[threadIdx.x] = 0;
for (int i = threadIdx.x; i < num * spatial_dim; i += blockDim.x)
{
// map the flat (sample, pixel) index i into NCHW for channel blockIdx.x
const int index = i / spatial_dim * channels * spatial_dim + blockIdx.x * spatial_dim + i % spatial_dim;
buffer[threadIdx.x] += diff_out[index];
buffer_x[threadIdx.x] += diff_out[index]*in[index];
}
__syncthreads();
// reduce both accumulators simultaneously in shared memory
for (int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (threadIdx.x < s)
{
buffer[threadIdx.x] += buffer[threadIdx.x+s];
buffer_x[threadIdx.x] += buffer_x[threadIdx.x+s];
}
__syncthreads();
}
if (threadIdx.x == 0)
{
out[blockIdx.x] = buffer[0];
out_x[blockIdx.x] = buffer_x[0];
}
}
template <typename Dtype>
static __global__ void forward_kernel(int count, int channels,int spatial_dim, const Dtype *in, const Dtype *min_value, const Dtype * max_value, Dtype *out)
{
	// Elementwise min-max normalization: map each NCHW input element to
	// [-0.5, 0.5] using the per-channel extrema from compute_min / compute_max.
	CUDA_KERNEL_LOOP(idx, count)
	{
		const int chan = idx / spatial_dim % channels;
		const Dtype lo = min_value[chan];
		const Dtype gap = max_value[chan] - lo;
		// NOTE(review): a constant channel (gap == 0) would divide by zero —
		// confirm upstream guarantees max > min.
		out[idx] = (in[idx] - lo) / gap - Dtype(0.5);
	}
}
template <typename Dtype>
static __global__ void backward_kernel(int count, int channels,int spatial_dim, const Dtype *diff_out,
		const Dtype *min_value, const Dtype * max_value, const Dtype *sum, const Dtype *sum_x,
		const Dtype * in, Dtype *diff_in)
{
	// Backward of forward_kernel's per-channel normalization
	// y = (x - min) / (max - min) - 0.5.
	// sum[c] = per-channel sum of upstream gradients, sum_x[c] = sum of dy*x
	// (both produced by compute_sum_diff); they carry the gradient that flows
	// through the channel's own max/min values.
	CUDA_KERNEL_LOOP(i, count)
	{
		int c = i / spatial_dim % channels;
		Dtype gap = max_value[c] - min_value[c];
		// Non-extreme elements only see the direct path dy/gap.
		if (in[i] != max_value[c] && in[i] != min_value[c])
			diff_in[i] = diff_out[i] / gap;
		// Exact float equality identifies the extremum element(s); they also
		// receive the d(gap)/d(extremum) correction term.
		else if (in[i] == max_value[c])
			diff_in[i] = diff_out[i] / gap + (sum[c]*min_value[c] - sum_x[c]) / (gap*gap);
		else
			diff_in[i] = diff_out[i] / gap + (sum_x[c] - sum[c]*max_value[c]) / (gap*gap);
	}
}
template <typename Dtype>
void BatchScaleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
	// Forward: compute per-channel min (stored in blobs_[2]) and max (blobs_[3])
	// over the whole batch, then min-max normalize bottom into top ([-0.5, 0.5]).
	int num = bottom[0]->num();
	int channels = bottom[0]->channels();
	int height = bottom[0]->height();
	int width = bottom[0]->width();
	// Reduction kernels: one block per channel.
	hipLaunchKernelGGL(( compute_min), dim3(channels),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
			num, channels, height*width,bottom[0]->gpu_data(),this->blobs_[2]->mutable_gpu_data());
	hipLaunchKernelGGL(( compute_max), dim3(channels),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
			num, channels, height*width,bottom[0]->gpu_data(),this->blobs_[3]->mutable_gpu_data());
	// Elementwise normalization over the full blob.
	hipLaunchKernelGGL(( forward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
			bottom[0]->count(),channels,height*width,bottom[0]->gpu_data(),this->blobs_[2]->gpu_data(),this->blobs_[3]->gpu_data(),
			top[0]->mutable_gpu_data());
}
template <typename Dtype>
void BatchScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom)
{
	// Backward: accumulate per-channel sum(dy) into sum_.data and sum(dy*x)
	// into sum_.diff, then form bottom diff from top diff using the stored
	// per-channel min (blobs_[2]) and max (blobs_[3]) from the forward pass.
	int num = bottom[0]->num();
	int channels = bottom[0]->channels();
	int height = bottom[0]->height();
	int width = bottom[0]->width();
	hipLaunchKernelGGL(( compute_sum_diff), dim3(channels),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
			num, channels, height*width,top[0]->gpu_diff(),bottom[0]->gpu_data(),sum_.mutable_gpu_data(),sum_.mutable_gpu_diff());
	//LOG(ERROR)<<this->blobs_[2]->cpu_data()[0]<<", "<<this->blobs_[3]->cpu_data()[0]<<", "<<sum_.cpu_data()[0];
	//LOG(ERROR)<<bottom[0]->cpu_data()[0]<<", "<<bottom[0]->cpu_data()[1]<<", "<<bottom[0]->cpu_data()[2];
	//LOG(ERROR)<<top[0]->cpu_data()[0]<<", "<<top[0]->cpu_data()[1]<<", "<<top[0]->cpu_data()[2];
	hipLaunchKernelGGL(( backward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
			bottom[0]->count(),channels,height*width,top[0]->gpu_diff(),this->blobs_[2]->gpu_data(),this->blobs_[3]->gpu_data(), sum_.gpu_data(),sum_.gpu_diff(),
			bottom[0]->gpu_data(), bottom[0]->mutable_gpu_diff());
	//LOG(ERROR)<<top[0]->cpu_diff()[0]<<", "<<top[0]->cpu_diff()[1]<<", "<<top[0]->cpu_diff()[2];
	//LOG(ERROR)<<bottom[0]->cpu_diff()[0]<<", "<<bottom[0]->cpu_diff()[1]<<", "<<bottom[0]->cpu_diff()[2];
}
template <typename Dtype>
void BatchScaleLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
	// Second-order forward pass: no-op for this layer.
}
INSTANTIATE_LAYER_GPU_FUNCS(BatchScaleLayer);
} // namespace caffe
| 3f7ffb704dfe9eddde8ddc718271370776d5a4f6.cu |
#include <vector>
#include "caffe/layers/operator/batch_scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
static __global__ void compute_max(int num, int channels, int spatial_dim, const Dtype *in, Dtype *out)
{
	// Per-channel maximum over all samples and spatial positions (NCHW data).
	// Launch: one block per channel (blockIdx.x == channel index),
	// CAFFE_CUDA_NUM_THREADS threads per block. Writes out[c] = max_{n,hw} in[n][c][hw].
	__shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS];
	// Sentinel initial value. NOTE(review): assumes all inputs are above -1e7 — confirm.
	buffer[threadIdx.x] = Dtype(-10000000.0);
	for (int i = threadIdx.x; i < num * spatial_dim; i += blockDim.x)
	{
		// i enumerates (sample, spatial) pairs; map to the NCHW offset of this block's channel.
		const int index = i / spatial_dim * channels * spatial_dim + blockIdx.x * spatial_dim + i % spatial_dim;
		buffer[threadIdx.x] = max(buffer[threadIdx.x],in[index]);
	}
	__syncthreads();
	// Shared-memory tree reduction (requires power-of-two blockDim.x).
	for (int s = blockDim.x / 2; s > 0; s >>= 1)
	{
		if (threadIdx.x < s)
			buffer[threadIdx.x] = max(buffer[threadIdx.x], buffer[threadIdx.x+s]);
		__syncthreads();
	}
	if (threadIdx.x == 0)
		out[blockIdx.x] = buffer[0];
}
template <typename Dtype>
static __global__ void compute_min(int num, int channels, int spatial_dim, const Dtype *in, Dtype *out)
{
	// Per-channel minimum over all samples and spatial positions (NCHW data).
	// Launch: one block per channel (blockIdx.x == channel index),
	// CAFFE_CUDA_NUM_THREADS threads per block. Writes out[c] = min_{n,hw} in[n][c][hw].
	__shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS];
	// Sentinel initial value. NOTE(review): assumes all inputs are below 1e7 — confirm.
	buffer[threadIdx.x] = Dtype(10000000.0);
	for (int i = threadIdx.x; i < num * spatial_dim; i += blockDim.x)
	{
		// i enumerates (sample, spatial) pairs; map to the NCHW offset of this block's channel.
		const int index = i / spatial_dim * channels * spatial_dim + blockIdx.x * spatial_dim + i % spatial_dim;
		buffer[threadIdx.x] = min(buffer[threadIdx.x],in[index]);
	}
	__syncthreads();
	// Shared-memory tree reduction (requires power-of-two blockDim.x).
	for (int s = blockDim.x / 2; s > 0; s >>= 1)
	{
		if (threadIdx.x < s)
			buffer[threadIdx.x] = min(buffer[threadIdx.x], buffer[threadIdx.x+s]);
		__syncthreads();
	}
	if (threadIdx.x == 0)
		out[blockIdx.x] = buffer[0];
}
template <typename Dtype>
static __global__ void compute_sum_diff(int num, int channels, int spatial_dim, const Dtype *diff_out, const Dtype *in, Dtype *out, Dtype *out_x)
{
	// Per-channel reductions used by the backward pass (NCHW data):
	//   out[c]   = sum over n,hw of diff_out        (sum of upstream gradients)
	//   out_x[c] = sum over n,hw of diff_out * in   (gradient-weighted input sum)
	// Launch: one block per channel, CAFFE_CUDA_NUM_THREADS threads per block.
	__shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS];
	__shared__ Dtype buffer_x[CAFFE_CUDA_NUM_THREADS];
	buffer[threadIdx.x] = 0;
	buffer_x[threadIdx.x] = 0;
	for (int i = threadIdx.x; i < num * spatial_dim; i += blockDim.x)
	{
		// Map (sample, spatial) pair i to the NCHW offset of this block's channel.
		const int index = i / spatial_dim * channels * spatial_dim + blockIdx.x * spatial_dim + i % spatial_dim;
		buffer[threadIdx.x] += diff_out[index];
		buffer_x[threadIdx.x] += diff_out[index]*in[index];
	}
	__syncthreads();
	// Shared-memory tree reduction of both accumulators (assumes power-of-two blockDim.x).
	for (int s = blockDim.x / 2; s > 0; s >>= 1)
	{
		if (threadIdx.x < s)
		{
			buffer[threadIdx.x] += buffer[threadIdx.x+s];
			buffer_x[threadIdx.x] += buffer_x[threadIdx.x+s];
		}
		__syncthreads();
	}
	if (threadIdx.x == 0)
	{
		out[blockIdx.x] = buffer[0];
		out_x[blockIdx.x] = buffer_x[0];
	}
}
template <typename Dtype>
static __global__ void forward_kernel(int count, int channels,int spatial_dim, const Dtype *in, const Dtype *min_value, const Dtype * max_value, Dtype *out)
{
	// Elementwise min-max normalization: map each NCHW input element to
	// [-0.5, 0.5] using the per-channel extrema from compute_min / compute_max.
	CUDA_KERNEL_LOOP(idx, count)
	{
		const int chan = idx / spatial_dim % channels;
		const Dtype lo = min_value[chan];
		const Dtype gap = max_value[chan] - lo;
		// NOTE(review): a constant channel (gap == 0) would divide by zero —
		// confirm upstream guarantees max > min.
		out[idx] = (in[idx] - lo) / gap - Dtype(0.5);
	}
}
template <typename Dtype>
static __global__ void backward_kernel(int count, int channels,int spatial_dim, const Dtype *diff_out,
		const Dtype *min_value, const Dtype * max_value, const Dtype *sum, const Dtype *sum_x,
		const Dtype * in, Dtype *diff_in)
{
	// Backward of forward_kernel's per-channel normalization
	// y = (x - min) / (max - min) - 0.5.
	// sum[c] = per-channel sum of upstream gradients, sum_x[c] = sum of dy*x
	// (both produced by compute_sum_diff); they carry the gradient that flows
	// through the channel's own max/min values.
	CUDA_KERNEL_LOOP(i, count)
	{
		int c = i / spatial_dim % channels;
		Dtype gap = max_value[c] - min_value[c];
		// Non-extreme elements only see the direct path dy/gap.
		if (in[i] != max_value[c] && in[i] != min_value[c])
			diff_in[i] = diff_out[i] / gap;
		// Exact float equality identifies the extremum element(s); they also
		// receive the d(gap)/d(extremum) correction term.
		else if (in[i] == max_value[c])
			diff_in[i] = diff_out[i] / gap + (sum[c]*min_value[c] - sum_x[c]) / (gap*gap);
		else
			diff_in[i] = diff_out[i] / gap + (sum_x[c] - sum[c]*max_value[c]) / (gap*gap);
	}
}
template <typename Dtype>
void BatchScaleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
	// Forward: compute per-channel min (stored in blobs_[2]) and max (blobs_[3])
	// over the whole batch, then min-max normalize bottom into top ([-0.5, 0.5]).
	int num = bottom[0]->num();
	int channels = bottom[0]->channels();
	int height = bottom[0]->height();
	int width = bottom[0]->width();
	// Reduction kernels: one block per channel.
	compute_min<<<channels,CAFFE_CUDA_NUM_THREADS>>>
	(num, channels, height*width,bottom[0]->gpu_data(),this->blobs_[2]->mutable_gpu_data());
	compute_max<<<channels,CAFFE_CUDA_NUM_THREADS>>>
	(num, channels, height*width,bottom[0]->gpu_data(),this->blobs_[3]->mutable_gpu_data());
	// Elementwise normalization over the full blob.
	forward_kernel<Dtype><<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>>
	(bottom[0]->count(),channels,height*width,bottom[0]->gpu_data(),this->blobs_[2]->gpu_data(),this->blobs_[3]->gpu_data(),
	top[0]->mutable_gpu_data());
}
template <typename Dtype>
void BatchScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom)
{
	// Backward: accumulate per-channel sum(dy) into sum_.data and sum(dy*x)
	// into sum_.diff, then form bottom diff from top diff using the stored
	// per-channel min (blobs_[2]) and max (blobs_[3]) from the forward pass.
	int num = bottom[0]->num();
	int channels = bottom[0]->channels();
	int height = bottom[0]->height();
	int width = bottom[0]->width();
	compute_sum_diff<<<channels,CAFFE_CUDA_NUM_THREADS>>>
	(num, channels, height*width,top[0]->gpu_diff(),bottom[0]->gpu_data(),sum_.mutable_gpu_data(),sum_.mutable_gpu_diff());
	//LOG(ERROR)<<this->blobs_[2]->cpu_data()[0]<<", "<<this->blobs_[3]->cpu_data()[0]<<", "<<sum_.cpu_data()[0];
	//LOG(ERROR)<<bottom[0]->cpu_data()[0]<<", "<<bottom[0]->cpu_data()[1]<<", "<<bottom[0]->cpu_data()[2];
	//LOG(ERROR)<<top[0]->cpu_data()[0]<<", "<<top[0]->cpu_data()[1]<<", "<<top[0]->cpu_data()[2];
	backward_kernel<Dtype><<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>>
	(bottom[0]->count(),channels,height*width,top[0]->gpu_diff(),this->blobs_[2]->gpu_data(),this->blobs_[3]->gpu_data(), sum_.gpu_data(),sum_.gpu_diff(),
	bottom[0]->gpu_data(), bottom[0]->mutable_gpu_diff());
	//LOG(ERROR)<<top[0]->cpu_diff()[0]<<", "<<top[0]->cpu_diff()[1]<<", "<<top[0]->cpu_diff()[2];
	//LOG(ERROR)<<bottom[0]->cpu_diff()[0]<<", "<<bottom[0]->cpu_diff()[1]<<", "<<bottom[0]->cpu_diff()[2];
}
template <typename Dtype>
void BatchScaleLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
	// Second-order forward pass: no-op for this layer.
}
INSTANTIATE_LAYER_GPU_FUNCS(BatchScaleLayer);
} // namespace caffe
|
c31824f2572e02c9d2c39e97da6875901ea5fe34.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/group_norm_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/gpu/group_norm_utils.h"
namespace phi {
template <typename T, typename AccT, int flags>
__global__ void GroupNormBackwardGetMeanAndVar(const T* x,
                                               const T* scale,
                                               const T* bias,
                                               const T* d_y,
                                               int N,
                                               int C,
                                               int W,
                                               int imsize,
                                               int groups,
                                               int group_size,
                                               float epsilon,
                                               AccT* d_mean,
                                               AccT* d_var,
                                               T* d_scale,
                                               T* d_bias) {
  // Accumulates backward partial sums for NHWC data (index math below is
  // ((n * H + h) * W + w) * C + c):
  //   d_mean[n,g] += sum_c_in_block d_y * scale
  //   d_var[n,g]  += sum of d_y * (x - bias)
  //   d_scale[c]  += sum of d_y * (x - bias) / scale
  //   d_bias[c]   += sum of d_y
  // Launch: grid = (group_size, groups, N); one block per (n, channel) pair,
  // threads stride over the spatial extent. `flags` (kHasScale | kHasBias)
  // selects whether scale/bias are present. N and epsilon are unused here;
  // kept for signature parity with GroupNormBackward.
  int gid = blockIdx.y;
  int cid = blockIdx.x;
  int bid = blockIdx.z;
  int H = imsize / W;
  int ccid = gid * group_size + cid;
  // Guard for the (possibly ragged) last group when C % groups != 0.
  if (ccid >= C) return;
  T x_scale = (flags & kHasScale) ? scale[ccid] : static_cast<T>(1);
  T x_bias = (flags & kHasBias) ? bias[ccid] : static_cast<T>(0);
  T x_scale_inv = static_cast<T>(0);
  // A zero scale leaves x_scale_inv at 0, avoiding a division by zero.
  if (x_scale != static_cast<T>(0)) x_scale_inv = static_cast<T>(1.0) / x_scale;
  AccT d_mean_data = static_cast<AccT>(0);
  AccT d_var_data = static_cast<AccT>(0);
  AccT d_scale_data = static_cast<AccT>(0);
  AccT d_bias_data = static_cast<AccT>(0);
  for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
    AccT val, dval;
    int hid = imid / W;
    int wid = imid % W;
    val = static_cast<AccT>(x[(bid * H + hid) * W * C + wid * C + ccid]) -
          static_cast<AccT>(x_bias);
    dval = static_cast<AccT>(d_y[(bid * H + hid) * W * C + wid * C + ccid]);
    d_var_data += val * dval;
    d_mean_data += dval * static_cast<AccT>(x_scale);
    val = val * static_cast<AccT>(x_scale_inv);
    d_bias_data += dval;
    d_scale_data += val * dval;
  }
  // Multiple channel-blocks of the same group contribute to the same
  // d_mean/d_var slot, hence the atomics.
  CudaAtomicAddWithWarp(&(d_mean[bid * groups + gid]),
                        static_cast<AccT>(d_mean_data));
  CudaAtomicAddWithWarp(&(d_var[bid * groups + gid]),
                        static_cast<AccT>(d_var_data));
  if (flags & kHasScale) {
#if TORCH_HIP_VERSION >= 11070
    phi::CudaAtomicAdd(&(d_scale[ccid]), static_cast<T>(d_scale_data));
#else
    CudaAtomicAddWithWarp(&(d_scale[ccid]), static_cast<T>(d_scale_data));
#endif
  }
  if (flags & kHasBias) {
#if TORCH_HIP_VERSION >= 11070
    phi::CudaAtomicAdd(&(d_bias[ccid]), static_cast<T>(d_bias_data));
#else
    CudaAtomicAddWithWarp(&(d_bias[ccid]), static_cast<T>(d_bias_data));
#endif
  }
}
template <typename T, typename AccT, int flags>
__global__ void GroupNormBackward(const T* x,
                                  const T* d_y,
                                  const T* scale,
                                  const T* bias,
                                  const AccT* var,
                                  const AccT* d_mean,
                                  const AccT* d_var,
                                  int N,
                                  int C,
                                  int W,
                                  int imsize,
                                  int groups,
                                  int group_size,
                                  float epsilon,
                                  T* d_x) {
  // Computes d_x for NHWC data from the per-(n, g) variance plus the d_mean /
  // d_var partials accumulated by GroupNormBackwardGetMeanAndVar.
  // Launch: grid = (group_size, groups, N); threads stride the spatial extent.
  // using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
  int gid = blockIdx.y;
  int cid = blockIdx.x;
  int bid = blockIdx.z;
  int H = imsize / W;
  // Channels actually present in this group (last group may be ragged).
  int number = min(group_size, static_cast<int>(C - gid * group_size));
  int ccid = gid * group_size + cid;
  if (ccid >= C) return;
  AccT x_var = var[bid * groups + gid];
  AccT d_x_mean = static_cast<AccT>(d_mean[bid * groups + gid]);
  AccT d_x_var = static_cast<AccT>(d_var[bid * groups + gid]);
  // Reciprocal standard deviation of this (n, g) slice.
  AccT x_var_inv = static_cast<AccT>(1.0) / sqrt((x_var) + epsilon);
  // 1 / (elements per group slice).
  AccT number_inv =
      static_cast<AccT>(1.0) / static_cast<AccT>((number * imsize));
  AccT x_scale = (flags & kHasScale) ? static_cast<AccT>(scale[ccid])
                                     : static_cast<AccT>(1);
  AccT x_bias =
      (flags & kHasBias) ? static_cast<AccT>(bias[ccid]) : static_cast<AccT>(0);
  AccT x_scale_inv = static_cast<AccT>(0);
  if (x_scale != static_cast<AccT>(0))
    x_scale_inv = static_cast<AccT>(1.0) / x_scale;
  for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
    int hid = imid / W;
    int wid = imid % W;
    AccT tmp = static_cast<AccT>(x[(bid * H + hid) * W * C + wid * C + ccid]);
    // v_y: activation with the affine transform undone ((x - bias) / scale).
    AccT v_y = (tmp - x_bias) * x_scale_inv;
    AccT dly = static_cast<AccT>(d_y[(bid * H + hid) * W * C + wid * C + ccid]);
    // d_x = rstd * (dy*scale - (d_var * v_y + d_mean) / m), m = number*imsize.
    d_x[(bid * H + hid) * W * C + wid * C + ccid] =
        static_cast<T>(x_var_inv * ((dly) * (x_scale)-number_inv * d_x_var *
                                    (v_y)-number_inv * d_x_mean));
  }
}
template <typename T, typename AccT>
__global__ void ScalarGetDsDbCUDAKernel(
    int imsize, const T* x, const T* dy, AccT* ds, AccT* db) {
  // One block per (n, c) slice: accumulate ds = sum(dy * x) and db = sum(dy)
  // over the spatial extent, then block-reduce into ds[nc] / db[nc].
  const int nc = blockIdx.x;
  const T* x_nc = x + nc * imsize;
  const T* dy_nc = dy + nc * imsize;
  AccT local_ds = 0;
  AccT local_db = 0;
  for (int i = threadIdx.x; i < imsize; i += blockDim.x) {
    const AccT g = static_cast<AccT>(dy_nc[i]);
    local_ds += g * static_cast<AccT>(x_nc[i]);
    local_db += g;
  }
  ReduceMeanAndVar<AccT>(db, ds, local_db, local_ds, 1);
}
template <typename T, typename AccT>
__global__ void GetScaleBiasGradientCUDAKernel(int N,
                                               int C,
                                               int group,
                                               float epsilon,
                                               const AccT* mean,
                                               const AccT* var,
                                               const AccT* ds,
                                               const AccT* db,
                                               T* d_scale,
                                               T* d_bias) {
  // One thread per channel c. From the per-(n, c) partial sums
  // ds[nc] = sum(dy*x) and db[nc] = sum(dy), computes:
  //   d_scale[c] = sum_n (ds[nc] - db[nc]*mean[ng]) * rstd(var[ng])
  //   d_bias[c]  = sum_n db[nc]
  // Either output pointer may be null, in which case it is skipped.
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  if (c < C) {
    const int G = group;
    const int D = C / G;
    AccT sum1 = static_cast<AccT>(0);
    AccT sum2 = static_cast<AccT>(0);
    for (int n = 0; n < N; ++n) {
      const int nc = n * C + c;
      // ng: index of the (sample, group) statistics for this channel.
      const int ng = n * G + c / D;
      sum1 +=
          (d_scale == nullptr)
              ? AccT(0)
              : ((ds[nc] - db[nc] * (mean[ng])) * (rsqrt((var[ng]) + epsilon)));
      sum2 += (d_bias == nullptr) ? AccT(0) : db[nc];
    }
    if (d_scale != nullptr) {
      d_scale[c] = static_cast<T>(sum1);
    }
    if (d_bias != nullptr) {
      d_bias[c] = static_cast<T>(sum2);
    }
  }
}
template <typename T, typename AccT, int BlockDim>
__global__ void GetBackwardParamsCUDAKernel(int imsize,
                                            int groups,
                                            int group_size,
                                            float epsilon,
                                            const AccT* mean,
                                            const AccT* var,
                                            const T* scale,
                                            const AccT* ds,
                                            const AccT* db,
                                            AccT* p1,
                                            AccT* p2,
                                            AccT* p3) {
  // One block per (n, g). Produces the coefficients used by
  // GetXGradientCUDAKernel (dx = p1*dy + p2*x + p3):
  //   p1[n*C + c] = scale[c] * rstd          (per channel)
  //   p2[n,g], p3[n,g]                       (per group, written by thread 0)
  const int n = blockIdx.x;
  const int g = blockIdx.y;
  const int ng = n * groups + g;
  AccT sum1 = 0;
  AccT sum2 = 0;
  AccT var_inv = rsqrt(static_cast<AccT>(var[ng]) + epsilon);
  for (int64_t i = threadIdx.x; i < group_size; i += blockDim.x) {
    const int64_t index = ng * group_size + i;
    const int64_t c = g * group_size + i;
    // For the group sums a missing scale behaves as 1 ...
    const AccT scale_v =
        scale == nullptr ? static_cast<AccT>(1) : static_cast<AccT>(scale[c]);
    sum1 += static_cast<AccT>(ds[index]) * scale_v;
    sum2 += static_cast<AccT>(db[index]) * scale_v;
    // ... while p1 uses 0 when scale is absent.
    const AccT scale_c =
        scale == nullptr ? static_cast<AccT>(0) : static_cast<AccT>(scale[c]);
    p1[index] = static_cast<AccT>(scale_c) * var_inv;
  }
  typedef hipcub::BlockReduce<AccT, BlockDim> BlockReduce;
  __shared__ typename BlockReduce::TempStorage ds_storage;
  __shared__ typename BlockReduce::TempStorage db_storage;
  // After the reductions only thread 0 holds the valid block-wide totals.
  sum1 = BlockReduce(ds_storage).Reduce(sum1, hipcub::Sum());
  sum2 = BlockReduce(db_storage).Reduce(sum2, hipcub::Sum());
  if (threadIdx.x == 0) {
    // s = 1 / (elements per group); see caller comment for the p2/p3 formulas.
    const AccT s =
        static_cast<AccT>(1) / static_cast<AccT>(group_size * imsize);
    const AccT x = (sum2 * static_cast<AccT>(mean[ng]) - sum1) * (var_inv) *
                   (var_inv) * (var_inv)*s;
    p2[ng] = x;
    p3[ng] = -x * (mean[ng]) - (sum2 * var_inv) * s;
  }
}
template <typename T, typename AccT>
__global__ void GetXGradientCUDAKernel(int imsize,
                                       int C,
                                       int group_size,
                                       int groups,
                                       AccT* p1,
                                       AccT* p2,
                                       AccT* p3,
                                       const T* x,
                                       const T* dy,
                                       T* dx) {
  // Final NCHW gradient assembly: dx = p1*dy + p2*x + p3, with p1 indexed per
  // (n, c) and p2/p3 per (n, g). Grid: (group_size, groups, N); threads
  // stride the spatial extent.
  int cid = blockIdx.x;
  int gid = blockIdx.y;
  int bid = blockIdx.z;
  int ccid = bid * C + gid * group_size + cid;  // index into p1 (sized N*C)
  int ng = bid * groups + gid;                  // index into p2/p3 (sized N*groups)
  int nc = gid * group_size + cid;              // channel index within a sample
  for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
    int index = (bid * C + nc) * imsize + imid;
    dx[index] = static_cast<T>(p1[ccid] * static_cast<AccT>(dy[index]) +
                               p2[ng] * static_cast<AccT>(x[index]) + p3[ng]);
  }
}
template <typename T, typename Context>
void GroupNormGradKernel(const Context& dev_ctx,
                         const DenseTensor& x,
                         const paddle::optional<DenseTensor>& scale,
                         const paddle::optional<DenseTensor>& bias,
                         const DenseTensor& y,
                         const DenseTensor& mean,
                         const DenseTensor& var,
                         const DenseTensor& d_y,
                         float epsilon,
                         int groups,
                         const std::string& data_layout_str,
                         DenseTensor* d_x,
                         DenseTensor* d_scale,
                         DenseTensor* d_bias) {
  // Group-norm backward dispatcher. Two paths:
  //  - NCHW: per-(n,c) partial sums (ds/db) then the p1/p2/p3 coefficient
  //    formulation, reading the forward input x.
  //  - otherwise (channel-last): GroupNormBackwardGetMeanAndVar /
  //    GroupNormBackward kernels, reading the forward output y.
  // d_x, d_scale, d_bias may each be null; null outputs are skipped.
  using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
  const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
  const auto scale_ptr = scale.get_ptr();
  const auto bias_ptr = bias.get_ptr();
  const auto& x_dims = x.dims();
  const int C = (data_layout == DataLayout::kNCHW ? x_dims[1]
                                                  : x_dims[x_dims.size() - 1]);
  const int group_size = C / groups;
  const int W = (data_layout == DataLayout::kNCHW ? x_dims[x_dims.size() - 1]
                                                  : x_dims[x_dims.size() - 2]);
  if (d_x) {
    dev_ctx.template Alloc<T>(d_x);
  }
  phi::funcs::SetConstant<GPUContext, T> set_zero;
  phi::funcs::SetConstant<GPUContext, AccT> set_zero_AccT;
  // ds/db: per-(n,c) partial sums of dy*x and dy.
  DenseTensor ds, db;
  ds.Resize({x_dims[0], C});
  AccT* ds_data = dev_ctx.template Alloc<AccT>(&ds);
  db.Resize({x_dims[0], C});
  AccT* db_data = dev_ctx.template Alloc<AccT>(&db);
  auto* y_data = y.data<T>();
  auto* x_data = x.data<T>();
  T* d_x_data = nullptr;
  if (d_x) d_x_data = d_x->data<T>();
  auto* dy_data = d_y.data<T>();
  auto* var_data = var.data<AccT>();
  auto* mean_data = mean.data<AccT>();
  T* d_scale_data = nullptr;
  if (d_scale) {
    dev_ctx.template Alloc<T>(d_scale);
    d_scale_data = d_scale->data<T>();
  }
  T* d_bias_data = nullptr;
  if (d_bias) {
    dev_ctx.template Alloc<T>(d_bias);
    d_bias_data = d_bias->data<T>();
  }
  const T* scale_data = nullptr;
  if (scale_ptr) scale_data = scale_ptr->data<T>();
  const T* bias_data = nullptr;
  if (bias_ptr) bias_data = bias_ptr->data<T>();
  // imsize: product of the spatial dimensions (supports N-D inputs).
  int imsize = 1;
  if (data_layout == DataLayout::kNCHW) {
    for (int i = 2; i < x_dims.size(); ++i) {
      imsize *= x_dims[i];
    }
  } else {
    for (int i = 1; i < x_dims.size() - 1; ++i) {
      imsize *= x_dims[i];
    }
  }
  int block_size = ::min(1024, imsize);
  const int block_dims = 1024;
  dim3 grid(group_size, groups, x_dims[0]);
  dim3 threads(block_size, 1, 1);
  // NOTE(review): this `flags` is unused in the kNCHW branch and shadowed by
  // an identical declaration in the else branch below.
  int flags =
      (scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias;
  if (data_layout == DataLayout::kNCHW) {
    // Round the block size up to a power of two, at least one warp.
    const int max_num_threads = 1024;
    int max_block_size = ::min(imsize, max_num_threads);
    int block_size_nchw = 1;
    while (block_size_nchw < max_block_size) {
      block_size_nchw *= 2;
    }
    block_size_nchw = ::max(block_size_nchw, kps::details::kWarpSize);
    dim3 blocks(block_size_nchw);
    // Stage 1: per-(n,c) sums ds = sum(dy*x), db = sum(dy).
    hipLaunchKernelGGL(( ScalarGetDsDbCUDAKernel<T, AccT>)
        , dim3(x_dims[0] * C), dim3(blocks), 0, dev_ctx.stream(),
            imsize, x_data, dy_data, ds_data, db_data);
    if (d_scale || d_bias) {
      // Stage 2a: reduce ds/db over the batch into d_scale/d_bias.
      const int block = 256;
      hipLaunchKernelGGL(( GetScaleBiasGradientCUDAKernel<T, AccT>)
          , dim3((C + block - 1) / block), dim3(block), 0, dev_ctx.stream(),
              x_dims[0],
              C,
              groups,
              epsilon,
              mean_data,
              var_data,
              ds_data,
              db_data,
              d_scale_data,
              d_bias_data);
    }
    if (d_x_data != nullptr) {
      // p1 * dy + p2 * x + p3,
      // p1, p2, p3 represent the reverse calculation of temporary variables
      // p1 = scale * var_inv
      // p2 = (db * scale * mean - ds * scale) * pow(var_inv, 3) * (1/n)
      // p3 = -p2 * mean[ng] - db * scale * var_inv * (1/n);
      DenseTensor p1, p2, p3;
      p1.Resize({x_dims[0] * C});
      AccT* p1_data = dev_ctx.template Alloc<AccT>(&p1);
      p2.Resize({x_dims[0], groups});
      AccT* p2_data = dev_ctx.template Alloc<AccT>(&p2);
      p3.Resize({x_dims[0], groups});
      AccT* p3_data = dev_ctx.template Alloc<AccT>(&p3);
      // Stage 2b: coefficients, then Stage 3: assemble dx.
      hipLaunchKernelGGL(( GetBackwardParamsCUDAKernel<T, AccT, block_dims>)
          , dim3(dim3(x_dims[0], groups)), dim3(block_dims), 0, dev_ctx.stream(),
              imsize,
              groups,
              group_size,
              epsilon,
              mean_data,
              var_data,
              scale_data,
              ds_data,
              db_data,
              p1_data,
              p2_data,
              p3_data);
      hipLaunchKernelGGL(( GetXGradientCUDAKernel<T, AccT>)
          , dim3(grid), dim3(threads), 0, dev_ctx.stream(), imsize,
              C,
              group_size,
              groups,
              p1_data,
              p2_data,
              p3_data,
              x_data,
              dy_data,
              d_x_data);
    }
  } else {
    // Channel-last path: accumulate into zero-initialized temporaries, then
    // compute dx. Note it reads the forward OUTPUT y, not x.
    if (d_scale) {
      set_zero(dev_ctx, d_scale, static_cast<T>(0));
    }
    if (d_bias) {
      set_zero(dev_ctx, d_bias, static_cast<T>(0));
    }
    DenseTensor temp_var;
    temp_var.Resize(var.dims());
    dev_ctx.template Alloc<AccT>(&temp_var);
    set_zero_AccT(dev_ctx, &temp_var, static_cast<AccT>(0));
    auto* temp_var_data = temp_var.data<AccT>();
    DenseTensor temp_mean;
    temp_mean.Resize(var.dims());
    dev_ctx.template Alloc<AccT>(&temp_mean);
    set_zero_AccT(dev_ctx, &temp_mean, static_cast<AccT>(0));
    auto* temp_mean_data = temp_mean.data<AccT>();
    int flags =
        (scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias;
    // UNROLL_ALL_CASES instantiates the kernel for each flags combination.
    UNROLL_ALL_CASES(flags,
                     GroupNormBackwardGetMeanAndVar,
                     y_data,
                     scale_data,
                     bias_data,
                     dy_data,
                     x_dims[0],
                     C,
                     W,
                     imsize,
                     groups,
                     group_size,
                     epsilon,
                     temp_mean_data,
                     temp_var_data,
                     d_scale_data,
                     d_bias_data);
    if (d_x_data != nullptr) {
      UNROLL_ALL_CASES(flags,
                       GroupNormBackward,
                       y_data,
                       dy_data,
                       scale_data,
                       bias_data,
                       var_data,
                       temp_mean_data,
                       temp_var_data,
                       x_dims[0],
                       C,
                       W,
                       imsize,
                       groups,
                       group_size,
                       epsilon,
                       d_x_data);
    }
  }
}
} // namespace phi
PD_REGISTER_KERNEL(group_norm_grad,
GPU,
ALL_LAYOUT,
phi::GroupNormGradKernel,
float,
double,
phi::dtype::bfloat16,
phi::dtype::float16) {}
| c31824f2572e02c9d2c39e97da6875901ea5fe34.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/group_norm_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/gpu/group_norm_utils.h"
namespace phi {
template <typename T, typename AccT, int flags>
__global__ void GroupNormBackwardGetMeanAndVar(const T* x,
                                               const T* scale,
                                               const T* bias,
                                               const T* d_y,
                                               int N,
                                               int C,
                                               int W,
                                               int imsize,
                                               int groups,
                                               int group_size,
                                               float epsilon,
                                               AccT* d_mean,
                                               AccT* d_var,
                                               T* d_scale,
                                               T* d_bias) {
  // Accumulates backward partial sums for NHWC data (index math below is
  // ((n * H + h) * W + w) * C + c):
  //   d_mean[n,g] += sum_c_in_block d_y * scale
  //   d_var[n,g]  += sum of d_y * (x - bias)
  //   d_scale[c]  += sum of d_y * (x - bias) / scale
  //   d_bias[c]   += sum of d_y
  // Launch: grid = (group_size, groups, N); one block per (n, channel) pair,
  // threads stride over the spatial extent. `flags` (kHasScale | kHasBias)
  // selects whether scale/bias are present. N and epsilon are unused here;
  // kept for signature parity with GroupNormBackward.
  int gid = blockIdx.y;
  int cid = blockIdx.x;
  int bid = blockIdx.z;
  int H = imsize / W;
  int ccid = gid * group_size + cid;
  // Guard for the (possibly ragged) last group when C % groups != 0.
  if (ccid >= C) return;
  T x_scale = (flags & kHasScale) ? scale[ccid] : static_cast<T>(1);
  T x_bias = (flags & kHasBias) ? bias[ccid] : static_cast<T>(0);
  T x_scale_inv = static_cast<T>(0);
  // A zero scale leaves x_scale_inv at 0, avoiding a division by zero.
  if (x_scale != static_cast<T>(0)) x_scale_inv = static_cast<T>(1.0) / x_scale;
  AccT d_mean_data = static_cast<AccT>(0);
  AccT d_var_data = static_cast<AccT>(0);
  AccT d_scale_data = static_cast<AccT>(0);
  AccT d_bias_data = static_cast<AccT>(0);
  for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
    AccT val, dval;
    int hid = imid / W;
    int wid = imid % W;
    val = static_cast<AccT>(x[(bid * H + hid) * W * C + wid * C + ccid]) -
          static_cast<AccT>(x_bias);
    dval = static_cast<AccT>(d_y[(bid * H + hid) * W * C + wid * C + ccid]);
    d_var_data += val * dval;
    d_mean_data += dval * static_cast<AccT>(x_scale);
    val = val * static_cast<AccT>(x_scale_inv);
    d_bias_data += dval;
    d_scale_data += val * dval;
  }
  // Multiple channel-blocks of the same group contribute to the same
  // d_mean/d_var slot, hence the atomics.
  CudaAtomicAddWithWarp(&(d_mean[bid * groups + gid]),
                        static_cast<AccT>(d_mean_data));
  CudaAtomicAddWithWarp(&(d_var[bid * groups + gid]),
                        static_cast<AccT>(d_var_data));
  if (flags & kHasScale) {
#if CUDA_VERSION >= 11070
    phi::CudaAtomicAdd(&(d_scale[ccid]), static_cast<T>(d_scale_data));
#else
    CudaAtomicAddWithWarp(&(d_scale[ccid]), static_cast<T>(d_scale_data));
#endif
  }
  if (flags & kHasBias) {
#if CUDA_VERSION >= 11070
    phi::CudaAtomicAdd(&(d_bias[ccid]), static_cast<T>(d_bias_data));
#else
    CudaAtomicAddWithWarp(&(d_bias[ccid]), static_cast<T>(d_bias_data));
#endif
  }
}
template <typename T, typename AccT, int flags>
__global__ void GroupNormBackward(const T* x,
                                  const T* d_y,
                                  const T* scale,
                                  const T* bias,
                                  const AccT* var,
                                  const AccT* d_mean,
                                  const AccT* d_var,
                                  int N,
                                  int C,
                                  int W,
                                  int imsize,
                                  int groups,
                                  int group_size,
                                  float epsilon,
                                  T* d_x) {
  // Computes d_x for NHWC data from the per-(n, g) variance plus the d_mean /
  // d_var partials accumulated by GroupNormBackwardGetMeanAndVar.
  // Launch: grid = (group_size, groups, N); threads stride the spatial extent.
  // using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
  int gid = blockIdx.y;
  int cid = blockIdx.x;
  int bid = blockIdx.z;
  int H = imsize / W;
  // Channels actually present in this group (last group may be ragged).
  int number = min(group_size, static_cast<int>(C - gid * group_size));
  int ccid = gid * group_size + cid;
  if (ccid >= C) return;
  AccT x_var = var[bid * groups + gid];
  AccT d_x_mean = static_cast<AccT>(d_mean[bid * groups + gid]);
  AccT d_x_var = static_cast<AccT>(d_var[bid * groups + gid]);
  // Reciprocal standard deviation of this (n, g) slice.
  AccT x_var_inv = static_cast<AccT>(1.0) / sqrt((x_var) + epsilon);
  // 1 / (elements per group slice).
  AccT number_inv =
      static_cast<AccT>(1.0) / static_cast<AccT>((number * imsize));
  AccT x_scale = (flags & kHasScale) ? static_cast<AccT>(scale[ccid])
                                     : static_cast<AccT>(1);
  AccT x_bias =
      (flags & kHasBias) ? static_cast<AccT>(bias[ccid]) : static_cast<AccT>(0);
  AccT x_scale_inv = static_cast<AccT>(0);
  if (x_scale != static_cast<AccT>(0))
    x_scale_inv = static_cast<AccT>(1.0) / x_scale;
  for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
    int hid = imid / W;
    int wid = imid % W;
    AccT tmp = static_cast<AccT>(x[(bid * H + hid) * W * C + wid * C + ccid]);
    // v_y: activation with the affine transform undone ((x - bias) / scale).
    AccT v_y = (tmp - x_bias) * x_scale_inv;
    AccT dly = static_cast<AccT>(d_y[(bid * H + hid) * W * C + wid * C + ccid]);
    // d_x = rstd * (dy*scale - (d_var * v_y + d_mean) / m), m = number*imsize.
    d_x[(bid * H + hid) * W * C + wid * C + ccid] =
        static_cast<T>(x_var_inv * ((dly) * (x_scale)-number_inv * d_x_var *
                                    (v_y)-number_inv * d_x_mean));
  }
}
template <typename T, typename AccT>
__global__ void ScalarGetDsDbCUDAKernel(
    int imsize, const T* x, const T* dy, AccT* ds, AccT* db) {
  // One block per (n, c) slice: accumulate ds = sum(dy * x) and db = sum(dy)
  // over the spatial extent, then block-reduce into ds[nc] / db[nc].
  const int nc = blockIdx.x;
  const T* x_nc = x + nc * imsize;
  const T* dy_nc = dy + nc * imsize;
  AccT local_ds = 0;
  AccT local_db = 0;
  for (int i = threadIdx.x; i < imsize; i += blockDim.x) {
    const AccT g = static_cast<AccT>(dy_nc[i]);
    local_ds += g * static_cast<AccT>(x_nc[i]);
    local_db += g;
  }
  ReduceMeanAndVar<AccT>(db, ds, local_db, local_ds, 1);
}
template <typename T, typename AccT>
__global__ void GetScaleBiasGradientCUDAKernel(int N,
                                               int C,
                                               int group,
                                               float epsilon,
                                               const AccT* mean,
                                               const AccT* var,
                                               const AccT* ds,
                                               const AccT* db,
                                               T* d_scale,
                                               T* d_bias) {
  // One thread per channel c. From the per-(n, c) partial sums
  // ds[nc] = sum(dy*x) and db[nc] = sum(dy), computes:
  //   d_scale[c] = sum_n (ds[nc] - db[nc]*mean[ng]) * rstd(var[ng])
  //   d_bias[c]  = sum_n db[nc]
  // Either output pointer may be null, in which case it is skipped.
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  if (c < C) {
    const int G = group;
    const int D = C / G;
    AccT sum1 = static_cast<AccT>(0);
    AccT sum2 = static_cast<AccT>(0);
    for (int n = 0; n < N; ++n) {
      const int nc = n * C + c;
      // ng: index of the (sample, group) statistics for this channel.
      const int ng = n * G + c / D;
      sum1 +=
          (d_scale == nullptr)
              ? AccT(0)
              : ((ds[nc] - db[nc] * (mean[ng])) * (rsqrt((var[ng]) + epsilon)));
      sum2 += (d_bias == nullptr) ? AccT(0) : db[nc];
    }
    if (d_scale != nullptr) {
      d_scale[c] = static_cast<T>(sum1);
    }
    if (d_bias != nullptr) {
      d_bias[c] = static_cast<T>(sum2);
    }
  }
}
template <typename T, typename AccT, int BlockDim>
__global__ void GetBackwardParamsCUDAKernel(int imsize,
                                            int groups,
                                            int group_size,
                                            float epsilon,
                                            const AccT* mean,
                                            const AccT* var,
                                            const T* scale,
                                            const AccT* ds,
                                            const AccT* db,
                                            AccT* p1,
                                            AccT* p2,
                                            AccT* p3) {
  // One block per (n, g). Produces the coefficients used by
  // GetXGradientCUDAKernel (dx = p1*dy + p2*x + p3):
  //   p1[n*C + c] = scale[c] * rstd          (per channel)
  //   p2[n,g], p3[n,g]                       (per group, written by thread 0)
  const int n = blockIdx.x;
  const int g = blockIdx.y;
  const int ng = n * groups + g;
  AccT sum1 = 0;
  AccT sum2 = 0;
  AccT var_inv = rsqrt(static_cast<AccT>(var[ng]) + epsilon);
  for (int64_t i = threadIdx.x; i < group_size; i += blockDim.x) {
    const int64_t index = ng * group_size + i;
    const int64_t c = g * group_size + i;
    // For the group sums a missing scale behaves as 1 ...
    const AccT scale_v =
        scale == nullptr ? static_cast<AccT>(1) : static_cast<AccT>(scale[c]);
    sum1 += static_cast<AccT>(ds[index]) * scale_v;
    sum2 += static_cast<AccT>(db[index]) * scale_v;
    // ... while p1 uses 0 when scale is absent.
    const AccT scale_c =
        scale == nullptr ? static_cast<AccT>(0) : static_cast<AccT>(scale[c]);
    p1[index] = static_cast<AccT>(scale_c) * var_inv;
  }
  typedef cub::BlockReduce<AccT, BlockDim> BlockReduce;
  __shared__ typename BlockReduce::TempStorage ds_storage;
  __shared__ typename BlockReduce::TempStorage db_storage;
  // After the reductions only thread 0 holds the valid block-wide totals.
  sum1 = BlockReduce(ds_storage).Reduce(sum1, cub::Sum());
  sum2 = BlockReduce(db_storage).Reduce(sum2, cub::Sum());
  if (threadIdx.x == 0) {
    // s = 1 / (elements per group); see caller comment for the p2/p3 formulas.
    const AccT s =
        static_cast<AccT>(1) / static_cast<AccT>(group_size * imsize);
    const AccT x = (sum2 * static_cast<AccT>(mean[ng]) - sum1) * (var_inv) *
                   (var_inv) * (var_inv)*s;
    p2[ng] = x;
    p3[ng] = -x * (mean[ng]) - (sum2 * var_inv) * s;
  }
}
template <typename T, typename AccT>
__global__ void GetXGradientCUDAKernel(int imsize,
                                       int C,
                                       int group_size,
                                       int groups,
                                       AccT* p1,
                                       AccT* p2,
                                       AccT* p3,
                                       const T* x,
                                       const T* dy,
                                       T* dx) {
  // Final NCHW gradient assembly: dx = p1*dy + p2*x + p3, with p1 indexed per
  // (n, c) and p2/p3 per (n, g). Grid: (group_size, groups, N); threads
  // stride the spatial extent.
  int cid = blockIdx.x;
  int gid = blockIdx.y;
  int bid = blockIdx.z;
  int ccid = bid * C + gid * group_size + cid;  // index into p1 (sized N*C)
  int ng = bid * groups + gid;                  // index into p2/p3 (sized N*groups)
  int nc = gid * group_size + cid;              // channel index within a sample
  for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
    int index = (bid * C + nc) * imsize + imid;
    dx[index] = static_cast<T>(p1[ccid] * static_cast<AccT>(dy[index]) +
                               p2[ng] * static_cast<AccT>(x[index]) + p3[ng]);
  }
}
template <typename T, typename Context>
void GroupNormGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& scale,
const paddle::optional<DenseTensor>& bias,
const DenseTensor& y,
const DenseTensor& mean,
const DenseTensor& var,
const DenseTensor& d_y,
float epsilon,
int groups,
const std::string& data_layout_str,
DenseTensor* d_x,
DenseTensor* d_scale,
DenseTensor* d_bias) {
using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
const auto scale_ptr = scale.get_ptr();
const auto bias_ptr = bias.get_ptr();
const auto& x_dims = x.dims();
const int C = (data_layout == DataLayout::kNCHW ? x_dims[1]
: x_dims[x_dims.size() - 1]);
const int group_size = C / groups;
const int W = (data_layout == DataLayout::kNCHW ? x_dims[x_dims.size() - 1]
: x_dims[x_dims.size() - 2]);
if (d_x) {
dev_ctx.template Alloc<T>(d_x);
}
phi::funcs::SetConstant<GPUContext, T> set_zero;
phi::funcs::SetConstant<GPUContext, AccT> set_zero_AccT;
DenseTensor ds, db;
ds.Resize({x_dims[0], C});
AccT* ds_data = dev_ctx.template Alloc<AccT>(&ds);
db.Resize({x_dims[0], C});
AccT* db_data = dev_ctx.template Alloc<AccT>(&db);
auto* y_data = y.data<T>();
auto* x_data = x.data<T>();
T* d_x_data = nullptr;
if (d_x) d_x_data = d_x->data<T>();
auto* dy_data = d_y.data<T>();
auto* var_data = var.data<AccT>();
auto* mean_data = mean.data<AccT>();
T* d_scale_data = nullptr;
if (d_scale) {
dev_ctx.template Alloc<T>(d_scale);
d_scale_data = d_scale->data<T>();
}
T* d_bias_data = nullptr;
if (d_bias) {
dev_ctx.template Alloc<T>(d_bias);
d_bias_data = d_bias->data<T>();
}
const T* scale_data = nullptr;
if (scale_ptr) scale_data = scale_ptr->data<T>();
const T* bias_data = nullptr;
if (bias_ptr) bias_data = bias_ptr->data<T>();
int imsize = 1;
if (data_layout == DataLayout::kNCHW) {
for (int i = 2; i < x_dims.size(); ++i) {
imsize *= x_dims[i];
}
} else {
for (int i = 1; i < x_dims.size() - 1; ++i) {
imsize *= x_dims[i];
}
}
int block_size = std::min(1024, imsize);
const int block_dims = 1024;
dim3 grid(group_size, groups, x_dims[0]);
dim3 threads(block_size, 1, 1);
int flags =
(scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias;
if (data_layout == DataLayout::kNCHW) {
const int max_num_threads = 1024;
int max_block_size = std::min(imsize, max_num_threads);
int block_size_nchw = 1;
while (block_size_nchw < max_block_size) {
block_size_nchw *= 2;
}
block_size_nchw = std::max(block_size_nchw, kps::details::kWarpSize);
dim3 blocks(block_size_nchw);
ScalarGetDsDbCUDAKernel<T, AccT>
<<<x_dims[0] * C, blocks, 0, dev_ctx.stream()>>>(
imsize, x_data, dy_data, ds_data, db_data);
if (d_scale || d_bias) {
const int block = 256;
GetScaleBiasGradientCUDAKernel<T, AccT>
<<<(C + block - 1) / block, block, 0, dev_ctx.stream()>>>(
x_dims[0],
C,
groups,
epsilon,
mean_data,
var_data,
ds_data,
db_data,
d_scale_data,
d_bias_data);
}
if (d_x_data != nullptr) {
// p1 * dy + p2 * x + p3,
// p1, p2, p3 represent the reverse calculation of temporary variables
// p1 = scale * var_inv
// p2 = (db * scale * mean - ds * scale) * pow(var_inv, 3) * (1/n)
// p3 = -p2 * mean[ng] - db * scale * var_inv * (1/n);
DenseTensor p1, p2, p3;
p1.Resize({x_dims[0] * C});
AccT* p1_data = dev_ctx.template Alloc<AccT>(&p1);
p2.Resize({x_dims[0], groups});
AccT* p2_data = dev_ctx.template Alloc<AccT>(&p2);
p3.Resize({x_dims[0], groups});
AccT* p3_data = dev_ctx.template Alloc<AccT>(&p3);
GetBackwardParamsCUDAKernel<T, AccT, block_dims>
<<<dim3(x_dims[0], groups), block_dims, 0, dev_ctx.stream()>>>(
imsize,
groups,
group_size,
epsilon,
mean_data,
var_data,
scale_data,
ds_data,
db_data,
p1_data,
p2_data,
p3_data);
GetXGradientCUDAKernel<T, AccT>
<<<grid, threads, 0, dev_ctx.stream()>>>(imsize,
C,
group_size,
groups,
p1_data,
p2_data,
p3_data,
x_data,
dy_data,
d_x_data);
}
} else {
if (d_scale) {
set_zero(dev_ctx, d_scale, static_cast<T>(0));
}
if (d_bias) {
set_zero(dev_ctx, d_bias, static_cast<T>(0));
}
DenseTensor temp_var;
temp_var.Resize(var.dims());
dev_ctx.template Alloc<AccT>(&temp_var);
set_zero_AccT(dev_ctx, &temp_var, static_cast<AccT>(0));
auto* temp_var_data = temp_var.data<AccT>();
DenseTensor temp_mean;
temp_mean.Resize(var.dims());
dev_ctx.template Alloc<AccT>(&temp_mean);
set_zero_AccT(dev_ctx, &temp_mean, static_cast<AccT>(0));
auto* temp_mean_data = temp_mean.data<AccT>();
int flags =
(scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias;
UNROLL_ALL_CASES(flags,
GroupNormBackwardGetMeanAndVar,
y_data,
scale_data,
bias_data,
dy_data,
x_dims[0],
C,
W,
imsize,
groups,
group_size,
epsilon,
temp_mean_data,
temp_var_data,
d_scale_data,
d_bias_data);
if (d_x_data != nullptr) {
UNROLL_ALL_CASES(flags,
GroupNormBackward,
y_data,
dy_data,
scale_data,
bias_data,
var_data,
temp_mean_data,
temp_var_data,
x_dims[0],
C,
W,
imsize,
groups,
group_size,
epsilon,
d_x_data);
}
}
}
} // namespace phi
PD_REGISTER_KERNEL(group_norm_grad,
GPU,
ALL_LAYOUT,
phi::GroupNormGradKernel,
float,
double,
phi::dtype::bfloat16,
phi::dtype::float16) {}
|
f5a41e41a7793dbdef6013a57edfa22485ec442d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Msnhnet/layers/cuda/MsnhMaxPoolLayerGPU.h"
namespace Msnhnet
{
// Depth-wise ("channel") max pooling: spatial size is unchanged; for each
// output channel g the max is taken over input channels g, g+outChannel,
// g+2*outChannel, ...  One thread handles one (x, y, batch) position and
// loops over all outChannel output channels.
__global__ void maxpoolDepthKernel(const int n, const int width, const int height, const int channel,
                                       const int outChannel, const int batch, float *const input, float *const output)
{
    // flat thread id over a (possibly 2-D) grid of 1-D blocks
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(index < n)
    {
        // decompose the flat index into (x=j, y=i, batch=b); n = width*height*batch
        int j = index % width;
        index = index / width;
        int i = index % height;
        index = index / height;
        int b = index % batch;
        for (int g = 0; g < outChannel; ++g)
        {
            // NCHW layout with spatial dims equal to the input's
            int outIndex = j + width*(i + height*(g + outChannel*b));
            float max = -FLT_MAX;
            // stride through the input channels that fold into output channel g
            for (int k = g; k < channel; k+=outChannel)
            {
                int inIndex = j + width*(i + height*(k + channel*b));
                float val = input[inIndex];
                max = (val > max)?val:max;
            }
            output[outIndex] = max;
        }
    }
}
// Standard 2-D max pooling, one thread per output element
// (n = outWidth * outHeight * channel * batch, NCHW layout).
// Window positions that fall outside the input contribute -INFINITY.
__global__ void maxpoolNormalKernel(const int n,
                                        const int width, const int height,
                                        const int channel,
                                        const int outWidth, const int outHeight,
                                        const int strideX, const int strideY,
                                        const int kSizeX, const int kSizeY,
                                        const int paddingX, const int paddingY,
                                        float *const input, float *const output)
{
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(index < n)
    {
        // decompose flat index into (x=j, y=i, channel=k, batch=b)
        int j = index % outWidth;
        index = index / outWidth;
        int i = index % outHeight;
        index = index / outHeight;
        int k = index % channel;
        index = index / channel;
        int b = index;
        // darknet-style padding: window shifted left/up by ceil(padding/2)
        int widthOffset = -(paddingX + 1)/2;
        int heightOffset = -(paddingY + 1)/2;
        int outIndex = j + outWidth*(i + outHeight*(k + channel*b));
        float max = -INFINITY;
        for (int l = 0; l < kSizeY; ++l)
        {
            for (int m = 0; m < kSizeX; ++m)
            {
                int curHeight = heightOffset + i*strideY + l;
                int curWidth = widthOffset + j*strideX + m;
                int idx = curWidth + width*(curHeight + height*(k + b*channel));
                bool valid = (curHeight >=0 && curHeight < height &&
                              curWidth >=0 && curWidth < width);
                // input[idx] is only dereferenced when idx is in bounds
                float value = (valid != 0)? input[idx] : -INFINITY;
                max = (value > max) ? value : max;
            }
        }
        output[outIndex] = max;
    }
}
// Launches depth-wise max pooling: one thread per (x, y, batch) position.
// NOTE(review): n is computed from outHeight/outWidth but the kernel
// decomposes the index by the input width/height -- this assumes
// outWidth == width and outHeight == height for depth pooling; confirm.
void MaxPoolLayerGPU::forwardDepthGPU(const int &width, const int &height, const int &channel, const int &outWidth, const int &outHeight,
                                         const int &outChannel, const int &batch, float *const &input, float *const &output)
{
    size_t n = outHeight * outWidth * 1 * batch;
    hipLaunchKernelGGL(( maxpoolDepthKernel), dim3(Cuda::getGrid(n)), dim3(Cuda::blockThread), 0, Cuda::getCudaStream(), n, width, height, channel, outChannel, batch, input, output);
    CUDA_CHECK(hipPeekAtLastError());
}
// Launches standard 2-D max pooling, one thread per output element
// (n = outWidth * outHeight * outChannel * batch).
// Fix: the kernel's parameters are declared (outWidth, outHeight), but the
// original call passed (outHeight, outWidth) -- swapped arguments that
// produced wrong indexing whenever the output was not square.
void MaxPoolLayerGPU::forwardNormalGPU(const int &width, const int &height, const int &channel, const int &outWidth, const int &outHeight,
                                          const int &outChannel, const int &strideX, const int &strideY, const int &kSizeX, const int kSizeY,
                                          const int &paddingX, const int &paddingY, const int &batch, float *const &input, float *const &output)
{
    size_t n = outHeight * outWidth * outChannel * batch;
    hipLaunchKernelGGL(( maxpoolNormalKernel), dim3(Cuda::getGrid(n)), dim3(Cuda::blockThread), 0, Cuda::getCudaStream(), n,width,height,
                                                                                              channel,
                                                                                              outWidth,outHeight,
                                                                                              strideX,strideY,
                                                                                              kSizeX,kSizeY,
                                                                                              paddingX,paddingY,
                                                                                              input,output);
    CUDA_CHECK(hipPeekAtLastError());
}
}
| f5a41e41a7793dbdef6013a57edfa22485ec442d.cu | #include "Msnhnet/layers/cuda/MsnhMaxPoolLayerGPU.h"
namespace Msnhnet
{
// Depth-wise ("channel") max pooling: spatial size is unchanged; for each
// output channel g the max is taken over input channels g, g+outChannel,
// g+2*outChannel, ...  One thread handles one (x, y, batch) position and
// loops over all outChannel output channels.
__global__ void maxpoolDepthKernel(const int n, const int width, const int height, const int channel,
                                       const int outChannel, const int batch, float *const input, float *const output)
{
    // flat thread id over a (possibly 2-D) grid of 1-D blocks
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(index < n)
    {
        // decompose the flat index into (x=j, y=i, batch=b); n = width*height*batch
        int j = index % width;
        index = index / width;
        int i = index % height;
        index = index / height;
        int b = index % batch;
        for (int g = 0; g < outChannel; ++g)
        {
            // NCHW layout with spatial dims equal to the input's
            int outIndex = j + width*(i + height*(g + outChannel*b));
            float max = -FLT_MAX;
            // stride through the input channels that fold into output channel g
            for (int k = g; k < channel; k+=outChannel)
            {
                int inIndex = j + width*(i + height*(k + channel*b));
                float val = input[inIndex];
                max = (val > max)?val:max;
            }
            output[outIndex] = max;
        }
    }
}
// Standard 2-D max pooling, one thread per output element
// (n = outWidth * outHeight * channel * batch, NCHW layout).
// Window positions that fall outside the input contribute -INFINITY.
__global__ void maxpoolNormalKernel(const int n,
                                        const int width, const int height,
                                        const int channel,
                                        const int outWidth, const int outHeight,
                                        const int strideX, const int strideY,
                                        const int kSizeX, const int kSizeY,
                                        const int paddingX, const int paddingY,
                                        float *const input, float *const output)
{
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(index < n)
    {
        // decompose flat index into (x=j, y=i, channel=k, batch=b)
        int j = index % outWidth;
        index = index / outWidth;
        int i = index % outHeight;
        index = index / outHeight;
        int k = index % channel;
        index = index / channel;
        int b = index;
        // darknet-style padding: window shifted left/up by ceil(padding/2)
        int widthOffset = -(paddingX + 1)/2;
        int heightOffset = -(paddingY + 1)/2;
        int outIndex = j + outWidth*(i + outHeight*(k + channel*b));
        float max = -INFINITY;
        for (int l = 0; l < kSizeY; ++l)
        {
            for (int m = 0; m < kSizeX; ++m)
            {
                int curHeight = heightOffset + i*strideY + l;
                int curWidth = widthOffset + j*strideX + m;
                int idx = curWidth + width*(curHeight + height*(k + b*channel));
                bool valid = (curHeight >=0 && curHeight < height &&
                              curWidth >=0 && curWidth < width);
                // input[idx] is only dereferenced when idx is in bounds
                float value = (valid != 0)? input[idx] : -INFINITY;
                max = (value > max) ? value : max;
            }
        }
        output[outIndex] = max;
    }
}
// Launches depth-wise max pooling: one thread per (x, y, batch) position.
// NOTE(review): n is computed from outHeight/outWidth but the kernel
// decomposes the index by the input width/height -- this assumes
// outWidth == width and outHeight == height for depth pooling; confirm.
void MaxPoolLayerGPU::forwardDepthGPU(const int &width, const int &height, const int &channel, const int &outWidth, const int &outHeight,
                                         const int &outChannel, const int &batch, float *const &input, float *const &output)
{
    size_t n = outHeight * outWidth * 1 * batch;
    maxpoolDepthKernel<<<Cuda::getGrid(n), Cuda::blockThread, 0, Cuda::getCudaStream()>>>(n, width, height, channel, outChannel, batch, input, output);
    CUDA_CHECK(cudaPeekAtLastError());
}
// Launches standard 2-D max pooling, one thread per output element
// (n = outWidth * outHeight * outChannel * batch).
// Fix: the kernel's parameters are declared (outWidth, outHeight), but the
// original call passed (outHeight, outWidth) -- swapped arguments that
// produced wrong indexing whenever the output was not square.
void MaxPoolLayerGPU::forwardNormalGPU(const int &width, const int &height, const int &channel, const int &outWidth, const int &outHeight,
                                          const int &outChannel, const int &strideX, const int &strideY, const int &kSizeX, const int kSizeY,
                                          const int &paddingX, const int &paddingY, const int &batch, float *const &input, float *const &output)
{
    size_t n = outHeight * outWidth * outChannel * batch;
    maxpoolNormalKernel<<<Cuda::getGrid(n), Cuda::blockThread, 0, Cuda::getCudaStream()>>>(n,width,height,
                                                                                           channel,
                                                                                           outWidth,outHeight,
                                                                                           strideX,strideY,
                                                                                           kSizeX,kSizeY,
                                                                                           paddingX,paddingY,
                                                                                           input,output);
    CUDA_CHECK(cudaPeekAtLastError());
}
}
|
e9402b12e2e7fe5a1a4476a4ad25f9e46e177a88.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "included.h"
#include "thread_indexing.h"
#define MB2B(x) ((x) << 20)
pthread_t posix_thread;
volatile uint64_t * glb_ptr [4];
uint64_t l2_size = 2 << 20; //2 MB
uint64_t l2_size_inmb = 1;
volatile char *device_mem;
// Host helper thread: spins forever, flushing the four aggressor addresses
// (published by the GPU-launch code via glb_ptr) from the CPU caches with
// clflush, so the GPU's zero-copy reads always reach DRAM.  x86-only.
// Never returns; runs until the process exits.
void * thread_func(void * arg){
  pthread_t self_id;
  self_id=pthread_self();
  //printf("\tIm the POSIXthread %u\n",self_id);
  while(1){
    // wait until all four aggressor pointers have been published
    if(glb_ptr[0] != NULL && glb_ptr[1] != NULL && glb_ptr[2] != NULL && glb_ptr[3] != NULL){
      //printf("[T] glb_ptr[0]: %p\nglb_ptr[1]: %p\nglb_ptr[2]: %p\nglb_ptr[3]: %p\n",glb_ptr[0],glb_ptr[1],glb_ptr[2],glb_ptr[3]);
      asm volatile(
          "clflush (%0);\n\t"
          "clflush (%1);\n\t"
          "clflush (%2);\n\t"
          "clflush (%3);\n\t"
          : : "r" (glb_ptr[0]), "r" (glb_ptr[1]), "r" (glb_ptr[2]), "r" (glb_ptr[3]) : "memory");
    }
  }
}
// Allocates a pinned host mapping covering fraction_of_physical_memory of
// physical RAM (zero-copy/mapped when memory_type_flag == 0), then touches
// every 4 KB page so the kernel actually backs it with physical frames
// (required for the later pagemap-based physical-address lookup).
void SetupMapping(uint64_t * mapping_size,volatile void** mapping) {
  *mapping_size = GetPhysicalMemorySize() * fraction_of_physical_memory;
  if(!memory_type_flag){
    printf("[!] Allocating with hipHostMalloc %lu MB\n",B2MB(*mapping_size));
    gpuErrchk(hipHostMalloc(mapping,(size_t)*mapping_size,hipHostMallocMapped));
  }else{
    printf("[!] Allocating with hipHostMalloc %lu MB\n",B2MB(*mapping_size));
    gpuErrchk(hipHostMalloc(mapping,*mapping_size));
  }
  // write to each page to force physical backing
  for (uint64_t index = 0; index < *mapping_size; index += 0x1000) {
    uint64_t* temporary = (uint64_t *)( (uint8_t *)(*mapping) + index);
    temporary[0] = index;
  }
}
// Rowhammer kernel.  Thread 0 repeatedly reads one word in each aggressor
// row (first1/second1 are zero-copy mappings of host memory; a host thread
// concurrently clflushes the same addresses so each read reaches DRAM).
// All other threads sweep the dev_mem buffer to keep L2 busy.  *sum is
// written only so the reads cannot be optimized away; the concurrent
// unsynchronized stores to it are benign for that purpose.
// first2/second2 are currently unused but kept for interface stability.
// Fixes: local_char was read before being initialized (UB).  The original
// branch condition `!thId%2` parses as `(!thId) % 2`, i.e. true only for
// thread 0 -- behavior preserved as `thId == 0`; if every even thread was
// meant to hammer, it should read `thId % 2 == 0` (confirm with authors).
__global__ void hammer(
    volatile uint64_t * first1,
    volatile uint64_t * first2,
    volatile uint64_t * second1,
    volatile uint64_t * second2,
    uint64_t * sum,
    uint64_t number_of_reads,
    volatile char * dev_mem,
    uint64_t l2_size,
    int number_of_threads
    ) {
  uint64_t local_sum = 0;
  int thId = getGlobalIdx_2D_2D();
  int index = thId * l2_size / number_of_threads;
  int i;
  char local_char = 0;  // was uninitialized before first use
  if (thId == 0) {
    // hammer the two aggressor rows
    while (number_of_reads-- > 0)
    {
      local_sum += first1[thId%64];
      local_sum += second1[thId%64];
    }
    *sum = local_sum + 1;
  } else {
    // cache-thrashing sweep over this thread's slice of dev_mem
    for (i = 0; i < l2_size / number_of_threads; ++i) {
      local_char += dev_mem[index + i];
    }
    *sum = local_char + 1;
  }
}
// Groups the mapped pages by presumed DRAM row (physical_address /
// presumed_row_size, via /proc/self/pagemap), then for every triple of
// adjacent rows (r, r+1, r+2) hammers pages of r and r+2 on the GPU while
// checking the victim row r+1 (pre-filled with 0xFF) for bit flips.
// Requires root to read pagemap.  Expects exactly 64 pages per aggressor
// row (256 KB rows).
// NOTE(review): the resize guard uses `>` while pages are stored at
// `presumed_row_index-1` -- the indexing looks off by one; preserved as-is,
// confirm against the pagemap layout intended by the authors.
void HammerAllReachablePages(
    uint64_t presumed_row_size,
    volatile void * memory_mapping,
    uint64_t memory_mapping_size,
    uint64_t number_of_reads)
{
  CpuTimer table_time;
  std::vector<std::vector<uint8_t*> > pages_per_row;
  uint64_t total_bitflips = 0;
  int i=0,j=0,z=0;
  double time[64];
  double page_time=0, average =0;
  uint64_t * d_sum;
  uint64_t * h_first[2], * h_second[2];
  volatile uint64_t * d_first1 = NULL ;
  volatile uint64_t * d_first2 = NULL ;
  volatile uint64_t * d_second1 = NULL;
  volatile uint64_t * d_second2 = NULL;
  pages_per_row.resize(memory_mapping_size / presumed_row_size);
  int pagemap = open("/proc/self/pagemap", O_RDONLY);
  assert(pagemap >= 0);
  //filling the pointers table
  for (uint64_t offset = 0; offset < memory_mapping_size; offset += 0x1000) {
    uint8_t* virtual_address = (uint8_t*)(memory_mapping) + offset;
    uint64_t page_frame_number = GetPageFrameNumber(pagemap, virtual_address);
    uint64_t physical_address = page_frame_number * 0x1000;
    uint64_t presumed_row_index = physical_address / presumed_row_size;
    if (presumed_row_index > pages_per_row.size()) {
      pages_per_row.resize(presumed_row_index);
    }
    pages_per_row[presumed_row_index-1].push_back(virtual_address);
  }
  //done finding the pointers
  printf("[!] pointers for hammering took %f ms\n",table_time.get_diff_ms());
  //device memory for writing the read data
  gpuErrchk(hipMallocManaged((void**)&d_sum, sizeof(uint64_t)));
  printf("[!] Allocating device memory with hipMalloc : %zu bytes\n\n", l2_size);
  gpuErrchk(hipMalloc((void **) &device_mem, l2_size));
  gpuErrchk(hipMemset((void *) device_mem, 1, l2_size));
  // host thread that keeps flushing the aggressor addresses from CPU caches
  pthread_create(&posix_thread,NULL,&thread_func,NULL);
  for (uint64_t row_index = 0; row_index + 2 < pages_per_row.size(); ++row_index) {
    CpuTimer row_time;
    if ((pages_per_row[row_index].size() != 64) || (pages_per_row[row_index+2].size() != 64)) {
      continue;
    }else if (pages_per_row[row_index+1].size() == 0) {
      printf("[!] Can't hammer row %ld,got no pages from that row\n",row_index+1);
      continue;
    }
    printf("[!] Hammering rows %ld/%ld/%ld of %ld (got %ld/%ld/%ld pages)\n",
        row_index, row_index+1, row_index+2, pages_per_row.size(),
        pages_per_row[row_index].size(), pages_per_row[row_index+1].size(),
        pages_per_row[row_index+2].size());
    for (uint8_t * first_row_page : pages_per_row[row_index]) {
      CpuTimer page_with_row_time;
      j=0;
      // Iterate over all pages we have for the second row.
      for (uint8_t* second_row_page : pages_per_row[row_index+2]) {
        // Set all the target pages to 0xFF.
        for (uint8_t* target_page : pages_per_row[row_index+1]) {
          memset(target_page, 0xFF, 0x1000);
        }
        // publish the aggressor addresses to the clflush thread, then get
        // their device-side (zero-copy) aliases for the kernel
        glb_ptr[0] = h_first[0] = (uint64_t *) first_row_page;
        glb_ptr[1] = h_first[1] = (uint64_t *) first_row_page + 0x1000;
        glb_ptr[2] = h_second[0] = (uint64_t *) second_row_page;
        glb_ptr[3] = h_second[1] = (uint64_t *) second_row_page + 0x1000;
        gpuErrchk(hipHostGetDevicePointer((void**)&d_first1,(void*)h_first[0],0));
        gpuErrchk(hipHostGetDevicePointer((void**)&d_first2,(void*)h_first[1],0));
        gpuErrchk(hipHostGetDevicePointer((void**)&d_second1,(void*)h_second[0],0));
        gpuErrchk(hipHostGetDevicePointer((void**)&d_second2,(void*)h_second[1],0));
        GpuTimer g_timer;
        g_timer.Start();
        *d_sum=0;
        //printf("1dsum is %u \n", *d_sum);
        hipLaunchKernelGGL(( hammer), dim3(BLOCKS),dim3(THREADS), 0, 0, d_first1,d_first2,d_second1,d_second2,d_sum,number_of_reads,device_mem,l2_size,BLOCKS * THREADS);
        gpuErrchk(hipDeviceSynchronize());
        g_timer.Stop();
        //printf("2dsum is %u \n", *d_sum);
        // the kernel always stores a non-zero value; zero means it never ran
        assert(* d_sum != 0);
        time[i] = g_timer.Elapsed();
        if(hammering_time)
          printf("[H] hammering : %f ms\n\n",time[i]);
        ++i;
        // scan the victim row for any byte that lost its 0xFF pattern
        uint64_t number_of_bitflips_in_target = 0;
        for (const uint8_t* target_page : pages_per_row[row_index+1]) {
          for (uint32_t index = 0; index < 0x1000; ++index) {
            if (target_page[index] != 0xFF){
              ++number_of_bitflips_in_target;
            }
          }
        }
        if (number_of_bitflips_in_target) {
          printf("[!] Found %ld flips in row %ld (%lx to %lx) when hammering "
              "%lx and %lx\n", number_of_bitflips_in_target, row_index+1,
              ((row_index+1)*presumed_row_size),
              ((row_index+2)*presumed_row_size)-1,
              GetPageFrameNumber(pagemap, first_row_page)*0x1000,
              GetPageFrameNumber(pagemap, second_row_page)*0x1000);
          total_bitflips += number_of_bitflips_in_target;
        } // if
      } // second row for
      page_time = page_with_row_time.get_diff_ms();
      // average kernel time over the 64 second-row pages just hammered
      for(z = 0 ; z < pages_per_row[row_index+2].size() ; z++){
        average += time[z];
      }
      average /= 64 ;
      if(debug){
        printf("\t %p with row %lu took %f secs \n",first_row_page, row_index +2 ,MS2SEC(page_time));
        printf("%d:[!] hammering a set of pointers (median): %f ms\n",++j,average);
      }
      i = average = page_time = 0;
    } // first_row for
    if(debug)
      printf("\t hammering row %lu took %f ms \n",row_index+1,MS2SEC(row_time.get_diff_ms()));
  } // end of row_index for
}//end
// Maps a large pinned region of physical memory and hammers every DRAM row
// reachable through it, using 256 KB as the presumed row size.
void HammerAllReachableRows(uint64_t number_of_reads) {
  const uint64_t presumed_row_size = 256 * 1024;
  uint64_t mapped_bytes = 0;
  volatile void * mapped_region = NULL;
  SetupMapping(&mapped_bytes, &mapped_region);
  HammerAllReachablePages(presumed_row_size, mapped_region, mapped_bytes, number_of_reads);
}
// Arms the global hammering time limit: after number_of_seconds_to_hammer
// seconds SIGALRM fires and HammeredEnough ends the run.
void prepare(){
  // unbuffered stdout so progress lines appear immediately
  setvbuf(stdout, NULL, _IONBF, 0);
  // the handler must be installed before the alarm can fire
  signal(SIGALRM, HammeredEnough);
  alarm(number_of_seconds_to_hammer);
}
// Selects device 0 and enables host-memory mapping (zero-copy), which the
// hammering kernel relies on to reach host DRAM directly.
// Fixes: API results were unchecked, and an unusable device made the
// process exit with status 0 (success) -- now exits with EXIT_FAILURE.
void cuda_prepare(){
  hipDeviceProp_t prop;
  gpuErrchk(hipGetDeviceProperties(&prop, 0));
  if (!prop.canMapHostMemory) {
    fprintf(stderr, "[!] Device 0 cannot map host memory\n");
    exit(EXIT_FAILURE);
  }
  printf("[!] Compute : %d.%d \n", prop.major, prop.minor);
  gpuErrchk(hipSetDevice(0));
  gpuErrchk(hipSetDeviceFlags(hipDeviceMapHost));
}
// Entry point: parses the command line, verifies we run as root (needed to
// read physical frame numbers from /proc/self/pagemap), sets the alarm and
// the GPU up, then hammers all reachable rows.
// Options: -b blocks, -t threads/block, -d debug, -m pinned-alloc type,
// -n reads per hammer pass, -h print per-pass timing, -f fraction*10 of
// physical RAM to map, -l cache-thrash buffer size in MB.
int main(int argc, char** argv) {
  int opt;
  int tmp=0;
  while ((opt = getopt(argc, argv, "b:t:d:m:n:f:h:l:")) != -1) {
    switch (opt) {
    case 'b':
      BLOCKS = atoi(optarg);
      break;
    case 't':
      THREADS = atoi(optarg);
      break;
    case 'd':
      debug =atoi(optarg);
      break;
    case 'm':
      memory_type_flag = atoi(optarg);
      break;
    case 'n':
      number_of_reads =atoi(optarg);
      break;
    case 'h':
      hammering_time = atoi(optarg);
      break;
    case 'f':
      // -f expects fraction*10, e.g. "-f 5" maps half of physical RAM
      tmp = atoi(optarg);
      fraction_of_physical_memory = (float) tmp/10;
      break;
    case 'l':
      l2_size_inmb = atoi(optarg);
      l2_size = MB2B(l2_size_inmb);
      break;
    default:
      fprintf(stderr, "Usage: %s -b [blocks] -t [threads] -d [debug] [-m memory_type] [-f fraction] [-h hammering_time] \n",argv[0]);
      exit(EXIT_FAILURE);
    }
  }
  // pagemap physical-frame lookups require root
  uint64_t uid = getuid();
  if(uid != 0){
    fprintf(stderr,"Must be in root\n");
    exit(EXIT_FAILURE);
  }
  printf("\n[!] BLOCKS: %d THREADS PER BLOCK : %d \n",BLOCKS,THREADS);
  if(debug){
    printf("\n[!] DEBUG_MODE\n");
  }else{
    printf("\n[!] RUN_MODE\n");
  }
  printf("[!] fraction : %.1f\n", fraction_of_physical_memory);
  printf("[!] reads : %lu\n", number_of_reads);
  prepare();
  cuda_prepare();
  HammerAllReachableRows(number_of_reads);
}
| e9402b12e2e7fe5a1a4476a4ad25f9e46e177a88.cu |
#include "included.h"
#include "thread_indexing.h"
#define MB2B(x) ((x) << 20)
pthread_t posix_thread;
volatile uint64_t * glb_ptr [4];
uint64_t l2_size = 2 << 20; //2 MB
uint64_t l2_size_inmb = 1;
volatile char *device_mem;
// Host helper thread: spins forever, flushing the four aggressor addresses
// (published by the GPU-launch code via glb_ptr) from the CPU caches with
// clflush, so the GPU's zero-copy reads always reach DRAM.  x86-only.
// Never returns; runs until the process exits.
void * thread_func(void * arg){
  pthread_t self_id;
  self_id=pthread_self();
  //printf("\tIm the POSIXthread %u\n",self_id);
  while(1){
    // wait until all four aggressor pointers have been published
    if(glb_ptr[0] != NULL && glb_ptr[1] != NULL && glb_ptr[2] != NULL && glb_ptr[3] != NULL){
      //printf("[T] glb_ptr[0]: %p\nglb_ptr[1]: %p\nglb_ptr[2]: %p\nglb_ptr[3]: %p\n",glb_ptr[0],glb_ptr[1],glb_ptr[2],glb_ptr[3]);
      asm volatile(
          "clflush (%0);\n\t"
          "clflush (%1);\n\t"
          "clflush (%2);\n\t"
          "clflush (%3);\n\t"
          : : "r" (glb_ptr[0]), "r" (glb_ptr[1]), "r" (glb_ptr[2]), "r" (glb_ptr[3]) : "memory");
    }
  }
}
// Allocates a pinned host mapping covering fraction_of_physical_memory of
// physical RAM (zero-copy/mapped when memory_type_flag == 0), then touches
// every 4 KB page so the kernel actually backs it with physical frames
// (required for the later pagemap-based physical-address lookup).
void SetupMapping(uint64_t * mapping_size,volatile void** mapping) {
  *mapping_size = GetPhysicalMemorySize() * fraction_of_physical_memory;
  if(!memory_type_flag){
    printf("[!] Allocating with cudaHostAlloc %lu MB\n",B2MB(*mapping_size));
    gpuErrchk(cudaHostAlloc(mapping,(size_t)*mapping_size,cudaHostAllocMapped));
  }else{
    printf("[!] Allocating with cudaMallocHost %lu MB\n",B2MB(*mapping_size));
    gpuErrchk(cudaMallocHost(mapping,*mapping_size));
  }
  // write to each page to force physical backing
  for (uint64_t index = 0; index < *mapping_size; index += 0x1000) {
    uint64_t* temporary = (uint64_t *)( (uint8_t *)(*mapping) + index);
    temporary[0] = index;
  }
}
// Rowhammer kernel.  Thread 0 repeatedly reads one word in each aggressor
// row (first1/second1 are zero-copy mappings of host memory; a host thread
// concurrently clflushes the same addresses so each read reaches DRAM).
// All other threads sweep the dev_mem buffer to keep L2 busy.  *sum is
// written only so the reads cannot be optimized away; the concurrent
// unsynchronized stores to it are benign for that purpose.
// first2/second2 are currently unused but kept for interface stability.
// Fixes: local_char was read before being initialized (UB).  The original
// branch condition `!thId%2` parses as `(!thId) % 2`, i.e. true only for
// thread 0 -- behavior preserved as `thId == 0`; if every even thread was
// meant to hammer, it should read `thId % 2 == 0` (confirm with authors).
__global__ void hammer(
    volatile uint64_t * first1,
    volatile uint64_t * first2,
    volatile uint64_t * second1,
    volatile uint64_t * second2,
    uint64_t * sum,
    uint64_t number_of_reads,
    volatile char * dev_mem,
    uint64_t l2_size,
    int number_of_threads
    ) {
  uint64_t local_sum = 0;
  int thId = getGlobalIdx_2D_2D();
  int index = thId * l2_size / number_of_threads;
  int i;
  char local_char = 0;  // was uninitialized before first use
  if (thId == 0) {
    // hammer the two aggressor rows
    while (number_of_reads-- > 0)
    {
      local_sum += first1[thId%64];
      local_sum += second1[thId%64];
    }
    *sum = local_sum + 1;
  } else {
    // cache-thrashing sweep over this thread's slice of dev_mem
    for (i = 0; i < l2_size / number_of_threads; ++i) {
      local_char += dev_mem[index + i];
    }
    *sum = local_char + 1;
  }
}
// Groups the mapped pages by presumed DRAM row (physical_address /
// presumed_row_size, via /proc/self/pagemap), then for every triple of
// adjacent rows (r, r+1, r+2) hammers pages of r and r+2 on the GPU while
// checking the victim row r+1 (pre-filled with 0xFF) for bit flips.
// Requires root to read pagemap.  Expects exactly 64 pages per aggressor
// row (256 KB rows).
// NOTE(review): the resize guard uses `>` while pages are stored at
// `presumed_row_index-1` -- the indexing looks off by one; preserved as-is,
// confirm against the pagemap layout intended by the authors.
void HammerAllReachablePages(
    uint64_t presumed_row_size,
    volatile void * memory_mapping,
    uint64_t memory_mapping_size,
    uint64_t number_of_reads)
{
  CpuTimer table_time;
  std::vector<std::vector<uint8_t*> > pages_per_row;
  uint64_t total_bitflips = 0;
  int i=0,j=0,z=0;
  double time[64];
  double page_time=0, average =0;
  uint64_t * d_sum;
  uint64_t * h_first[2], * h_second[2];
  volatile uint64_t * d_first1 = NULL ;
  volatile uint64_t * d_first2 = NULL ;
  volatile uint64_t * d_second1 = NULL;
  volatile uint64_t * d_second2 = NULL;
  pages_per_row.resize(memory_mapping_size / presumed_row_size);
  int pagemap = open("/proc/self/pagemap", O_RDONLY);
  assert(pagemap >= 0);
  //filling the pointers table
  for (uint64_t offset = 0; offset < memory_mapping_size; offset += 0x1000) {
    uint8_t* virtual_address = (uint8_t*)(memory_mapping) + offset;
    uint64_t page_frame_number = GetPageFrameNumber(pagemap, virtual_address);
    uint64_t physical_address = page_frame_number * 0x1000;
    uint64_t presumed_row_index = physical_address / presumed_row_size;
    if (presumed_row_index > pages_per_row.size()) {
      pages_per_row.resize(presumed_row_index);
    }
    pages_per_row[presumed_row_index-1].push_back(virtual_address);
  }
  //done finding the pointers
  printf("[!] pointers for hammering took %f ms\n",table_time.get_diff_ms());
  //device memory for writing the read data
  gpuErrchk(cudaMallocManaged((void**)&d_sum, sizeof(uint64_t)));
  printf("[!] Allocating device memory with cudaMalloc : %zu bytes\n\n", l2_size);
  gpuErrchk(cudaMalloc((void **) &device_mem, l2_size));
  gpuErrchk(cudaMemset((void *) device_mem, 1, l2_size));
  // host thread that keeps flushing the aggressor addresses from CPU caches
  pthread_create(&posix_thread,NULL,&thread_func,NULL);
  for (uint64_t row_index = 0; row_index + 2 < pages_per_row.size(); ++row_index) {
    CpuTimer row_time;
    if ((pages_per_row[row_index].size() != 64) || (pages_per_row[row_index+2].size() != 64)) {
      continue;
    }else if (pages_per_row[row_index+1].size() == 0) {
      printf("[!] Can't hammer row %ld,got no pages from that row\n",row_index+1);
      continue;
    }
    printf("[!] Hammering rows %ld/%ld/%ld of %ld (got %ld/%ld/%ld pages)\n",
        row_index, row_index+1, row_index+2, pages_per_row.size(),
        pages_per_row[row_index].size(), pages_per_row[row_index+1].size(),
        pages_per_row[row_index+2].size());
    for (uint8_t * first_row_page : pages_per_row[row_index]) {
      CpuTimer page_with_row_time;
      j=0;
      // Iterate over all pages we have for the second row.
      for (uint8_t* second_row_page : pages_per_row[row_index+2]) {
        // Set all the target pages to 0xFF.
        for (uint8_t* target_page : pages_per_row[row_index+1]) {
          memset(target_page, 0xFF, 0x1000);
        }
        // publish the aggressor addresses to the clflush thread, then get
        // their device-side (zero-copy) aliases for the kernel
        glb_ptr[0] = h_first[0] = (uint64_t *) first_row_page;
        glb_ptr[1] = h_first[1] = (uint64_t *) first_row_page + 0x1000;
        glb_ptr[2] = h_second[0] = (uint64_t *) second_row_page;
        glb_ptr[3] = h_second[1] = (uint64_t *) second_row_page + 0x1000;
        gpuErrchk(cudaHostGetDevicePointer((void**)&d_first1,(void*)h_first[0],0));
        gpuErrchk(cudaHostGetDevicePointer((void**)&d_first2,(void*)h_first[1],0));
        gpuErrchk(cudaHostGetDevicePointer((void**)&d_second1,(void*)h_second[0],0));
        gpuErrchk(cudaHostGetDevicePointer((void**)&d_second2,(void*)h_second[1],0));
        GpuTimer g_timer;
        g_timer.Start();
        *d_sum=0;
        //printf("1dsum is %u \n", *d_sum);
        hammer<<<BLOCKS,THREADS>>>(d_first1,d_first2,d_second1,d_second2,d_sum,number_of_reads,device_mem,l2_size,BLOCKS * THREADS);
        gpuErrchk(cudaDeviceSynchronize());
        g_timer.Stop();
        //printf("2dsum is %u \n", *d_sum);
        // the kernel always stores a non-zero value; zero means it never ran
        assert(* d_sum != 0);
        time[i] = g_timer.Elapsed();
        if(hammering_time)
          printf("[H] hammering : %f ms\n\n",time[i]);
        ++i;
        // scan the victim row for any byte that lost its 0xFF pattern
        uint64_t number_of_bitflips_in_target = 0;
        for (const uint8_t* target_page : pages_per_row[row_index+1]) {
          for (uint32_t index = 0; index < 0x1000; ++index) {
            if (target_page[index] != 0xFF){
              ++number_of_bitflips_in_target;
            }
          }
        }
        if (number_of_bitflips_in_target) {
          printf("[!] Found %ld flips in row %ld (%lx to %lx) when hammering "
              "%lx and %lx\n", number_of_bitflips_in_target, row_index+1,
              ((row_index+1)*presumed_row_size),
              ((row_index+2)*presumed_row_size)-1,
              GetPageFrameNumber(pagemap, first_row_page)*0x1000,
              GetPageFrameNumber(pagemap, second_row_page)*0x1000);
          total_bitflips += number_of_bitflips_in_target;
        } // if
      } // second row for
      page_time = page_with_row_time.get_diff_ms();
      // average kernel time over the 64 second-row pages just hammered
      for(z = 0 ; z < pages_per_row[row_index+2].size() ; z++){
        average += time[z];
      }
      average /= 64 ;
      if(debug){
        printf("\t %p with row %lu took %f secs \n",first_row_page, row_index +2 ,MS2SEC(page_time));
        printf("%d:[!] hammering a set of pointers (median): %f ms\n",++j,average);
      }
      i = average = page_time = 0;
    } // first_row for
    if(debug)
      printf("\t hammering row %lu took %f ms \n",row_index+1,MS2SEC(row_time.get_diff_ms()));
  } // end of row_index for
}//end
// Maps a large pinned region of physical memory and hammers every DRAM row
// reachable through it, using 256 KB as the presumed row size.
void HammerAllReachableRows(uint64_t number_of_reads) {
  const uint64_t presumed_row_size = 256 * 1024;
  uint64_t mapped_bytes = 0;
  volatile void * mapped_region = NULL;
  SetupMapping(&mapped_bytes, &mapped_region);
  HammerAllReachablePages(presumed_row_size, mapped_region, mapped_bytes, number_of_reads);
}
// Arms the global hammering time limit: after number_of_seconds_to_hammer
// seconds SIGALRM fires and HammeredEnough ends the run.
void prepare(){
  // unbuffered stdout so progress lines appear immediately
  setvbuf(stdout, NULL, _IONBF, 0);
  // the handler must be installed before the alarm can fire
  signal(SIGALRM, HammeredEnough);
  alarm(number_of_seconds_to_hammer);
}
// Selects device 0 and enables host-memory mapping (zero-copy), which the
// hammering kernel relies on to reach host DRAM directly.
// Fixes: API results were unchecked, and an unusable device made the
// process exit with status 0 (success) -- now exits with EXIT_FAILURE.
void cuda_prepare(){
  cudaDeviceProp prop;
  gpuErrchk(cudaGetDeviceProperties(&prop, 0));
  if (!prop.canMapHostMemory) {
    fprintf(stderr, "[!] Device 0 cannot map host memory\n");
    exit(EXIT_FAILURE);
  }
  printf("[!] Compute : %d.%d \n", prop.major, prop.minor);
  gpuErrchk(cudaSetDevice(0));
  gpuErrchk(cudaSetDeviceFlags(cudaDeviceMapHost));
}
// Entry point: parses the command line, verifies we run as root (needed to
// read physical frame numbers from /proc/self/pagemap), sets the alarm and
// the GPU up, then hammers all reachable rows.
// Options: -b blocks, -t threads/block, -d debug, -m pinned-alloc type,
// -n reads per hammer pass, -h print per-pass timing, -f fraction*10 of
// physical RAM to map, -l cache-thrash buffer size in MB.
int main(int argc, char** argv) {
  int opt;
  int tmp=0;
  while ((opt = getopt(argc, argv, "b:t:d:m:n:f:h:l:")) != -1) {
    switch (opt) {
    case 'b':
      BLOCKS = atoi(optarg);
      break;
    case 't':
      THREADS = atoi(optarg);
      break;
    case 'd':
      debug =atoi(optarg);
      break;
    case 'm':
      memory_type_flag = atoi(optarg);
      break;
    case 'n':
      number_of_reads =atoi(optarg);
      break;
    case 'h':
      hammering_time = atoi(optarg);
      break;
    case 'f':
      // -f expects fraction*10, e.g. "-f 5" maps half of physical RAM
      tmp = atoi(optarg);
      fraction_of_physical_memory = (float) tmp/10;
      break;
    case 'l':
      l2_size_inmb = atoi(optarg);
      l2_size = MB2B(l2_size_inmb);
      break;
    default:
      fprintf(stderr, "Usage: %s -b [blocks] -t [threads] -d [debug] [-m memory_type] [-f fraction] [-h hammering_time] \n",argv[0]);
      exit(EXIT_FAILURE);
    }
  }
  // pagemap physical-frame lookups require root
  uint64_t uid = getuid();
  if(uid != 0){
    fprintf(stderr,"Must be in root\n");
    exit(EXIT_FAILURE);
  }
  printf("\n[!] BLOCKS: %d THREADS PER BLOCK : %d \n",BLOCKS,THREADS);
  if(debug){
    printf("\n[!] DEBUG_MODE\n");
  }else{
    printf("\n[!] RUN_MODE\n");
  }
  printf("[!] fraction : %.1f\n", fraction_of_physical_memory);
  printf("[!] reads : %lu\n", number_of_reads);
  prepare();
  cuda_prepare();
  HammerAllReachableRows(number_of_reads);
}
|
7c7b05945d4f25bb864322c9a804f8ac68332b9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Babak Poursartip
// 09/16/2020
// udemy CUDA
// Error: hipError_t
// We cannot use hipError_t to check the launch part of the function.
#include "common.h"
#include <cstdio>
#include <time.h>
// =================================
// cuda error check macro
// Checks a HIP runtime call's result; on failure reports the error string
// with its source location and (by default) terminates with that code.
#define gpuErrchk(ans)                                                         \
  { gpuAssert(ans, __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
                      bool abort = true) {
  if (code == hipSuccess)
    return;
  fprintf(stderr, "GPUassert: %s. File: %s, line: %d. \n",
          hipGetErrorString(code), file, line);
  if (abort)
    exit(code);
}
// =================================
// Element-wise three-way sum on the device: out[i] = a[i] + b[i] + c[i].
// Expects a 1-D grid of 1-D blocks covering at least `size` threads;
// surplus threads return without touching memory.
__global__ void sum_array_gpu(int *a, int *b, int *c, int *out,
                              const int size) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= size)
    return;
  out[idx] = a[idx] + b[idx] + c[idx];
}
// =================================
// CPU reference implementation: out[i] = a[i] + b[i] + c[i] for i in [0, size).
void sum_array_cpu(int *a, int *b, int *c, int *out, const int size) {
  for (int idx = 0; idx < size; ++idx)
    out[idx] = a[idx] + b[idx] + c[idx];
}
// =================================
// Demo driver: sums three random int arrays on CPU and GPU, verifies the
// results agree, and prints transfer/compute timings for both paths.
// NOTE(review): the scanf result is unchecked and a block_size <= 0 would
// break the grid computation -- input is assumed well-formed.
int main() {
  printf(" starts ... \n");
  int size = 1 << 20;
  const int NO_BYTES = size * sizeof(int);
  // int block_size = 64;
  int block_size;
  printf(" Enter block size: ");
  scanf("%d", &block_size);
  printf(" size: %d, block_size: %d \n", size, block_size);
  int *h_a, *h_b, *h_c; // input arrays
  int *h_gpu_output, *h_cpu_output;
  // allocate arrays on the host
  h_a = (int *)malloc(NO_BYTES);
  h_b = (int *)malloc(NO_BYTES);
  h_c = (int *)malloc(NO_BYTES);
  h_gpu_output = (int *)malloc(NO_BYTES);
  h_cpu_output = (int *)malloc(NO_BYTES);
  // initialize arrays on the host with small random bytes (0..255)
  time_t t;
  srand((unsigned)time(&t));
  for (int i = 0; i < size; ++i) {
    h_a[i] = (int)(rand() & 0xFF);
    h_b[i] = (int)(rand() & 0xFF);
    h_c[i] = (int)(rand() & 0xFF);
  }
  // for (int i = 0; i < size; ++i) {
  //   h_b[i] = (int)(rand() & 0xFF);
  // }
  memset(h_gpu_output, 0, NO_BYTES);
  memset(h_cpu_output, 0, NO_BYTES);
  // cpu vector summation (reference result)
  clock_t cpu_start, cpu_end;
  cpu_start = clock();
  sum_array_cpu(h_a, h_b, h_c, h_cpu_output, size);
  cpu_end = clock();
  // device arrays;
  int *d_a, *d_b, *d_c, *d_output;
  gpuErrchk(hipMalloc((void **)&d_a, NO_BYTES));
  gpuErrchk(hipMalloc((void **)&d_b, NO_BYTES));
  gpuErrchk(hipMalloc((void **)&d_c, NO_BYTES));
  gpuErrchk(hipMalloc((void **)&d_output, NO_BYTES));
  clock_t htod_start, htod_end;
  htod_start = clock();
  gpuErrchk(hipMemcpy(d_a, h_a, NO_BYTES, hipMemcpyHostToDevice));
  gpuErrchk(hipMemcpy(d_b, h_b, NO_BYTES, hipMemcpyHostToDevice));
  gpuErrchk(hipMemcpy(d_c, h_c, NO_BYTES, hipMemcpyHostToDevice));
  htod_end = clock();
  dim3 block(block_size);
  // +1 to guarantee that we have more threads than array size
  dim3 grid(size / block.x + 1);
  clock_t gpu_start, gpu_end;
  gpu_start = clock();
  hipLaunchKernelGGL(( sum_array_gpu), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, d_output, size);
  // synchronize so the timing covers kernel execution, not just the launch
  gpuErrchk(hipDeviceSynchronize());
  gpu_end = clock();
  clock_t dtoh_start, dtoh_end;
  dtoh_start = clock();
  gpuErrchk(
      hipMemcpy(h_gpu_output, d_output, NO_BYTES, hipMemcpyDeviceToHost));
  dtoh_end = clock();
  // comparison the results of cpu and gpu
  compare_arrays(h_gpu_output, h_cpu_output, size);
  // printing execution time
  printf(" htod mem transfer time: %4.6f \n",
         (double)((double)(htod_end - htod_start) / CLOCKS_PER_SEC));
  printf(" dtoh mem transfer time: %4.6f \n",
         (double)((double)(dtoh_end - dtoh_start) / CLOCKS_PER_SEC));
  printf(" Sum array GPU execution time: %4.6f \n",
         (double)((double)(gpu_end - gpu_start) / CLOCKS_PER_SEC));
  printf(" Sum array GPU total execution time: %4.6f \n",
         (double)((double)(dtoh_end - htod_start) / CLOCKS_PER_SEC));
  printf(" Sum array CPU execution time: %4.6f \n",
         (double)((double)(cpu_end - cpu_start) / CLOCKS_PER_SEC));
  gpuErrchk(hipFree(d_a));
  gpuErrchk(hipFree(d_b));
  gpuErrchk(hipFree(d_c));
  gpuErrchk(hipFree(d_output));
  free(h_a);
  free(h_b);
  free(h_c);
  free(h_gpu_output);
  free(h_cpu_output);
  hipDeviceReset();
  printf(" finished. \n");
  return 0;
}
| 7c7b05945d4f25bb864322c9a804f8ac68332b9b.cu |
// Babak Poursartip
// 09/16/2020
// udemy CUDA
// Error: cudaError
// We cannot use cudaError to check the launch part of the function.
#include "common.h"
#include <cstdio>
#include <time.h>
// =================================
// cuda error check macro
// Checks a CUDA runtime call's result; on failure reports the error string
// with its source location and (by default) terminates with that code.
#define gpuErrchk(ans)                                                         \
  { gpuAssert(ans, __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
                      bool abort = true) {
  if (code == cudaSuccess)
    return;
  fprintf(stderr, "GPUassert: %s. File: %s, line: %d. \n",
          cudaGetErrorString(code), file, line);
  if (abort)
    exit(code);
}
// =================================
// Element-wise three-way sum on the device: out[i] = a[i] + b[i] + c[i].
// Expects a 1-D grid of 1-D blocks covering at least `size` threads;
// surplus threads return without touching memory.
__global__ void sum_array_gpu(int *a, int *b, int *c, int *out,
                              const int size) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= size)
    return;
  out[idx] = a[idx] + b[idx] + c[idx];
}
// =================================
// CPU reference implementation: out[i] = a[i] + b[i] + c[i] for i in [0, size).
void sum_array_cpu(int *a, int *b, int *c, int *out, const int size) {
  for (int idx = 0; idx < size; ++idx)
    out[idx] = a[idx] + b[idx] + c[idx];
}
// =================================
// Driver: fills three 1M-element int arrays with random bytes, sums them on
// the CPU (reference) and on the GPU, cross-checks the results with
// compare_arrays() and reports transfer and compute timings.
// Returns 0 on success, 1 on invalid user input.
int main() {
  printf(" starts ... \n");
  int size = 1 << 20;
  const int NO_BYTES = size * sizeof(int);
  // int block_size = 64;
  int block_size;
  printf(" Enter block size: ");
  // Validate the interactive input: the original ignored scanf's result, so
  // a non-numeric or non-positive entry produced an invalid launch
  // configuration (and a division by zero when sizing the grid).
  if (scanf("%d", &block_size) != 1 || block_size <= 0) {
    fprintf(stderr, " invalid block size \n");
    return 1;
  }
  printf(" size: %d, block_size: %d \n", size, block_size);
  int *h_a, *h_b, *h_c; // input arrays
  int *h_gpu_output, *h_cpu_output;
  // allocate arrays on the host
  h_a = (int *)malloc(NO_BYTES);
  h_b = (int *)malloc(NO_BYTES);
  h_c = (int *)malloc(NO_BYTES);
  h_gpu_output = (int *)malloc(NO_BYTES);
  h_cpu_output = (int *)malloc(NO_BYTES);
  // initialize arrays on the host with small random values (0..255)
  time_t t;
  srand((unsigned)time(&t));
  for (int i = 0; i < size; ++i) {
    h_a[i] = (int)(rand() & 0xFF);
    h_b[i] = (int)(rand() & 0xFF);
    h_c[i] = (int)(rand() & 0xFF);
  }
  memset(h_gpu_output, 0, NO_BYTES);
  memset(h_cpu_output, 0, NO_BYTES);
  // cpu vector summation (reference result)
  clock_t cpu_start, cpu_end;
  cpu_start = clock();
  sum_array_cpu(h_a, h_b, h_c, h_cpu_output, size);
  cpu_end = clock();
  // device arrays
  int *d_a, *d_b, *d_c, *d_output;
  gpuErrchk(cudaMalloc((void **)&d_a, NO_BYTES));
  gpuErrchk(cudaMalloc((void **)&d_b, NO_BYTES));
  gpuErrchk(cudaMalloc((void **)&d_c, NO_BYTES));
  gpuErrchk(cudaMalloc((void **)&d_output, NO_BYTES));
  clock_t htod_start, htod_end;
  htod_start = clock();
  gpuErrchk(cudaMemcpy(d_a, h_a, NO_BYTES, cudaMemcpyHostToDevice));
  gpuErrchk(cudaMemcpy(d_b, h_b, NO_BYTES, cudaMemcpyHostToDevice));
  gpuErrchk(cudaMemcpy(d_c, h_c, NO_BYTES, cudaMemcpyHostToDevice));
  htod_end = clock();
  dim3 block(block_size);
  // ceil-division: the smallest grid that covers `size` elements. The old
  // `size / block.x + 1` added a whole surplus block whenever size was an
  // exact multiple of block.x (harmless due to the kernel's bounds check,
  // but wasteful and non-idiomatic).
  dim3 grid((size + block.x - 1) / block.x);
  clock_t gpu_start, gpu_end;
  gpu_start = clock();
  sum_array_gpu<<<grid, block>>>(d_a, d_b, d_c, d_output, size);
  // Kernel launches return no status: surface launch-configuration errors
  // explicitly, then wait and surface any asynchronous execution error.
  gpuErrchk(cudaGetLastError());
  gpuErrchk(cudaDeviceSynchronize());
  gpu_end = clock();
  clock_t dtoh_start, dtoh_end;
  dtoh_start = clock();
  gpuErrchk(
      cudaMemcpy(h_gpu_output, d_output, NO_BYTES, cudaMemcpyDeviceToHost));
  dtoh_end = clock();
  // comparison the results of cpu and gpu
  compare_arrays(h_gpu_output, h_cpu_output, size);
  // printing execution time
  printf(" htod mem transfer time: %4.6f \n",
         (double)((double)(htod_end - htod_start) / CLOCKS_PER_SEC));
  printf(" dtoh mem transfer time: %4.6f \n",
         (double)((double)(dtoh_end - dtoh_start) / CLOCKS_PER_SEC));
  printf(" Sum array GPU execution time: %4.6f \n",
         (double)((double)(gpu_end - gpu_start) / CLOCKS_PER_SEC));
  printf(" Sum array GPU total execution time: %4.6f \n",
         (double)((double)(dtoh_end - htod_start) / CLOCKS_PER_SEC));
  printf(" Sum array CPU execution time: %4.6f \n",
         (double)((double)(cpu_end - cpu_start) / CLOCKS_PER_SEC));
  gpuErrchk(cudaFree(d_a));
  gpuErrchk(cudaFree(d_b));
  gpuErrchk(cudaFree(d_c));
  gpuErrchk(cudaFree(d_output));
  free(h_a);
  free(h_b);
  free(h_c);
  free(h_gpu_output);
  free(h_cpu_output);
  cudaDeviceReset();
  printf(" finished. \n");
  return 0;
}
|
c51723770a868ea9e1e55e9cf891db05e6dc1755.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "manager_edge_model_pipe_sequential_cuda.cuh"
#include "cutil_inline.h"
#include "thrust/device_vector.h"
#include "thrust/host_vector.h"
#include "gas.h"
#include "edge.h"
#include "passport.h"
#include "passport_pipe.h"
#include "model_pipe_sequential_functions_cuda.cuh"
#include "edge_model_pipe_sequential_cuda.cuh"
__global__
void FindQResultCudaKernel(
int size,
double* den_sc, double* co2, double* n2,
double2* p_and_t, double* p_target,
double* length,
double2* d_in_out,
double4* hydr_rough_env_exch,
double* q_result
)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
while(index < size)
{
//
//
double den_sc_ = den_sc[index];
double co2_ = co2[index];
double n2_ = n2[index];
//
double2 p_and_t_ = p_and_t[index];
//
double length_ = length[index];
double2 d_in_out_ = d_in_out[index];
double4 hydr_rough_env_exch_ = hydr_rough_env_exch[index];
double p_target_ = p_target[index];
//
double r_sc_ = FindRStandartConditionsCuda(den_sc_);
double t_pc_ = FindTPseudoCriticalCuda(den_sc_, co2_, n2_);
double p_pc_ = FindPPseudoCriticalCuda(den_sc_, co2_, n2_);
double q_out = 0;
double p_out = 0;
double t_out = 0;
FindSequentialQCudaRefactored(
p_target_,
p_and_t_.x, p_and_t_.y, //
p_pc_, t_pc_, r_sc_, den_sc_,
d_in_out_.x, d_in_out_.y, hydr_rough_env_exch_.y, hydr_rough_env_exch_.x, // -
hydr_rough_env_exch_.z, hydr_rough_env_exch_.w, // - ( )
length_/10, 10, // -
&p_out, &t_out,
&q_out); // out - ,
q_result[index] = q_out;
index += gridDim.x * blockDim.x;
} // end while (index < size)
}
ManagerEdgeModelPipeSequentialCuda::ManagerEdgeModelPipeSequentialCuda()
{
// 1. - GPU
cutilSafeCall(hipGetDeviceCount(&gpu_count_));
if(gpu_count_ > kMaxGpuCount_)
{
gpu_count_ = kMaxGpuCount_;
}
max_index_ = 0;
finish_adding_edges_ = false;
// .
edges_.resize(max_count_of_edges);
// ToDo : - GPU >= 3, -
// ( SDK - SimpleMultiGPU)
for(int i = 0; i < gpu_count_; i++)
{
cutilSafeCall( hipSetDevice(i) );
cutilSafeCall( hipStreamCreate(&(thread_data_[i].stream)) );
hipMalloc((void**)&(thread_data_[i].length_dev_), max_count_of_edges * sizeof(double) / gpu_count_);
hipMalloc((void**)&thread_data_[i].d_in_out_dev_, max_count_of_edges * sizeof(double2) / gpu_count_);
hipMalloc((void**)&thread_data_[i].hydr_rough_env_exch_dev_, max_count_of_edges * sizeof(double4) / gpu_count_);
hipMalloc((void**)&thread_data_[i].p_in_and_t_in_dev_, max_count_of_edges * sizeof(double2) / gpu_count_);
hipMalloc((void**)&thread_data_[i].p_target_dev_, max_count_of_edges * sizeof(double) / gpu_count_);
hipMalloc((void**)&thread_data_[i].q_result_dev_, max_count_of_edges * sizeof(double) / gpu_count_);
hipMalloc((void**)&thread_data_[i].den_sc_dev_, max_count_of_edges * sizeof(double) / gpu_count_);
hipMalloc((void**)&thread_data_[i].co2_dev_, max_count_of_edges * sizeof(double) / gpu_count_);
hipMalloc((void**)&thread_data_[i].n2_dev_, max_count_of_edges * sizeof(double) / gpu_count_);
//
cutilSafeCall(hipHostMalloc((void**)&(thread_data_[i].length_), max_count_of_edges * sizeof(double) / gpu_count_) );
cutilSafeCall(hipHostMalloc((void**)&thread_data_[i].d_in_out_, max_count_of_edges * sizeof(double2) / gpu_count_) );
cutilSafeCall(hipHostMalloc((void**)&thread_data_[i].hydr_rough_env_exch_, max_count_of_edges * sizeof(double4) / gpu_count_) );
//
cutilSafeCall(hipHostMalloc((void**)&(thread_data_[i].p_in_and_t_in_), max_count_of_edges * sizeof(double2) / gpu_count_) );
cutilSafeCall(hipHostMalloc((void**)&thread_data_[i].p_target_, max_count_of_edges * sizeof(double) / gpu_count_) );
cutilSafeCall(hipHostMalloc((void**)&thread_data_[i].q_result_, max_count_of_edges * sizeof(double) / gpu_count_) );
cutilSafeCall(hipHostMalloc((void**)&thread_data_[i].den_sc_, max_count_of_edges * sizeof(double) / gpu_count_) );
cutilSafeCall(hipHostMalloc((void**)&thread_data_[i].co2_, max_count_of_edges * sizeof(double) / gpu_count_) );
cutilSafeCall(hipHostMalloc((void**)&thread_data_[i].n2_, max_count_of_edges * sizeof(double) / gpu_count_) );
}
}
// Releases all per-GPU resources: device buffers, pinned host buffers and
// the stream. Each stream is synchronized first so no in-flight copy or
// kernel still touches the memory being freed.
ManagerEdgeModelPipeSequentialCuda::~ManagerEdgeModelPipeSequentialCuda()
{
  for(int i = 0; i < gpu_count_; i++)
  {
    cutilSafeCall( hipSetDevice(i) );
    //Wait for all operations to finish
    hipStreamSynchronize(thread_data_[i].stream);
    hipFree(thread_data_[i].length_dev_);
    hipFree(thread_data_[i].d_in_out_dev_);
    hipFree(thread_data_[i].hydr_rough_env_exch_dev_);
    hipFree(thread_data_[i].p_in_and_t_in_dev_);
    hipFree(thread_data_[i].p_target_dev_);
    hipFree(thread_data_[i].q_result_dev_);
    hipFree(thread_data_[i].den_sc_dev_);
    hipFree(thread_data_[i].co2_dev_);
    hipFree(thread_data_[i].n2_dev_);
    // BUG FIX: hipHostFree takes the pinned pointer itself, not the address
    // of the pointer variable. The old `(void**)&member` form passed the
    // address of the struct field (which converts implicitly to void*),
    // freeing a non-pinned address and leaking every hipHostMalloc
    // allocation made in the constructor.
    // Pipe passport parameters
    cutilSafeCall(hipHostFree(thread_data_[i].length_) );
    cutilSafeCall(hipHostFree(thread_data_[i].d_in_out_) );
    cutilSafeCall(hipHostFree(thread_data_[i].hydr_rough_env_exch_) );
    // Operating parameters
    cutilSafeCall(hipHostFree(thread_data_[i].p_in_and_t_in_) );
    cutilSafeCall(hipHostFree(thread_data_[i].p_target_) );
    cutilSafeCall(hipHostFree(thread_data_[i].q_result_) );
    cutilSafeCall(hipHostFree(thread_data_[i].den_sc_) );
    cutilSafeCall(hipHostFree(thread_data_[i].co2_) );
    cutilSafeCall(hipHostFree(thread_data_[i].n2_) );
    cutilSafeCall( hipStreamDestroy(thread_data_[i].stream) );
  }
}
Edge* ManagerEdgeModelPipeSequentialCuda::CreateEdge(const Passport* passport)
{
PassportPipe pass = *(dynamic_cast<const PassportPipe*>(passport));
int i = 0;
int index = 0;
if(max_index_ < max_count_of_edges / gpu_count_)
{
i = 0;
index = max_index_;
}
else
{
i = 1;
index = max_index_ - (max_count_of_edges / gpu_count_);
}
//cutilSafeCall( hipSetDevice(i) );
thread_data_[i].length_[index] = pass.length_;
double2 d_in_out_temp;
d_in_out_temp.x = pass.d_inner_;
d_in_out_temp.y = pass.d_outer_;
thread_data_[i].d_in_out_[index] = d_in_out_temp;
double4 hydr_rough_env_exch_temp;
hydr_rough_env_exch_temp.x = pass.hydraulic_efficiency_coeff_;
hydr_rough_env_exch_temp.y = pass.roughness_coeff_;
hydr_rough_env_exch_temp.z = pass.t_env_;
hydr_rough_env_exch_temp.w = pass.heat_exchange_coeff_;
thread_data_[i].hydr_rough_env_exch_[index] = hydr_rough_env_exch_temp;
EdgeModelPipeSequentialCuda edge(max_index_, this);
edges_[max_index_] = edge;
++max_index_;
return &(edges_[max_index_ - 1]);
}
void ManagerEdgeModelPipeSequentialCuda::set_gas_in(const Gas* gas, int index)
{
int i = 0;
if(index < max_count_of_edges / gpu_count_)
{
i = 0;
}
else
{
i = 1;
index -= (max_count_of_edges / gpu_count_);
}
// ToDo: set_gas_out .
//cutilSafeCall( hipSetDevice(i) );
thread_data_[i].den_sc_[index] = gas->composition.density_std_cond;
thread_data_[i].co2_[index] = gas->composition.co2;
thread_data_[i].n2_[index] = gas->composition.n2;
double2 p_in_and_t_in_temp;
p_in_and_t_in_temp.x = gas->work_parameters.p;
p_in_and_t_in_temp.y = gas->work_parameters.t;
thread_data_[i].p_in_and_t_in_[index] = p_in_and_t_in_temp;
}
void ManagerEdgeModelPipeSequentialCuda::set_gas_out(const Gas* gas, int index)
{
int i = 0;
if(index < max_count_of_edges / gpu_count_)
{
i = 0;
}
else
{
i = 1;
index -= (max_count_of_edges / gpu_count_);
}
// ToDo: set_gas_out .
//cutilSafeCall( hipSetDevice(i) );
thread_data_[i].p_target_[index] = gas->work_parameters.p;
}
void ManagerEdgeModelPipeSequentialCuda::FinishAddingEdges()
{
// -
// - GPU
// ToDo: thread_data , .
// , GPU.
for(int i = 0; i < gpu_count_ ; i++)
{
cutilSafeCall( hipSetDevice(i) );
cutilSafeCall(hipMemcpyAsync(thread_data_[i].length_dev_, thread_data_[i].length_, (sizeof(double) * max_count_of_edges) / gpu_count_, hipMemcpyHostToDevice, thread_data_[i].stream) );
cutilSafeCall(hipMemcpyAsync(thread_data_[i].d_in_out_dev_, thread_data_[i].d_in_out_, sizeof(double2) * max_count_of_edges / gpu_count_, hipMemcpyHostToDevice, thread_data_[i].stream) );
cutilSafeCall(hipMemcpyAsync(thread_data_[i].hydr_rough_env_exch_dev_,thread_data_[i].hydr_rough_env_exch_,sizeof(double4) * max_count_of_edges / gpu_count_, hipMemcpyHostToDevice, thread_data_[i].stream) );
}
}
void ManagerEdgeModelPipeSequentialCuda::CountAll()
{
if(finish_adding_edges_ == false)
{
FinishAddingEdges();
finish_adding_edges_ = true;
}
// device,
// FinishAddingEdges.
// ( ! )
for(int i = 0; i < gpu_count_; i++)
{
cutilSafeCall( hipSetDevice(i) );
cutilSafeCall(hipMemcpyAsync(thread_data_[i].p_in_and_t_in_dev_, thread_data_[i].p_in_and_t_in_, sizeof(double2) * max_count_of_edges / gpu_count_, hipMemcpyHostToDevice, thread_data_[i].stream) );
cutilSafeCall(hipMemcpyAsync(thread_data_[i].p_target_dev_, thread_data_[i].p_target_, sizeof(double) * max_count_of_edges / gpu_count_, hipMemcpyHostToDevice, thread_data_[i].stream) );
cutilSafeCall(hipMemcpyAsync(thread_data_[i].den_sc_dev_, thread_data_[i].den_sc_, sizeof(double) *max_count_of_edges / gpu_count_, hipMemcpyHostToDevice, thread_data_[i].stream) );
cutilSafeCall(hipMemcpyAsync(thread_data_[i].co2_dev_, thread_data_[i].co2_, sizeof(double) * max_count_of_edges / gpu_count_, hipMemcpyHostToDevice, thread_data_[i].stream) );
cutilSafeCall(hipMemcpyAsync(thread_data_[i].n2_dev_, thread_data_[i].n2_, sizeof(double) * max_count_of_edges / gpu_count_, hipMemcpyHostToDevice, thread_data_[i].stream) );
// device
hipLaunchKernelGGL(( FindQResultCudaKernel), dim3(512), dim3(64), 0, thread_data_[i].stream,
max_index_ / gpu_count_,
thread_data_[i].den_sc_dev_, thread_data_[i].co2_dev_, thread_data_[i].n2_dev_,
thread_data_[i].p_in_and_t_in_dev_, thread_data_[i].p_target_dev_,
thread_data_[i].length_dev_,
thread_data_[i].d_in_out_dev_,
thread_data_[i].hydr_rough_env_exch_dev_,
thread_data_[i].q_result_dev_);
// q host
cutilSafeCall(hipMemcpyAsync(thread_data_[i].q_result_, thread_data_[i].q_result_dev_, sizeof(double) * max_count_of_edges / gpu_count_, hipMemcpyDeviceToHost, thread_data_[i].stream) );
}
for(int i = 0; i < gpu_count_; i++)
{
cutilSafeCall( hipSetDevice(i) );
hipStreamSynchronize(thread_data_[i].stream);
}
}
// Returns the flow rate q computed for edge `index`, reading the host-side
// result buffer of the device partition that owns this edge.
double ManagerEdgeModelPipeSequentialCuda::q(int index)
{
  const int partition_size = max_count_of_edges / gpu_count_;
  const bool second_partition = (index >= partition_size);
  const int device_slot = second_partition ? 1 : 0;
  const int local_index = second_partition ? (index - partition_size) : index;
  return thread_data_[device_slot].q_result_[local_index];
}
#include "cutil_inline.h"
#include "thrust/device_vector.h"
#include "thrust/host_vector.h"
#include "gas.h"
#include "edge.h"
#include "passport.h"
#include "passport_pipe.h"
#include "model_pipe_sequential_functions_cuda.cuh"
#include "edge_model_pipe_sequential_cuda.cuh"
// One-thread-per-edge kernel: for each of `size` pipe edges, computes the
// flow rate q from the edge's gas composition, inlet pressure/temperature,
// target outlet pressure and pipe passport data, writing it into q_result.
// Uses a grid-stride loop (index += gridDim.x * blockDim.x), so any 1D
// launch configuration covers all edges.
__global__
void FindQResultCudaKernel(
    int size,
    double* den_sc, double* co2, double* n2,
    double2* p_and_t, double* p_target,
    double* length,
    double2* d_in_out,
    double4* hydr_rough_env_exch,
    double* q_result
    )
{
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  while(index < size)
  {
    // Load the per-edge inputs
    // Gas composition
    double den_sc_ = den_sc[index];
    double co2_ = co2[index];
    double n2_ = n2[index];
    // Inlet pressure and temperature
    double2 p_and_t_ = p_and_t[index];
    // Pipe passport (static) parameters
    double length_ = length[index];
    double2 d_in_out_ = d_in_out[index];
    double4 hydr_rough_env_exch_ = hydr_rough_env_exch[index];
    double p_target_ = p_target[index];
    // Compute the basic gas-flow properties
    double r_sc_ = FindRStandartConditionsCuda(den_sc_);
    double t_pc_ = FindTPseudoCriticalCuda(den_sc_, co2_, n2_);
    double p_pc_ = FindPPseudoCriticalCuda(den_sc_, co2_, n2_);
    double q_out = 0;
    double p_out = 0;
    double t_out = 0;
    FindSequentialQCudaRefactored(
        p_target_,
        p_and_t_.x, p_and_t_.y, // inlet operating parameters of the gas flow
        p_pc_, t_pc_, r_sc_, den_sc_,
        d_in_out_.x, d_in_out_.y, hydr_rough_env_exch_.y, hydr_rough_env_exch_.x, // pipe properties
        hydr_rough_env_exch_.z, hydr_rough_env_exch_.w, // environment properties (also part of the pipe passport)
        length_/10, 10, // segment length and number of segments
        &p_out, &t_out,
        &q_out); // out-parameters: values at the outlet
    q_result[index] = q_out;
    index += gridDim.x * blockDim.x;
  } // end while (index < size)
}
// Queries the available GPUs (capped at kMaxGpuCount_) and, per device,
// creates a stream and allocates the device buffers plus their pinned
// host-side staging buffers, each sized for an equal share
// (max_count_of_edges / gpu_count_) of the edge capacity.
// NOTE(review): the cudaMalloc calls below are not wrapped in
// cutilSafeCall, unlike the pinned host allocations — presumably an
// oversight; confirm and wrap for consistent error reporting.
ManagerEdgeModelPipeSequentialCuda::ManagerEdgeModelPipeSequentialCuda()
{
  // 1. Query how many GPUs are available in the system
  cutilSafeCall(cudaGetDeviceCount(&gpu_count_));
  if(gpu_count_ > kMaxGpuCount_)
  {
    gpu_count_ = kMaxGpuCount_;
  }
  max_index_ = 0;
  finish_adding_edges_ = false;
  // Reserve room in the vectors.
  edges_.resize(max_count_of_edges);
  // ToDo: for an odd GPU count >= 3 we must work out how to split the data
  // correctly (see the SimpleMultiGPU sample in the SDK)
  for(int i = 0; i < gpu_count_; i++)
  {
    cutilSafeCall( cudaSetDevice(i) );
    cutilSafeCall( cudaStreamCreate(&(thread_data_[i].stream)) );
    cudaMalloc((void**)&(thread_data_[i].length_dev_), max_count_of_edges * sizeof(double) / gpu_count_);
    cudaMalloc((void**)&thread_data_[i].d_in_out_dev_, max_count_of_edges * sizeof(double2) / gpu_count_);
    cudaMalloc((void**)&thread_data_[i].hydr_rough_env_exch_dev_, max_count_of_edges * sizeof(double4) / gpu_count_);
    cudaMalloc((void**)&thread_data_[i].p_in_and_t_in_dev_, max_count_of_edges * sizeof(double2) / gpu_count_);
    cudaMalloc((void**)&thread_data_[i].p_target_dev_, max_count_of_edges * sizeof(double) / gpu_count_);
    cudaMalloc((void**)&thread_data_[i].q_result_dev_, max_count_of_edges * sizeof(double) / gpu_count_);
    cudaMalloc((void**)&thread_data_[i].den_sc_dev_, max_count_of_edges * sizeof(double) / gpu_count_);
    cudaMalloc((void**)&thread_data_[i].co2_dev_, max_count_of_edges * sizeof(double) / gpu_count_);
    cudaMalloc((void**)&thread_data_[i].n2_dev_, max_count_of_edges * sizeof(double) / gpu_count_);
    // Pipe passport (static) parameters — pinned for fast async copies
    cutilSafeCall(cudaMallocHost((void**)&(thread_data_[i].length_), max_count_of_edges * sizeof(double) / gpu_count_) );
    cutilSafeCall(cudaMallocHost((void**)&thread_data_[i].d_in_out_, max_count_of_edges * sizeof(double2) / gpu_count_) );
    cutilSafeCall(cudaMallocHost((void**)&thread_data_[i].hydr_rough_env_exch_, max_count_of_edges * sizeof(double4) / gpu_count_) );
    // Operating parameters — pinned for fast async copies
    cutilSafeCall(cudaMallocHost((void**)&(thread_data_[i].p_in_and_t_in_), max_count_of_edges * sizeof(double2) / gpu_count_) );
    cutilSafeCall(cudaMallocHost((void**)&thread_data_[i].p_target_, max_count_of_edges * sizeof(double) / gpu_count_) );
    cutilSafeCall(cudaMallocHost((void**)&thread_data_[i].q_result_, max_count_of_edges * sizeof(double) / gpu_count_) );
    cutilSafeCall(cudaMallocHost((void**)&thread_data_[i].den_sc_, max_count_of_edges * sizeof(double) / gpu_count_) );
    cutilSafeCall(cudaMallocHost((void**)&thread_data_[i].co2_, max_count_of_edges * sizeof(double) / gpu_count_) );
    cutilSafeCall(cudaMallocHost((void**)&thread_data_[i].n2_, max_count_of_edges * sizeof(double) / gpu_count_) );
  }
}
// Releases all per-GPU resources: device buffers, pinned host buffers and
// the stream. Each stream is synchronized first so no in-flight copy or
// kernel still touches the memory being freed.
ManagerEdgeModelPipeSequentialCuda::~ManagerEdgeModelPipeSequentialCuda()
{
  for(int i = 0; i < gpu_count_; i++)
  {
    cutilSafeCall( cudaSetDevice(i) );
    //Wait for all operations to finish
    cudaStreamSynchronize(thread_data_[i].stream);
    cudaFree(thread_data_[i].length_dev_);
    cudaFree(thread_data_[i].d_in_out_dev_);
    cudaFree(thread_data_[i].hydr_rough_env_exch_dev_);
    cudaFree(thread_data_[i].p_in_and_t_in_dev_);
    cudaFree(thread_data_[i].p_target_dev_);
    cudaFree(thread_data_[i].q_result_dev_);
    cudaFree(thread_data_[i].den_sc_dev_);
    cudaFree(thread_data_[i].co2_dev_);
    cudaFree(thread_data_[i].n2_dev_);
    // BUG FIX: cudaFreeHost takes the pinned pointer itself, not the
    // address of the pointer variable. The old `(void**)&member` form
    // passed the address of the struct field (which converts implicitly to
    // void*), freeing a non-pinned address and leaking every cudaMallocHost
    // allocation made in the constructor.
    // Pipe passport parameters
    cutilSafeCall(cudaFreeHost(thread_data_[i].length_) );
    cutilSafeCall(cudaFreeHost(thread_data_[i].d_in_out_) );
    cutilSafeCall(cudaFreeHost(thread_data_[i].hydr_rough_env_exch_) );
    // Operating parameters
    cutilSafeCall(cudaFreeHost(thread_data_[i].p_in_and_t_in_) );
    cutilSafeCall(cudaFreeHost(thread_data_[i].p_target_) );
    cutilSafeCall(cudaFreeHost(thread_data_[i].q_result_) );
    cutilSafeCall(cudaFreeHost(thread_data_[i].den_sc_) );
    cutilSafeCall(cudaFreeHost(thread_data_[i].co2_) );
    cutilSafeCall(cudaFreeHost(thread_data_[i].n2_) );
    cutilSafeCall( cudaStreamDestroy(thread_data_[i].stream) );
  }
}
// Registers a new pipe edge from its passport: writes the static pipe
// parameters into the pinned staging buffers of the device partition that
// will own the edge, then constructs the edge object and returns a pointer
// to it (stable because edges_ was resized once in the constructor).
// NOTE(review): the split below hardcodes partitions 0 and 1, i.e. it
// assumes gpu_count_ <= 2; there is also no guard against max_index_
// reaching max_count_of_edges — confirm callers respect the capacity.
Edge* ManagerEdgeModelPipeSequentialCuda::CreateEdge(const Passport* passport)
{
  PassportPipe pass = *(dynamic_cast<const PassportPipe*>(passport));
  int i = 0;
  int index = 0;
  if(max_index_ < max_count_of_edges / gpu_count_)
  {
    i = 0;
    index = max_index_;
  }
  else
  {
    i = 1;
    index = max_index_ - (max_count_of_edges / gpu_count_);
  }
  //cutilSafeCall( cudaSetDevice(i) );
  thread_data_[i].length_[index] = pass.length_;
  // Pack inner/outer diameter into one double2 to match the device layout.
  double2 d_in_out_temp;
  d_in_out_temp.x = pass.d_inner_;
  d_in_out_temp.y = pass.d_outer_;
  thread_data_[i].d_in_out_[index] = d_in_out_temp;
  // x = hydraulic efficiency, y = roughness, z = ambient temperature,
  // w = heat exchange coefficient (layout consumed by the kernel).
  double4 hydr_rough_env_exch_temp;
  hydr_rough_env_exch_temp.x = pass.hydraulic_efficiency_coeff_;
  hydr_rough_env_exch_temp.y = pass.roughness_coeff_;
  hydr_rough_env_exch_temp.z = pass.t_env_;
  hydr_rough_env_exch_temp.w = pass.heat_exchange_coeff_;
  thread_data_[i].hydr_rough_env_exch_[index] = hydr_rough_env_exch_temp;
  EdgeModelPipeSequentialCuda edge(max_index_, this);
  edges_[max_index_] = edge;
  ++max_index_;
  return &(edges_[max_index_ - 1]);
}
// Records the inlet gas state for edge `index`: composition (standard-
// condition density, CO2 and N2 fractions) and inlet working pressure and
// temperature, written into the owning partition's pinned staging buffers.
void ManagerEdgeModelPipeSequentialCuda::set_gas_in(const Gas* gas, int index)
{
  // Map the global edge index to (partition i, local index); assumes the
  // same two-way split as CreateEdge.
  int i = 0;
  if(index < max_count_of_edges / gpu_count_)
  {
    i = 0;
  }
  else
  {
    i = 1;
    index -= (max_count_of_edges / gpu_count_);
  }
  // ToDo: here and in set_gas_out the flow direction must be accounted for.
  //cutilSafeCall( cudaSetDevice(i) );
  thread_data_[i].den_sc_[index] = gas->composition.density_std_cond;
  thread_data_[i].co2_[index] = gas->composition.co2;
  thread_data_[i].n2_[index] = gas->composition.n2;
  double2 p_in_and_t_in_temp;
  p_in_and_t_in_temp.x = gas->work_parameters.p;
  p_in_and_t_in_temp.y = gas->work_parameters.t;
  thread_data_[i].p_in_and_t_in_[index] = p_in_and_t_in_temp;
}
// Records the target outlet pressure for edge `index` in the owning
// partition's pinned staging buffer.
void ManagerEdgeModelPipeSequentialCuda::set_gas_out(const Gas* gas, int index)
{
  // Map the global edge index to (partition i, local index); assumes the
  // same two-way split as CreateEdge.
  int i = 0;
  if(index < max_count_of_edges / gpu_count_)
  {
    i = 0;
  }
  else
  {
    i = 1;
    index -= (max_count_of_edges / gpu_count_);
  }
  // ToDo: here and in set_gas_in the flow direction must be accounted for.
  //cutilSafeCall( cudaSetDevice(i) );
  thread_data_[i].p_target_[index] = gas->work_parameters.p;
}
// Ships the collected pipe passport data to every GPU. The copies are
// asynchronous (the staging buffers are pinned) and are not awaited here;
// CountAll enqueues its dependent work on the same streams.
void ManagerEdgeModelPipeSequentialCuda::FinishAddingEdges()
{
  // Once all edges have been added, the complete pipe passport data has
  // been collected and can be sent to the GPU.
  // ToDo: thread_data should carry the amount of memory each worker
  // processes, and that chunk must be chosen carefully for an odd number
  // of GPUs.
  for(int i = 0; i < gpu_count_ ; i++)
  {
    cutilSafeCall( cudaSetDevice(i) );
    cutilSafeCall(cudaMemcpyAsync(thread_data_[i].length_dev_, thread_data_[i].length_, (sizeof(double) * max_count_of_edges) / gpu_count_, cudaMemcpyHostToDevice, thread_data_[i].stream) );
    cutilSafeCall(cudaMemcpyAsync(thread_data_[i].d_in_out_dev_, thread_data_[i].d_in_out_, sizeof(double2) * max_count_of_edges / gpu_count_, cudaMemcpyHostToDevice, thread_data_[i].stream) );
    cutilSafeCall(cudaMemcpyAsync(thread_data_[i].hydr_rough_env_exch_dev_,thread_data_[i].hydr_rough_env_exch_,sizeof(double4) * max_count_of_edges / gpu_count_, cudaMemcpyHostToDevice, thread_data_[i].stream) );
  }
}
// Runs the flow-rate computation for the registered edges on every GPU:
// uploads the operating parameters, launches FindQResultCudaKernel on each
// partition's stream, copies the q results back, then synchronizes all
// streams so the results are safe to read via q().
void ManagerEdgeModelPipeSequentialCuda::CountAll()
{
  // Lazily flush the passport data to the devices on first use.
  if(finish_adding_edges_ == false)
  {
    FinishAddingEdges();
    finish_adding_edges_ = true;
  }
  // Copy the collected operating parameters to the device; the passport
  // data must already have been sent there by FinishAddingEdges.
  // (Passport data does not need to be copied every time — once is enough!)
  for(int i = 0; i < gpu_count_; i++)
  {
    cutilSafeCall( cudaSetDevice(i) );
    cutilSafeCall(cudaMemcpyAsync(thread_data_[i].p_in_and_t_in_dev_, thread_data_[i].p_in_and_t_in_, sizeof(double2) * max_count_of_edges / gpu_count_, cudaMemcpyHostToDevice, thread_data_[i].stream) );
    cutilSafeCall(cudaMemcpyAsync(thread_data_[i].p_target_dev_, thread_data_[i].p_target_, sizeof(double) * max_count_of_edges / gpu_count_, cudaMemcpyHostToDevice, thread_data_[i].stream) );
    cutilSafeCall(cudaMemcpyAsync(thread_data_[i].den_sc_dev_, thread_data_[i].den_sc_, sizeof(double) *max_count_of_edges / gpu_count_, cudaMemcpyHostToDevice, thread_data_[i].stream) );
    cutilSafeCall(cudaMemcpyAsync(thread_data_[i].co2_dev_, thread_data_[i].co2_, sizeof(double) * max_count_of_edges / gpu_count_, cudaMemcpyHostToDevice, thread_data_[i].stream) );
    cutilSafeCall(cudaMemcpyAsync(thread_data_[i].n2_dev_, thread_data_[i].n2_, sizeof(double) * max_count_of_edges / gpu_count_, cudaMemcpyHostToDevice, thread_data_[i].stream) );
    // Run the computation on the device.
    // NOTE(review): the kernel covers max_index_ / gpu_count_ edges on each
    // partition, while CreateEdge fills partition 0 completely before
    // partition 1 and max_index_ may not divide evenly by gpu_count_ —
    // confirm edges are never silently skipped when the partitions are
    // unevenly populated.
    FindQResultCudaKernel<<<512, 64, 0, thread_data_[i].stream>>>(
        max_index_ / gpu_count_,
        thread_data_[i].den_sc_dev_, thread_data_[i].co2_dev_, thread_data_[i].n2_dev_,
        thread_data_[i].p_in_and_t_in_dev_, thread_data_[i].p_target_dev_,
        thread_data_[i].length_dev_,
        thread_data_[i].d_in_out_dev_,
        thread_data_[i].hydr_rough_env_exch_dev_,
        thread_data_[i].q_result_dev_);
    // Copy the computed q back to the host.
    cutilSafeCall(cudaMemcpyAsync(thread_data_[i].q_result_, thread_data_[i].q_result_dev_, sizeof(double) * max_count_of_edges / gpu_count_, cudaMemcpyDeviceToHost, thread_data_[i].stream) );
  }
  for(int i = 0; i < gpu_count_; i++)
  {
    cutilSafeCall( cudaSetDevice(i) );
    cudaStreamSynchronize(thread_data_[i].stream);
  }
}
// Returns the flow rate q computed for edge `index`, reading the host-side
// result buffer of the device partition that owns this edge.
double ManagerEdgeModelPipeSequentialCuda::q(int index)
{
  const int partition_size = max_count_of_edges / gpu_count_;
  const bool second_partition = (index >= partition_size);
  const int device_slot = second_partition ? 1 : 0;
  const int local_index = second_partition ? (index - partition_size) : index;
  return thread_data_[device_slot].q_result_[local_index];
}
39d0c9ee1f97f30a101945d87db22575234f2d68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void compute_B_for_depth(float* B, float* rho, float* Ns, int npix, int nchannels, int nimages) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int c = blockIdx.y*blockDim.y + threadIdx.y;
if (i < npix*nimages) {
B[c*npix*nimages + i] -= rho[c*npix + i%npix] * Ns[c*npix*nimages + i];
}
} | 39d0c9ee1f97f30a101945d87db22575234f2d68.cu | #include "includes.h"
// Subtracts the albedo-weighted normal term from B per channel:
//   B[c*npix*nimages + i] -= rho[c*npix + i % npix] * Ns[c*npix*nimages + i]
// where i runs over the npix*nimages flattened (pixel, image) pairs and c
// over channels. Launched on a 2D grid: x covers i, y covers c.
__global__ void compute_B_for_depth(float* B, float* rho, float* Ns, int npix, int nchannels, int nimages) {
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  int c = blockIdx.y*blockDim.y + threadIdx.y;
  // Guard BOTH grid dimensions: the original checked only i, so any launch
  // whose y-extent over-covered nchannels wrote out of bounds (nchannels
  // was otherwise unused).
  if (i < npix*nimages && c < nchannels) {
    B[c*npix*nimages + i] -= rho[c*npix + i%npix] * Ns[c*npix*nimages + i];
  }
}
889f1620d1daa3282207babf2c5e981e4d103ca7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| 889f1620d1daa3282207babf2c5e981e4d103ca7.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
33fca24035af6f85c3417b435a237c64d53b269b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Fast R-CNN
// copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// Modified by Wei Liu
// ------------------------------------------------------------------
#include <vector>
#include "caffe/layers/smooth_L1_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Element-wise smooth-L1 (Huber with delta = 1) transform of `in` into
// `out`; `n` is the element count. Each thread handles the indices assigned
// to it by Caffe's CUDA_KERNEL_LOOP macro, so any launch size is valid.
template <typename Dtype>
__global__ void SmoothL1Forward(const int n, const Dtype *in, Dtype *out) {
  // f(x) = 0.5 * x^2 if |x| < 1
  //        |x| - 0.5 otherwise
  CUDA_KERNEL_LOOP(index, n) {
    Dtype val = in[index];
    Dtype abs_val = abs(val);
    if (abs_val < 1) {
      out[index] = 0.5 * val * val;
    } else {
      out[index] = abs_val - 0.5;
    }
  }
}
// Forward pass: diff_ = [w *](bottom[0] - bottom[1]) element-wise, then
// errors_ = SmoothL1(diff_), and the scalar loss is
// sum(errors_) / bottom[0]->num() (per-batch-item normalization).
// bottom[2], present when has_weights_, holds the per-element weights w.
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
                                           const vector<Blob<Dtype> *> &top) {
  int count = bottom[0]->count();
  caffe_gpu_sub(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
                diff_.mutable_gpu_data()); // d := b0 - b1
  if (has_weights_) {
    caffe_gpu_mul(count, bottom[2]->gpu_data(), diff_.gpu_data(),
                  diff_.mutable_gpu_data()); // d := w * (b0 - b1)
  }
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( SmoothL1Forward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, diff_.gpu_data(), errors_.mutable_gpu_data());
  CUDA_POST_KERNEL_CHECK;
  Dtype loss;
  caffe_gpu_asum(count, errors_.gpu_data(), &loss);
  top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num();
}
template <typename Dtype>
__global__ void SmoothL1Backward(const int n, const Dtype *in, Dtype *out) {
  // Elementwise derivative of the smooth-L1 penalty:
  //   f'(x) = x        if |x| < 1
  //         = sign(x)  otherwise
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype x = in[index];
    if (abs(x) < 1) {
      out[index] = x;
    } else {
      // sign(x) computed branchlessly as (0 < x) - (x < 0)
      out[index] = (Dtype(0) < x) - (x < Dtype(0));
    }
  }
}
// Backward pass: for each propagated bottom i (0 or 1) writes
//   bottom[i].diff := sign_i * top_diff / num * f'(diff_)
// where diff_ was filled by Forward_gpu and sign flips for bottom[1].
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
int count = diff_.count();
// In-place transform of diff_: each thread reads/writes only its own index.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SmoothL1Backward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, diff_.gpu_data(), diff_.mutable_gpu_data());
CUDA_POST_KERNEL_CHECK;
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
caffe_gpu_axpby(bottom[i]->count(), // count
alpha, // alpha
diff_.gpu_data(), // x
Dtype(0), // beta: overwrite, don't accumulate
bottom[i]->mutable_gpu_diff()); // y
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer);
} // namespace caffe
| 33fca24035af6f85c3417b435a237c64d53b269b.cu | // ------------------------------------------------------------------
// Fast R-CNN
// copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// Modified by Wei Liu
// ------------------------------------------------------------------
#include <vector>
#include "caffe/layers/smooth_L1_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SmoothL1Forward(const int n, const Dtype *in, Dtype *out) {
  // Elementwise smooth-L1 (Huber) penalty:
  //   f(x) = 0.5 * x^2   if |x| < 1
  //        = |x| - 0.5   otherwise
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype x  = in[index];
    const Dtype ax = abs(x);
    out[index] = (ax < 1) ? (0.5 * x * x) : (ax - 0.5);
  }
}
// Forward pass of the smooth-L1 loss:
//   top[0] = sum_i f(d_i) / num,  d = (w *)(bottom[0] - bottom[1])
// where f is the Huber penalty computed by SmoothL1Forward. bottom[2] holds
// per-element weights and is only read when has_weights_ is set.
// Side effect: diff_ keeps the (weighted) difference for use in Backward_gpu.
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
int count = bottom[0]->count();
caffe_gpu_sub(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
diff_.mutable_gpu_data()); // d := b0 - b1
if (has_weights_) {
caffe_gpu_mul(count, bottom[2]->gpu_data(), diff_.gpu_data(),
diff_.mutable_gpu_data()); // d := w * (b0 - b1)
}
// NOLINT_NEXT_LINE(whitespace/operators)
SmoothL1Forward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, diff_.gpu_data(), errors_.mutable_gpu_data());
CUDA_POST_KERNEL_CHECK;
// Reduce the per-element penalties and normalize by the batch size.
Dtype loss;
caffe_gpu_asum(count, errors_.gpu_data(), &loss);
top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num();
}
template <typename Dtype>
__global__ void SmoothL1Backward(const int n, const Dtype *in, Dtype *out) {
  // Elementwise derivative of the smooth-L1 penalty:
  //   f'(x) = x        if |x| < 1
  //         = sign(x)  otherwise
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype x = in[index];
    if (abs(x) < 1) {
      out[index] = x;
    } else {
      // sign(x) computed branchlessly as (0 < x) - (x < 0)
      out[index] = (Dtype(0) < x) - (x < Dtype(0));
    }
  }
}
// Backward pass: for each propagated bottom i (0 or 1) writes
//   bottom[i].diff := sign_i * top_diff / num * f'(diff_)
// where diff_ was filled by Forward_gpu and sign flips for bottom[1].
template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
int count = diff_.count();
// In-place transform of diff_: each thread reads/writes only its own index.
// NOLINT_NEXT_LINE(whitespace/operators)
SmoothL1Backward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, diff_.gpu_data(), diff_.mutable_gpu_data());
CUDA_POST_KERNEL_CHECK;
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
caffe_gpu_axpby(bottom[i]->count(), // count
alpha, // alpha
diff_.gpu_data(), // x
Dtype(0), // beta: overwrite, don't accumulate
bottom[i]->mutable_gpu_diff()); // y
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer);
} // namespace caffe
|
7cc5fbf7d00621871d078ea3375890fd6eab34ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  // One thread per pixel. The launch maps the x dimension to image rows and
  // the y dimension to image columns (see your_rgba_to_greyscale).
  const int row = blockIdx.x * blockDim.x + threadIdx.x;
  const int col = blockIdx.y * blockDim.y + threadIdx.y;
  // Guard: the grid is ceil-divided, so edge blocks overhang the image.
  if (row >= numRows || col >= numCols)
    return;
  const int idx = row * numCols + col;
  const uchar4 px = rgbaImage[idx];
  // NTSC luma weights; alpha (.w) is ignored. The float result is truncated
  // to unsigned char on store, matching the original implicit conversion.
  greyImage[idx] = .299f * px.x + .587f * px.y + .114f * px.z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  // 16x16 thread tiles; ceil-divide the grid over rows (x) and columns (y)
  // so images whose sides are not multiples of 16 are still fully covered.
  const unsigned int kTile = 16;
  const dim3 blockSize(kTile, kTile);
  const dim3 gridSize((numRows + kTile - 1) / kTile,
                      (numCols + kTile - 1) / kTile);
  hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 7cc5fbf7d00621871d078ea3375890fd6eab34ca.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
// r = image row (launch x dimension), c = image column (launch y dimension).
int r = (blockIdx.x * blockDim.x) + threadIdx.x, c = (blockIdx.y * blockDim.y) + threadIdx.y;
// Bounds guard: the grid is ceil-divided, so edge blocks overhang the image.
if (r < numRows && c < numCols)
{
uchar4 rgba = rgbaImage[r * numCols + c];
// NTSC luma weights; alpha (.w) is ignored.
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
// Implicit float -> unsigned char truncation on store.
greyImage[r * numCols + c] = channelSum;
}
}
// Host wrapper: launches rgba_to_greyscale with one thread per pixel.
// 16x16 tiles; the grid maps rows to x and columns to y, ceil-divided so
// images whose sides are not multiples of 16 are fully covered.
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const int x = 16, y = 16;
const dim3 blockSize(x, y); //TODO
const dim3 gridSize((numRows + blockSize.x - 1) / blockSize.x, (numCols + blockSize.y - 1) / blockSize.y); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
// Synchronize before checking so in-kernel faults are surfaced here.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
335f53a119adffec77f6dde9baf54e702095d092.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
// Report and abort on any pending HIP/CUDA runtime error; no-op when clean.
// `file`/`line` are optional location info (file may be NULL).
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    const hipError_t err = hipGetLastError();
    if (err == hipSuccess) {
        return;
    }
    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
    exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
    // Flag each element: 1 if it survives compaction (non-zero), else 0.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        bools[tid] = idata[tid] ? 1 : 0;
    }
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
        const int *idata, const int *bools, const int *indices) {
    // Copy each kept element (bools[tid] == 1) to its compacted slot,
    // given by the exclusive-scan result in `indices`.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n || bools[tid] != 1) {
        return;
    }
    odata[indices[tid]] = idata[tid];
}
}
}
| 335f53a119adffec77f6dde9baf54e702095d092.cu | #include "common.h"
// Report and abort on any pending CUDA runtime error; no-op when clean.
// `file`/`line` are optional location info (file may be NULL).
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// Flag each element: 1 if it survives compaction (non-zero), else 0.
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) return;
bools[index] = (idata[index] != 0) ? 1 : 0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// Copy each kept element (bools[index] == 1) to its compacted slot,
// given by the exclusive-scan result in `indices`.
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) return;
int i = indices[index];
if (bools[index] == 1)
odata[i] = idata[index];
}
}
}
|
cdee635d10b0e826a26e57fe49c1877161393030.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Azzam Haidar
@author Ahmad Abdelfattah
NOTE: There is a likely compiler bug affecting this file, specifically
the generated file in single precision (sgetrf). See below in the file
for an explanation.
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "shuffle.cuh"
#include "batched_kernel_param.h"
// use this so magmasubs will replace with relevant precision, so we can comment out
// the switch case that causes compilation failure
#define PRECISION_z
// This kernel uses registers for matrix storage, shared mem. for communication.
// It also uses lazy swap.
//extern __shared__ magmaDoubleComplex zdata[];
// LU factorization with partial pivoting (lazy row swap) of one N x N matrix
// per warp-sized thread slice. Thread tx keeps one matrix row in registers
// (rA); shared memory is used only for the pivot search (|values| in dsx),
// broadcasting the pivot row (sx) and collecting pivot indices (sipiv).
// Launch: blockDim.x = NPOW2 (N rounded up to a power of two),
// blockDim.y = number of matrices per block; dynamic shared memory is sized
// by the host wrapper below.
template<int N, int NPOW2>
__global__ void
zgetrf_batched_smallsq_noshfl_kernel( magmaDoubleComplex** dA_array, int ldda,
magma_int_t** ipiv_array, magma_int_t *info_array, int batchCount)
{
extern __shared__ magmaDoubleComplex zdata[];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int batchid = blockIdx.x * blockDim.y + ty;
if(batchid >= batchCount) return;
magmaDoubleComplex* dA = dA_array[batchid];
magma_int_t* ipiv = ipiv_array[batchid];
magma_int_t* info = &info_array[batchid];
magmaDoubleComplex rA[N] = {MAGMA_Z_ZERO}; // this thread's matrix row, in registers
magmaDoubleComplex reg = MAGMA_Z_ZERO;
magmaDoubleComplex update = MAGMA_Z_ZERO;
int max_id, rowid = tx; // rowid: logical row currently owned by this thread (lazy swap)
int linfo = 0; // first zero pivot (1-based), 0 = non-singular so far
double rx_abs_max = MAGMA_D_ZERO;
// Carve this ty-slice's workspaces out of the dynamic shared memory.
magmaDoubleComplex *sx = (magmaDoubleComplex*)(zdata);
double* dsx = (double*)(sx + blockDim.y * NPOW2);
int* sipiv = (int*)(dsx + blockDim.y * NPOW2);
sx += ty * NPOW2;
dsx += ty * NPOW2;
sipiv += ty * NPOW2;
// read
if( tx < N ){
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
}
#pragma unroll
for(int i = 0; i < N; i++){
// izamax and find pivot
dsx[ rowid ] = fabs(MAGMA_Z_REAL( rA[i] )) + fabs(MAGMA_Z_IMAG( rA[i] ));
magmablas_syncwarp();
rx_abs_max = dsx[i];
max_id = i;
#pragma unroll
for(int j = i+1; j < N; j++){
if( dsx[j] > rx_abs_max){
max_id = j;
rx_abs_max = dsx[j];
}
}
// Record the first exactly-zero pivot; `update` zeroes the published
// pivot row in that case so the trailing update becomes a no-op.
linfo = ( rx_abs_max == MAGMA_D_ZERO && linfo == 0) ? (i+1) : linfo;
update = ( rx_abs_max == MAGMA_D_ZERO ) ? MAGMA_Z_ZERO : MAGMA_Z_ONE;
if(rowid == max_id){
// Pivot owner: publish the pivot row to shared memory and take over
// logical row i (rows are swapped lazily via rowid, not in memory).
sipiv[i] = max_id;
rowid = i;
#pragma unroll
for(int j = i; j < N; j++){
sx[j] = update * rA[j];
}
}
else if(rowid == i){
rowid = max_id;
}
magmablas_syncwarp();
// Reciprocal of the pivot; 1 when the pivot is zero to avoid dividing by 0.
reg = ( rx_abs_max == MAGMA_D_ZERO ) ? MAGMA_Z_ONE : MAGMA_Z_DIV(MAGMA_Z_ONE, sx[i] );
// scal and ger
if( rowid > i ){
rA[i] *= reg;
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * sx[j];
}
}
magmablas_syncwarp();
}
if(tx == 0){
(*info) = (magma_int_t)( linfo );
}
// write
if(tx < N) {
ipiv[ tx ] = (magma_int_t)(sipiv[tx] + 1); // fortran indexing
#pragma unroll
for(int i = 0; i < N; i++){
dA[ i * ldda + rowid ] = rA[i];
}
}
}
/***************************************************************************//**
Purpose
-------
zgetrf_batched_smallsq_noshfl computes the LU factorization of a square N-by-N matrix A
using partial pivoting with row interchanges.
This routine can deal only with square matrices of size up to 32
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 3 BLAS version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA, ipiv, and info become arrays with one entry per matrix.
Arguments
---------
@param[in]
n INTEGER
The size of each matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occured, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_getrf_batched
*******************************************************************************/
// Host dispatcher: validates arguments (square sizes 0..32 only), sizes the
// launch (ntcol matrices per block, NPOW2 = ceilpow2(m) threads per matrix,
// dynamic shared memory = ntcol * NPOW2 * (int + double + complex)), then
// selects the compile-time template instantiation matching m via the switch.
extern "C" magma_int_t
magma_zgetrf_batched_smallsq_noshfl(
magma_int_t n,
magmaDoubleComplex** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t arginfo = 0;
magma_int_t m = n;
if( (m < 0) || ( m > 32 ) ){
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
if( m == 0) return 0;
const magma_int_t ntcol = magma_get_zgetrf_batched_ntcol(m, n);
// Per-matrix shared workspace: sipiv (int) + dsx (double) + sx (complex),
// each ceilpow2(m) entries, for ntcol matrices per thread block.
magma_int_t shmem = ntcol * magma_ceilpow2(m) * sizeof(int);
shmem += ntcol * magma_ceilpow2(m) * sizeof(double);
shmem += ntcol * magma_ceilpow2(m) * sizeof(magmaDoubleComplex);
dim3 threads(magma_ceilpow2(m), ntcol, 1);
const magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
dim3 grid(gridx, 1, 1);
/* @author: Cade Brown <cbrow216@vols.utk.edu>
* @date : 2020-01-31
*
* Something very odd is happening with this file. The file never finishes compiling,
* causing compilation to hang indefinitely. I've only see it apply to It is likely a bug in either:
* * clang/clang++ compiler (C++ templating). I think it may be hanging on an invalid template parameter
* or searching through template matches in an infinite loop
* * LLVM code generation (specifically, the AMDGPU backend, as it seems that the compilation crashes
* during code generation in LL IR).
*
* I've only observed this when the file `magmablas_hip/sgetrf_batched_smallsq_noshfl.hip.cpp` is generated,
* never zgetrf or other precisions.
*
*/
switch(m){
case 1:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel< 1, magma_ceilpow2( 1)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 2:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel< 2, magma_ceilpow2( 2)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 3:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel< 3, magma_ceilpow2( 3)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 4:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel< 4, magma_ceilpow2( 4)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 5:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel< 5, magma_ceilpow2( 5)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 6:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel< 6, magma_ceilpow2( 6)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 7:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel< 7, magma_ceilpow2( 7)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 8:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel< 8, magma_ceilpow2( 8)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 9:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel< 9, magma_ceilpow2( 9)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 10:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<10, magma_ceilpow2(10)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 11:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<11, magma_ceilpow2(11)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
// here are the offending cases
case 12:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<12, magma_ceilpow2(12)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 13:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<13, magma_ceilpow2(13)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 14:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<14, magma_ceilpow2(14)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 15:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<15, magma_ceilpow2(15)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 16:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<16, magma_ceilpow2(16)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 17:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<17, magma_ceilpow2(17)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 18:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<18, magma_ceilpow2(18)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 19:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<19, magma_ceilpow2(19)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 20:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<20, magma_ceilpow2(20)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 21:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<21, magma_ceilpow2(21)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 22:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<22, magma_ceilpow2(22)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 23:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<23, magma_ceilpow2(23)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 24:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<24, magma_ceilpow2(24)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 25:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<25, magma_ceilpow2(25)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 26:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<26, magma_ceilpow2(26)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 27:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<27, magma_ceilpow2(27)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 28:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<28, magma_ceilpow2(28)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 29:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<29, magma_ceilpow2(29)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 30:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<30, magma_ceilpow2(30)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 31:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<31, magma_ceilpow2(31)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 32:hipLaunchKernelGGL(( zgetrf_batched_smallsq_noshfl_kernel<32, magma_ceilpow2(32)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
/**/
// replace the default error message with something so people can contact me
//default: printf("error: size %lld is not supported\n", (long long) m);
default: fprintf(stderr, "MAGMA: error in *getrf_batched_smallsq_noshfl, unsupported size '%lld'. Please contact Cade Brown <cbrow216@vols.utk.edu>, or some member of the MAGMA team with details about this application.\n", (long long)m);
}
return arginfo;
}
| cdee635d10b0e826a26e57fe49c1877161393030.cu | /*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Azzam Haidar
@author Ahmad Abdelfattah
NOTE: There is a likely compiler bug affecting this file, specifically
the generated file in single precision (sgetrf). See below in the file
for an explanation.
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "shuffle.cuh"
#include "batched_kernel_param.h"
// use this so magmasubs will replace with relevant precision, so we can comment out
// the switch case that causes compilation failure
#define PRECISION_z
// This kernel uses registers for matrix storage, shared mem. for communication.
// It also uses lazy swap.
//extern __shared__ magmaDoubleComplex zdata[];
// LU factorization with partial pivoting (lazy row swap) of one N x N matrix
// per warp-sized thread slice. Thread tx keeps one matrix row in registers
// (rA); shared memory is used only for the pivot search (|values| in dsx),
// broadcasting the pivot row (sx) and collecting pivot indices (sipiv).
// Launch: blockDim.x = NPOW2 (N rounded up to a power of two),
// blockDim.y = number of matrices per block; dynamic shared memory is sized
// by the host wrapper.
template<int N, int NPOW2>
__global__ void
zgetrf_batched_smallsq_noshfl_kernel( magmaDoubleComplex** dA_array, int ldda,
magma_int_t** ipiv_array, magma_int_t *info_array, int batchCount)
{
extern __shared__ magmaDoubleComplex zdata[];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int batchid = blockIdx.x * blockDim.y + ty;
if(batchid >= batchCount) return;
magmaDoubleComplex* dA = dA_array[batchid];
magma_int_t* ipiv = ipiv_array[batchid];
magma_int_t* info = &info_array[batchid];
magmaDoubleComplex rA[N] = {MAGMA_Z_ZERO}; // this thread's matrix row, in registers
magmaDoubleComplex reg = MAGMA_Z_ZERO;
magmaDoubleComplex update = MAGMA_Z_ZERO;
int max_id, rowid = tx; // rowid: logical row currently owned by this thread (lazy swap)
int linfo = 0; // first zero pivot (1-based), 0 = non-singular so far
double rx_abs_max = MAGMA_D_ZERO;
// Carve this ty-slice's workspaces out of the dynamic shared memory.
magmaDoubleComplex *sx = (magmaDoubleComplex*)(zdata);
double* dsx = (double*)(sx + blockDim.y * NPOW2);
int* sipiv = (int*)(dsx + blockDim.y * NPOW2);
sx += ty * NPOW2;
dsx += ty * NPOW2;
sipiv += ty * NPOW2;
// read
if( tx < N ){
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
}
#pragma unroll
for(int i = 0; i < N; i++){
// izamax and find pivot
dsx[ rowid ] = fabs(MAGMA_Z_REAL( rA[i] )) + fabs(MAGMA_Z_IMAG( rA[i] ));
magmablas_syncwarp();
rx_abs_max = dsx[i];
max_id = i;
#pragma unroll
for(int j = i+1; j < N; j++){
if( dsx[j] > rx_abs_max){
max_id = j;
rx_abs_max = dsx[j];
}
}
// Record the first exactly-zero pivot; `update` zeroes the published
// pivot row in that case so the trailing update becomes a no-op.
linfo = ( rx_abs_max == MAGMA_D_ZERO && linfo == 0) ? (i+1) : linfo;
update = ( rx_abs_max == MAGMA_D_ZERO ) ? MAGMA_Z_ZERO : MAGMA_Z_ONE;
if(rowid == max_id){
// Pivot owner: publish the pivot row to shared memory and take over
// logical row i (rows are swapped lazily via rowid, not in memory).
sipiv[i] = max_id;
rowid = i;
#pragma unroll
for(int j = i; j < N; j++){
sx[j] = update * rA[j];
}
}
else if(rowid == i){
rowid = max_id;
}
magmablas_syncwarp();
// Reciprocal of the pivot; 1 when the pivot is zero to avoid dividing by 0.
reg = ( rx_abs_max == MAGMA_D_ZERO ) ? MAGMA_Z_ONE : MAGMA_Z_DIV(MAGMA_Z_ONE, sx[i] );
// scal and ger
if( rowid > i ){
rA[i] *= reg;
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * sx[j];
}
}
magmablas_syncwarp();
}
if(tx == 0){
(*info) = (magma_int_t)( linfo );
}
// write
if(tx < N) {
ipiv[ tx ] = (magma_int_t)(sipiv[tx] + 1); // fortran indexing
#pragma unroll
for(int i = 0; i < N; i++){
dA[ i * ldda + rowid ] = rA[i];
}
}
}
/***************************************************************************//**
Purpose
-------
zgetrf_batched_smallsq_noshfl computes the LU factorization of a square N-by-N matrix A
using partial pivoting with row interchanges.
This routine can deal only with square matrices of size up to 32
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 3 BLAS version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA, ipiv, and info become arrays with one entry per matrix.
Arguments
---------
@param[in]
n INTEGER
The size of each matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occured, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_getrf_batched
*******************************************************************************/
/* Dispatch wrapper for the batched small-square (n <= 32) LU factorization
 * with partial pivoting, "no-shuffle" variant. One kernel template
 * instantiation exists per matrix size; the switch below selects it from the
 * runtime size m. The second template argument pads the thread count to the
 * next power of two of m.
 */
extern "C" magma_int_t
magma_zgetrf_batched_smallsq_noshfl(
    magma_int_t n,
    magmaDoubleComplex** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array,
    magma_int_t batchCount, magma_queue_t queue )
{
    magma_int_t arginfo = 0;
    magma_int_t m = n;  // square matrices only: rows == cols
    // This kernel family supports only sizes 0..32 (one warp-sized tile).
    if( (m < 0) || ( m > 32 ) ){
        arginfo = -1;
    }
    if (arginfo != 0) {
        magma_xerbla( __func__, -(arginfo) );
        return arginfo;
    }
    if( m == 0) return 0;  // quick return: nothing to factorize

    // ntcol = how many matrices one thread block factorizes (block y-dim).
    const magma_int_t ntcol = magma_get_zgetrf_batched_ntcol(m, n);
    // Dynamic shared memory per block: an int, a double, and a complex
    // scratch area per matrix, each sized to the padded (power-of-two) m.
    magma_int_t shmem = ntcol * magma_ceilpow2(m) * sizeof(int);
    shmem += ntcol * magma_ceilpow2(m) * sizeof(double);
    shmem += ntcol * magma_ceilpow2(m) * sizeof(magmaDoubleComplex);
    dim3 threads(magma_ceilpow2(m), ntcol, 1);
    const magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
    dim3 grid(gridx, 1, 1);
    /* @author: Cade Brown <cbrow216@vols.utk.edu>
     * @date : 2020-01-31
     *
     * Something very odd is happening with this file: compilation hangs and
     * never finishes. It is likely a bug in either:
     *   * the clang/clang++ compiler (C++ templating) -- it may be hanging on
     *     an invalid template parameter or searching through template matches
     *     in an infinite loop, or
     *   * LLVM code generation (specifically the AMDGPU backend, as the
     *     compilation appears to crash during code generation in LLVM IR).
     *
     * I've only observed this when the file
     * `magmablas_hip/sgetrf_batched_smallsq_noshfl.hip.cpp` is generated,
     * never zgetrf or other precisions.
     */
    // NOTE(review): this hipified file still uses CUDA triple-chevron launch
    // syntax and queue->cuda_stream(); hipcc accepts <<<>>>, and MAGMA's HIP
    // queue presumably aliases cuda_stream() -- verify against magma_queue_t.
    switch(m){
        case 1: zgetrf_batched_smallsq_noshfl_kernel< 1, magma_ceilpow2( 1)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 2: zgetrf_batched_smallsq_noshfl_kernel< 2, magma_ceilpow2( 2)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 3: zgetrf_batched_smallsq_noshfl_kernel< 3, magma_ceilpow2( 3)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 4: zgetrf_batched_smallsq_noshfl_kernel< 4, magma_ceilpow2( 4)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 5: zgetrf_batched_smallsq_noshfl_kernel< 5, magma_ceilpow2( 5)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 6: zgetrf_batched_smallsq_noshfl_kernel< 6, magma_ceilpow2( 6)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 7: zgetrf_batched_smallsq_noshfl_kernel< 7, magma_ceilpow2( 7)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 8: zgetrf_batched_smallsq_noshfl_kernel< 8, magma_ceilpow2( 8)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 9: zgetrf_batched_smallsq_noshfl_kernel< 9, magma_ceilpow2( 9)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 10: zgetrf_batched_smallsq_noshfl_kernel<10, magma_ceilpow2(10)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 11: zgetrf_batched_smallsq_noshfl_kernel<11, magma_ceilpow2(11)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        // here are the offending cases
        case 12: zgetrf_batched_smallsq_noshfl_kernel<12, magma_ceilpow2(12)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 13: zgetrf_batched_smallsq_noshfl_kernel<13, magma_ceilpow2(13)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 14: zgetrf_batched_smallsq_noshfl_kernel<14, magma_ceilpow2(14)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 15: zgetrf_batched_smallsq_noshfl_kernel<15, magma_ceilpow2(15)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 16: zgetrf_batched_smallsq_noshfl_kernel<16, magma_ceilpow2(16)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 17: zgetrf_batched_smallsq_noshfl_kernel<17, magma_ceilpow2(17)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 18: zgetrf_batched_smallsq_noshfl_kernel<18, magma_ceilpow2(18)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 19: zgetrf_batched_smallsq_noshfl_kernel<19, magma_ceilpow2(19)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 20: zgetrf_batched_smallsq_noshfl_kernel<20, magma_ceilpow2(20)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 21: zgetrf_batched_smallsq_noshfl_kernel<21, magma_ceilpow2(21)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 22: zgetrf_batched_smallsq_noshfl_kernel<22, magma_ceilpow2(22)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 23: zgetrf_batched_smallsq_noshfl_kernel<23, magma_ceilpow2(23)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 24: zgetrf_batched_smallsq_noshfl_kernel<24, magma_ceilpow2(24)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 25: zgetrf_batched_smallsq_noshfl_kernel<25, magma_ceilpow2(25)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 26: zgetrf_batched_smallsq_noshfl_kernel<26, magma_ceilpow2(26)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 27: zgetrf_batched_smallsq_noshfl_kernel<27, magma_ceilpow2(27)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 28: zgetrf_batched_smallsq_noshfl_kernel<28, magma_ceilpow2(28)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 29: zgetrf_batched_smallsq_noshfl_kernel<29, magma_ceilpow2(29)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 30: zgetrf_batched_smallsq_noshfl_kernel<30, magma_ceilpow2(30)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 31: zgetrf_batched_smallsq_noshfl_kernel<31, magma_ceilpow2(31)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        case 32: zgetrf_batched_smallsq_noshfl_kernel<32, magma_ceilpow2(32)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
        /**/
        // replace the default error message with something so people can contact me
        //default: printf("error: size %lld is not supported\n", (long long) m);
        default: fprintf(stderr, "MAGMA: error in *getrf_batched_smallsq_noshfl, unsupported size '%lld'. Please contact Cade Brown <cbrow216@vols.utk.edu>, or some member of the MAGMA team with details about this application.\n", (long long)m);
    }
    return arginfo;
}
|
81c9482b421bffffe62be28bea3a7ef0d5a1b387.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Original code from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/causal_product_cuda.cu
// Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
// Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
// Apoorv Vyas <avyas@idiap.ch>
//
// Modified to implement the fast RNN with *FWM update rule*.
// Copyright (c) 2021 Kazuki Irie
#include <torch/extension.h>
// #include <iostream>
// #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits>
float_accessor;
// sigmoid
// Logistic sigmoid: sgmf(x) = 1 / (1 + e^(-x)).
__device__ float sgmf(float x) {
    const float neg_exp = expf(-x);
    return 1.f / (1.f + neg_exp);
}
// Forward kernel for the fast RNN with the FWM (fast weight memory) update
// rule, processing one time chunk of length <= T starting at l_offset.
// Grid : one block per (batch n, head h) sequence, blockIdx.x = n * H + h.
// Block: threadIdx.x = e_local * M + m addresses an (E_per_subblock x M) tile
//        of the fast weight matrix; subblocks_per_seq tiles cover all E rows.
// Dynamic shared memory layout (floats, in declaration order below):
//   kv (E_block*M) | results (M) | states (M) | values_old (M) | betas (T) |
//   softmax denom (1) | running max (1) | values (M*T) | keys (E_block*T) |
//   inputs (M*T)
// The fast weight matrix `kv` and RNN state `states` are read at entry and
// written back at exit so they carry over to the next time chunk.
__global__ void fast_rnn_forward_kernel(
    const float_accessor inputs,
    const float_accessor keys,
    const float_accessor values,
    const float_accessor betas,
    float_accessor values_old,
    float_accessor states,
    float_accessor kv,
    float_accessor result,
    const int N,
    const int H,
    const int L,
    const int E,
    const int M,
    const int E_per_subblock,
    const int subblocks_per_seq,
    const int T, // block chunk size in time dim.
    const int l_offset // multiple of T, length offset.
) {
    // Each block takes care of one sequence.
    // blockIdx.x = n * H + h
    int n = blockIdx.x / H; // batch id
    int h = blockIdx.x % H; // head id
    // threadIdx.x = e_local*M + m
    // Local e coordinate within E_per_subblock sub-block.
    int e_local = threadIdx.x / M;
    int m = threadIdx.x % M;
    const int E_block = subblocks_per_seq * E_per_subblock;
    // Carve the dynamic shared memory into the regions documented above.
    const int shared_kv_size = E_block * M;
    extern __shared__ float shared_mem[];
    float* shared_kv = shared_mem;
    float* shared_results = shared_mem + shared_kv_size;
    float* shared_states = shared_results + M;
    float* shared_values_old = shared_states + M;
    float* shared_betas = shared_values_old + M;
    float* softmax_denom = shared_betas + T;
    float* max_value = softmax_denom + 1;
    float* shared_values = max_value + 1;
    float* shared_keys = shared_values + M*T;
    float* shared_inputs = shared_keys + E_block*T;
    // Zero the per-step accumulators (threads 0..M-1 own one slot each).
    if (threadIdx.x < M) {
        shared_results[m] = 0.f;
        shared_values_old[m] = 0.f;
    }
    if (threadIdx.x < 1) {
        softmax_denom[0] = 0.f;
        max_value[0] = 0.f;
    }
    // the last segment is shorter.
    int t_end = (T + l_offset) <= L ? T : L - l_offset;
    // Cooperative, strided loads of the chunk's inputs into shared memory.
    for (int i = threadIdx.x; i < (t_end*M); i += blockDim.x)
    {
        int t = int(i / M) + l_offset;
        int d = i % M;
        shared_values[i] = values[n][h][t][d];
        shared_inputs[i] = inputs[n][h][t][d];
    }
    for (int i = threadIdx.x; i < (t_end*E_block); i += blockDim.x)
    {
        int t = int(i / E_block) + l_offset;
        int d = (i % E_block);
        if (d < E) {
            shared_keys[i] = keys[n][h][t][d];
        }
    }
    for (int i = threadIdx.x; i < t_end; i += blockDim.x)
    {
        int t = i + l_offset;
        shared_betas[i] = betas[n][h][t][0];
    }
    __syncthreads();
    if (n >= N) {
        return;
    }
    int e;
    int kv_idx;
    // Load the carried-over fast weight matrix, one tile per sub-block.
    for (int sub=0; sub<subblocks_per_seq; sub++) {
        e = sub * E_per_subblock + e_local;
        kv_idx = threadIdx.x + sub * blockDim.x;
        if (e < E) {
            shared_kv[kv_idx] = kv[n][h][e][m];
        }
    }
    // init variables
    if (threadIdx.x < M) {
        // initialize RNN state
        shared_states[m] = states[n][h][0][m];
    }
    int e_abs;
    for (int t=0; t<t_end; t++) { // loop over time in the segment
        int l = t + l_offset; // absolute position in time
        int m_abs = t*M + m;
        float v_old;
        // Retrieve the "old" value currently stored for this key:
        // v_old[m] = sum_e kv[e][m] * key[e].
        for (int sub=0; sub<subblocks_per_seq; sub++) {
            e = sub * E_per_subblock + e_local;
            e_abs = t*E_block + e;
            kv_idx = threadIdx.x + sub * blockDim.x;
            if (e < E) {
                // get old value
                v_old = shared_kv[kv_idx] * shared_keys[e_abs];
                atomicAdd(
                    &shared_values_old[m],
                    v_old
                );
            }
        }
        __syncthreads();
        // compute new value to be inserted
        // if (threadIdx.x < M) {
        //   shared_values_insert[m] = shared_betas[t]
        //     * (shared_values[m_abs] - shared_values_old[m]);
        // }
        // __syncthreads();
        float res;
        // compute new value to be inserted (delta rule, gated by beta)
        float v_insert = shared_betas[t] *
          (shared_values[m_abs] - shared_values_old[m]);
        // Rank-one fast weight update, then the recurrent matvec
        // results[m] += sum_e states[e] * kv[e][m].
        for (int sub=0; sub<subblocks_per_seq; sub++) {
            e = sub * E_per_subblock + e_local;
            e_abs = t*E_block + e;
            kv_idx = threadIdx.x + sub * blockDim.x;
            if (e < E) {
                // Update fast weights
                shared_kv[kv_idx] += shared_keys[e_abs] * v_insert;
                res = shared_states[e] * shared_kv[kv_idx];
                atomicAdd(
                    &shared_results[m], // recurrent part
                    res
                );
            }
        }
        __syncthreads();
        // For stable softmax: find max over the M pre-activations.
        float max_val;
        float tmp_max;
        if (threadIdx.x < 1) { // Not parallelized! this should be improved!
            max_val = shared_results[0] + shared_inputs[t*M];
            for (int i = 1; i < M; i++) {
                tmp_max = shared_results[i] + shared_inputs[t*M + i];
                if (tmp_max > max_val) {
                    max_val = tmp_max;
                }
            }
            max_value[0] = max_val;
        }
        __syncthreads();
        // Computed by every thread (m < M always holds); only lanes with
        // threadIdx.x < M consume r1 below.
        float r1 = expf(
          shared_results[m] + shared_inputs[m_abs] - max_value[0]);
        if (threadIdx.x < M) {
            // m = threadIdx.x if threadIdx.x < M
            // sigmoid
            // float r1 = expf(shared_results[m] + shared_inputs[m_abs]);
            // tanh version
            // float r1 = tanhf(shared_results[m] + shared_inputs[m_abs]);
            // atomicAdd(
            //   &result[n][h][l][m],
            //   r1
            // );
            atomicAdd(
                &softmax_denom[0],
                r1
            );
            // shared_states[m] = r1; // state update
            // shared_results[m] = 0.f;
            // same for v_old and v_insert
            // Export v_old for reuse in the backward pass, then reset it.
            float r2 = shared_values_old[m];
            atomicAdd(
                &values_old[n][h][l][m],
                r2
            );
            shared_values_old[m] = 0.f;
        }
        __syncthreads();
        if (threadIdx.x < M) {
            // Normalize (softmax), write the output, and advance the state.
            float r3 = r1 / softmax_denom[0]; // stable?
            atomicAdd(
                &result[n][h][l][m],
                r3
            );
            shared_states[m] = r3; // state update
            shared_results[m] = 0.f;
        }
        __syncthreads();
        if (threadIdx.x < 1) {
            softmax_denom[0] = 0.f;
        }
        __syncthreads();
    }
    __syncthreads();
    // write back to kv to be carried over to the next segment.
    for (int sub=0; sub<subblocks_per_seq; sub++) {
        e = sub * E_per_subblock + e_local;
        kv_idx = threadIdx.x + sub * blockDim.x;
        if (e < E) {
            kv[n][h][e][m] = shared_kv[kv_idx];
        }
    }
    if (threadIdx.x < M) {
        states[n][h][0][m] = shared_states[m];
    }
}
// Forward driver. Splits the length-L sequence into time chunks of size T so
// that the per-block dynamic shared memory fits a 12 * 1024 float (48 KB)
// budget, and launches one kernel per chunk. The fast weight matrix `kv` and
// the RNN state `states` are carried across chunks by the kernel itself.
//
// Tensors (float32):
//   inputs, keys, values, betas : [N, H, L, *], read-only
//   v_old   : [N, H, L, M] out, retrieved old values (reused by backward)
//   states  : [N, H, 1, M] in/out, RNN state
//   kv      : [N, H, E, M] in/out, fast weight matrix
//   outputs : [N, H, L, M] out
void fast_rnn_forward(
    const torch::Tensor inputs,
    const torch::Tensor keys,
    const torch::Tensor values,
    const torch::Tensor betas,
    torch::Tensor v_old,
    torch::Tensor states, // init states
    torch::Tensor kv, // might be non zero if carried over from previous seg.
    torch::Tensor outputs
) {
    // const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(queries));
    int N = inputs.size(0);
    int H = inputs.size(1);
    int L = inputs.size(2);
    int E = inputs.size(3);
    int M = values.size(3);
    int threads = 1024;
    // Shared mem max size is 48KB (12 * 1024 floats).
    int MUL_PER_BLOCK = min(threads, E*M);
    // make sure that MUL_PER_BLOCK is divisible by M;
    MUL_PER_BLOCK = int(MUL_PER_BLOCK / M) * M;
    threads = MUL_PER_BLOCK;
    const int subblocks_per_seq = ((E*M) + threads -1) / threads;
    const int E_per_subblock = MUL_PER_BLOCK / M;
    int blocks = N*H; // total number of sequences, one block each
    // Constant shared-memory cost (floats): KV tile, plus results/states/
    // old-values (3*M), plus softmax denominator and running max (+1 +1).
    int shared_mem_const = (subblocks_per_seq * E_per_subblock + 3)*M + 1 + 1;
    // Per-timestep cost (floats): value and input (2*M), key (E_block), beta (1).
    int shared_mem_per_time = 2*M + E_per_subblock * subblocks_per_seq + 1;
    // T = max time chunk size we can afford within 12 * 1024 floats = 48 KB.
    assert(12 * 1024 - shared_mem_const > 0 &&
           "`d_head` too large. To obtain large models, keep `d_head` small"
           "e.g. 64 and increase the number of heads instead.");
    const int T = int(((12 * 1024) - shared_mem_const) / shared_mem_per_time);
    // Bug fix: if the leftover budget is smaller than one timestep's cost,
    // T == 0 and the loop below (l_offset += T) would never terminate.
    // Fail loudly instead of hanging.
    assert(T >= 1 &&
           "Shared memory budget cannot hold a single time step; "
           "reduce `d_head`.");
    const int shared_mem_forward =
        ((T*shared_mem_per_time) + shared_mem_const) * sizeof(float);
    for (int l_offset=0; l_offset < L; l_offset += T) {
        hipLaunchKernelGGL(( fast_rnn_forward_kernel)
            , dim3(blocks), dim3(MUL_PER_BLOCK), shared_mem_forward, 0,
            inputs.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            betas.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            v_old.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            states.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            kv.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            outputs.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            N, H, L, E, M, E_per_subblock, subblocks_per_seq, T, l_offset
        );
    }
}
// Backward kernel for the fast RNN with FWM update rule. Walks the sequence
// in reverse (t_bw = L - 1 - t), reverse-applying the rank-one fast weight
// updates while accumulating gradients for inputs, keys, values and betas.
// Grid/block mapping mirrors the forward kernel: blockIdx.x = n * H + h,
// threadIdx.x = e_local * M + m over sub-block tiles of the weight matrix.
// `kv`, `grad_kv` and the recurrent gradient carry `tmp_grad` are read at
// entry and written back at exit so chunks chain across launches.
__global__ void fast_rnn_backward_kernel(
    const float_accessor keys,
    const float_accessor values,
    const float_accessor betas,
    const float_accessor v_old,
    const float_accessor rnn_out,
    const float_accessor rnn_out_delayed,
    const float_accessor grad_out,
    float_accessor tmp_grad,
    float_accessor kv,
    float_accessor grad_kv,
    float_accessor grad_inputs,
    float_accessor grad_keys,
    float_accessor grad_values,
    float_accessor grad_betas,
    int N,
    int H,
    int L,
    int E,
    int M,
    int E_per_subblock,
    int subblocks_per_seq,
    int T,
    int l_offset
) {
    // Each block takes care of one sequence.
    // blockIdx.x = n * H + h
    int n = blockIdx.x / H;
    int h = blockIdx.x % H;
    // threadIdx.x = e_local*M + m
    // Local e coordinate within E_per_subblock sub-block.
    int e_local = threadIdx.x / M;
    int m = threadIdx.x % M;
    const int E_block = subblocks_per_seq * E_per_subblock;
    // Dynamic shared memory layout: kv | grad_kv | per-step accumulators
    // (res_i, res_k, res_v, grad_v_old; one float each for beta grad and the
    // softmax-gradient constant) | per-timestep caches of the chunk.
    const int shared_kv_size = E_block * M;
    extern __shared__ float shared_mem[];
    float* shared_kv = shared_mem;
    float* shared_grad_kv = shared_mem + shared_kv_size;
    float* shared_res_i = shared_grad_kv + shared_kv_size;
    float* shared_res_k = shared_res_i + M;
    float* shared_res_v = shared_res_k + M;
    float* shared_grad_v_old =shared_res_v + M;
    float* shared_res_beta = shared_grad_v_old + M;
    float* grad_sft_cst = shared_res_beta + 1;
    float* shared_gradout = grad_sft_cst + 1;
    float* shared_keys = shared_gradout + M*T;
    float* shared_values = shared_keys + E_block*T;
    float* shared_rnn_out = shared_values + M*T;
    float* shared_rnn_out_delayed = shared_rnn_out + M*T;
    float* shared_v_old = shared_rnn_out_delayed + M*T;
    float* shared_betas = shared_v_old + M*T;
    float* shared_tmp_grad = shared_betas + T;
    if (threadIdx.x < M) {
        shared_res_i[m] = 0.f;
        shared_res_k[m] = 0.f;
        shared_res_v[m] = 0.f;
        shared_grad_v_old[m] = 0.f;
    }
    if (threadIdx.x < 1) {
        shared_res_beta[0] = 0.f;
        grad_sft_cst[0] = 0.f; // offset for grad softmax
    }
    // Everythig goes backward: index i maps to reversed time t_bw.
    int t_end = (T + l_offset) <= L ? T : L - l_offset;
    for (int i = threadIdx.x; i < (t_end*M); i += blockDim.x)
    {
        int t = int(i / M) + l_offset;
        int t_bw = L - 1 - t;
        int d = i % M;
        shared_gradout[i] = grad_out[n][h][t_bw][d];
        shared_rnn_out[i] = rnn_out[n][h][t_bw][d];
        shared_values[i] = values[n][h][t_bw][d];
        shared_v_old[i] = v_old[n][h][t_bw][d];
    }
    for (int i = threadIdx.x; i < (t_end*E_block); i += blockDim.x)
    {
        int t = int(i / E_block) + l_offset;
        int t_bw = L - 1 - t;
        int d = (i % E_block);
        if (d < E) {
            shared_rnn_out_delayed[i] = rnn_out_delayed[n][h][t_bw][d];
            shared_keys[i] = keys[n][h][t_bw][d];
        }
    }
    for (int i = threadIdx.x; i < t_end; i += blockDim.x)
    {
        int t = i + l_offset;
        int t_bw = L - 1 - t;
        shared_betas[i] = betas[n][h][t_bw][0];
    }
    __syncthreads();
    if (n >= N) {
        return;
    }
    int e;
    int e_abs; // absolute idx from t=0
    int kv_idx;
    // Load carried-over fast weights and their gradient accumulator.
    for (int sub=0; sub<subblocks_per_seq; sub++) {
        e = sub * E_per_subblock + e_local;
        kv_idx = threadIdx.x + sub * blockDim.x;
        if (e < E) {
            shared_kv[kv_idx] = kv[n][h][e][m];
            shared_grad_kv[kv_idx] = grad_kv[n][h][e][m];
        }
    }
    if (threadIdx.x < M) {
        // threadIdx.x = m if threadIdx.x < M
        // Recurrent gradient carried in from the previously processed chunk.
        shared_tmp_grad[m] = tmp_grad[n][h][0][m];
    }
    for (int t=0; t<t_end; t++) {
        int l = t + l_offset;
        int l_b = L - l -1; // reversed (original) time index
        int m_abs = t*M + m;
        // compute constant for grad softmax: sum_m g[m] * softmax_out[m]
        if (threadIdx.x < M) {
            shared_tmp_grad[m] += shared_gradout[m_abs];
            float cst = shared_tmp_grad[m] * shared_rnn_out[m_abs];
            atomicAdd(
                &grad_sft_cst[0],
                cst
            );
        }
        __syncthreads();
        if (threadIdx.x < M) { // element-wise ops only here
            // threadIdx.x = m if threadIdx.x < M
            // add new grad to tmp grad accumulator
            // shared_tmp_grad[m] += shared_gradout[m_abs];
            // __syncthreads();
            // sigmoid
            // float grad_z =
            //   (1. - shared_rnn_out[m_abs]) * shared_rnn_out[m_abs]
            //   * shared_tmp_grad[m];
            // Softmax backward: dz = y * (dy - sum(dy * y)).
            float grad_z = shared_rnn_out[m_abs] * (
              shared_tmp_grad[m] - grad_sft_cst[0]);
            // tanh --> grad_z = grad_h * (1- out^2)
            // float grad_z = (
            //   1. - shared_rnn_out[m_abs] * shared_rnn_out[m_abs])
            //   * shared_tmp_grad[m];
            atomicAdd(
                &shared_res_i[m],
                grad_z
            ); // grad for input
            shared_tmp_grad[m] = 0.f; // prepare grad for the next step.
        }
        __syncthreads(); // important to sync
        if (threadIdx.x < 1) {
            grad_sft_cst[0] = 0.f;
        }
        float v_diff = shared_values[m_abs] - shared_v_old[m_abs];
        float v_ins = v_diff * shared_betas[t];
        // Accumulate grads w.r.t. fast weights, values, keys, beta, and the
        // delayed recurrent state (fed back through shared_tmp_grad).
        for (int sub=0; sub<subblocks_per_seq; sub++) {
            e = sub * E_per_subblock + e_local;
            e_abs = t*E_block + e;
            kv_idx = threadIdx.x + sub * blockDim.x;
            if (e < E) {
                // grad rec weight part 1
                shared_grad_kv[kv_idx] +=
                  shared_res_i[m] * shared_rnn_out_delayed[e_abs];
                // grad v
                float res = shared_keys[e_abs] * shared_grad_kv[kv_idx]
                  * shared_betas[t];
                atomicAdd(
                    &shared_res_v[m],
                    res
                );
                // grad k part 1 and 2
                float res_k = shared_grad_kv[kv_idx] * v_ins;
                atomicAdd(
                    &shared_res_k[e],
                    res_k
                );
                // grad beta
                float res_beta = shared_grad_kv[kv_idx] * shared_keys[e_abs]
                  * v_diff;
                atomicAdd(
                    &shared_res_beta[0],
                    res_beta
                );
                float res_h = shared_res_i[m] * shared_kv[kv_idx];
                atomicAdd(
                    &shared_tmp_grad[e],
                    res_h
                );
            }
        }
        __syncthreads();
        for (int sub=0; sub<subblocks_per_seq; sub++) {
            e = sub * E_per_subblock + e_local;
            e_abs = t*E_block + e;
            kv_idx = threadIdx.x + sub * blockDim.x;
            if (e < E) {
                // reverse update kv (undo the forward rank-one update)
                shared_kv[kv_idx] -= shared_keys[e_abs] * v_ins;
                // grad v_old
                float res_v_old = - (shared_grad_kv[kv_idx] * shared_betas[t]
                  * shared_keys[e_abs]);
                atomicAdd(
                    &shared_grad_v_old[m],
                    res_v_old
                );
            }
        }
        __syncthreads();
        // remaining key grad (through the v_old retrieval path)
        for (int sub=0; sub<subblocks_per_seq; sub++) {
            e = sub * E_per_subblock + e_local;
            e_abs = t*E_block + e;
            kv_idx = threadIdx.x + sub * blockDim.x;
            if (e < E) {
                float res_kp3 = shared_grad_v_old[m] * shared_kv[kv_idx];
                atomicAdd(
                    &shared_res_k[e],
                    res_kp3
                ); // remaining key grad
                // grad kv via v old
                shared_grad_kv[kv_idx] +=
                  shared_grad_v_old[m] * shared_keys[e_abs];
            }
        }
        __syncthreads();
        // Flush the per-step accumulators to global memory and reset them.
        if (threadIdx.x < M) {
            // m = threadIdx.x if threadIdx.x < M
            float ri = shared_res_i[m];
            atomicAdd(
                &grad_inputs[n][h][l_b][m],
                ri
            );
            float rk = shared_res_k[m];
            atomicAdd(
                &grad_keys[n][h][l_b][m],
                rk
            );
            float rv = shared_res_v[m];
            atomicAdd(
                &grad_values[n][h][l_b][m],
                rv
            );
            shared_res_i[m] = 0.f;
            shared_res_k[m] = 0.f;
            shared_res_v[m] = 0.f;
            shared_grad_v_old[m] = 0.f;
        }
        __syncthreads();
        if (threadIdx.x < 1) {
            float r3 = shared_res_beta[0];
            atomicAdd(
                &grad_betas[n][h][l_b][0],
                r3
            );
            shared_res_beta[0] = 0.f;
        }
        __syncthreads();
    }
    __syncthreads();
    // write back temporal gradients (and rewound kv) for the next chunk.
    for (int sub=0; sub<subblocks_per_seq; sub++) {
        e = sub * E_per_subblock + e_local;
        kv_idx = threadIdx.x + sub * blockDim.x;
        if (e < E) {
            kv[n][h][e][m] = shared_kv[kv_idx];
            grad_kv[n][h][e][m] = shared_grad_kv[kv_idx];
        }
    }
    if (threadIdx.x < M) {
        // threadIdx.x = m if threadIdx.x < M
        tmp_grad[n][h][0][m] = shared_tmp_grad[m];
    }
}
// Backward driver. Mirrors fast_rnn_forward: chunks the sequence in time so
// shared memory fits 12 * 1024 floats (48 KB) and launches the backward
// kernel per chunk. `fw_mem` holds the final fast weight matrix from the
// forward pass; the kernel rewinds it chunk by chunk.
//
// Tensors (float32):
//   keys, values, betas, v_old, grad_out, outputs, o_delayed : read-only
//   fw_mem : [N, H, E, M] in/out, fast weights (rewound in place)
//   grad_in, grad_k, grad_v, grad_beta : outputs, accumulated gradients
void fast_rnn_backward(
    const torch::Tensor keys,
    const torch::Tensor values,
    const torch::Tensor betas,
    const torch::Tensor v_old,
    const torch::Tensor grad_out,
    const torch::Tensor outputs,
    const torch::Tensor o_delayed,
    torch::Tensor fw_mem, // from the forward pass.
    torch::Tensor grad_in,
    torch::Tensor grad_k,
    torch::Tensor grad_v,
    torch::Tensor grad_beta
) {
    // const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(grad_queries));
    int N = keys.size(0);
    int H = keys.size(1);
    int L = keys.size(2);
    int E = keys.size(3);
    int M = values.size(3);
    // Scratch carried across chunk launches: fast-weight gradient and the
    // recurrent gradient from the previously processed (later) chunk.
    auto grad_kv = torch::zeros({N, H, E, M}, keys.options());
    auto tmp_grad = torch::zeros({N, H, 1, M}, keys.options());
    const int threads = 512;
    int MPB = min(threads, E*M);
    // make sure that MPB is divisible by M;
    MPB = int(MPB / M) * M;
    const int subblocks_per_seq_value = ((E*M) + MPB - 1)/ MPB;
    const int E_per_subblock = MPB / M;
    const int blocks_value = N*H;
    const int E_block = E_per_subblock * subblocks_per_seq_value;
    // Constant cost (floats): kv + grad_kv tiles (2*E_block*M), four M-sized
    // accumulators (res_i/res_k/res_v/grad_v_old), beta grad + softmax const.
    int shared_mem_const = (2 * E_block + 4)*M + 2;
    // Per-timestep cost (floats): gradout, values, rnn_out, rnn_out_delayed,
    // v_old, tmp_grad (6*M), keys (E_block), beta (1).
    int shared_mem_per_time = 6*M + E_block + 1;
    assert(12 * 1024 - shared_mem_const > 0 &&
           "`d_head` too large. To obtain large models, keep `d_head` small"
           "e.g. 64 and increase the number of heads instead.");
    int T = int(((12 * 1024) - shared_mem_const) / shared_mem_per_time);
    // Bug fix: if the leftover budget is smaller than one timestep's cost,
    // T == 0 and the chunk loop below would never advance. Fail loudly.
    assert(T >= 1 &&
           "Shared memory budget cannot hold a single time step; "
           "reduce `d_head`.");
    const int shared_mem_v_backward =
        ((T*shared_mem_per_time) + shared_mem_const) * sizeof(float);
    for (int l_offset=0; l_offset < L; l_offset += T) {
        hipLaunchKernelGGL(( fast_rnn_backward_kernel)
            , dim3(blocks_value), dim3(MPB), shared_mem_v_backward, 0,
            keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            betas.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            v_old.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            outputs.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            o_delayed.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            tmp_grad.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            fw_mem.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            grad_kv.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            grad_in.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            grad_k.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            grad_v.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            grad_beta.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            N, H, L, E, M, E_per_subblock, subblocks_per_seq_value, T, l_offset
        );
    }
}
// Python bindings for the fast RNN (FWM update rule) forward/backward passes.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def(
        "fast_rnn_forward",
        &fast_rnn_forward,
        // Fixed: previous docstring was copy-pasted from the causal-attention
        // origin of this file and described attention, not this RNN.
        "Forward pass of the fast RNN with the FWM (fast weight memory) "
        "update rule."
    );
    m.def(
        "fast_rnn_backward",
        &fast_rnn_backward,
        "Compute the gradients for the fast weight memory."
    );
}
| 81c9482b421bffffe62be28bea3a7ef0d5a1b387.cu | // Original code from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/causal_product_cuda.cu
// Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
// Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
// Apoorv Vyas <avyas@idiap.ch>
//
// Modified to implement the fast RNN with *FWM update rule*.
// Copyright (c) 2021 Kazuki Irie
#include <torch/extension.h>
// #include <iostream>
// #include <c10/cuda/CUDAGuard.h>
typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits>
float_accessor;
// sigmoid
// Logistic sigmoid: sgmf(x) = 1 / (1 + e^(-x)).
__device__ float sgmf(float x) {
    const float neg_exp = expf(-x);
    return 1.f / (1.f + neg_exp);
}
// Forward kernel for the fast RNN with the FWM (fast weight memory) update
// rule, processing one time chunk of length <= T starting at l_offset.
// Grid : one block per (batch n, head h) sequence, blockIdx.x = n * H + h.
// Block: threadIdx.x = e_local * M + m addresses an (E_per_subblock x M) tile
//        of the fast weight matrix; subblocks_per_seq tiles cover all E rows.
// Dynamic shared memory layout (floats, in declaration order below):
//   kv (E_block*M) | results (M) | states (M) | values_old (M) | betas (T) |
//   softmax denom (1) | running max (1) | values (M*T) | keys (E_block*T) |
//   inputs (M*T)
// The fast weight matrix `kv` and RNN state `states` are read at entry and
// written back at exit so they carry over to the next time chunk.
__global__ void fast_rnn_forward_kernel(
    const float_accessor inputs,
    const float_accessor keys,
    const float_accessor values,
    const float_accessor betas,
    float_accessor values_old,
    float_accessor states,
    float_accessor kv,
    float_accessor result,
    const int N,
    const int H,
    const int L,
    const int E,
    const int M,
    const int E_per_subblock,
    const int subblocks_per_seq,
    const int T, // block chunk size in time dim.
    const int l_offset // multiple of T, length offset.
) {
    // Each block takes care of one sequence.
    // blockIdx.x = n * H + h
    int n = blockIdx.x / H; // batch id
    int h = blockIdx.x % H; // head id
    // threadIdx.x = e_local*M + m
    // Local e coordinate within E_per_subblock sub-block.
    int e_local = threadIdx.x / M;
    int m = threadIdx.x % M;
    const int E_block = subblocks_per_seq * E_per_subblock;
    // Carve the dynamic shared memory into the regions documented above.
    const int shared_kv_size = E_block * M;
    extern __shared__ float shared_mem[];
    float* shared_kv = shared_mem;
    float* shared_results = shared_mem + shared_kv_size;
    float* shared_states = shared_results + M;
    float* shared_values_old = shared_states + M;
    float* shared_betas = shared_values_old + M;
    float* softmax_denom = shared_betas + T;
    float* max_value = softmax_denom + 1;
    float* shared_values = max_value + 1;
    float* shared_keys = shared_values + M*T;
    float* shared_inputs = shared_keys + E_block*T;
    // Zero the per-step accumulators (threads 0..M-1 own one slot each).
    if (threadIdx.x < M) {
        shared_results[m] = 0.f;
        shared_values_old[m] = 0.f;
    }
    if (threadIdx.x < 1) {
        softmax_denom[0] = 0.f;
        max_value[0] = 0.f;
    }
    // the last segment is shorter.
    int t_end = (T + l_offset) <= L ? T : L - l_offset;
    // Cooperative, strided loads of the chunk's inputs into shared memory.
    for (int i = threadIdx.x; i < (t_end*M); i += blockDim.x)
    {
        int t = int(i / M) + l_offset;
        int d = i % M;
        shared_values[i] = values[n][h][t][d];
        shared_inputs[i] = inputs[n][h][t][d];
    }
    for (int i = threadIdx.x; i < (t_end*E_block); i += blockDim.x)
    {
        int t = int(i / E_block) + l_offset;
        int d = (i % E_block);
        if (d < E) {
            shared_keys[i] = keys[n][h][t][d];
        }
    }
    for (int i = threadIdx.x; i < t_end; i += blockDim.x)
    {
        int t = i + l_offset;
        shared_betas[i] = betas[n][h][t][0];
    }
    __syncthreads();
    if (n >= N) {
        return;
    }
    int e;
    int kv_idx;
    // Load the carried-over fast weight matrix, one tile per sub-block.
    for (int sub=0; sub<subblocks_per_seq; sub++) {
        e = sub * E_per_subblock + e_local;
        kv_idx = threadIdx.x + sub * blockDim.x;
        if (e < E) {
            shared_kv[kv_idx] = kv[n][h][e][m];
        }
    }
    // init variables
    if (threadIdx.x < M) {
        // initialize RNN state
        shared_states[m] = states[n][h][0][m];
    }
    int e_abs;
    for (int t=0; t<t_end; t++) { // loop over time in the segment
        int l = t + l_offset; // absolute position in time
        int m_abs = t*M + m;
        float v_old;
        // Retrieve the "old" value currently stored for this key:
        // v_old[m] = sum_e kv[e][m] * key[e].
        for (int sub=0; sub<subblocks_per_seq; sub++) {
            e = sub * E_per_subblock + e_local;
            e_abs = t*E_block + e;
            kv_idx = threadIdx.x + sub * blockDim.x;
            if (e < E) {
                // get old value
                v_old = shared_kv[kv_idx] * shared_keys[e_abs];
                atomicAdd(
                    &shared_values_old[m],
                    v_old
                );
            }
        }
        __syncthreads();
        // compute new value to be inserted
        // if (threadIdx.x < M) {
        //   shared_values_insert[m] = shared_betas[t]
        //     * (shared_values[m_abs] - shared_values_old[m]);
        // }
        // __syncthreads();
        float res;
        // compute new value to be inserted (delta rule, gated by beta)
        float v_insert = shared_betas[t] *
          (shared_values[m_abs] - shared_values_old[m]);
        // Rank-one fast weight update, then the recurrent matvec
        // results[m] += sum_e states[e] * kv[e][m].
        for (int sub=0; sub<subblocks_per_seq; sub++) {
            e = sub * E_per_subblock + e_local;
            e_abs = t*E_block + e;
            kv_idx = threadIdx.x + sub * blockDim.x;
            if (e < E) {
                // Update fast weights
                shared_kv[kv_idx] += shared_keys[e_abs] * v_insert;
                res = shared_states[e] * shared_kv[kv_idx];
                atomicAdd(
                    &shared_results[m], // recurrent part
                    res
                );
            }
        }
        __syncthreads();
        // For stable softmax: find max over the M pre-activations.
        float max_val;
        float tmp_max;
        if (threadIdx.x < 1) { // Not parallelized! this should be improved!
            max_val = shared_results[0] + shared_inputs[t*M];
            for (int i = 1; i < M; i++) {
                tmp_max = shared_results[i] + shared_inputs[t*M + i];
                if (tmp_max > max_val) {
                    max_val = tmp_max;
                }
            }
            max_value[0] = max_val;
        }
        __syncthreads();
        // Computed by every thread (m < M always holds); only lanes with
        // threadIdx.x < M consume r1 below.
        float r1 = expf(
          shared_results[m] + shared_inputs[m_abs] - max_value[0]);
        if (threadIdx.x < M) {
            // m = threadIdx.x if threadIdx.x < M
            // sigmoid
            // float r1 = expf(shared_results[m] + shared_inputs[m_abs]);
            // tanh version
            // float r1 = tanhf(shared_results[m] + shared_inputs[m_abs]);
            // atomicAdd(
            //   &result[n][h][l][m],
            //   r1
            // );
            atomicAdd(
                &softmax_denom[0],
                r1
            );
            // shared_states[m] = r1; // state update
            // shared_results[m] = 0.f;
            // same for v_old and v_insert
            // Export v_old for reuse in the backward pass, then reset it.
            float r2 = shared_values_old[m];
            atomicAdd(
                &values_old[n][h][l][m],
                r2
            );
            shared_values_old[m] = 0.f;
        }
        __syncthreads();
        if (threadIdx.x < M) {
            // Normalize (softmax), write the output, and advance the state.
            float r3 = r1 / softmax_denom[0]; // stable?
            atomicAdd(
                &result[n][h][l][m],
                r3
            );
            shared_states[m] = r3; // state update
            shared_results[m] = 0.f;
        }
        __syncthreads();
        if (threadIdx.x < 1) {
            softmax_denom[0] = 0.f;
        }
        __syncthreads();
    }
    __syncthreads();
    // write back to kv to be carried over to the next segment.
    for (int sub=0; sub<subblocks_per_seq; sub++) {
        e = sub * E_per_subblock + e_local;
        kv_idx = threadIdx.x + sub * blockDim.x;
        if (e < E) {
            kv[n][h][e][m] = shared_kv[kv_idx];
        }
    }
    if (threadIdx.x < M) {
        states[n][h][0][m] = shared_states[m];
    }
}
// Forward driver. Splits the length-L sequence into time chunks of size T so
// that the per-block dynamic shared memory fits a 12 * 1024 float (48 KB)
// budget, and launches one kernel per chunk. The fast weight matrix `kv` and
// the RNN state `states` are carried across chunks by the kernel itself.
//
// Tensors (float32):
//   inputs, keys, values, betas : [N, H, L, *], read-only
//   v_old   : [N, H, L, M] out, retrieved old values (reused by backward)
//   states  : [N, H, 1, M] in/out, RNN state
//   kv      : [N, H, E, M] in/out, fast weight matrix
//   outputs : [N, H, L, M] out
void fast_rnn_forward(
    const torch::Tensor inputs,
    const torch::Tensor keys,
    const torch::Tensor values,
    const torch::Tensor betas,
    torch::Tensor v_old,
    torch::Tensor states, // init states
    torch::Tensor kv, // might be non zero if carried over from previous seg.
    torch::Tensor outputs
) {
    // const at::cuda::OptionalCUDAGuard device_guard(device_of(queries));
    int N = inputs.size(0);
    int H = inputs.size(1);
    int L = inputs.size(2);
    int E = inputs.size(3);
    int M = values.size(3);
    int threads = 1024;
    // Shared mem max size is 48KB (12 * 1024 floats).
    int MUL_PER_BLOCK = min(threads, E*M);
    // make sure that MUL_PER_BLOCK is divisible by M;
    MUL_PER_BLOCK = int(MUL_PER_BLOCK / M) * M;
    threads = MUL_PER_BLOCK;
    const int subblocks_per_seq = ((E*M) + threads -1) / threads;
    const int E_per_subblock = MUL_PER_BLOCK / M;
    int blocks = N*H; // total number of sequences, one block each
    // Constant shared-memory cost (floats): KV tile, plus results/states/
    // old-values (3*M), plus softmax denominator and running max (+1 +1).
    int shared_mem_const = (subblocks_per_seq * E_per_subblock + 3)*M + 1 + 1;
    // Per-timestep cost (floats): value and input (2*M), key (E_block), beta (1).
    int shared_mem_per_time = 2*M + E_per_subblock * subblocks_per_seq + 1;
    // T = max time chunk size we can afford within 12 * 1024 floats = 48 KB.
    assert(12 * 1024 - shared_mem_const > 0 &&
           "`d_head` too large. To obtain large models, keep `d_head` small"
           "e.g. 64 and increase the number of heads instead.");
    const int T = int(((12 * 1024) - shared_mem_const) / shared_mem_per_time);
    // Bug fix: if the leftover budget is smaller than one timestep's cost,
    // T == 0 and the loop below (l_offset += T) would never terminate.
    // Fail loudly instead of hanging.
    assert(T >= 1 &&
           "Shared memory budget cannot hold a single time step; "
           "reduce `d_head`.");
    const int shared_mem_forward =
        ((T*shared_mem_per_time) + shared_mem_const) * sizeof(float);
    for (int l_offset=0; l_offset < L; l_offset += T) {
        fast_rnn_forward_kernel
            <<<blocks, MUL_PER_BLOCK, shared_mem_forward>>>(
            inputs.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            betas.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            v_old.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            states.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            kv.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            outputs.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            N, H, L, E, M, E_per_subblock, subblocks_per_seq, T, l_offset
        );
    }
}
// Backward kernel
// Backward kernel for the fast RNN.
// One thread block per (sequence n, head h): blockIdx.x = n * H + h.
// Replays the sequence in reverse order (t_bw = L - 1 - t), reverse-applying
// each fast-weight update to reconstruct kv at every step, while accumulating
// gradients for inputs, keys, values and betas.
// Shared-memory layout and chunk size T are computed on the host in
// fast_rnn_backward(); tmp_grad carries the recurrent gradient between chunks.
__global__ void fast_rnn_backward_kernel(
    const float_accessor keys,
    const float_accessor values,
    const float_accessor betas,
    const float_accessor v_old,
    const float_accessor rnn_out,
    const float_accessor rnn_out_delayed,
    const float_accessor grad_out,
    float_accessor tmp_grad,
    float_accessor kv,
    float_accessor grad_kv,
    float_accessor grad_inputs,
    float_accessor grad_keys,
    float_accessor grad_values,
    float_accessor grad_betas,
    int N,
    int H,
    int L,
    int E,
    int M,
    int E_per_subblock,
    int subblocks_per_seq,
    int T,
    int l_offset
) {
    // Each block takes care of one sequence.
    // blockIdx.x = n * H + h
    int n = blockIdx.x / H;
    int h = blockIdx.x % H;
    // threadIdx.x = e_local*M + m
    // Local e coordinate within E_per_subblock sub-block.
    int e_local = threadIdx.x / M;
    int m = threadIdx.x % M;
    const int E_block = subblocks_per_seq * E_per_subblock;
    // Load the shared memory for KV
    const int shared_kv_size = E_block * M;
    // Carve the dynamically sized shared memory into named regions; total
    // size is supplied by the host launch.
    extern __shared__ float shared_mem[];
    float* shared_kv = shared_mem;                            // fast-weight matrix tile
    float* shared_grad_kv = shared_mem + shared_kv_size;      // grad w.r.t. kv
    float* shared_res_i = shared_grad_kv + shared_kv_size;    // per-step input grad [M]
    float* shared_res_k = shared_res_i + M;                   // per-step key grad [M]
    float* shared_res_v = shared_res_k + M;                   // per-step value grad [M]
    float* shared_grad_v_old = shared_res_v + M;              // per-step v_old grad [M]
    float* shared_res_beta = shared_grad_v_old + M;           // per-step beta grad (scalar)
    float* grad_sft_cst = shared_res_beta + 1;                // softmax-grad constant (scalar)
    float* shared_gradout = grad_sft_cst + 1;                 // upstream grads, T steps
    float* shared_keys = shared_gradout + M*T;
    float* shared_values = shared_keys + E_block*T;
    float* shared_rnn_out = shared_values + M*T;
    float* shared_rnn_out_delayed = shared_rnn_out + M*T;
    float* shared_v_old = shared_rnn_out_delayed + M*T;
    float* shared_betas = shared_v_old + M*T;                 // one beta per step
    float* shared_tmp_grad = shared_betas + T;                // recurrent grad carry
    // Zero the per-step accumulators (filled via atomicAdd below).
    if (threadIdx.x < M) {
        shared_res_i[m] = 0.f;
        shared_res_k[m] = 0.f;
        shared_res_v[m] = 0.f;
        shared_grad_v_old[m] = 0.f;
    }
    if (threadIdx.x < 1) {
        shared_res_beta[0] = 0.f;
        grad_sft_cst[0] = 0.f;  // offset for grad softmax
    }
    // Everythig goes backward
    // Number of valid timesteps in this chunk (last chunk may be shorter).
    int t_end = (T + l_offset) <= L ? T : L - l_offset;
    // Stage this chunk's per-step data into shared memory, reversed in time.
    for (int i = threadIdx.x; i < (t_end*M); i += blockDim.x)
    {
        int t = int(i / M) + l_offset;
        int t_bw = L - 1 - t;  // backward time index
        int d = i % M;
        shared_gradout[i] = grad_out[n][h][t_bw][d];
        shared_rnn_out[i] = rnn_out[n][h][t_bw][d];
        shared_values[i] = values[n][h][t_bw][d];
        shared_v_old[i] = v_old[n][h][t_bw][d];
    }
    for (int i = threadIdx.x; i < (t_end*E_block); i += blockDim.x)
    {
        int t = int(i / E_block) + l_offset;
        int t_bw = L - 1 - t;
        int d = (i % E_block);
        if (d < E) {
            shared_rnn_out_delayed[i] = rnn_out_delayed[n][h][t_bw][d];
            shared_keys[i] = keys[n][h][t_bw][d];
        }
    }
    for (int i = threadIdx.x; i < t_end; i += blockDim.x)
    {
        int t = i + l_offset;
        int t_bw = L - 1 - t;
        shared_betas[i] = betas[n][h][t_bw][0];
    }
    __syncthreads();
    if (n >= N) {
        return;
    }
    int e;
    int e_abs;  // absolute idx from t=0
    int kv_idx;
    // Load kv and grad_kv carried over from the previous (later-in-time) chunk.
    for (int sub=0; sub<subblocks_per_seq; sub++) {
        e = sub * E_per_subblock + e_local;
        kv_idx = threadIdx.x + sub * blockDim.x;
        if (e < E) {
            shared_kv[kv_idx] = kv[n][h][e][m];
            shared_grad_kv[kv_idx] = grad_kv[n][h][e][m];
        }
    }
    if (threadIdx.x < M) {
        // threadIdx.x = m if threadIdx.x < M
        shared_tmp_grad[m] = tmp_grad[n][h][0][m];
    }
    // Walk the chunk backwards, one timestep per iteration.
    for (int t=0; t<t_end; t++) {
        int l = t + l_offset;
        int l_b = L - l - 1;  // original (forward) time index of this step
        int m_abs = t*M + m;
        // compute constant for grad softmax: sum_m grad_m * out_m
        if (threadIdx.x < M) {
            shared_tmp_grad[m] += shared_gradout[m_abs];
            float cst = shared_tmp_grad[m] * shared_rnn_out[m_abs];
            atomicAdd(
                &grad_sft_cst[0],
                cst
            );
        }
        __syncthreads();
        if (threadIdx.x < M) {  // element-wise ops only here
            // threadIdx.x = m if threadIdx.x < M
            // add new grad to tmp grad accumulator
            // shared_tmp_grad[m] += shared_gradout[m_abs];
            // __syncthreads();
            // sigmoid
            // float grad_z =
            //     (1. - shared_rnn_out[m_abs]) * shared_rnn_out[m_abs]
            //     * shared_tmp_grad[m];
            // Softmax backward: out * (grad - sum(grad * out)).
            float grad_z = shared_rnn_out[m_abs] * (
                shared_tmp_grad[m] - grad_sft_cst[0]);
            // tanh --> grad_z = grad_h * (1- out^2)
            // float grad_z = (
            //     1. - shared_rnn_out[m_abs] * shared_rnn_out[m_abs])
            //     * shared_tmp_grad[m];
            atomicAdd(
                &shared_res_i[m],
                grad_z
            );  // grad for input
            shared_tmp_grad[m] = 0.f;  // prepare grad for the next step.
        }
        __syncthreads();  // important to sync
        if (threadIdx.x < 1) {
            grad_sft_cst[0] = 0.f;
        }
        // Value insertion that was applied at this step in the forward pass.
        float v_diff = shared_values[m_abs] - shared_v_old[m_abs];
        float v_ins = v_diff * shared_betas[t];
        for (int sub=0; sub<subblocks_per_seq; sub++) {
            e = sub * E_per_subblock + e_local;
            e_abs = t*E_block + e;
            kv_idx = threadIdx.x + sub * blockDim.x;
            if (e < E) {
                // grad rec weight part 1
                shared_grad_kv[kv_idx] +=
                    shared_res_i[m] * shared_rnn_out_delayed[e_abs];
                // grad v
                float res = shared_keys[e_abs] * shared_grad_kv[kv_idx]
                    * shared_betas[t];
                atomicAdd(
                    &shared_res_v[m],
                    res
                );
                // grad k part 1 and 2
                float res_k = shared_grad_kv[kv_idx] * v_ins;
                atomicAdd(
                    &shared_res_k[e],
                    res_k
                );
                // grad beta
                float res_beta = shared_grad_kv[kv_idx] * shared_keys[e_abs]
                    * v_diff;
                atomicAdd(
                    &shared_res_beta[0],
                    res_beta
                );
                // Recurrent gradient for the previous timestep's output.
                float res_h = shared_res_i[m] * shared_kv[kv_idx];
                atomicAdd(
                    &shared_tmp_grad[e],
                    res_h
                );
            }
        }
        __syncthreads();
        for (int sub=0; sub<subblocks_per_seq; sub++) {
            e = sub * E_per_subblock + e_local;
            e_abs = t*E_block + e;
            kv_idx = threadIdx.x + sub * blockDim.x;
            if (e < E) {
                // reverse update kv (undo this step's fast-weight write)
                shared_kv[kv_idx] -= shared_keys[e_abs] * v_ins;
                // grad v_old
                float res_v_old = - (shared_grad_kv[kv_idx] * shared_betas[t]
                    * shared_keys[e_abs]);
                atomicAdd(
                    &shared_grad_v_old[m],
                    res_v_old
                );
            }
        }
        __syncthreads();
        // remaining key grad
        for (int sub=0; sub<subblocks_per_seq; sub++) {
            e = sub * E_per_subblock + e_local;
            e_abs = t*E_block + e;
            kv_idx = threadIdx.x + sub * blockDim.x;
            if (e < E) {
                float res_kp3 = shared_grad_v_old[m] * shared_kv[kv_idx];
                atomicAdd(
                    &shared_res_k[e],
                    res_kp3
                );  // remaining key grad
                // grad kv via v old
                shared_grad_kv[kv_idx] +=
                    shared_grad_v_old[m] * shared_keys[e_abs];
            }
        }
        __syncthreads();
        // Flush this step's accumulators to global memory and reset them.
        if (threadIdx.x < M) {
            // m = threadIdx.x if threadIdx.x < M
            float ri = shared_res_i[m];
            atomicAdd(
                &grad_inputs[n][h][l_b][m],
                ri
            );
            float rk = shared_res_k[m];
            atomicAdd(
                &grad_keys[n][h][l_b][m],
                rk
            );
            float rv = shared_res_v[m];
            atomicAdd(
                &grad_values[n][h][l_b][m],
                rv
            );
            shared_res_i[m] = 0.f;
            shared_res_k[m] = 0.f;
            shared_res_v[m] = 0.f;
            shared_grad_v_old[m] = 0.f;
        }
        __syncthreads();
        if (threadIdx.x < 1) {
            float r3 = shared_res_beta[0];
            atomicAdd(
                &grad_betas[n][h][l_b][0],
                r3
            );
            shared_res_beta[0] = 0.f;
        }
        __syncthreads();
    }
    __syncthreads();
    // write back temporal gradients (carried into the next, earlier chunk).
    for (int sub=0; sub<subblocks_per_seq; sub++) {
        e = sub * E_per_subblock + e_local;
        kv_idx = threadIdx.x + sub * blockDim.x;
        if (e < E) {
            kv[n][h][e][m] = shared_kv[kv_idx];
            grad_kv[n][h][e][m] = shared_grad_kv[kv_idx];
        }
    }
    if (threadIdx.x < M) {
        // threadIdx.x = m if threadIdx.x < M
        tmp_grad[n][h][0][m] = shared_tmp_grad[m];
    }
}
// Backward
// Backward-pass launcher: allocates the gradient fast-weight matrix and the
// recurrent gradient carry, then replays the sequence backwards in chunks of
// T timesteps so the shared-memory footprint stays under 48KB.
void fast_rnn_backward(
    const torch::Tensor keys,
    const torch::Tensor values,
    const torch::Tensor betas,
    const torch::Tensor v_old,
    const torch::Tensor grad_out,
    const torch::Tensor outputs,
    const torch::Tensor o_delayed,
    torch::Tensor fw_mem,  // from the forward pass.
    torch::Tensor grad_in,
    torch::Tensor grad_k,
    torch::Tensor grad_v,
    torch::Tensor grad_beta
) {
    // const at::cuda::OptionalCUDAGuard device_guard(device_of(grad_queries));
    const int N = keys.size(0);
    const int H = keys.size(1);
    const int L = keys.size(2);
    const int E = keys.size(3);
    const int M = values.size(3);

    // Scratch: gradient w.r.t. the fast-weight matrix, and the recurrent
    // gradient carried from step t+1 back to step t.
    auto grad_kv = torch::zeros({N, H, E, M}, keys.options());
    auto tmp_grad = torch::zeros({N, H, 1, M}, keys.options());

    // Threads per block: at most 512, rounded down to a multiple of M.
    int mpb = min(512, E * M);
    mpb = (mpb / M) * M;
    const int n_subblocks = ((E * M) + mpb - 1) / mpb;
    const int e_per_subblock = mpb / M;
    const int n_blocks = N * H;
    const int e_block = e_per_subblock * n_subblocks;

    // Static shared floats: kv and grad_kv tiles (2 * E_block * M), four
    // per-step result buffers (4 * M) and two scalars. NOTE(review): must
    // stay in sync with the carve-up in fast_rnn_backward_kernel -- verify.
    const int smem_const = (2 * e_block + 4) * M + 2;
    // Per-timestep shared floats (grad_out/values/rnn_out/v_old/carry tiles,
    // key tile and one beta) -- see the kernel's shared-memory layout.
    const int smem_per_step = 6 * M + e_block + 1;
    assert(12 * 1024 - smem_const > 0 &&
           "`d_head` too large. To obtain large models, keep `d_head` small"
           "e.g. 64 and increase the number of heads instead.");
    const int T = (12 * 1024 - smem_const) / smem_per_step;
    const int smem_bytes = (T * smem_per_step + smem_const) * sizeof(float);

    for (int l_offset = 0; l_offset < L; l_offset += T) {
        fast_rnn_backward_kernel
            <<<n_blocks, mpb, smem_bytes>>>(
            keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            betas.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            v_old.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            outputs.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            o_delayed.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            tmp_grad.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            fw_mem.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            grad_kv.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            grad_in.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            grad_k.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            grad_v.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            grad_beta.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
            N, H, L, E, M, e_per_subblock, n_subblocks, T, l_offset
        );
    }
}
// Python bindings for the fast-RNN CUDA extension.
// Fix: the forward docstring previously described a causal attention op
// ("weighted sum of values but attending only to previous values"), which
// was copy-pasted from a different extension and did not match this module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def(
        "fast_rnn_forward",
        &fast_rnn_forward,
        "Run the fast-weight RNN forward pass, updating the fast-weight "
        "memory `kv` and the recurrent state in place."
    );
    m.def(
        "fast_rnn_backward",
        &fast_rnn_backward,
        "Compute the gradients for the fast weight memory."
    );
}
|
61aa8ef7cae3b08b2b347b060ec144d89e857f46.hip | // !!! This is a file automatically generated by hipify!!!
/*
* (C) Copyright 1996-2016 ECMWF.
*
* This software is licensed under the terms of the Apache Licence Version 2.0
* which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
* In applying this licence, ECMWF does not waive the privileges and immunities
* granted to it by virtue of its status as an intergovernmental organisation nor
* does it submit to any jurisdiction.
*/
#include <hip/hip_runtime.h>
#include "tests/AtlasTestEnvironment.h"
#include "atlas/array.h"
#include "atlas/array/MakeView.h"
#include "atlas/runtime/Log.h"
using namespace atlas::array;
namespace atlas {
namespace test {
// Single-thread kernel: adds the total element count of the view's first
// three dimensions onto the entry at (3, 3, 3). Launched 1x1 by the test.
template<typename Value, int RANK>
__global__
void kernel_ex(array::ArrayView<Value, RANK> dv)
{
    const auto total = dv.data_view().template length<0>()
                     * dv.data_view().template length<1>()
                     * dv.data_view().template length<2>();
    dv(3, 3, 3) += total;
}
// Single-thread kernel: adds the position-dependent value
// i*10 + j*100 + k*1000 to every element of the 3D view.
template<typename Value, int RANK>
__global__
void loop_kernel_ex(array::ArrayView<Value, RANK> dv)
{
    const int ni = dv.data_view().template length<0>();
    const int nj = dv.data_view().template length<1>();
    const int nk = dv.data_view().template length<2>();
    for (int i = 0; i < ni; ++i)
        for (int j = 0; j < nj; ++j)
            for (int k = 0; k < nk; ++k)
                dv(i, j, k) += i * 10 + j * 100 + k * 1000;
}
// Host -> device -> host round trip for a single element: write on the
// host, clone to device, bump it in a kernel, clone back, and check the
// host view sees the kernel's update.
CASE( "test_array" )
{
    constexpr unsigned int dx = 5;
    constexpr unsigned int dy = 6;
    constexpr unsigned int dz = 7;
    Array* ds = Array::create<double>(dx, dy, dz);
    auto hv = make_host_view<double, 3>(*ds);
    hv(3, 3, 3) = 4.5;
    ds->cloneToDevice();
    auto cv = make_device_view<double, 3>(*ds);
    hipLaunchKernelGGL(( kernel_ex), dim3(1),dim3(1), 0, 0, cv);
    hipDeviceSynchronize();
    ds->cloneFromDevice();
    ds->reactivateHostWriteViews();
    // kernel_ex added the total element count (dx*dy*dz) to entry (3,3,3).
    EXPECT( hv(3, 3, 3) == 4.5 + dx*dy*dz );
    delete ds;
}
// Element-wise device update across the whole 3D array: zero on host,
// clone to device, fill with i*10 + j*100 + k*1000 in a single-thread
// kernel, clone back, and verify every element on the host.
CASE( "test_array_loop" )
{
    constexpr unsigned int dx = 5;
    constexpr unsigned int dy = 6;
    constexpr unsigned int dz = 7;
    Array* ds = Array::create<double>(dx, dy, dz);
    array::ArrayView<double,3> hv = make_host_view<double, 3>(*ds);
    // Zero-initialise on the host so the kernel's += is well-defined.
    for(int i=0; i < dx; i++) {
        for(int j=0; j < dy; j++) {
            for(int k=0; k < dz; k++) {
                hv(i,j,k) = 0;
            }
        }
    }
    ds->cloneToDevice();
    auto cv = make_device_view<double, 3>(*ds);
    hipLaunchKernelGGL(( loop_kernel_ex), dim3(1),dim3(1), 0, 0, cv);
    hipDeviceSynchronize();
    ds->cloneFromDevice();
    ds->reactivateHostWriteViews();
    for(int i=0; i < dx; i++) {
        for(int j=0; j < dy; j++) {
            for(int k=0; k < dz; k++) {
                EXPECT( hv(i,j,k) == i*10+j*100+k*1000 );
            }
        }
    }
    delete ds;
}
}
}
// Entry point: delegate to the Atlas test runner.
int main(int argc, char **argv) {
    return atlas::test::run( argc, argv );
}
| 61aa8ef7cae3b08b2b347b060ec144d89e857f46.cu | /*
* (C) Copyright 1996-2016 ECMWF.
*
* This software is licensed under the terms of the Apache Licence Version 2.0
* which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
* In applying this licence, ECMWF does not waive the privileges and immunities
* granted to it by virtue of its status as an intergovernmental organisation nor
* does it submit to any jurisdiction.
*/
#include <cuda_runtime.h>
#include "tests/AtlasTestEnvironment.h"
#include "atlas/array.h"
#include "atlas/array/MakeView.h"
#include "atlas/runtime/Log.h"
using namespace atlas::array;
namespace atlas {
namespace test {
// Single-thread kernel: adds the total element count of the view's first
// three dimensions onto the entry at (3, 3, 3). Launched 1x1 by the test.
template<typename Value, int RANK>
__global__
void kernel_ex(array::ArrayView<Value, RANK> dv)
{
    const auto total = dv.data_view().template length<0>()
                     * dv.data_view().template length<1>()
                     * dv.data_view().template length<2>();
    dv(3, 3, 3) += total;
}
// Single-thread kernel: adds the position-dependent value
// i*10 + j*100 + k*1000 to every element of the 3D view.
template<typename Value, int RANK>
__global__
void loop_kernel_ex(array::ArrayView<Value, RANK> dv)
{
    const int ni = dv.data_view().template length<0>();
    const int nj = dv.data_view().template length<1>();
    const int nk = dv.data_view().template length<2>();
    for (int i = 0; i < ni; ++i)
        for (int j = 0; j < nj; ++j)
            for (int k = 0; k < nk; ++k)
                dv(i, j, k) += i * 10 + j * 100 + k * 1000;
}
// Host -> device -> host round trip for a single element: write on the
// host, clone to device, bump it in a kernel, clone back, and check the
// host view sees the kernel's update.
CASE( "test_array" )
{
    constexpr unsigned int dx = 5;
    constexpr unsigned int dy = 6;
    constexpr unsigned int dz = 7;
    Array* ds = Array::create<double>(dx, dy, dz);
    auto hv = make_host_view<double, 3>(*ds);
    hv(3, 3, 3) = 4.5;
    ds->cloneToDevice();
    auto cv = make_device_view<double, 3>(*ds);
    kernel_ex<<<1,1>>>(cv);
    cudaDeviceSynchronize();
    ds->cloneFromDevice();
    ds->reactivateHostWriteViews();
    // kernel_ex added the total element count (dx*dy*dz) to entry (3,3,3).
    EXPECT( hv(3, 3, 3) == 4.5 + dx*dy*dz );
    delete ds;
}
// Element-wise device update across the whole 3D array: zero on host,
// clone to device, fill with i*10 + j*100 + k*1000 in a single-thread
// kernel, clone back, and verify every element on the host.
CASE( "test_array_loop" )
{
    constexpr unsigned int dx = 5;
    constexpr unsigned int dy = 6;
    constexpr unsigned int dz = 7;
    Array* ds = Array::create<double>(dx, dy, dz);
    array::ArrayView<double,3> hv = make_host_view<double, 3>(*ds);
    // Zero-initialise on the host so the kernel's += is well-defined.
    for(int i=0; i < dx; i++) {
        for(int j=0; j < dy; j++) {
            for(int k=0; k < dz; k++) {
                hv(i,j,k) = 0;
            }
        }
    }
    ds->cloneToDevice();
    auto cv = make_device_view<double, 3>(*ds);
    loop_kernel_ex<<<1,1>>>(cv);
    cudaDeviceSynchronize();
    ds->cloneFromDevice();
    ds->reactivateHostWriteViews();
    for(int i=0; i < dx; i++) {
        for(int j=0; j < dy; j++) {
            for(int k=0; k < dz; k++) {
                EXPECT( hv(i,j,k) == i*10+j*100+k*1000 );
            }
        }
    }
    delete ds;
}
}
}
// Entry point: delegate to the Atlas test runner.
int main(int argc, char **argv) {
    return atlas::test::run( argc, argv );
}
|
7d3f29742a21d62985045d9de9defd70053adbd2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include "../include/pagerank.h"
#define H2D (hipMemcpyHostToDevice)
#define D2H (hipMemcpyDeviceToHost)
int MONTE_CARLO = 0;
const int threads_per_block = 512;
/*
__global__ void compute(const int num_active_nodes, int* active_nodes, float* value, float* new_value, const int* rowdeg, const int* colptr, const int* row, const int* col)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < num_active_nodes) {
int n = active_nodes[tid];
new_value[n] = 1 - alpha;
for (int e = colptr[n]; e < colptr[n + 1]; e++) {
new_value[n] += alpha * value[row[e]] / (float)rowdeg[row[e]];
}
}
}
*/
// One PageRank update per active node, traversing in-edges via the
// (colptr, row) structure. Each thread accumulates its node's new rank in
// a shared-memory tile (one private slot per thread) instead of repeatedly
// updating new_value in global memory; the slots are never shared, so no
// synchronization is needed.
__global__ void compute(const int num_active_nodes, int* active_nodes, float* value, float* new_value, const int* rowdeg, const int* colptr, const int* row, const int* col)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    __shared__ float tile[threads_per_block];
    // Base rank: the damping term (1 - alpha). Written by every thread,
    // including those past num_active_nodes, but each touches only its slot.
    tile[threadIdx.x] = 1 - alpha;
    if (tid < num_active_nodes) {
        int n = active_nodes[tid];
        // Sum contributions from n's in-neighbours, each scaled by the
        // source node's out-degree (rowdeg).
        for (int e = colptr[n]; e < colptr[n + 1]; e++) {
            tile[threadIdx.x] += alpha * value[row[e]] / (float)rowdeg[row[e]];
        }
        new_value[n] = tile[threadIdx.x];
    }
}
// Marks which active nodes have not yet converged: is_next_nodes[tid] is 1
// when node tid's rank changed by more than epsilon, 0 otherwise. The flag
// array is prefix-summed afterwards to compact the next frontier.
__global__ void find_active(const int num_active_nodes, int* active_nodes, float* value, float* new_value, int* is_next_nodes)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < num_active_nodes) {
        int n = active_nodes[tid];
        // fabsf: explicit single-precision absolute value. The generic abs()
        // used before can resolve to the integer overload and silently
        // truncate the sub-epsilon fractional difference to zero.
        is_next_nodes[tid] = fabsf(value[n] - new_value[n]) > epsilon ? 1 : 0;
    }
}
// Commits the freshly computed ranks: value[n] = new_value[n] for every
// node in the active frontier (one thread per frontier entry).
__global__ void copy_value(const int num_active_nodes, int* active_nodes, float* value, float* new_value)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= num_active_nodes)
        return;
    const int node = active_nodes[tid];
    value[node] = new_value[node];
}
// Stream-compaction step. After the exclusive prefix sum over the 0/1
// convergence flags, is_next_nodes[tid] < is_next_nodes[tid + 1] holds
// exactly for threads whose flag was 1, and is_next_nodes[tid] is that
// thread's slot in the compacted output.
__global__ void coalesce_next_active(const int num_active_nodes, int* active_nodes, int* next_nodes, int* is_next_nodes)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < num_active_nodes && is_next_nodes[tid] < is_next_nodes[tid + 1]) {
        next_nodes[is_next_nodes[tid]] = active_nodes[tid];
    }
}
// Makes the compacted frontier the current one for the next iteration.
__global__ void copy_active(const int num_active_nodes, int* active_nodes, int* next_nodes)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= num_active_nodes)
        return;
    active_nodes[tid] = next_nodes[tid];
}
// Frontier-based PageRank on the GPU.
// value: in/out rank per node; rowdeg: out-degree per node; (colptr, row):
// in-edge structure; col is copied to the device but unused by the kernels.
// Iterates compute -> convergence test -> stream compaction until the
// active frontier is empty, then copies the final ranks back to the host.
// Fix: the host-side `active_nodes` seed array was leaked (new[] with no
// delete[]); it is now released once the device frontier is initialised.
void pagerank(const int nodes, const int edges, float* value, const int* rowdeg, const int* colptr, const int* row, const int* col)
{
    float *d_value, *d_new_value;
    int *d_active_nodes, *d_is_next_nodes, *d_next_nodes, *d_rowdeg, *d_colptr, *d_row, *d_col;
    int num_active_nodes = nodes;
    // Seed the frontier with every node.
    int* active_nodes = new int[nodes];
    for (int n = 0; n < nodes; n++) {
        active_nodes[n] = n;
    }
    hipMalloc(&d_value, sizeof(float) * nodes);
    hipMalloc(&d_new_value, sizeof(float) * nodes);
    hipMalloc(&d_active_nodes, sizeof(int) * nodes);
    // +1 slot so the exclusive scan's total lands at index num_active_nodes.
    hipMalloc(&d_is_next_nodes, sizeof(int) * (nodes + 1));
    hipMalloc(&d_next_nodes, sizeof(int) * nodes);
    hipMalloc(&d_rowdeg, sizeof(int) * nodes);
    hipMalloc(&d_colptr, sizeof(int) * (nodes + 1));
    hipMalloc(&d_row, sizeof(int) * edges);
    hipMalloc(&d_col, sizeof(int) * edges);
    hipMemcpy(d_value, value, sizeof(float) * nodes, H2D);
    hipMemcpy(d_active_nodes, active_nodes, sizeof(int) * nodes, H2D);
    hipMemcpy(d_rowdeg, rowdeg, sizeof(int) * nodes, H2D);
    hipMemcpy(d_colptr, colptr, sizeof(int) * (nodes + 1), H2D);
    hipMemcpy(d_row, row, sizeof(int) * edges, H2D);
    hipMemcpy(d_col, col, sizeof(int) * edges, H2D);
    // The seed array is only needed to initialise the device frontier.
    delete[] active_nodes;
    while (true) {
        hipLaunchKernelGGL(( compute), dim3(num_active_nodes/threads_per_block+1),dim3(threads_per_block), 0, 0, num_active_nodes, d_active_nodes, d_value, d_new_value, d_rowdeg, d_colptr, d_row, d_col);
        hipLaunchKernelGGL(( find_active), dim3(num_active_nodes/threads_per_block+1),dim3(threads_per_block), 0, 0, num_active_nodes, d_active_nodes, d_value, d_new_value, d_is_next_nodes);
        hipLaunchKernelGGL(( copy_value), dim3(num_active_nodes/threads_per_block+1),dim3(threads_per_block), 0, 0, num_active_nodes, d_active_nodes, d_value, d_new_value);
        // Exclusive prefix sum over the 0/1 flags; the final entry is the
        // size of the next frontier.
        thrust::exclusive_scan(thrust::device, d_is_next_nodes, d_is_next_nodes + num_active_nodes + 1, d_is_next_nodes);
        hipLaunchKernelGGL(( coalesce_next_active), dim3(num_active_nodes/threads_per_block+1), dim3(threads_per_block), 0, 0, num_active_nodes, d_active_nodes, d_next_nodes, d_is_next_nodes);
        hipMemcpy(&num_active_nodes, &d_is_next_nodes[num_active_nodes], sizeof(int), D2H);
        if (num_active_nodes == 0)
            break;
        hipLaunchKernelGGL(( copy_active), dim3(num_active_nodes/threads_per_block+1),dim3(threads_per_block), 0, 0, num_active_nodes, d_active_nodes, d_next_nodes);
    }
    hipMemcpy(value, d_value, sizeof(float) * nodes, D2H);
    hipFree(d_value);
    hipFree(d_new_value);
    hipFree(d_active_nodes);
    hipFree(d_is_next_nodes);
    hipFree(d_next_nodes);
    hipFree(d_rowdeg);
    hipFree(d_colptr);
    hipFree(d_row);
    hipFree(d_col);
}
| 7d3f29742a21d62985045d9de9defd70053adbd2.cu | #include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include "../include/pagerank.h"
#define H2D (cudaMemcpyHostToDevice)
#define D2H (cudaMemcpyDeviceToHost)
int MONTE_CARLO = 0;
const int threads_per_block = 512;
/*
__global__ void compute(const int num_active_nodes, int* active_nodes, float* value, float* new_value, const int* rowdeg, const int* colptr, const int* row, const int* col)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < num_active_nodes) {
int n = active_nodes[tid];
new_value[n] = 1 - alpha;
for (int e = colptr[n]; e < colptr[n + 1]; e++) {
new_value[n] += alpha * value[row[e]] / (float)rowdeg[row[e]];
}
}
}
*/
// One PageRank update per active node, traversing in-edges via the
// (colptr, row) structure. Each thread accumulates its node's new rank in
// a shared-memory tile (one private slot per thread) instead of repeatedly
// updating new_value in global memory; the slots are never shared, so no
// synchronization is needed.
__global__ void compute(const int num_active_nodes, int* active_nodes, float* value, float* new_value, const int* rowdeg, const int* colptr, const int* row, const int* col)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    __shared__ float tile[threads_per_block];
    // Base rank: the damping term (1 - alpha). Written by every thread,
    // including those past num_active_nodes, but each touches only its slot.
    tile[threadIdx.x] = 1 - alpha;
    if (tid < num_active_nodes) {
        int n = active_nodes[tid];
        // Sum contributions from n's in-neighbours, each scaled by the
        // source node's out-degree (rowdeg).
        for (int e = colptr[n]; e < colptr[n + 1]; e++) {
            tile[threadIdx.x] += alpha * value[row[e]] / (float)rowdeg[row[e]];
        }
        new_value[n] = tile[threadIdx.x];
    }
}
// Marks which active nodes have not yet converged: is_next_nodes[tid] is 1
// when node tid's rank changed by more than epsilon, 0 otherwise. The flag
// array is prefix-summed afterwards to compact the next frontier.
__global__ void find_active(const int num_active_nodes, int* active_nodes, float* value, float* new_value, int* is_next_nodes)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < num_active_nodes) {
        int n = active_nodes[tid];
        // fabsf: explicit single-precision absolute value. The generic abs()
        // used before can resolve to the integer overload and silently
        // truncate the sub-epsilon fractional difference to zero.
        is_next_nodes[tid] = fabsf(value[n] - new_value[n]) > epsilon ? 1 : 0;
    }
}
// Commits the freshly computed ranks: value[n] = new_value[n] for every
// node in the active frontier (one thread per frontier entry).
__global__ void copy_value(const int num_active_nodes, int* active_nodes, float* value, float* new_value)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= num_active_nodes)
        return;
    const int node = active_nodes[tid];
    value[node] = new_value[node];
}
// Stream-compaction step. After the exclusive prefix sum over the 0/1
// convergence flags, is_next_nodes[tid] < is_next_nodes[tid + 1] holds
// exactly for threads whose flag was 1, and is_next_nodes[tid] is that
// thread's slot in the compacted output.
__global__ void coalesce_next_active(const int num_active_nodes, int* active_nodes, int* next_nodes, int* is_next_nodes)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < num_active_nodes && is_next_nodes[tid] < is_next_nodes[tid + 1]) {
        next_nodes[is_next_nodes[tid]] = active_nodes[tid];
    }
}
// Makes the compacted frontier the current one for the next iteration.
__global__ void copy_active(const int num_active_nodes, int* active_nodes, int* next_nodes)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= num_active_nodes)
        return;
    active_nodes[tid] = next_nodes[tid];
}
// Frontier-based PageRank on the GPU.
// value: in/out rank per node; rowdeg: out-degree per node; (colptr, row):
// in-edge structure; col is copied to the device but unused by the kernels.
// Iterates compute -> convergence test -> stream compaction until the
// active frontier is empty, then copies the final ranks back to the host.
// Fix: the host-side `active_nodes` seed array was leaked (new[] with no
// delete[]); it is now released once the device frontier is initialised.
void pagerank(const int nodes, const int edges, float* value, const int* rowdeg, const int* colptr, const int* row, const int* col)
{
    float *d_value, *d_new_value;
    int *d_active_nodes, *d_is_next_nodes, *d_next_nodes, *d_rowdeg, *d_colptr, *d_row, *d_col;
    int num_active_nodes = nodes;
    // Seed the frontier with every node.
    int* active_nodes = new int[nodes];
    for (int n = 0; n < nodes; n++) {
        active_nodes[n] = n;
    }
    cudaMalloc(&d_value, sizeof(float) * nodes);
    cudaMalloc(&d_new_value, sizeof(float) * nodes);
    cudaMalloc(&d_active_nodes, sizeof(int) * nodes);
    // +1 slot so the exclusive scan's total lands at index num_active_nodes.
    cudaMalloc(&d_is_next_nodes, sizeof(int) * (nodes + 1));
    cudaMalloc(&d_next_nodes, sizeof(int) * nodes);
    cudaMalloc(&d_rowdeg, sizeof(int) * nodes);
    cudaMalloc(&d_colptr, sizeof(int) * (nodes + 1));
    cudaMalloc(&d_row, sizeof(int) * edges);
    cudaMalloc(&d_col, sizeof(int) * edges);
    cudaMemcpy(d_value, value, sizeof(float) * nodes, H2D);
    cudaMemcpy(d_active_nodes, active_nodes, sizeof(int) * nodes, H2D);
    cudaMemcpy(d_rowdeg, rowdeg, sizeof(int) * nodes, H2D);
    cudaMemcpy(d_colptr, colptr, sizeof(int) * (nodes + 1), H2D);
    cudaMemcpy(d_row, row, sizeof(int) * edges, H2D);
    cudaMemcpy(d_col, col, sizeof(int) * edges, H2D);
    // The seed array is only needed to initialise the device frontier.
    delete[] active_nodes;
    while (true) {
        compute<<<num_active_nodes/threads_per_block+1,threads_per_block>>>(num_active_nodes, d_active_nodes, d_value, d_new_value, d_rowdeg, d_colptr, d_row, d_col);
        find_active<<<num_active_nodes/threads_per_block+1,threads_per_block>>>(num_active_nodes, d_active_nodes, d_value, d_new_value, d_is_next_nodes);
        copy_value<<<num_active_nodes/threads_per_block+1,threads_per_block>>>(num_active_nodes, d_active_nodes, d_value, d_new_value);
        // Exclusive prefix sum over the 0/1 flags; the final entry is the
        // size of the next frontier.
        thrust::exclusive_scan(thrust::device, d_is_next_nodes, d_is_next_nodes + num_active_nodes + 1, d_is_next_nodes);
        coalesce_next_active<<<num_active_nodes/threads_per_block+1, threads_per_block>>>(num_active_nodes, d_active_nodes, d_next_nodes, d_is_next_nodes);
        cudaMemcpy(&num_active_nodes, &d_is_next_nodes[num_active_nodes], sizeof(int), D2H);
        if (num_active_nodes == 0)
            break;
        copy_active<<<num_active_nodes/threads_per_block+1,threads_per_block>>>(num_active_nodes, d_active_nodes, d_next_nodes);
    }
    cudaMemcpy(value, d_value, sizeof(float) * nodes, D2H);
    cudaFree(d_value);
    cudaFree(d_new_value);
    cudaFree(d_active_nodes);
    cudaFree(d_is_next_nodes);
    cudaFree(d_next_nodes);
    cudaFree(d_rowdeg);
    cudaFree(d_colptr);
    cudaFree(d_row);
    cudaFree(d_col);
}
|
b0fafd689dd8a6624588047fdf16d8e757f07745.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#ifndef _SCAN_BEST_KERNEL_H_
#define _SCAN_BEST_KERNEL_H_
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index) cutilBankChecker(temp, index)
#else
#define TEMP(index) temp[index]
#endif
///////////////////////////////////////////////////////////////////////////////
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses
// n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS)
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
//
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// Excellent paper "Prefix sums and their applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//
// @param g_odata output data in global memory
// @param g_idata input data in global memory
// @param n input number of elements to scan from input data
// Work-efficient block-level exclusive scan (Blelloch up-sweep/down-sweep).
// NOTE(review): appears to assume n is a power of two and a launch of a
// single block with n/2 threads plus enough padded dynamic shared memory
// for n elements -- verify at the call site.
__global__ void scan_best(int *g_odata, int *g_idata, int n)
{
    // Dynamically allocated shared memory for scan kernels
    extern __shared__ int temp[];
    int thid = threadIdx.x;
    // Each thread handles two elements: one from each half of the input.
    int ai = thid;
    int bi = thid + (n/2);
    // compute spacing to avoid bank conflicts
    int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
    int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
    // Cache the computational window in shared memory
    TEMP(ai + bankOffsetA) = g_idata[ai];
    TEMP(bi + bankOffsetB) = g_idata[bi];
    int offset = 1;
    // Up-sweep (reduce): build the sum in place up the tree
    for (int d = n/2; d > 0; d >>= 1)
    {
        __syncthreads();
        if (thid < d)
        {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            TEMP(bi) += TEMP(ai);
        }
        offset *= 2;
    }
    // scan back down the tree
    // clear the last element (identity element of the exclusive scan)
    if (thid == 0)
    {
        int index = n - 1;
        index += CONFLICT_FREE_OFFSET(index);
        TEMP(index) = 0;
    }
    // Down-sweep: traverse down the tree building the scan in place
    for (int d = 1; d < n; d *= 2)
    {
        offset /= 2;
        __syncthreads();
        if (thid < d)
        {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            // Swap-and-accumulate: left child takes the parent's prefix,
            // right child takes parent's prefix plus the left subtree sum.
            int t = TEMP(ai);
            TEMP(ai) = TEMP(bi);
            TEMP(bi) += t;
        }
    }
    __syncthreads();
    // write results (exclusive prefix sums) to global memory
    g_odata[ai] = TEMP(ai + bankOffsetA);
    g_odata[bi] = TEMP(bi + bankOffsetB);
}
#endif // #ifndef _SCAN_BEST_KERNEL_H_
| b0fafd689dd8a6624588047fdf16d8e757f07745.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#ifndef _SCAN_BEST_KERNEL_H_
#define _SCAN_BEST_KERNEL_H_
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index) cutilBankChecker(temp, index)
#else
#define TEMP(index) temp[index]
#endif
///////////////////////////////////////////////////////////////////////////////
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses
// n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS)
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
//
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// Excellent paper "Prefix sums and their applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//
// @param g_odata output data in global memory
// @param g_idata input data in global memory
// @param n input number of elements to scan from input data
// Work-efficient block-level exclusive scan (Blelloch up-sweep/down-sweep).
// NOTE(review): appears to assume n is a power of two and a launch of a
// single block with n/2 threads plus enough padded dynamic shared memory
// for n elements -- verify at the call site.
__global__ void scan_best(int *g_odata, int *g_idata, int n)
{
    // Dynamically allocated shared memory for scan kernels
    extern __shared__ int temp[];
    int thid = threadIdx.x;
    // Each thread handles two elements: one from each half of the input.
    int ai = thid;
    int bi = thid + (n/2);
    // compute spacing to avoid bank conflicts
    int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
    int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
    // Cache the computational window in shared memory
    TEMP(ai + bankOffsetA) = g_idata[ai];
    TEMP(bi + bankOffsetB) = g_idata[bi];
    int offset = 1;
    // Up-sweep (reduce): build the sum in place up the tree
    for (int d = n/2; d > 0; d >>= 1)
    {
        __syncthreads();
        if (thid < d)
        {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            TEMP(bi) += TEMP(ai);
        }
        offset *= 2;
    }
    // scan back down the tree
    // clear the last element (identity element of the exclusive scan)
    if (thid == 0)
    {
        int index = n - 1;
        index += CONFLICT_FREE_OFFSET(index);
        TEMP(index) = 0;
    }
    // Down-sweep: traverse down the tree building the scan in place
    for (int d = 1; d < n; d *= 2)
    {
        offset /= 2;
        __syncthreads();
        if (thid < d)
        {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            // Swap-and-accumulate: left child takes the parent's prefix,
            // right child takes parent's prefix plus the left subtree sum.
            int t = TEMP(ai);
            TEMP(ai) = TEMP(bi);
            TEMP(bi) += t;
        }
    }
    __syncthreads();
    // write results (exclusive prefix sums) to global memory
    g_odata[ai] = TEMP(ai + bankOffsetA);
    g_odata[bi] = TEMP(bi + bankOffsetB);
}
#endif // #ifndef _SCAN_BEST_KERNEL_H_
|
b0c118adc9f363cdda1d3bc499c043170d97c47e.hip | // !!! This is a file automatically generated by hipify!!!
/**********************************************************************
Copyright 2013 Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************/
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <chrono>
#include <hip/hip_runtime.h>
#include "kernels.hip"
// Run one tone-mapping pass on the device and time the kernel alone.
//
// Copies the RGBA float image to the device, launches the toneMapping kernel
// over 16x16 pixel tiles (width and height are assumed to be multiples of 16
// by the caller), and copies the result back to the host.
//
// Returns the kernel execution time in nanoseconds (transfers excluded).
double runKernels(
    float *inputImageBuffer,
    float *outputImageBuffer,
    const float *input,
    float *output,
    const float averageLuminance,
    const float gamma,
    const float c,
    const float delta,
    const uint width,
    const uint numChannels,
    const uint height)
{
  // Stage the input image on the device.
  const size_t imageBytes = sizeof(float) * width * height * numChannels;
  hipMemcpy(inputImageBuffer, input, imageBytes, hipMemcpyHostToDevice);

  // One 16x16 thread tile per 16x16 pixel tile.
  const dim3 threads(16, 16);
  const dim3 blocks(width / 16, height / 16);

  // Drain any pending device work so the timer brackets only this kernel.
  hipDeviceSynchronize();
  const auto t0 = std::chrono::steady_clock::now();
  hipLaunchKernelGGL(toneMapping, blocks, threads, 0, 0, inputImageBuffer, outputImageBuffer,
                     averageLuminance, gamma, c, delta, width, numChannels, height);
  hipDeviceSynchronize();
  const auto t1 = std::chrono::steady_clock::now();

  // Copy the tone-mapped image back to the host.
  hipMemcpy(output, outputImageBuffer, imageBytes, hipMemcpyDeviceToHost);

  return std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count();
}
// Rec. 709 luminance of an RGB triple; used by both the average-luminance
// pass and the host reference implementation below.
static inline float rgbLuminance(float r, float g, float b)
{
  return (0.2126f * r) + (0.7152f * g) + (0.0722f * b);
}

// Pattanaik tone-mapping driver: reads an RGBA float image ("<width> <height>"
// followed by width*height*4 floats), runs the GPU kernel <repeat> times,
// then verifies the device output against a host reference.
//
// Fixes vs. the original:
//  - verification now uses the mean ABSOLUTE error (the original summed
//    signed differences, which let errors cancel and could report false PASS);
//  - host and device resources are released on the FAIL path too;
//  - aligned_alloc results are checked.
int main(int argc, char *argv[])
{
  if (argc != 3) {
    printf("Usage: %s <path to image> <repeat>\n", argv[0]);
    return 1;
  }
  const char* inputImageName = argv[1]; //"input.hdr";
  const int iterations = atoi(argv[2]);

  // Read a simple image
  std::ifstream inputFile;
  std::cout << "Input file name " << inputImageName << std::endl;
  inputFile.open(inputImageName, std::ifstream::binary);
  if (!inputFile.is_open())
  {
    std::cout << "not able to open the file " << inputImageName << std::endl;
    return 1;
  }

  // Pattanaik tone-mapping parameters.
  const float cPattanaik = 0.25f;
  const float gammaPattanaik = 0.4f;
  const float deltaPattanaik = 0.000002f;

  const uint numChannels = 4;   // RGBA
  uint width;
  uint height;
  float averageLuminance = 0.0f;

  // assume width and height are multiples of 16 (the kernel uses 16x16 tiles)
  inputFile >> width;
  inputFile >> height;

  float *input = (float*) aligned_alloc(1024, height * width * sizeof(float) * numChannels);
  float *output = (float*) aligned_alloc(1024, height * width * sizeof(float) * numChannels);
  if (input == NULL || output == NULL)
  {
    std::cout << "host allocation failed" << std::endl;
    free(input);
    free(output);
    return 1;
  }

  for (unsigned int y = 0; y < height; y++)
  {
    for (unsigned int x = 0; x < width; x++)
    {
      inputFile >> input[(y * width * numChannels) + (x * numChannels + 0)];
      inputFile >> input[(y * width * numChannels) + (x * numChannels + 1)];
      inputFile >> input[(y * width * numChannels) + (x * numChannels + 2)];
      inputFile >> input[(y * width * numChannels) + (x * numChannels + 3)];
    }
  }
  std::cout << "Width of the image " << width << std::endl;
  std::cout << "Height of the image " << height << std::endl;
  inputFile.close();

  // Calculating average luminance value over the whole image.
  for (unsigned int y = 0; y < height; y++)
  {
    for (unsigned int x = 0; x < width; x++)
    {
      float r = input[(y * width * numChannels) + (x * numChannels + 0)];
      float g = input[(y * width * numChannels) + (x * numChannels + 1)];
      float b = input[(y * width * numChannels) + (x * numChannels + 2)];
      averageLuminance += rgbLuminance(r, g, b);
    }
  }
  averageLuminance = averageLuminance / (width * height);
  std::cout << "Average luminance value in the image "
            << averageLuminance << std::endl;

  // Device buffers for the input and tone-mapped images.
  float *inputImageBuffer;
  hipMalloc((void**)&inputImageBuffer, sizeof(float) * width * height * numChannels);
  float *outputImageBuffer;
  hipMalloc((void**)&outputImageBuffer, sizeof(float) * width * height * numChannels);

  // Warm up (skipped when a single timed iteration was requested).
  for(int i = 0; i < 2 && iterations != 1; i++)
  {
    runKernels(
        inputImageBuffer,
        outputImageBuffer,
        input,
        output,
        averageLuminance,
        gammaPattanaik,
        cPattanaik,
        deltaPattanaik,
        width,
        numChannels,
        height);
  }

  std::cout << "Executing kernel for " << iterations << " iterations" <<std::endl;
  std::cout << "-------------------------------------------" << std::endl;

  double time = 0.0;   // accumulated kernel time in nanoseconds
  for(int i = 0; i < iterations; i++)
  {
    time += runKernels(
        inputImageBuffer,
        outputImageBuffer,
        input,
        output,
        averageLuminance,
        gammaPattanaik,
        cPattanaik,
        deltaPattanaik,
        width,
        numChannels,
        height);
  }
  printf("Average kernel execution time: %f (us)\n", (time * 1e-3f) / iterations);

  // Verify results against a host-side reference implementation.
  float *referenceOutput = (float*) malloc (sizeof(float) * height * width * numChannels);
  float gcPattanaik = cPattanaik * averageLuminance;

  // Luminance of the pixel at (yy, xx) — reads the first three channels.
  auto lumAt = [&](unsigned int yy, unsigned int xx) {
    const float *p = input + width * numChannels * yy + xx * numChannels;
    return rgbLuminance(p[0], p[1], p[2]);
  };

  for (unsigned int y = 0; y < height; y++)
  {
    for (unsigned int x = 0; x < width; x++)
    {
      float r1 = input[y * width * numChannels + (x * numChannels + 0)];
      float g1 = input[y * width * numChannels + (x * numChannels + 1)];
      float b1 = input[y * width * numChannels + (x * numChannels + 2)];
      // Luminance of the current pixel.
      float yLuminance = rgbLuminance(r1, g1, b1);

      // Local adaptation luminance: mean of the 8 neighbours for interior
      // pixels, the pixel's own luminance on the image border.
      float yLPattanaik;
      if (x != 0 && y != 0 && x != width - 1 && y != height - 1)
      {
        yLPattanaik = (lumAt(y - 1, x - 1) + lumAt(y - 1, x) + lumAt(y - 1, x + 1) +
                       lumAt(y, x - 1) + lumAt(y, x + 1) +
                       lumAt(y + 1, x - 1) + lumAt(y + 1, x) + lumAt(y + 1, x + 1)) / 8;
      }
      else
      {
        yLPattanaik = yLuminance;
      }

      // Pattanaik adaptation term and display luminance.
      float cLPattanaik = yLPattanaik * log(deltaPattanaik + yLPattanaik / yLuminance) +
                          gcPattanaik;
      float yDPattanaik = yLuminance / (yLuminance + cLPattanaik);

      // Gamma-compress each channel relative to its luminance share; alpha
      // passes through untouched.
      float r = pow((r1 / yLuminance), gammaPattanaik) * yDPattanaik;
      float g = pow((g1 / yLuminance), gammaPattanaik) * yDPattanaik;
      float b = pow((b1 / yLuminance), gammaPattanaik) * yDPattanaik;
      referenceOutput[width * numChannels * y + (x * numChannels + 0)] = r;
      referenceOutput[width * numChannels * y + (x * numChannels + 1)] = g;
      referenceOutput[width * numChannels * y + (x * numChannels + 2)] = b;
      referenceOutput[width * numChannels * y + (x * numChannels + 3)] =
        input[width * numChannels * y + (x * numChannels + 3)];
    }
  }

  // Mean absolute error between device output and reference. fabsf prevents
  // positive and negative per-channel errors from cancelling.
  float error = 0.0f;
  for (unsigned int y = 0; y < height; y++)
  {
    for (unsigned int x = 0; x < width; x++)
    {
      for (unsigned int ch = 0; ch < numChannels; ch++)
      {
        error += fabsf(referenceOutput[width * numChannels * y + (x * numChannels + ch)] -
                       output[width * numChannels * y + (x * numChannels + ch)]);
      }
    }
  }
  error = error / (height * width);

  const bool pass = !(error > 0.000001f);
  if (!pass)
  {
    std::cout << "FAIL with normalized error: " << error << std::endl;
  }
  else
  {
    std::cout << "PASS" << std::endl;
  }

  // Release resources on both outcomes (the original leaked on FAIL).
  free(input);
  free(output);
  free(referenceOutput);
  hipFree(inputImageBuffer);
  hipFree(outputImageBuffer);
  return pass ? 0 : 1;
}
| b0c118adc9f363cdda1d3bc499c043170d97c47e.cu | /**********************************************************************
Copyright ©2013 Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
• Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
• Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************/
#include <iostream>
#include <fstream>
#include <cstdlib>
#include <chrono>
#include <hip/hip_runtime.h>
#include "kernels.cu"
double runKernels(
float *inputImageBuffer,
float *outputImageBuffer,
const float *input,
float *output,
const float averageLuminance,
const float gamma,
const float c,
const float delta,
const uint width,
const uint numChannels,
const uint height)
{
hipMemcpy(inputImageBuffer, input, sizeof(float) * width * height * numChannels,
hipMemcpyHostToDevice);
dim3 grid (width/16, height/16);
dim3 block (16, 16);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
hipLaunchKernelGGL(toneMapping, grid, block, 0, 0, inputImageBuffer, outputImageBuffer,
averageLuminance, gamma, c, delta, width, numChannels, height);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
hipMemcpy(output, outputImageBuffer, sizeof(float) * width * height * numChannels,
hipMemcpyDeviceToHost);
return time;
}
int main(int argc, char *argv[])
{
if (argc != 3) {
printf("Usage: %s <path to image> <repeat>\n", argv[0]);
return 1;
}
const char* inputImageName = argv[1]; //"input.hdr";
const int iterations = atoi(argv[2]);
// Read a simple image
std::ifstream inputFile;
std::cout << "Input file name " << inputImageName << std::endl;
inputFile.open(inputImageName, std::ifstream::binary);
if (!inputFile.is_open())
{
std::cout << "not able to open the file " << inputImageName << std::endl;
return 1;
}
const float cPattanaik = 0.25f;
const float gammaPattanaik = 0.4f;
const float deltaPattanaik = 0.000002f;
const uint numChannels = 4;
uint width;
uint height;
float averageLuminance = 0.0f;
// assume width and height are multiples of 16
inputFile >> width;
inputFile >> height;
float *input = (float*) aligned_alloc(1024, height * width * sizeof(float) * numChannels);
float *output = (float*) aligned_alloc(1024, height * width * sizeof(float) * numChannels);
for (unsigned int y = 0; y < height; y++)
{
for (unsigned int x = 0; x < width; x++)
{
inputFile >> input[(y * width * numChannels) + (x * numChannels + 0)];
inputFile >> input[(y * width * numChannels) + (x * numChannels + 1)];
inputFile >> input[(y * width * numChannels) + (x * numChannels + 2)];
inputFile >> input[(y * width * numChannels) + (x * numChannels + 3)];
}
}
std::cout << "Width of the image " << width << std::endl;
std::cout << "Height of the image " << height << std::endl;
inputFile.close();
//Calculating average luminance value
for (unsigned int y = 0; y < height; y++)
{
for (unsigned int x = 0; x < width; x++)
{
float r = input[(y * width * numChannels) + (x * numChannels + 0)];
float g = input[(y * width * numChannels) + (x * numChannels + 1)];
float b = input[(y * width * numChannels) + (x * numChannels + 2)];
float luminance = (0.2126f * r ) + ( 0.7152f * g ) + ( 0.0722f * b );
averageLuminance += luminance;
}
}
averageLuminance = averageLuminance / (width * height);
std::cout << "Average luminance value in the image "
<< averageLuminance << std::endl;
float *inputImageBuffer;
hipMalloc((void**)&inputImageBuffer, sizeof(float) * width * height * numChannels);
float *outputImageBuffer;
hipMalloc((void**)&outputImageBuffer, sizeof(float) * width * height * numChannels);
// Warm up
for(int i = 0; i < 2 && iterations != 1; i++)
{
runKernels(
inputImageBuffer,
outputImageBuffer,
input,
output,
averageLuminance,
gammaPattanaik,
cPattanaik,
deltaPattanaik,
width,
numChannels,
height);
}
std::cout << "Executing kernel for " << iterations << " iterations" <<std::endl;
std::cout << "-------------------------------------------" << std::endl;
double time = 0.0;
for(int i = 0; i < iterations; i++)
{
time += runKernels(
inputImageBuffer,
outputImageBuffer,
input,
output,
averageLuminance,
gammaPattanaik,
cPattanaik,
deltaPattanaik,
width,
numChannels,
height);
}
printf("Average kernel execution time: %f (us)\n", (time * 1e-3f) / iterations);
// VerifyResults
float *referenceOutput = (float*) malloc (sizeof(float) * height * width * numChannels);
float gcPattanaik = cPattanaik * averageLuminance;
for (unsigned int y = 0; y < height; y++)
{
for (unsigned int x = 0; x < width; x++)
{
float yLPattanaik = 0.0f;
float cLPattanaik = 0.0f;
float r, g, b;
float r1 = input[y * width * numChannels + (x * numChannels + 0)];
float g1 = input[y * width * numChannels + (x * numChannels + 1)];
float b1 = input[y * width * numChannels + (x * numChannels + 2)];
//Calculating the luminance value
float yLuminance = (0.2126f * r1) + (0.7152f * g1) + (0.0722f * b1);
if (x != 0 && y != 0 && x != width - 1 && y != height - 1)
{
//Calculating mean
float leftUp = 0.0f;
float up = 0.0f;
float rightUp = 0.0f;
float left = 0.0f;
float right = 0.0f;
float leftDown = 0.0f;
float down = 0.0f;
float rightDown = 0.0f;
r = input[width * numChannels * (y - 1) + ((x - 1) * numChannels) + 0 ];
g = input[width * numChannels * (y - 1) + ((x - 1) * numChannels) + 1 ];
b = input[width * numChannels * (y - 1) + ((x - 1) * numChannels) + 2 ];
leftUp = (0.2126f * r) + (0.7152f * g) + (0.0722f * b);
r = input[width * numChannels * (y - 1) + ((x) * numChannels) + 0 ];
g = input[width * numChannels * (y - 1) + ((x) * numChannels) + 1 ];
b = input[width * numChannels * (y - 1) + ((x) * numChannels) + 2 ];
up = (0.2126f * r) + (0.7152f * g) + (0.0722f * b);
r = input[width * numChannels * (y - 1) + ((x + 1) * numChannels) + 0 ];
g = input[width * numChannels * (y - 1) + ((x + 1) * numChannels) + 1 ];
b = input[width * numChannels * (y - 1) + ((x + 1) * numChannels) + 2 ];
rightUp = (0.2126f * r) + (0.7152f * g) + (0.0722f * b);
r = input[width * numChannels * (y) + ((x - 1) * numChannels) + 0 ];
g = input[width * numChannels * (y) + ((x - 1) * numChannels) + 1 ];
b = input[width * numChannels * (y) + ((x - 1) * numChannels) + 2 ];
left = (0.2126f * r) + (0.7152f * g) + (0.0722f * b);
r = input[width * numChannels * (y) + ((x + 1) * numChannels) + 0 ];
g = input[width * numChannels * (y) + ((x + 1) * numChannels) + 1 ];
b = input[width * numChannels * (y) + ((x + 1) * numChannels) + 2 ];
right = (0.2126f * r) + (0.7152f * g) + (0.0722f * b);
r = input[width * numChannels * (y + 1) + ((x - 1) * numChannels) + 0 ];
g = input[width * numChannels * (y + 1) + ((x - 1) * numChannels) + 1 ];
b = input[width * numChannels * (y + 1) + ((x - 1) * numChannels) + 2 ];
leftDown = (0.2126f * r) + (0.7152f * g) + (0.0722f * b);
r = input[width * numChannels * (y + 1) + ((x) * numChannels) + 0 ];
g = input[width * numChannels * (y + 1) + ((x) * numChannels) + 1 ];
b = input[width * numChannels * (y + 1) + ((x) * numChannels) + 2 ];
down = (0.2126f * r) + (0.7152f * g) + (0.0722f * b);
r = input[width * numChannels * (y + 1) + ((x + 1) * numChannels) + 0 ];
g = input[width * numChannels * (y + 1) + ((x + 1) * numChannels) + 1 ];
b = input[width * numChannels * (y + 1) + ((x + 1) * numChannels) + 2 ];
rightDown = (0.2126f * r) + (0.7152f * g) + (0.0722f * b);
//Calculate median
yLPattanaik = (leftUp + up + rightUp + left + right + leftDown + down +
rightDown) / 8;
}
else
{
yLPattanaik = yLuminance;
}
cLPattanaik = yLPattanaik * log(deltaPattanaik + yLPattanaik / yLuminance) +
gcPattanaik;
float yDPattanaik = yLuminance / (yLuminance + cLPattanaik);
r = pow((r1 / yLuminance), gammaPattanaik) * yDPattanaik;
g = pow((g1 / yLuminance), gammaPattanaik) * yDPattanaik;
b = pow((b1 / yLuminance), gammaPattanaik) * yDPattanaik;
referenceOutput[width * numChannels * y + (x * numChannels + 0)] = r;
referenceOutput[width * numChannels * y + (x * numChannels + 1)] = g;
referenceOutput[width * numChannels * y + (x * numChannels + 2)] = b;
referenceOutput[width * numChannels * y + (x * numChannels + 3)] =
input[width * numChannels * y + (x * numChannels + 3)];
}
}
float error = 0.0f;
for (unsigned int y = 0; y < height; y++)
{
for (unsigned int x = 0; x < width; x++)
{
error += referenceOutput[width * numChannels * y + (x * numChannels + 0)] -
output[width * numChannels * y + (x * numChannels + 0)];
error += referenceOutput[width * numChannels * y + (x * numChannels + 1)] -
output[width * numChannels * y + (x * numChannels + 1)];
error += referenceOutput[width * numChannels * y + (x * numChannels + 2)] -
output[width * numChannels * y + (x * numChannels + 2)];
error += referenceOutput[width * numChannels * y + (x * numChannels + 3)] -
output[width * numChannels * y + (x * numChannels + 3)];
}
}
error = error / (height * width);
if(error > 0.000001f)
{
std::cout << "FAIL with normalized error: " << error << std::endl;
return 1;
}
else
{
std::cout << "PASS" << std::endl;
}
free(input);
free(output);
free(referenceOutput);
hipFree(inputImageBuffer);
hipFree(outputImageBuffer);
return 0;
}
|
81ddfa53e96cb3841c13193c934207b93b4717de.hip | // !!! This is a file automatically generated by hipify!!!
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "hip/hip_runtime.h"
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
// Software double-precision atomicAdd for devices below SM 6.0, implemented
// as a compare-and-swap retry loop over the value's 64-bit bit pattern.
// Returns the value stored at *address immediately before the successful add.
__device__ double atomicAdd(double* address, double val)
{
    unsigned long long int* target = (unsigned long long int*)address;
    unsigned long long int expected = *target;
    unsigned long long int observed;
    do {
        observed = expected;
        // Try to swap in (old + val); atomicCAS returns the value it found,
        // which becomes the next guess if another thread got there first.
        expected = atomicCAS(target, observed,
                             __double_as_longlong(val + __longlong_as_double(observed)));
        // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
    } while (observed != expected);
    return __longlong_as_double(expected);
}
#endif
// Scatter-accumulate kernel: vTP[idx[i] - 1] += val[i] for all i < N,
// mirroring MATLAB's accumarray. idx holds 1-based positions stored as
// doubles; atomicAdd makes concurrent hits on the same output slot safe.
// A grid-stride loop lets any launch configuration cover all N elements.
template<class Tval>
void __global__ mex_accum(const double *idx, const Tval *val, unsigned long long int const N, Tval *vTP)
{
    const unsigned long long int stride = (unsigned long long int)blockDim.x * gridDim.x;
    for (unsigned long long int i = blockDim.x * blockIdx.x + threadIdx.x; i < N; i += stride)
    {
        // Convert from MATLAB's 1-based index to a 0-based offset.
        unsigned long long int out = idx[i] - 1;
        // Zero contributions are skipped to avoid needless atomic traffic.
        if (val[i] != 0.0)
            atomicAdd(&vTP[out], val[i]);
    }
}
// MEX gateway: accumarray-style scatter-add on the GPU.
//
// Inputs (prhs):
//   [0] idx  - gpuArray of doubles, 1-based output positions (one per value)
//   [1] val  - gpuArray of values (double or single)
//   [2] Nout - scalar, length of the output vector
// Output (plhs):
//   [0] zero-initialized gpuArray of length Nout with val accumulated at idx.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
    const mxGPUArray *idx, *val;
    mxGPUArray *vTP;
    mxInitGPU();
    idx = mxGPUCreateFromMxArray(prhs[0]);
    val = mxGPUCreateFromMxArray(prhs[1]);
    unsigned long long int const Nout = mxGetScalar(prhs[2]);
    mwSize dims[1] = {Nout};
    // MX_GPU_INITIALIZE_VALUES zero-fills the output, so only touched slots
    // differ from zero after the kernel runs.
    vTP = mxGPUCreateGPUArray(1, dims, mxGPUGetClassID(val), mxREAL, MX_GPU_INITIALIZE_VALUES);
    unsigned long long int const N = mxGPUGetNumberOfElements(val);
    const int threadsPerBlock = 256;
    unsigned long long int blocksPerGrid;
    // Ceiling division so every input element is covered.
    blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    const double *d_idx;
    d_idx = (const double*) (mxGPUGetDataReadOnly(idx));
    // Dispatch on the value class; idx is always treated as double.
    // NOTE(review): classes other than double/single fall through both cases,
    // silently returning the all-zero output — consider an error default.
    switch (mxGPUGetClassID(val))
    {
        case mxDOUBLE_CLASS:
            const double *d_val;
            double *d_vTP;
            d_val = (const double*) (mxGPUGetDataReadOnly(val));
            d_vTP = (double*) (mxGPUGetData(vTP));
            hipLaunchKernelGGL(( mex_accum), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_idx, d_val, N, d_vTP);
            break;
        case mxSINGLE_CLASS:
            const float *f_val;
            float *f_vTP;
            f_val = (const float*) (mxGPUGetDataReadOnly(val));
            f_vTP = (float*) (mxGPUGetData(vTP));
            hipLaunchKernelGGL(( mex_accum), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_idx, f_val, N, f_vTP);
            break;
    }
    plhs[0] = mxGPUCreateMxArrayOnGPU(vTP);
    // Release the wrapper objects (the returned array keeps its own data).
    mxGPUDestroyGPUArray(idx);
    mxGPUDestroyGPUArray(val);
    mxGPUDestroyGPUArray(vTP);
}
| 81ddfa53e96cb3841c13193c934207b93b4717de.cu | #include "mex.h"
#include "gpu/mxGPUArray.h"
#include "cuda.h"
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
template<class Tval>
void __global__ mex_accum(const double *idx, const Tval *val, unsigned long long int const N, Tval *vTP)
{
unsigned long long int i = blockDim.x * blockIdx.x + threadIdx.x;
while (i < N)
{
unsigned long long int vTP_idx = idx[i] - 1;
if(val[i] != 0.0)
atomicAdd(&vTP[vTP_idx], val[i]);
i += blockDim.x*gridDim.x;
}
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
const mxGPUArray *idx, *val;
mxGPUArray *vTP;
mxInitGPU();
idx = mxGPUCreateFromMxArray(prhs[0]);
val = mxGPUCreateFromMxArray(prhs[1]);
unsigned long long int const Nout = mxGetScalar(prhs[2]);
mwSize dims[1] = {Nout};
vTP = mxGPUCreateGPUArray(1, dims, mxGPUGetClassID(val), mxREAL, MX_GPU_INITIALIZE_VALUES);
unsigned long long int const N = mxGPUGetNumberOfElements(val);
const int threadsPerBlock = 256;
unsigned long long int blocksPerGrid;
blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
const double *d_idx;
d_idx = (const double*) (mxGPUGetDataReadOnly(idx));
switch (mxGPUGetClassID(val))
{
case mxDOUBLE_CLASS:
const double *d_val;
double *d_vTP;
d_val = (const double*) (mxGPUGetDataReadOnly(val));
d_vTP = (double*) (mxGPUGetData(vTP));
mex_accum<<<blocksPerGrid, threadsPerBlock>>>(d_idx, d_val, N, d_vTP);
break;
case mxSINGLE_CLASS:
const float *f_val;
float *f_vTP;
f_val = (const float*) (mxGPUGetDataReadOnly(val));
f_vTP = (float*) (mxGPUGetData(vTP));
mex_accum<<<blocksPerGrid, threadsPerBlock>>>(d_idx, f_val, N, f_vTP);
break;
}
plhs[0] = mxGPUCreateMxArrayOnGPU(vTP);
mxGPUDestroyGPUArray(idx);
mxGPUDestroyGPUArray(val);
mxGPUDestroyGPUArray(vTP);
}
|
dfcbe8cb0ab8874f9d8783ba1afb63d4b16df9c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Program name: HelloGPU.cu
Author name: Dr. Nileshchandra Pikle
Email: nilesh.pikle@gmail.com
Contact Number: 7276834418
Webpage: https://piklenileshchandra.wixsite.com/personal
Purpose: To demonstarte
1. How to write a simple CUDA program
2. Calling CUDA kernel
3. How to compile & run CUDA program
Discrition: Given two functions helloCPU() and helloGPU()
helloCPU() function is executed on CPU and prints message
"Hello from the CPU."
helloGPU() function is executed on GPU and prints message
"Hello from the GPU."
To compile nvcc -arch=sm_35 1_HelloGPU.cu
To Run ./a.out
*/
#include <stdio.h>
// Print a greeting from the host CPU.
void helloCPU()
{
  const char *msg = "Hello from the CPU.\n";
  printf("%s", msg);
}
// Print a greeting from every GPU thread via device-side printf.
__global__ void helloGPU()
{
  const char *msg = "Hello also from the GPU.\n";
  printf("%s", msg);
}
// Greet from the host, then launch the GPU greeting kernel and wait for it.
int main()
{
  helloCPU();
  // 2 thread blocks of 32 threads each — every GPU thread prints once.
  const dim3 numBlocks(2);
  const dim3 threadsPerBlock(32);
  hipLaunchKernelGGL(helloGPU, numBlocks, threadsPerBlock, 0, 0);
  // Kernel launches are asynchronous; block until the GPU output is flushed.
  hipDeviceSynchronize();
  return 0;
}
| dfcbe8cb0ab8874f9d8783ba1afb63d4b16df9c9.cu | /*
Program name: HelloGPU.cu
Author name: Dr. Nileshchandra Pikle
Email: nilesh.pikle@gmail.com
Contact Number: 7276834418
Webpage: https://piklenileshchandra.wixsite.com/personal
Purpose: To demonstarte
1. How to write a simple CUDA program
2. Calling CUDA kernel
3. How to compile & run CUDA program
Discrition: Given two functions helloCPU() and helloGPU()
helloCPU() function is executed on CPU and prints message
"Hello from the CPU."
helloGPU() function is executed on GPU and prints message
"Hello from the GPU."
To compile nvcc -arch=sm_35 1_HelloGPU.cu
To Run ./a.out
*/
#include <stdio.h>
void helloCPU()
{
printf("Hello from the CPU.\n");
}
__global__ void helloGPU()
{
printf("Hello also from the GPU.\n");
}
int main()
{
helloCPU();
// First #thread blocks Second = # threads per block
helloGPU<<<2,32>>>();
cudaDeviceSynchronize();
return 0;
}
|
4f44605d300cf0ff03a75b197bc54b14db68f815.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define GROUP_SIZE 256
#define BUFFER_SIZE 256
/**
* Find a bounding box for the atoms in each block.
*/
extern "C" __global__ void findBlockBounds(int numAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
        const real4* __restrict__ posq, real4* __restrict__ blockCenter, real4* __restrict__ blockBoundingBox, int* __restrict__ rebuildNeighborList,
        real2* __restrict__ sortedBlocks) {
    // Grid-stride loop over atom blocks: thread `index` handles the block of
    // TILE_SIZE consecutive atoms starting at `base`.
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    int base = index*TILE_SIZE;
    while (base < numAtoms) {
        // Seed the min/max corners with the block's first atom.
        real4 pos = posq[base];
#ifdef USE_PERIODIC
        APPLY_PERIODIC_TO_POS(pos)
#endif
        real4 minPos = pos;
        real4 maxPos = pos;
        // The final block may be partial; clamp to numAtoms.
        int last = min(base+TILE_SIZE, numAtoms);
        for (int i = base+1; i < last; i++) {
            pos = posq[i];
#ifdef USE_PERIODIC
            // Wrap each atom to the periodic image nearest the running box
            // center so the bounding box stays compact across the boundary.
            real4 center = 0.5f*(maxPos+minPos);
            APPLY_PERIODIC_TO_POS_WITH_CENTER(pos, center)
#endif
            // Grow the axis-aligned bounding box component-wise.
            minPos = make_real4(min(minPos.x,pos.x), min(minPos.y,pos.y), min(minPos.z,pos.z), 0);
            maxPos = make_real4(max(maxPos.x,pos.x), max(maxPos.y,pos.y), max(maxPos.z,pos.z), 0);
        }
        // Store the half-extent (blockSize) and center of the box.
        real4 blockSize = 0.5f*(maxPos-minPos);
        blockBoundingBox[index] = blockSize;
        blockCenter[index] = 0.5f*(maxPos+minPos);
        // Sort key: sum of the box half-extents (a size measure), paired with
        // the block index so the sorted order can be mapped back.
        sortedBlocks[index] = make_real2(blockSize.x+blockSize.y+blockSize.z, index);
        index += blockDim.x*gridDim.x;
        base = index*TILE_SIZE;
    }
    // One thread clears the rebuild flag; sortBoxData may set it again later.
    if (blockIdx.x == 0 && threadIdx.x == 0)
        rebuildNeighborList[0] = 0;
}
/**
* Sort the data about bounding boxes so it can be accessed more efficiently in the next kernel.
*/
extern "C" __global__ void sortBoxData(const real2* __restrict__ sortedBlock, const real4* __restrict__ blockCenter,
        const real4* __restrict__ blockBoundingBox, real4* __restrict__ sortedBlockCenter,
        real4* __restrict__ sortedBlockBoundingBox, const real4* __restrict__ posq, const real4* __restrict__ oldPositions,
        unsigned int* __restrict__ interactionCount, int* __restrict__ rebuildNeighborList) {
    // Permute the per-block centers and bounding boxes into the sorted order
    // so the next kernel can read them with unit stride (y holds the original
    // block index that the sort carried along).
    for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_BLOCKS; i += blockDim.x*gridDim.x) {
        int index = (int) sortedBlock[i].y;
        sortedBlockCenter[i] = blockCenter[index];
        sortedBlockBoundingBox[i] = blockBoundingBox[index];
    }
    // Also check whether any atom has moved enough so that we really need to rebuild the neighbor list.
    // An atom moving more than half the padding distance (compared squared to
    // avoid a sqrt) invalidates the padded list.
    bool rebuild = false;
    for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) {
        real4 delta = oldPositions[i]-posq[i];
        if (delta.x*delta.x + delta.y*delta.y + delta.z*delta.z > 0.25f*PADDING*PADDING)
            rebuild = true;
    }
    if (rebuild) {
        // Plain (non-atomic) stores: every thread that triggers writes the
        // same values, so races here are benign.
        rebuildNeighborList[0] = 1;
        interactionCount[0] = 0;
    }
}
/**
* Compare the bounding boxes for each pair of atom blocks (comprised of 32 atoms each), forming a tile. If the two
* atom blocks are sufficiently far apart, mark them as non-interacting. There are two stages in the algorithm.
*
* STAGE 1:
*
* A coarse grained atom block against interacting atom block neighbour list is constructed.
*
* Each warp first loads in some block X of interest. Each thread within the warp then loads
* in a different atom block Y. If Y has exclusions with X, then Y is not processed. If the bounding boxes
* of the two atom blocks are within the cutoff distance, then the two atom blocks are considered to be
* interacting and Y is added to the buffer for X.
*
* STAGE 2:
*
* A fine grained atom block against interacting atoms neighbour list is constructed.
*
* The warp loops over atom blocks Y that were found to (possibly) interact with atom block X. Each thread
* in the warp loops over the 32 atoms in X and compares their positions to one particular atom from block Y.
* If it finds one closer than the cutoff distance, the atom is added to the list of atoms interacting with block X.
* This continues until the buffer fills up, at which point the results are written to global memory.
*
* [in] periodicBoxSize - size of the rectangular periodic box
* [in] invPeriodicBoxSize - inverse of the periodic box
* [in] blockCenter - the center of each bounding box
* [in] blockBoundingBox - bounding box of each atom block
* [out] interactionCount - total number of tiles that have interactions
* [out] interactingTiles - set of blocks that have interactions
* [out] interactingAtoms - a list of atoms that interact with each atom block
* [in] posq - x,y,z coordinates of each atom and charge q
* [in] maxTiles - maximum number of tiles to process, used for multi-GPUs
* [in] startBlockIndex - first block to process, used for multi-GPUs,
* [in] numBlocks - total number of atom blocks
* [in] sortedBlocks - a sorted list of atom blocks based on volume
* [in] sortedBlockCenter - sorted centers, duplicated for fast access to avoid indexing
* [in] sortedBlockBoundingBox - sorted bounding boxes, duplicated for fast access
* [in] exclusionIndices - maps into exclusionRowIndices with the starting position for a given atom
* [in] exclusionRowIndices - stores the a continuous list of exclusions
* eg: block 0 is excluded from atom 3,5,6
* block 1 is excluded from atom 3,4
* block 2 is excluded from atom 1,3,5,6
* exclusionIndices[0][3][5][8]
* exclusionRowIndices[3][5][6][3][4][1][3][5][6]
* index 0 1 2 3 4 5 6 7 8
* [out] oldPos - stores the positions of the atoms in which this neighbourlist was built on
* - this is used to decide when to rebuild a neighbourlist
* [in] rebuildNeighbourList - whether or not to execute this kernel
*
*/
extern "C" __global__ void findBlocksWithInteractions(real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
unsigned int* __restrict__ interactionCount, int* __restrict__ interactingTiles, unsigned int* __restrict__ interactingAtoms, const real4* __restrict__ posq,
unsigned int maxTiles, unsigned int startBlockIndex, unsigned int numBlocks, real2* __restrict__ sortedBlocks, const real4* __restrict__ sortedBlockCenter,
const real4* __restrict__ sortedBlockBoundingBox, const unsigned int* __restrict__ exclusionIndices, const unsigned int* __restrict__ exclusionRowIndices,
real4* __restrict__ oldPositions, const int* __restrict__ rebuildNeighborList) {
if (rebuildNeighborList[0] == 0)
return; // The neighbor list doesn't need to be rebuilt.
const int indexInWarp = threadIdx.x%32;
const int warpStart = threadIdx.x-indexInWarp;
const int totalWarps = blockDim.x*gridDim.x/32;
const int warpIndex = (blockIdx.x*blockDim.x+threadIdx.x)/32;
const int warpMask = (1<<indexInWarp)-1;
__shared__ int workgroupBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
__shared__ int warpExclusions[MAX_EXCLUSIONS*(GROUP_SIZE/32)];
__shared__ real3 posBuffer[GROUP_SIZE];
__shared__ volatile int workgroupTileIndex[GROUP_SIZE/32];
int* buffer = workgroupBuffer+BUFFER_SIZE*(warpStart/32);
int* exclusionsForX = warpExclusions+MAX_EXCLUSIONS*(warpStart/32);
volatile int& tileStartIndex = workgroupTileIndex[warpStart/32];
// Loop over blocks.
for (int block1 = startBlockIndex+warpIndex; block1 < startBlockIndex+numBlocks; block1 += totalWarps) {
// Load data for this block. Note that all threads in a warp are processing the same block.
real2 sortedKey = sortedBlocks[block1];
int x = (int) sortedKey.y;
real4 blockCenterX = sortedBlockCenter[block1];
real4 blockSizeX = sortedBlockBoundingBox[block1];
int neighborsInBuffer = 0;
real3 pos1 = trimTo3(posq[x*TILE_SIZE+indexInWarp]);
#ifdef USE_PERIODIC
const bool singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= PADDED_CUTOFF);
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos1, blockCenterX)
}
#endif
posBuffer[threadIdx.x] = pos1;
// Load exclusion data for block x.
const int exclusionStart = exclusionRowIndices[x];
const int exclusionEnd = exclusionRowIndices[x+1];
const int numExclusions = exclusionEnd-exclusionStart;
for (int j = indexInWarp; j < numExclusions; j += 32)
exclusionsForX[j] = exclusionIndices[exclusionStart+j];
if (MAX_EXCLUSIONS > 32)
__syncthreads();
// Loop over atom blocks to search for neighbors. The threads in a warp compare block1 against 32
// other blocks in parallel.
for (int block2Base = block1+1; block2Base < NUM_BLOCKS; block2Base += 32) {
int block2 = block2Base+indexInWarp;
bool includeBlock2 = (block2 < NUM_BLOCKS);
if (includeBlock2) {
real4 blockCenterY = (block2 < NUM_BLOCKS ? sortedBlockCenter[block2] : make_real4(0));
real4 blockSizeY = (block2 < NUM_BLOCKS ? sortedBlockBoundingBox[block2] : make_real4(0));
real4 blockDelta = blockCenterX-blockCenterY;
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(blockDelta)
#endif
blockDelta.x = max(0.0f, fabs(blockDelta.x)-blockSizeX.x-blockSizeY.x);
blockDelta.y = max(0.0f, fabs(blockDelta.y)-blockSizeX.y-blockSizeY.y);
blockDelta.z = max(0.0f, fabs(blockDelta.z)-blockSizeX.z-blockSizeY.z);
includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < PADDED_CUTOFF_SQUARED);
if (includeBlock2) {
unsigned short y = (unsigned short) sortedBlocks[block2].y;
for (int k = 0; k < numExclusions; k++)
includeBlock2 &= (exclusionsForX[k] != y);
}
}
// Loop over any blocks we identified as potentially containing neighbors.
int includeBlockFlags = __ballot(includeBlock2);
while (includeBlockFlags != 0) {
int i = __ffs(includeBlockFlags)-1;
includeBlockFlags &= includeBlockFlags-1;
unsigned short y = (unsigned short) sortedBlocks[block2Base+i].y;
// Check each atom in block Y for interactions.
int start = y*TILE_SIZE;
int atom2 = start+indexInWarp;
real3 pos2 = trimTo3(posq[atom2]);
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos2, blockCenterX)
}
#endif
bool interacts = false;
if (atom2 < NUM_ATOMS) {
#ifdef USE_PERIODIC
if (!singlePeriodicCopy) {
for (int j = 0; j < TILE_SIZE; j++) {
real3 delta = pos2-posBuffer[warpStart+j];
APPLY_PERIODIC_TO_DELTA(delta)
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED);
}
}
else {
#endif
for (int j = 0; j < TILE_SIZE; j++) {
real3 delta = pos2-posBuffer[warpStart+j];
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED);
}
#ifdef USE_PERIODIC
}
#endif
}
// Add any interacting atoms to the buffer.
int includeAtomFlags = __ballot(interacts);
if (interacts)
buffer[neighborsInBuffer+__popc(includeAtomFlags&warpMask)] = atom2;
neighborsInBuffer += __popc(includeAtomFlags);
if (neighborsInBuffer > BUFFER_SIZE-TILE_SIZE) {
// Store the new tiles to memory.
int tilesToStore = neighborsInBuffer/TILE_SIZE;
if (indexInWarp == 0)
tileStartIndex = atomicAdd(interactionCount, tilesToStore);
int newTileStartIndex = tileStartIndex;
if (newTileStartIndex+tilesToStore <= maxTiles) {
if (indexInWarp < tilesToStore)
interactingTiles[newTileStartIndex+indexInWarp] = x;
for (int j = 0; j < tilesToStore; j++)
interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = buffer[indexInWarp+j*TILE_SIZE];
}
buffer[indexInWarp] = buffer[indexInWarp+TILE_SIZE*tilesToStore];
neighborsInBuffer -= TILE_SIZE*tilesToStore;
}
}
}
// If we have a partially filled buffer, store it to memory.
if (neighborsInBuffer > 0) {
int tilesToStore = (neighborsInBuffer+TILE_SIZE-1)/TILE_SIZE;
if (indexInWarp == 0)
tileStartIndex = atomicAdd(interactionCount, tilesToStore);
int newTileStartIndex = tileStartIndex;
if (newTileStartIndex+tilesToStore <= maxTiles) {
if (indexInWarp < tilesToStore)
interactingTiles[newTileStartIndex+indexInWarp] = x;
for (int j = 0; j < tilesToStore; j++)
interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = (indexInWarp+j*TILE_SIZE < neighborsInBuffer ? buffer[indexInWarp+j*TILE_SIZE] : NUM_ATOMS);
}
}
}
// Record the positions the neighbor list is based on.
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x)
oldPositions[i] = posq[i];
} | 4f44605d300cf0ff03a75b197bc54b14db68f815.cu | #define GROUP_SIZE 256
#define BUFFER_SIZE 256
/**
* Find a bounding box for the atoms in each block.
*/
extern "C" __global__ void findBlockBounds(int numAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
const real4* __restrict__ posq, real4* __restrict__ blockCenter, real4* __restrict__ blockBoundingBox, int* __restrict__ rebuildNeighborList,
real2* __restrict__ sortedBlocks) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
int base = index*TILE_SIZE;
while (base < numAtoms) {
real4 pos = posq[base];
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_POS(pos)
#endif
real4 minPos = pos;
real4 maxPos = pos;
int last = min(base+TILE_SIZE, numAtoms);
for (int i = base+1; i < last; i++) {
pos = posq[i];
#ifdef USE_PERIODIC
real4 center = 0.5f*(maxPos+minPos);
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos, center)
#endif
minPos = make_real4(min(minPos.x,pos.x), min(minPos.y,pos.y), min(minPos.z,pos.z), 0);
maxPos = make_real4(max(maxPos.x,pos.x), max(maxPos.y,pos.y), max(maxPos.z,pos.z), 0);
}
real4 blockSize = 0.5f*(maxPos-minPos);
blockBoundingBox[index] = blockSize;
blockCenter[index] = 0.5f*(maxPos+minPos);
sortedBlocks[index] = make_real2(blockSize.x+blockSize.y+blockSize.z, index);
index += blockDim.x*gridDim.x;
base = index*TILE_SIZE;
}
if (blockIdx.x == 0 && threadIdx.x == 0)
rebuildNeighborList[0] = 0;
}
/**
* Sort the data about bounding boxes so it can be accessed more efficiently in the next kernel.
*/
extern "C" __global__ void sortBoxData(const real2* __restrict__ sortedBlock, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockBoundingBox, real4* __restrict__ sortedBlockCenter,
real4* __restrict__ sortedBlockBoundingBox, const real4* __restrict__ posq, const real4* __restrict__ oldPositions,
unsigned int* __restrict__ interactionCount, int* __restrict__ rebuildNeighborList) {
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_BLOCKS; i += blockDim.x*gridDim.x) {
int index = (int) sortedBlock[i].y;
sortedBlockCenter[i] = blockCenter[index];
sortedBlockBoundingBox[i] = blockBoundingBox[index];
}
// Also check whether any atom has moved enough so that we really need to rebuild the neighbor list.
bool rebuild = false;
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) {
real4 delta = oldPositions[i]-posq[i];
if (delta.x*delta.x + delta.y*delta.y + delta.z*delta.z > 0.25f*PADDING*PADDING)
rebuild = true;
}
if (rebuild) {
rebuildNeighborList[0] = 1;
interactionCount[0] = 0;
}
}
/**
* Compare the bounding boxes for each pair of atom blocks (comprised of 32 atoms each), forming a tile. If the two
* atom blocks are sufficiently far apart, mark them as non-interacting. There are two stages in the algorithm.
*
* STAGE 1:
*
* A coarse grained atom block against interacting atom block neighbour list is constructed.
*
* Each warp first loads in some block X of interest. Each thread within the warp then loads
* in a different atom block Y. If Y has exclusions with X, then Y is not processed. If the bounding boxes
* of the two atom blocks are within the cutoff distance, then the two atom blocks are considered to be
* interacting and Y is added to the buffer for X.
*
* STAGE 2:
*
* A fine grained atom block against interacting atoms neighbour list is constructed.
*
* The warp loops over atom blocks Y that were found to (possibly) interact with atom block X. Each thread
* in the warp loops over the 32 atoms in X and compares their positions to one particular atom from block Y.
* If it finds one closer than the cutoff distance, the atom is added to the list of atoms interacting with block X.
* This continues until the buffer fills up, at which point the results are written to global memory.
*
* [in] periodicBoxSize - size of the rectangular periodic box
* [in] invPeriodicBoxSize - inverse of the periodic box
* [in] blockCenter - the center of each bounding box
* [in] blockBoundingBox - bounding box of each atom block
* [out] interactionCount - total number of tiles that have interactions
* [out] interactingTiles - set of blocks that have interactions
* [out] interactingAtoms - a list of atoms that interact with each atom block
* [in] posq - x,y,z coordinates of each atom and charge q
* [in] maxTiles - maximum number of tiles to process, used for multi-GPUs
* [in] startBlockIndex - first block to process, used for multi-GPUs,
* [in] numBlocks - total number of atom blocks
* [in] sortedBlocks - a sorted list of atom blocks based on volume
* [in] sortedBlockCenter - sorted centers, duplicated for fast access to avoid indexing
* [in] sortedBlockBoundingBox - sorted bounding boxes, duplicated for fast access
* [in] exclusionIndices - maps into exclusionRowIndices with the starting position for a given atom
* [in] exclusionRowIndices - stores the a continuous list of exclusions
* eg: block 0 is excluded from atom 3,5,6
* block 1 is excluded from atom 3,4
* block 2 is excluded from atom 1,3,5,6
* exclusionIndices[0][3][5][8]
* exclusionRowIndices[3][5][6][3][4][1][3][5][6]
* index 0 1 2 3 4 5 6 7 8
* [out] oldPos - stores the positions of the atoms in which this neighbourlist was built on
* - this is used to decide when to rebuild a neighbourlist
* [in] rebuildNeighbourList - whether or not to execute this kernel
*
*/
extern "C" __global__ void findBlocksWithInteractions(real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
unsigned int* __restrict__ interactionCount, int* __restrict__ interactingTiles, unsigned int* __restrict__ interactingAtoms, const real4* __restrict__ posq,
unsigned int maxTiles, unsigned int startBlockIndex, unsigned int numBlocks, real2* __restrict__ sortedBlocks, const real4* __restrict__ sortedBlockCenter,
const real4* __restrict__ sortedBlockBoundingBox, const unsigned int* __restrict__ exclusionIndices, const unsigned int* __restrict__ exclusionRowIndices,
real4* __restrict__ oldPositions, const int* __restrict__ rebuildNeighborList) {
if (rebuildNeighborList[0] == 0)
return; // The neighbor list doesn't need to be rebuilt.
const int indexInWarp = threadIdx.x%32;
const int warpStart = threadIdx.x-indexInWarp;
const int totalWarps = blockDim.x*gridDim.x/32;
const int warpIndex = (blockIdx.x*blockDim.x+threadIdx.x)/32;
const int warpMask = (1<<indexInWarp)-1;
__shared__ int workgroupBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
__shared__ int warpExclusions[MAX_EXCLUSIONS*(GROUP_SIZE/32)];
__shared__ real3 posBuffer[GROUP_SIZE];
__shared__ volatile int workgroupTileIndex[GROUP_SIZE/32];
int* buffer = workgroupBuffer+BUFFER_SIZE*(warpStart/32);
int* exclusionsForX = warpExclusions+MAX_EXCLUSIONS*(warpStart/32);
volatile int& tileStartIndex = workgroupTileIndex[warpStart/32];
// Loop over blocks.
for (int block1 = startBlockIndex+warpIndex; block1 < startBlockIndex+numBlocks; block1 += totalWarps) {
// Load data for this block. Note that all threads in a warp are processing the same block.
real2 sortedKey = sortedBlocks[block1];
int x = (int) sortedKey.y;
real4 blockCenterX = sortedBlockCenter[block1];
real4 blockSizeX = sortedBlockBoundingBox[block1];
int neighborsInBuffer = 0;
real3 pos1 = trimTo3(posq[x*TILE_SIZE+indexInWarp]);
#ifdef USE_PERIODIC
const bool singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= PADDED_CUTOFF);
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos1, blockCenterX)
}
#endif
posBuffer[threadIdx.x] = pos1;
// Load exclusion data for block x.
const int exclusionStart = exclusionRowIndices[x];
const int exclusionEnd = exclusionRowIndices[x+1];
const int numExclusions = exclusionEnd-exclusionStart;
for (int j = indexInWarp; j < numExclusions; j += 32)
exclusionsForX[j] = exclusionIndices[exclusionStart+j];
if (MAX_EXCLUSIONS > 32)
__syncthreads();
// Loop over atom blocks to search for neighbors. The threads in a warp compare block1 against 32
// other blocks in parallel.
for (int block2Base = block1+1; block2Base < NUM_BLOCKS; block2Base += 32) {
int block2 = block2Base+indexInWarp;
bool includeBlock2 = (block2 < NUM_BLOCKS);
if (includeBlock2) {
real4 blockCenterY = (block2 < NUM_BLOCKS ? sortedBlockCenter[block2] : make_real4(0));
real4 blockSizeY = (block2 < NUM_BLOCKS ? sortedBlockBoundingBox[block2] : make_real4(0));
real4 blockDelta = blockCenterX-blockCenterY;
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(blockDelta)
#endif
blockDelta.x = max(0.0f, fabs(blockDelta.x)-blockSizeX.x-blockSizeY.x);
blockDelta.y = max(0.0f, fabs(blockDelta.y)-blockSizeX.y-blockSizeY.y);
blockDelta.z = max(0.0f, fabs(blockDelta.z)-blockSizeX.z-blockSizeY.z);
includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < PADDED_CUTOFF_SQUARED);
if (includeBlock2) {
unsigned short y = (unsigned short) sortedBlocks[block2].y;
for (int k = 0; k < numExclusions; k++)
includeBlock2 &= (exclusionsForX[k] != y);
}
}
// Loop over any blocks we identified as potentially containing neighbors.
int includeBlockFlags = __ballot(includeBlock2);
while (includeBlockFlags != 0) {
int i = __ffs(includeBlockFlags)-1;
includeBlockFlags &= includeBlockFlags-1;
unsigned short y = (unsigned short) sortedBlocks[block2Base+i].y;
// Check each atom in block Y for interactions.
int start = y*TILE_SIZE;
int atom2 = start+indexInWarp;
real3 pos2 = trimTo3(posq[atom2]);
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos2, blockCenterX)
}
#endif
bool interacts = false;
if (atom2 < NUM_ATOMS) {
#ifdef USE_PERIODIC
if (!singlePeriodicCopy) {
for (int j = 0; j < TILE_SIZE; j++) {
real3 delta = pos2-posBuffer[warpStart+j];
APPLY_PERIODIC_TO_DELTA(delta)
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED);
}
}
else {
#endif
for (int j = 0; j < TILE_SIZE; j++) {
real3 delta = pos2-posBuffer[warpStart+j];
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED);
}
#ifdef USE_PERIODIC
}
#endif
}
// Add any interacting atoms to the buffer.
int includeAtomFlags = __ballot(interacts);
if (interacts)
buffer[neighborsInBuffer+__popc(includeAtomFlags&warpMask)] = atom2;
neighborsInBuffer += __popc(includeAtomFlags);
if (neighborsInBuffer > BUFFER_SIZE-TILE_SIZE) {
// Store the new tiles to memory.
int tilesToStore = neighborsInBuffer/TILE_SIZE;
if (indexInWarp == 0)
tileStartIndex = atomicAdd(interactionCount, tilesToStore);
int newTileStartIndex = tileStartIndex;
if (newTileStartIndex+tilesToStore <= maxTiles) {
if (indexInWarp < tilesToStore)
interactingTiles[newTileStartIndex+indexInWarp] = x;
for (int j = 0; j < tilesToStore; j++)
interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = buffer[indexInWarp+j*TILE_SIZE];
}
buffer[indexInWarp] = buffer[indexInWarp+TILE_SIZE*tilesToStore];
neighborsInBuffer -= TILE_SIZE*tilesToStore;
}
}
}
// If we have a partially filled buffer, store it to memory.
if (neighborsInBuffer > 0) {
int tilesToStore = (neighborsInBuffer+TILE_SIZE-1)/TILE_SIZE;
if (indexInWarp == 0)
tileStartIndex = atomicAdd(interactionCount, tilesToStore);
int newTileStartIndex = tileStartIndex;
if (newTileStartIndex+tilesToStore <= maxTiles) {
if (indexInWarp < tilesToStore)
interactingTiles[newTileStartIndex+indexInWarp] = x;
for (int j = 0; j < tilesToStore; j++)
interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = (indexInWarp+j*TILE_SIZE < neighborsInBuffer ? buffer[indexInWarp+j*TILE_SIZE] : NUM_ATOMS);
}
}
}
// Record the positions the neighbor list is based on.
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x)
oldPositions[i] = posq[i];
} |
b7af91dbe2086e65318727a987d4578fbc0e250d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_ROCM
template<typename Dtype>
__global__ void Concat(const int nthreads, const Dtype* in_data,
const bool forward, const int num_concats,
const int concat_size, const int top_concat_axis,
const int bottom_concat_axis,
const int offset_concat_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index
+ (concat_num * top_concat_axis + offset_concat_axis) * concat_size;
if (forward) {
out_data[top_index] = in_data[index];
} else {
out_data[index] = in_data[top_index];
}
}
}
#endif // USE_ROCM
template<typename Dtype>
void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (bottom.size() == 1) { return; }
Dtype* top_data = top[0]->mutable_gpu_data();
int offset_concat_axis = 0;
const int top_concat_axis = top[0]->shape(concat_axis_);
const bool kForward = true;
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
Concat<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS)(
nthreads, bottom_data, kForward, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_->id());
viennacl::ocl::kernel &oclk_concat = program.get_kernel(
CL_KERNEL_SELECT("concat"));
viennacl::ocl::enqueue(
oclk_concat(nthreads, WrapHandle((cl_mem) bottom_data, &ctx),
kForward ? 1 : 0, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis,
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
offset_concat_axis += bottom_concat_axis;
}
}
template<typename Dtype>
void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (bottom.size() == 1) { return; }
const Dtype* top_diff = top[0]->gpu_diff();
int offset_concat_axis = 0;
const int top_concat_axis = top[0]->shape(concat_axis_);
const bool kForward = false;
for (int i = 0; i < bottom.size(); ++i) {
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
if (propagate_down[i]) {
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
Concat<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS)(
nthreads, top_diff, kForward, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis,
offset_concat_axis, bottom_diff);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_->id());
viennacl::ocl::kernel &oclk_concat = program.get_kernel(
CL_KERNEL_SELECT("concat"));
viennacl::ocl::enqueue(
oclk_concat(nthreads, WrapHandle((cl_mem) top_diff, &ctx),
kForward ? 1 : 0, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis,
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
offset_concat_axis += bottom_concat_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer);
} // namespace caffe
| b7af91dbe2086e65318727a987d4578fbc0e250d.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_CUDA
template<typename Dtype>
__global__ void Concat(const int nthreads, const Dtype* in_data,
const bool forward, const int num_concats,
const int concat_size, const int top_concat_axis,
const int bottom_concat_axis,
const int offset_concat_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index
+ (concat_num * top_concat_axis + offset_concat_axis) * concat_size;
if (forward) {
out_data[top_index] = in_data[index];
} else {
out_data[index] = in_data[top_index];
}
}
}
#endif // USE_CUDA
template<typename Dtype>
void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (bottom.size() == 1) { return; }
Dtype* top_data = top[0]->mutable_gpu_data();
int offset_concat_axis = 0;
const int top_concat_axis = top[0]->shape(concat_axis_);
const bool kForward = true;
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
Concat<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS)(
nthreads, bottom_data, kForward, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_->id());
viennacl::ocl::kernel &oclk_concat = program.get_kernel(
CL_KERNEL_SELECT("concat"));
viennacl::ocl::enqueue(
oclk_concat(nthreads, WrapHandle((cl_mem) bottom_data, &ctx),
kForward ? 1 : 0, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis,
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
offset_concat_axis += bottom_concat_axis;
}
}
template<typename Dtype>
void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (bottom.size() == 1) { return; }
const Dtype* top_diff = top[0]->gpu_diff();
int offset_concat_axis = 0;
const int top_concat_axis = top[0]->shape(concat_axis_);
const bool kForward = false;
for (int i = 0; i < bottom.size(); ++i) {
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
if (propagate_down[i]) {
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
Concat<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS)(
nthreads, top_diff, kForward, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis,
offset_concat_axis, bottom_diff);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_->id());
viennacl::ocl::kernel &oclk_concat = program.get_kernel(
CL_KERNEL_SELECT("concat"));
viennacl::ocl::enqueue(
oclk_concat(nthreads, WrapHandle((cl_mem) top_diff, &ctx),
kForward ? 1 : 0, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis,
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
offset_concat_axis += bottom_concat_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer);
} // namespace caffe
|
6710144bf3deee1e7ed85a139555d5780802aa2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <memory_intrinsics.h>
#include <strided_reduction.h>
#include <cstdio>
using namespace amgx;
namespace amgx
{
namespace aggregation
{
namespace size8_selector
{
template <int NUM_COLS, typename IndexType>
__global__ __launch_bounds__(256, 4)
void my_findStrongestNeighbourBlockDiaCsr_NoMergeClean(
const IndexType *row_offsets, const IndexType *column_indices,
const float *edge_weights, const IndexType num_block_rows, const IndexType num_nonzero,
IndexType *aggregated, IndexType *aggregates, int *strongest_neighbour,
IndexType *partner_index, float *weight_strongest_neighbour, int deterministic,
const IndexType *unassigned_rows,
const int num_unassigned_row)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
const int lane_id = utils::lane_id();
bool valid_tid = false;
for (; utils::any(valid_tid = tid < num_block_rows); tid += gridDim.x * blockDim.x)
{
int jmin = -NUM_COLS * 2, jmax = -NUM_COLS * 4;
float weight;
int jcol;
float max_weight_unaggregated = 0;
int strongest_unaggregated = -1;
bool is_unassigned = false;
if (valid_tid)
{
is_unassigned = (__load_streaming(partner_index + tid) == -1);
}
if (is_unassigned) // Unaggregated row
{
jmin = __load_all(row_offsets + tid);
jmax = __load_all(row_offsets + tid + 1);
}
if (utils::any(is_unassigned))
{
int jj = jmin - amgx::strided_reduction::warp_loader<int, NUM_COLS>::align_shift(jmin);
for (; utils::any(jj < jmax, utils::activemask()); jj += NUM_COLS)
{
int I[NUM_COLS];
float W[NUM_COLS];
int P[NUM_COLS];
amgx::strided_reduction::warp_loader<int, NUM_COLS>::load(column_indices, jj, num_nonzero, I);
#pragma unroll
for (int i = 0; i < NUM_COLS; i++)
{
int j = jj + i;
jcol = I[i];
if (j >= jmin && j < jmax)
{
P[i] = __load_nc(partner_index + jcol); //make this load ASAP
}
}
amgx::strided_reduction::warp_loader<float, NUM_COLS>::load(edge_weights, jj, num_nonzero, W);
#pragma unroll
for (int i = 0; i < NUM_COLS; i++)
{
weight = W[i];
jcol = I[i];
int j = jj + i;
if (j >= jmin && j < jmax)
{
if (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated)) // unaggregated
{
if (tid != jcol && P[i] == -1)
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
}
}
}
}
if (strongest_unaggregated == -1) // All neighbours are aggregated
{
// Put in its own aggregate
if (!deterministic && is_unassigned)
{
partner_index[tid] = tid;
}
}
else
{
strongest_neighbour[tid] = strongest_unaggregated;
}
}
}
}
#define ALGORITHM_NOMERGE 0
#define ALGORITHM_STOREWEIGHTS 1
#define ALGORITHM_STOREWEIGHTS_2 2
template <int NUM_COLS, int ALGORITHM, int ASSUME_ALL_UNASSIGNED, int LOAD_ONLY_UNASSIGNED, typename IndexType>
__global__ __launch_bounds__(256, 4)
void my_findStrongestNeighbourBlockDiaCsr_NoMerge(
const IndexType *row_offsets, const IndexType *column_indices,
const float *edge_weights, const IndexType num_block_rows, const IndexType num_nonzero,
IndexType *aggregated, IndexType *aggregates, int *strongest_neighbour,
IndexType *partner_index, float *weight_strongest_neighbour, int deterministic,
const IndexType *n_unassigned_per_block, const IndexType *unassigned_per_block
//const int num_unassigned_row
//const IndexType *unassigned_rows,
)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int bid = blockIdx.x;
const int lane_id = utils::lane_id();
bool valid_tid = false;
for (; utils::any( valid_tid = tid < num_block_rows); tid += gridDim.x * blockDim.x)
{
int jmin = -NUM_COLS * 2, jmax = -NUM_COLS * 4;
float weight;
int jcol;
float max_weight_unaggregated = 0;
int strongest_unaggregated = -1;
float max_weight_aggregated = 0.;
int strongest_aggregated = -1;
int partner = -1;
int partner0, partner1, partner2;
int agg_jcol;
bool is_unassigned = false;
int rowi = -1;
if (LOAD_ONLY_UNASSIGNED)
{
if (valid_tid)
{
rowi = unassigned_per_block[tid];//unassigned_per_block+bid*256+threadIdx.x);
is_unassigned = (__load_nc(partner_index + rowi) == -1);
}
if (is_unassigned)
{
jmin = __load_nc(row_offsets + rowi);
jmax = __load_nc(row_offsets + rowi + 1);
}
}
else
{
rowi = tid;
if (ALGORITHM == ALGORITHM_NOMERGE)
{
if (valid_tid) { is_unassigned = (__load_streaming(partner_index + tid) == -1); }
}
else //ALGORITHM_STOREWEIGHTS or ALGORITHM_STOREWEIGHTS_2
{
if (valid_tid) { is_unassigned = (__load_streaming(aggregated + tid) == -1); }
}
if (is_unassigned) // mind the else above
{
jmin = __load_global(row_offsets + rowi);
jmax = __load_lastuse(row_offsets + rowi + 1);
}
}
if (utils::any(is_unassigned))
{
if (is_unassigned) // Unaggregated row
{
if (ALGORITHM == ALGORITHM_STOREWEIGHTS)
{
partner = partner_index[rowi];
}
else if (ALGORITHM == ALGORITHM_STOREWEIGHTS_2)
{
partner0 = partner_index[rowi];
partner1 = partner_index[num_block_rows + rowi];
partner2 = partner_index[2 * num_block_rows + rowi];
}
}
int jj = jmin - amgx::strided_reduction::warp_loader<int, NUM_COLS>::align_shift(jmin);
for (; utils::any(jj < jmax && jmax >= 0); jj += NUM_COLS)
{
int I[NUM_COLS];
float W[NUM_COLS];
int P[NUM_COLS];
int jj_ok = (jj >= 0 && jj < jmax && jmax >= 0) ? jj : 0;
amgx::strided_reduction::warp_loader<int, NUM_COLS>::load(column_indices, jj_ok, num_nonzero, I);
#pragma unroll
for (int i = 0; i < NUM_COLS; i++)
{
int j = jj + i;
jcol = I[i];
if (j >= jmin && j < jmax)
{
if (ALGORITHM == ALGORITHM_NOMERGE)
{
P[i] = __load_nc(partner_index + jcol); //make this load ASAP
}
else
{
P[i] = __load_nc(aggregated + jcol);
}
}
}
amgx::strided_reduction::warp_loader<float, NUM_COLS>::load(edge_weights, jj_ok, num_nonzero, W);
#pragma unroll
for (int i = 0; i < NUM_COLS; i++)
{
weight = W[i];
jcol = I[i];
int j = jj + i;
if (j >= jmin && j < jmax)
{
if (ALGORITHM == ALGORITHM_NOMERGE)
{
if (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated)) // unaggregated
{
if (rowi != jcol && P[i] == -1)
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
}
}
else
{
bool partner_condition;
if (ALGORITHM == ALGORITHM_STOREWEIGHTS)
{
partner_condition = jcol != partner;
}
else if (ALGORITHM == ALGORITHM_STOREWEIGHTS_2)
{
partner_condition = jcol != partner0 && jcol != partner1 && jcol != partner2;
}
agg_jcol = P[i];
if (partner_condition && rowi != jcol)
{
if (agg_jcol == -1 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
else if (agg_jcol != -1 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated))) // unaggregated
{
max_weight_aggregated = weight;
strongest_aggregated = jcol;
}
}
}
}
}
}
if (valid_tid && is_unassigned)
{
if (ALGORITHM == ALGORITHM_NOMERGE)
{
if (strongest_unaggregated == -1) // All neighbours are aggregated
{
// Put in its own aggregate
if (!deterministic)
{
partner_index[rowi] = rowi;
}
}
else
{
strongest_neighbour[rowi] = strongest_unaggregated;
}
}
else if (ALGORITHM == ALGORITHM_STOREWEIGHTS)
{
if (strongest_unaggregated == -1) // All neighbours are aggregated
{
if (!deterministic)
{
if (strongest_aggregated != -1)
{
aggregates[tid] = aggregates[strongest_aggregated];
aggregated[tid] = 1;
aggregates[partner] = aggregates[strongest_aggregated];
aggregated[partner] = 1;
}
else // leave in its own aggregate
{
aggregated[partner] = 1;
aggregated[tid] = 1;
}
}
}
else // Found an unaggregated aggregate
{
weight_strongest_neighbour[tid] = max_weight_unaggregated;
strongest_neighbour[tid] = aggregates[strongest_unaggregated];
}
}
else if (ALGORITHM == ALGORITHM_STOREWEIGHTS_2)
{
if (strongest_unaggregated == -1 && strongest_aggregated != -1) // all neighbours are aggregated, store the strongest aggregated
{
weight_strongest_neighbour[tid] = -max_weight_aggregated;
strongest_neighbour[tid] = aggregates[strongest_aggregated];
}
else if (strongest_unaggregated != -1)
{
weight_strongest_neighbour[tid] = max_weight_unaggregated;
strongest_neighbour[tid] = aggregates[strongest_unaggregated];
}
}
}
}
bid += gridDim.x;
}
}
#define INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(numcols, algo,assume,c) template __global__ void my_findStrongestNeighbourBlockDiaCsr_NoMerge<numcols,algo,assume,c>(\
const int *row_offsets, const int *column_indices,\
const float *edge_weights, const int num_block_rows,const int num_nonzero,\
int *aggregated, int *aggregates, int *strongest_neighbour,\
int *partner_index, float *weight_strongest_neighbour, int deterministic,\
const int* n_unassigned_per_block, const int * unassigned_per_block);
#define INSTmy_findStrongestNeighbourBlockDiaCsr_NoMergeClean(numcols) template __global__ void my_findStrongestNeighbourBlockDiaCsr_NoMergeClean<numcols,int>(\
const int *row_offsets, const int *column_indices,\
const float *edge_weights, const int num_block_rows,const int num_nonzero,\
int *aggregated, int *aggregates, int *strongest_neighbour,\
int *partner_index, float *weight_strongest_neighbour, int deterministic,\
const int *unassigned_rows,\
const int num_unassigned_row);
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_NOMERGE, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_STOREWEIGHTS, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_STOREWEIGHTS_2, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_NOMERGE, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_STOREWEIGHTS, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_STOREWEIGHTS_2, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_NOMERGE, 0, 1) //load only unassigned
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_NOMERGE, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_STOREWEIGHTS, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_STOREWEIGHTS_2, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_NOMERGE, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_STOREWEIGHTS, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_STOREWEIGHTS_2, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_NOMERGE, 0, 1) //load only unassigned
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_NOMERGE, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_STOREWEIGHTS, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_STOREWEIGHTS_2, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_NOMERGE, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_STOREWEIGHTS, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_STOREWEIGHTS_2, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_NOMERGE, 0, 1) //load only unassigned
INSTmy_findStrongestNeighbourBlockDiaCsr_NoMergeClean(1)
INSTmy_findStrongestNeighbourBlockDiaCsr_NoMergeClean(2)
INSTmy_findStrongestNeighbourBlockDiaCsr_NoMergeClean(4)
#define __load_ __load_streaming
template<int ALREADY_COMPACT>
__global__ void my_blockCompact(
int *partner_index, const int num_rows,
int *unassigned_per_block_in,
int *n_unassigned_per_block, int *unassigned_per_block)
{
int bid = blockIdx.x; //RMV
int tid = threadIdx.x + blockDim.x * blockIdx.x;
for (; bid < num_rows / 256 + 1/*__any(tid < num_rows)*/; tid += gridDim.x * blockDim.x)
{
int row = tid;
bool no_partner = 0; //RMV
if (tid < num_rows)
{
if (ALREADY_COMPACT)
{
row = unassigned_per_block_in[tid];
}
if (partner_index[row] == -1) // Unaggregated row
{
no_partner = 1;
}
}
amgx::strided_reduction::block_binary_compaction<256, 32, 1>(
n_unassigned_per_block, unassigned_per_block, bid,
no_partner, row);
bid += gridDim.x;
}
}
template __global__ void my_blockCompact<0>(
int *partner_index, const int num_rows,
int *unassigned_per_block_in,
int *n_unassigned_per_block, int *unassigned_per_block);
template __global__ void my_blockCompact<1>(
int *partner_index, const int num_rows,
int *unassigned_per_block_in,
int *n_unassigned_per_block, int *unassigned_per_block);
__global__ void my_MatchEdgesWithCompaction(const int num_rows, int *partner_index, int *aggregates, const int *strongest_neighbour, int *sets_per_block,
int *unassigned_per_block_in, int *n_unassigned_per_block, int *unassigned_per_block
)
{
int potential_match, potential_match_neighbour;
int warp_count = 0;
int bid = blockIdx.x; //RMV
int tid = threadIdx.x + blockDim.x * blockIdx.x;
for (; bid < num_rows / 1024 + 1/*__any(tid < num_rows)*/; tid += gridDim.x * blockDim.x)
{
int row = tid;
bool no_partner = 0;
if (tid < num_rows)
{
if (partner_index[row] == -1) // Unaggregated row
{
no_partner = 1;
potential_match = strongest_neighbour[row];
if (potential_match != -1)
{
potential_match_neighbour = __load_(strongest_neighbour + potential_match);
if ( potential_match_neighbour == row ) // we have a match
{
no_partner = 0;
//partner_notnull = 1;//RMV
partner_index[row] = potential_match;
aggregates[row] = ( potential_match > row ) ? row : potential_match;
}
}
}
}
amgx::strided_reduction::block_binary_compaction<1024, 32, 1>(
n_unassigned_per_block, unassigned_per_block, bid,
no_partner, row);
warp_count += amgx::strided_reduction::warp_binary_count(no_partner);
bid += gridDim.x;
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
__global__ void my_MatchEdges(const int num_rows, int *partner_index, int *aggregates, const int *strongest_neighbour, int *sets_per_block)
{
int potential_match, potential_match_neighbour;
int warp_count = 0;
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; utils::any(tid < num_rows); tid += gridDim.x * blockDim.x)
{
bool has_set_partner_index = 0;
if (tid < num_rows)
{
if (partner_index[tid] == -1) // Unaggregated row
{
potential_match = strongest_neighbour[tid];
if (potential_match != -1)
{
potential_match_neighbour = __load_(strongest_neighbour + potential_match);
if ( potential_match_neighbour == tid ) // we have a match
{
has_set_partner_index = 1;
partner_index[tid] = potential_match;
aggregates[tid] = ( potential_match > tid) ? tid : potential_match;
}
}
}
}
warp_count += amgx::strided_reduction::warp_binary_count(has_set_partner_index);
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
// matchEdges
__global__ void my_joinExistingAggregates(int num_rows, int *aggregates, int *aggregated, int *aggregates_candidate, int *sets_per_block)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int warp_count = 0;
while (utils::any(tid < num_rows))
{
bool has_set_partner_index = 0;
if (tid < num_rows)
{
if (aggregated[tid] == -1 && aggregates_candidate[tid] != -1) // Unaggregated row
{
aggregates[tid] = aggregates_candidate[tid];
aggregated[tid] = 1;
has_set_partner_index = 1;
}
}
warp_count += amgx::strided_reduction::warp_binary_count(has_set_partner_index);
tid += gridDim.x * blockDim.x;
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
// Kernel that checks if perfect matchs exist
__global__ void my_matchAggregates(int *aggregates, int *aggregated, int *strongest_neighbour, const int num_rows, int *sets_per_block)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int potential_match, potential_match_neighbour, my_aggregate;
int warp_count = 0;
while (utils::any(tid < num_rows))
{
bool has_set_partner_index = 0;
if (tid < num_rows) if (aggregated[tid] == -1) // Unaggregated row
{
potential_match = strongest_neighbour[tid];
if (potential_match != -1)
{
potential_match_neighbour = __load_(strongest_neighbour + potential_match); //or global
my_aggregate = aggregates[tid];
if (potential_match_neighbour == my_aggregate) // we have a match
{
has_set_partner_index = 1;
aggregated[tid] = 1;
aggregates[tid] = ( potential_match > my_aggregate) ? my_aggregate : potential_match;
}
}
}
warp_count += amgx::strided_reduction::warp_binary_count(has_set_partner_index);
tid += gridDim.x * blockDim.x;
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
// Kernel that checks if perfect matchs exist
__global__ void my_matchAggregatesSize4(int *aggregates, int *aggregated, int *strongest_neighbour, int *partner_index, const int num_rows, int *sets_per_block)
{
int potential_match, potential_match_neighbour, my_aggregate;
int warp_count = 0;
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; utils::any(tid < num_rows); tid += blockDim.x * gridDim.x)
{
bool has_set_partner_index = 0;
if (tid < num_rows) if (aggregated[tid] == -1) // Unaggregated row
{
potential_match = strongest_neighbour[tid];
if (potential_match != -1)
{
potential_match_neighbour = __load_(strongest_neighbour + potential_match);
my_aggregate = aggregates[tid];
if (potential_match_neighbour == my_aggregate) // we have a match
{
has_set_partner_index = 1;
aggregated[tid] = 1;
aggregates[tid] = ( potential_match > my_aggregate) ? my_aggregate : potential_match;
partner_index[tid + num_rows] = potential_match;
partner_index[tid + 2 * num_rows] = partner_index[potential_match];
}
}
}
warp_count += amgx::strided_reduction::warp_binary_count(has_set_partner_index);
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
}
}
}
| 6710144bf3deee1e7ed85a139555d5780802aa2b.cu | /* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <memory_intrinsics.h>
#include <strided_reduction.h>
#include <cstdio>
using namespace amgx;
namespace amgx
{
namespace aggregation
{
namespace size8_selector
{
template <int NUM_COLS, typename IndexType>
__global__ __launch_bounds__(256, 4)
void my_findStrongestNeighbourBlockDiaCsr_NoMergeClean(
const IndexType *row_offsets, const IndexType *column_indices,
const float *edge_weights, const IndexType num_block_rows, const IndexType num_nonzero,
IndexType *aggregated, IndexType *aggregates, int *strongest_neighbour,
IndexType *partner_index, float *weight_strongest_neighbour, int deterministic,
const IndexType *unassigned_rows,
const int num_unassigned_row)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
const int lane_id = utils::lane_id();
bool valid_tid = false;
for (; utils::any(valid_tid = tid < num_block_rows); tid += gridDim.x * blockDim.x)
{
int jmin = -NUM_COLS * 2, jmax = -NUM_COLS * 4;
float weight;
int jcol;
float max_weight_unaggregated = 0;
int strongest_unaggregated = -1;
bool is_unassigned = false;
if (valid_tid)
{
is_unassigned = (__load_streaming(partner_index + tid) == -1);
}
if (is_unassigned) // Unaggregated row
{
jmin = __load_all(row_offsets + tid);
jmax = __load_all(row_offsets + tid + 1);
}
if (utils::any(is_unassigned))
{
int jj = jmin - amgx::strided_reduction::warp_loader<int, NUM_COLS>::align_shift(jmin);
for (; utils::any(jj < jmax, utils::activemask()); jj += NUM_COLS)
{
int I[NUM_COLS];
float W[NUM_COLS];
int P[NUM_COLS];
amgx::strided_reduction::warp_loader<int, NUM_COLS>::load(column_indices, jj, num_nonzero, I);
#pragma unroll
for (int i = 0; i < NUM_COLS; i++)
{
int j = jj + i;
jcol = I[i];
if (j >= jmin && j < jmax)
{
P[i] = __load_nc(partner_index + jcol); //make this load ASAP
}
}
amgx::strided_reduction::warp_loader<float, NUM_COLS>::load(edge_weights, jj, num_nonzero, W);
#pragma unroll
for (int i = 0; i < NUM_COLS; i++)
{
weight = W[i];
jcol = I[i];
int j = jj + i;
if (j >= jmin && j < jmax)
{
if (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated)) // unaggregated
{
if (tid != jcol && P[i] == -1)
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
}
}
}
}
if (strongest_unaggregated == -1) // All neighbours are aggregated
{
// Put in its own aggregate
if (!deterministic && is_unassigned)
{
partner_index[tid] = tid;
}
}
else
{
strongest_neighbour[tid] = strongest_unaggregated;
}
}
}
}
#define ALGORITHM_NOMERGE 0
#define ALGORITHM_STOREWEIGHTS 1
#define ALGORITHM_STOREWEIGHTS_2 2
template <int NUM_COLS, int ALGORITHM, int ASSUME_ALL_UNASSIGNED, int LOAD_ONLY_UNASSIGNED, typename IndexType>
__global__ __launch_bounds__(256, 4)
void my_findStrongestNeighbourBlockDiaCsr_NoMerge(
const IndexType *row_offsets, const IndexType *column_indices,
const float *edge_weights, const IndexType num_block_rows, const IndexType num_nonzero,
IndexType *aggregated, IndexType *aggregates, int *strongest_neighbour,
IndexType *partner_index, float *weight_strongest_neighbour, int deterministic,
const IndexType *n_unassigned_per_block, const IndexType *unassigned_per_block
//const int num_unassigned_row
//const IndexType *unassigned_rows,
)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int bid = blockIdx.x;
const int lane_id = utils::lane_id();
bool valid_tid = false;
for (; utils::any( valid_tid = tid < num_block_rows); tid += gridDim.x * blockDim.x)
{
int jmin = -NUM_COLS * 2, jmax = -NUM_COLS * 4;
float weight;
int jcol;
float max_weight_unaggregated = 0;
int strongest_unaggregated = -1;
float max_weight_aggregated = 0.;
int strongest_aggregated = -1;
int partner = -1;
int partner0, partner1, partner2;
int agg_jcol;
bool is_unassigned = false;
int rowi = -1;
if (LOAD_ONLY_UNASSIGNED)
{
if (valid_tid)
{
rowi = unassigned_per_block[tid];//unassigned_per_block+bid*256+threadIdx.x);
is_unassigned = (__load_nc(partner_index + rowi) == -1);
}
if (is_unassigned)
{
jmin = __load_nc(row_offsets + rowi);
jmax = __load_nc(row_offsets + rowi + 1);
}
}
else
{
rowi = tid;
if (ALGORITHM == ALGORITHM_NOMERGE)
{
if (valid_tid) { is_unassigned = (__load_streaming(partner_index + tid) == -1); }
}
else //ALGORITHM_STOREWEIGHTS or ALGORITHM_STOREWEIGHTS_2
{
if (valid_tid) { is_unassigned = (__load_streaming(aggregated + tid) == -1); }
}
if (is_unassigned) // mind the else above
{
jmin = __load_global(row_offsets + rowi);
jmax = __load_lastuse(row_offsets + rowi + 1);
}
}
if (utils::any(is_unassigned))
{
if (is_unassigned) // Unaggregated row
{
if (ALGORITHM == ALGORITHM_STOREWEIGHTS)
{
partner = partner_index[rowi];
}
else if (ALGORITHM == ALGORITHM_STOREWEIGHTS_2)
{
partner0 = partner_index[rowi];
partner1 = partner_index[num_block_rows + rowi];
partner2 = partner_index[2 * num_block_rows + rowi];
}
}
int jj = jmin - amgx::strided_reduction::warp_loader<int, NUM_COLS>::align_shift(jmin);
for (; utils::any(jj < jmax && jmax >= 0); jj += NUM_COLS)
{
int I[NUM_COLS];
float W[NUM_COLS];
int P[NUM_COLS];
int jj_ok = (jj >= 0 && jj < jmax && jmax >= 0) ? jj : 0;
amgx::strided_reduction::warp_loader<int, NUM_COLS>::load(column_indices, jj_ok, num_nonzero, I);
#pragma unroll
for (int i = 0; i < NUM_COLS; i++)
{
int j = jj + i;
jcol = I[i];
if (j >= jmin && j < jmax)
{
if (ALGORITHM == ALGORITHM_NOMERGE)
{
P[i] = __load_nc(partner_index + jcol); //make this load ASAP
}
else
{
P[i] = __load_nc(aggregated + jcol);
}
}
}
amgx::strided_reduction::warp_loader<float, NUM_COLS>::load(edge_weights, jj_ok, num_nonzero, W);
#pragma unroll
for (int i = 0; i < NUM_COLS; i++)
{
weight = W[i];
jcol = I[i];
int j = jj + i;
if (j >= jmin && j < jmax)
{
if (ALGORITHM == ALGORITHM_NOMERGE)
{
if (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated)) // unaggregated
{
if (rowi != jcol && P[i] == -1)
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
}
}
else
{
bool partner_condition;
if (ALGORITHM == ALGORITHM_STOREWEIGHTS)
{
partner_condition = jcol != partner;
}
else if (ALGORITHM == ALGORITHM_STOREWEIGHTS_2)
{
partner_condition = jcol != partner0 && jcol != partner1 && jcol != partner2;
}
agg_jcol = P[i];
if (partner_condition && rowi != jcol)
{
if (agg_jcol == -1 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
else if (agg_jcol != -1 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated))) // unaggregated
{
max_weight_aggregated = weight;
strongest_aggregated = jcol;
}
}
}
}
}
}
if (valid_tid && is_unassigned)
{
if (ALGORITHM == ALGORITHM_NOMERGE)
{
if (strongest_unaggregated == -1) // All neighbours are aggregated
{
// Put in its own aggregate
if (!deterministic)
{
partner_index[rowi] = rowi;
}
}
else
{
strongest_neighbour[rowi] = strongest_unaggregated;
}
}
else if (ALGORITHM == ALGORITHM_STOREWEIGHTS)
{
if (strongest_unaggregated == -1) // All neighbours are aggregated
{
if (!deterministic)
{
if (strongest_aggregated != -1)
{
aggregates[tid] = aggregates[strongest_aggregated];
aggregated[tid] = 1;
aggregates[partner] = aggregates[strongest_aggregated];
aggregated[partner] = 1;
}
else // leave in its own aggregate
{
aggregated[partner] = 1;
aggregated[tid] = 1;
}
}
}
else // Found an unaggregated aggregate
{
weight_strongest_neighbour[tid] = max_weight_unaggregated;
strongest_neighbour[tid] = aggregates[strongest_unaggregated];
}
}
else if (ALGORITHM == ALGORITHM_STOREWEIGHTS_2)
{
if (strongest_unaggregated == -1 && strongest_aggregated != -1) // all neighbours are aggregated, store the strongest aggregated
{
weight_strongest_neighbour[tid] = -max_weight_aggregated;
strongest_neighbour[tid] = aggregates[strongest_aggregated];
}
else if (strongest_unaggregated != -1)
{
weight_strongest_neighbour[tid] = max_weight_unaggregated;
strongest_neighbour[tid] = aggregates[strongest_unaggregated];
}
}
}
}
bid += gridDim.x;
}
}
#define INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(numcols, algo,assume,c) template __global__ void my_findStrongestNeighbourBlockDiaCsr_NoMerge<numcols,algo,assume,c>(\
const int *row_offsets, const int *column_indices,\
const float *edge_weights, const int num_block_rows,const int num_nonzero,\
int *aggregated, int *aggregates, int *strongest_neighbour,\
int *partner_index, float *weight_strongest_neighbour, int deterministic,\
const int* n_unassigned_per_block, const int * unassigned_per_block);
#define INSTmy_findStrongestNeighbourBlockDiaCsr_NoMergeClean(numcols) template __global__ void my_findStrongestNeighbourBlockDiaCsr_NoMergeClean<numcols,int>(\
const int *row_offsets, const int *column_indices,\
const float *edge_weights, const int num_block_rows,const int num_nonzero,\
int *aggregated, int *aggregates, int *strongest_neighbour,\
int *partner_index, float *weight_strongest_neighbour, int deterministic,\
const int *unassigned_rows,\
const int num_unassigned_row);
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_NOMERGE, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_STOREWEIGHTS, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_STOREWEIGHTS_2, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_NOMERGE, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_STOREWEIGHTS, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_STOREWEIGHTS_2, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_NOMERGE, 0, 1) //load only unassigned
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_NOMERGE, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_STOREWEIGHTS, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_STOREWEIGHTS_2, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_NOMERGE, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_STOREWEIGHTS, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_STOREWEIGHTS_2, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_NOMERGE, 0, 1) //load only unassigned
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_NOMERGE, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_STOREWEIGHTS, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_STOREWEIGHTS_2, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_NOMERGE, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_STOREWEIGHTS, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_STOREWEIGHTS_2, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_NOMERGE, 0, 1) //load only unassigned
INSTmy_findStrongestNeighbourBlockDiaCsr_NoMergeClean(1)
INSTmy_findStrongestNeighbourBlockDiaCsr_NoMergeClean(2)
INSTmy_findStrongestNeighbourBlockDiaCsr_NoMergeClean(4)
#define __load_ __load_streaming
template<int ALREADY_COMPACT>
__global__ void my_blockCompact(
int *partner_index, const int num_rows,
int *unassigned_per_block_in,
int *n_unassigned_per_block, int *unassigned_per_block)
{
int bid = blockIdx.x; //RMV
int tid = threadIdx.x + blockDim.x * blockIdx.x;
for (; bid < num_rows / 256 + 1/*__any(tid < num_rows)*/; tid += gridDim.x * blockDim.x)
{
int row = tid;
bool no_partner = 0; //RMV
if (tid < num_rows)
{
if (ALREADY_COMPACT)
{
row = unassigned_per_block_in[tid];
}
if (partner_index[row] == -1) // Unaggregated row
{
no_partner = 1;
}
}
amgx::strided_reduction::block_binary_compaction<256, 32, 1>(
n_unassigned_per_block, unassigned_per_block, bid,
no_partner, row);
bid += gridDim.x;
}
}
template __global__ void my_blockCompact<0>(
int *partner_index, const int num_rows,
int *unassigned_per_block_in,
int *n_unassigned_per_block, int *unassigned_per_block);
template __global__ void my_blockCompact<1>(
int *partner_index, const int num_rows,
int *unassigned_per_block_in,
int *n_unassigned_per_block, int *unassigned_per_block);
__global__ void my_MatchEdgesWithCompaction(const int num_rows, int *partner_index, int *aggregates, const int *strongest_neighbour, int *sets_per_block,
int *unassigned_per_block_in, int *n_unassigned_per_block, int *unassigned_per_block
)
{
int potential_match, potential_match_neighbour;
int warp_count = 0;
int bid = blockIdx.x; //RMV
int tid = threadIdx.x + blockDim.x * blockIdx.x;
for (; bid < num_rows / 1024 + 1/*__any(tid < num_rows)*/; tid += gridDim.x * blockDim.x)
{
int row = tid;
bool no_partner = 0;
if (tid < num_rows)
{
if (partner_index[row] == -1) // Unaggregated row
{
no_partner = 1;
potential_match = strongest_neighbour[row];
if (potential_match != -1)
{
potential_match_neighbour = __load_(strongest_neighbour + potential_match);
if ( potential_match_neighbour == row ) // we have a match
{
no_partner = 0;
//partner_notnull = 1;//RMV
partner_index[row] = potential_match;
aggregates[row] = ( potential_match > row ) ? row : potential_match;
}
}
}
}
amgx::strided_reduction::block_binary_compaction<1024, 32, 1>(
n_unassigned_per_block, unassigned_per_block, bid,
no_partner, row);
warp_count += amgx::strided_reduction::warp_binary_count(no_partner);
bid += gridDim.x;
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
__global__ void my_MatchEdges(const int num_rows, int *partner_index, int *aggregates, const int *strongest_neighbour, int *sets_per_block)
{
int potential_match, potential_match_neighbour;
int warp_count = 0;
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; utils::any(tid < num_rows); tid += gridDim.x * blockDim.x)
{
bool has_set_partner_index = 0;
if (tid < num_rows)
{
if (partner_index[tid] == -1) // Unaggregated row
{
potential_match = strongest_neighbour[tid];
if (potential_match != -1)
{
potential_match_neighbour = __load_(strongest_neighbour + potential_match);
if ( potential_match_neighbour == tid ) // we have a match
{
has_set_partner_index = 1;
partner_index[tid] = potential_match;
aggregates[tid] = ( potential_match > tid) ? tid : potential_match;
}
}
}
}
warp_count += amgx::strided_reduction::warp_binary_count(has_set_partner_index);
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
// matchEdges
__global__ void my_joinExistingAggregates(int num_rows, int *aggregates, int *aggregated, int *aggregates_candidate, int *sets_per_block)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int warp_count = 0;
while (utils::any(tid < num_rows))
{
bool has_set_partner_index = 0;
if (tid < num_rows)
{
if (aggregated[tid] == -1 && aggregates_candidate[tid] != -1) // Unaggregated row
{
aggregates[tid] = aggregates_candidate[tid];
aggregated[tid] = 1;
has_set_partner_index = 1;
}
}
warp_count += amgx::strided_reduction::warp_binary_count(has_set_partner_index);
tid += gridDim.x * blockDim.x;
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
// Kernel that checks if perfect matchs exist
__global__ void my_matchAggregates(int *aggregates, int *aggregated, int *strongest_neighbour, const int num_rows, int *sets_per_block)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int potential_match, potential_match_neighbour, my_aggregate;
int warp_count = 0;
while (utils::any(tid < num_rows))
{
bool has_set_partner_index = 0;
if (tid < num_rows) if (aggregated[tid] == -1) // Unaggregated row
{
potential_match = strongest_neighbour[tid];
if (potential_match != -1)
{
potential_match_neighbour = __load_(strongest_neighbour + potential_match); //or global
my_aggregate = aggregates[tid];
if (potential_match_neighbour == my_aggregate) // we have a match
{
has_set_partner_index = 1;
aggregated[tid] = 1;
aggregates[tid] = ( potential_match > my_aggregate) ? my_aggregate : potential_match;
}
}
}
warp_count += amgx::strided_reduction::warp_binary_count(has_set_partner_index);
tid += gridDim.x * blockDim.x;
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
// Kernel that checks if perfect matchs exist
// Size-4 variant of my_matchAggregates: performs the same mutual-handshake
// match between pairs, and additionally records, for each matched row,
// the partner pair's indices in the second and third num_rows-sized slices
// of partner_index (building aggregates of up to four rows).
__global__ void my_matchAggregatesSize4(int *aggregates, int *aggregated, int *strongest_neighbour, int *partner_index, const int num_rows, int *sets_per_block)
{
    int potential_match, potential_match_neighbour, my_aggregate;
    int warp_count = 0;
    // Grid-stride loop; utils::any keeps whole warps alive so the warp-wide
    // reduction below sees every lane each iteration.
    for (int tid = threadIdx.x + blockDim.x * blockIdx.x; utils::any(tid < num_rows); tid += blockDim.x * gridDim.x)
    {
        bool has_set_partner_index = 0;
        if (tid < num_rows) if (aggregated[tid] == -1) // Unaggregated row
            {
                potential_match = strongest_neighbour[tid];
                if (potential_match != -1)
                {
                    // __load_ is presumably a cached/read-only load wrapper -- TODO confirm
                    potential_match_neighbour = __load_(strongest_neighbour + potential_match);
                    my_aggregate = aggregates[tid];
                    if (potential_match_neighbour == my_aggregate) // we have a match
                    {
                        has_set_partner_index = 1;
                        aggregated[tid] = 1;
                        // both partners resolve to the smaller of the two ids
                        aggregates[tid] = ( potential_match > my_aggregate) ? my_aggregate : potential_match;
                        // remember the matched pair's members for the size-4 merge
                        partner_index[tid + num_rows] = potential_match;
                        partner_index[tid + 2 * num_rows] = partner_index[potential_match];
                    }
                }
            }
        warp_count += amgx::strided_reduction::warp_binary_count(has_set_partner_index);
    }
    amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
}
}
}
|
c91a1b4f8d081009ea627ed3171505a380693bca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
//__device__ float Determinant(float *a,int n,float *temp);
__device__ __shared__ float result[3];
// Computes the determinant of an n-by-n matrix, n = 1..3 only.
// param layout: input[0] holds n (as a float), followed by the n*n matrix in
// row-major order; the result is written to matrix + n*n.
// For n == 3, warp lanes 0..2 each compute one term of the cyclic (Sarrus)
// expansion into the shared array `result`, and lane 0 sums them.
__device__ void MatrixDeterminant(void *param)
{
    float *input = (float *) param;
    int warp_size=32;
    int n = (int)input[0];
    float* matrix = input+1;
    int thread = threadIdx.x % warp_size;
    float value =0;
    float *det = matrix +n*n;
    if(n < 1){
        //Error return 0
        value = 0;
    }
    else {
        if(n==1)
            value = matrix[0];
        else if(n==2)
            value = matrix[0] * matrix[3] - matrix[2] * matrix[1];
        else if (n==3){
            if(thread < 3){
                // Cyclic term: a0t*(a1,(t+1)%3 * a2,(t+2)%3 - a1,(t+2)%3 * a2,(t+1)%3).
                // BUGFIX: the previous pow(-1.0,thread) factor was wrong -- the
                // cyclic expansion already carries the correct signs, so the
                // extra factor negated the t==1 term (a row-swapped identity
                // matrix returned +1 instead of -1).
                result[thread] = matrix[thread]*(matrix[1*n + (thread+1)%3]*matrix[2*n + (thread+2)%3] - matrix[1*n + (thread+2)%3]*matrix[2 *n + (thread+1)%3]);
            }
        }
        else
            value = 0;//This program works only for n=1 to 3
    }
    if(n==3 && thread ==0)
    {
        // NOTE(review): no __syncwarp/__syncthreads between the writes above
        // and this read -- relies on implicit warp-synchronous execution;
        // confirm on architectures with independent thread scheduling.
        for(int i=0; i < n; i++)
        {
            value = value + result[i];
        }
        *det = value;
    }
    else if(n<3)
        *det = value;
}
//Recursive function not working
/*
__device__ float Determinant(float *a,int n,float *m)
{
int i,j,j1,j2;
float det = 0;
printf("%dInput\n",n);
if (n < 1) { * Error
} else if (n == 1) { /* Shouldn't get used
det = a[0];
} else if (n == 2) {
det = a[0] * a[3] - a[2] * a[1];
} else {
det = 0;
for (j1=0;j1<n;j1++) {
// m = (float *)malloc((n-1)*(n-1) * sizeof(float));
// for (i=0;i<n-1;i++)
// m[i] = (float *)malloc((n-1)*sizeof(float));
for (i=1;i<n;i++) {
j2 = 0;
for (j=0;j<n;j++) {
if (j == j1)
continue;
m[(i-1)*n+j2] = a[i * n + j];
j2++;
}
}
det += pow(-1.0,1.0+j1+1.0) * a[j1] * Determinant(m,n-1,a);
printf("%f Intermidiate det\n", det);
// free(m);
}
}
return(det);
}*/
| c91a1b4f8d081009ea627ed3171505a380693bca.cu | #include <stdio.h>
//__device__ float Determinant(float *a,int n,float *temp);
__device__ __shared__ float result[3];
// Computes the determinant of an n-by-n matrix, n = 1..3 only.
// param layout: input[0] holds n (as a float), followed by the n*n matrix in
// row-major order; the result is written to matrix + n*n.
// For n == 3, warp lanes 0..2 each compute one term of the cyclic (Sarrus)
// expansion into the shared array `result`, and lane 0 sums them.
__device__ void MatrixDeterminant(void *param)
{
    float *input = (float *) param;
    int warp_size=32;
    int n = (int)input[0];
    float* matrix = input+1;
    int thread = threadIdx.x % warp_size;
    float value =0;
    float *det = matrix +n*n;
    if(n < 1){
        //Error return 0
        value = 0;
    }
    else {
        if(n==1)
            value = matrix[0];
        else if(n==2)
            value = matrix[0] * matrix[3] - matrix[2] * matrix[1];
        else if (n==3){
            if(thread < 3){
                // Cyclic term: a0t*(a1,(t+1)%3 * a2,(t+2)%3 - a1,(t+2)%3 * a2,(t+1)%3).
                // BUGFIX: the previous pow(-1.0,thread) factor was wrong -- the
                // cyclic expansion already carries the correct signs, so the
                // extra factor negated the t==1 term (a row-swapped identity
                // matrix returned +1 instead of -1).
                result[thread] = matrix[thread]*(matrix[1*n + (thread+1)%3]*matrix[2*n + (thread+2)%3] - matrix[1*n + (thread+2)%3]*matrix[2 *n + (thread+1)%3]);
            }
        }
        else
            value = 0;//This program works only for n=1 to 3
    }
    if(n==3 && thread ==0)
    {
        // NOTE(review): no __syncwarp/__syncthreads between the writes above
        // and this read -- relies on implicit warp-synchronous execution;
        // confirm on architectures with independent thread scheduling.
        for(int i=0; i < n; i++)
        {
            value = value + result[i];
        }
        *det = value;
    }
    else if(n<3)
        *det = value;
}
//Recursive function not working
/*
__device__ float Determinant(float *a,int n,float *m)
{
int i,j,j1,j2;
float det = 0;
printf("%dInput\n",n);
if (n < 1) { * Error
} else if (n == 1) { /* Shouldn't get used
det = a[0];
} else if (n == 2) {
det = a[0] * a[3] - a[2] * a[1];
} else {
det = 0;
for (j1=0;j1<n;j1++) {
// m = (float *)malloc((n-1)*(n-1) * sizeof(float));
// for (i=0;i<n-1;i++)
// m[i] = (float *)malloc((n-1)*sizeof(float));
for (i=1;i<n;i++) {
j2 = 0;
for (j=0;j<n;j++) {
if (j == j1)
continue;
m[(i-1)*n+j2] = a[i * n + j];
j2++;
}
}
det += pow(-1.0,1.0+j1+1.0) * a[j1] * Determinant(m,n-1,a);
printf("%f Intermidiate det\n", det);
// free(m);
}
}
return(det);
}*/
|
3024b36cfba1c34095fc5799692f23a877a29ded.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "RK_time_step.h"
// Element-wise copy of three complex fields, sourceK -> destinationK.
// NOTE(review): source4/destination4 are accepted but never copied --
// confirm this is intentional.
__global__ void copy_arrays_device(int N, cudaComplex *source1, cudaComplex *source2, cudaComplex *source3, cudaComplex *source4, cudaComplex *destination1, cudaComplex *destination2, cudaComplex *destination3, cudaComplex *destination4){
    unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
    if(j<N){
        destination1[j].x=source1[j].x;
        destination1[j].y=source1[j].y;
        destination2[j].x=source2[j].x;
        destination2[j].y=source2[j].y;
        destination3[j].x=source3[j].x;
        destination3[j].y=source3[j].y;
    }
}
// Final RK4 combination: x += (K1 + 2*K2 + 2*K3 + K4)/6, applied
// independently to the real (.x) and imaginary (.y) parts of all four
// spectral fields. The K's already include the dt factor (folded in by
// assemble_NLS_device).
__global__ void single_RK4_step_device(int N, cudaComplex *RHS1_1, cudaComplex *RHS2_1, cudaComplex *RHS3_1, cudaComplex *RHS4_1, cudaComplex *RHS1_2, cudaComplex *RHS2_2, cudaComplex *RHS3_2, cudaComplex *RHS4_2, cudaComplex *RHS1_3, cudaComplex *RHS2_3, cudaComplex *RHS3_3, cudaComplex *RHS4_3, cudaComplex *RHS1_4, cudaComplex *RHS2_4, cudaComplex *RHS3_4, cudaComplex *RHS4_4, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat){
    unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
    if(j<N){
        x1_hat[j].x+=(RHS1_1[j].x+2.0*RHS1_2[j].x+2.0*RHS1_3[j].x+RHS1_4[j].x)/6.0;
        x1_hat[j].y+=(RHS1_1[j].y+2.0*RHS1_2[j].y+2.0*RHS1_3[j].y+RHS1_4[j].y)/6.0;
        x2_hat[j].x+=(RHS2_1[j].x+2.0*RHS2_2[j].x+2.0*RHS2_3[j].x+RHS2_4[j].x)/6.0;
        x2_hat[j].y+=(RHS2_1[j].y+2.0*RHS2_2[j].y+2.0*RHS2_3[j].y+RHS2_4[j].y)/6.0;
        x3_hat[j].x+=(RHS3_1[j].x+2.0*RHS3_2[j].x+2.0*RHS3_3[j].x+RHS3_4[j].x)/6.0;
        x3_hat[j].y+=(RHS3_1[j].y+2.0*RHS3_2[j].y+2.0*RHS3_3[j].y+RHS3_4[j].y)/6.0;
        x4_hat[j].x+=(RHS4_1[j].x+2.0*RHS4_2[j].x+2.0*RHS4_3[j].x+RHS4_4[j].x)/6.0;
        x4_hat[j].y+=(RHS4_1[j].y+2.0*RHS4_2[j].y+2.0*RHS4_3[j].y+RHS4_4[j].y)/6.0;
    }
}
// Assembles the dt-scaled right-hand side of the coupled spectral system:
// dispersion (k_laplace_d/betta), linear coupling/damping terms, and -- for
// fields 3 and 4 only -- the precomputed nonlinear convolution products
// Q3_hat_mul/Q4_hat_mul and the gain g. Constants betta, lka, delta_betta,
// kappa are presumably defined in RK_time_step.h -- TODO confirm.
__global__ void assemble_NLS_device(int N, real dt, real g, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat, cudaComplex *RHS1, cudaComplex *RHS2, cudaComplex *RHS3, cudaComplex *RHS4, cudaComplex *Q3_hat_mul, cudaComplex *Q4_hat_mul, real *k_laplace_d)
{
    unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (!(j < N)) return;
    RHS1[j].x=dt*(-0.5*k_laplace_d[j]/betta*x2_hat[j].x-(lka*x1_hat[j].x-delta_betta*x2_hat[j].x+kappa*x4_hat[j].x));
    RHS1[j].y=dt*(-0.5*k_laplace_d[j]/betta*x2_hat[j].y-(lka*x1_hat[j].y-delta_betta*x2_hat[j].y+kappa*x4_hat[j].y));
    RHS2[j].x=dt*(0.5*k_laplace_d[j]/betta*x1_hat[j].x-(delta_betta*x1_hat[j].x+lka*x2_hat[j].x-kappa*x3_hat[j].x));
    RHS2[j].y=dt*(0.5*k_laplace_d[j]/betta*x1_hat[j].y-(delta_betta*x1_hat[j].y+lka*x2_hat[j].y-kappa*x3_hat[j].y));
    RHS3[j].x=dt*(-0.5*k_laplace_d[j]/betta*x4_hat[j].x-(kappa*x2_hat[j].x+(lka-g)*x3_hat[j].x+delta_betta*x4_hat[j].x)-Q3_hat_mul[j].x);
    RHS3[j].y=dt*(-0.5*k_laplace_d[j]/betta*x4_hat[j].y-(kappa*x2_hat[j].y+(lka-g)*x3_hat[j].y+delta_betta*x4_hat[j].y)-Q3_hat_mul[j].y);
    RHS4[j].x=dt*(0.5*k_laplace_d[j]/betta*x3_hat[j].x-(-kappa*x1_hat[j].x+(lka-g)*x4_hat[j].x-delta_betta*x3_hat[j].x)-Q4_hat_mul[j].x);
    RHS4[j].y=dt*(0.5*k_laplace_d[j]/betta*x3_hat[j].y-(-kappa*x1_hat[j].y+(lka-g)*x4_hat[j].y-delta_betta*x3_hat[j].y)-Q4_hat_mul[j].y);
}
// Builds the intermediate RK4 state x_p = x_hat + wight * RHS for all four
// complex fields; real and imaginary components are updated separately.
__global__ void intermediate_device(int N, real wight, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat, cudaComplex *RHS1, cudaComplex *RHS2, cudaComplex *RHS3, cudaComplex *RHS4, cudaComplex *x1_p, cudaComplex *x2_p, cudaComplex *x3_p, cudaComplex *x4_p){
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;
    x1_p[idx].x = x1_hat[idx].x + wight * RHS1[idx].x;
    x1_p[idx].y = x1_hat[idx].y + wight * RHS1[idx].y;
    x2_p[idx].x = x2_hat[idx].x + wight * RHS2[idx].x;
    x2_p[idx].y = x2_hat[idx].y + wight * RHS2[idx].y;
    x3_p[idx].x = x3_hat[idx].x + wight * RHS3[idx].x;
    x3_p[idx].y = x3_hat[idx].y + wight * RHS3[idx].y;
    x4_p[idx].x = x4_hat[idx].x + wight * RHS4[idx].x;
    x4_p[idx].y = x4_hat[idx].y + wight * RHS4[idx].y;
}
// Zeroes the .x component of all four spectral fields, leaving only the .y
// component (per the name, this selects the sine part of the transform).
__global__ void select_sin_transfer(int N, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat){
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;
    x1_hat[idx].x = 0.0;
    x2_hat[idx].x = 0.0;
    x3_hat[idx].x = 0.0;
    x4_hat[idx].x = 0.0;
}
// Zeroes the .y component of all four spectral fields, leaving only the .x
// component (per the name, this selects the cosine part of the transform).
__global__ void select_cos_transfer(int N, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat){
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;
    x1_hat[idx].y = 0.0;
    x2_hat[idx].y = 0.0;
    x3_hat[idx].y = 0.0;
    x4_hat[idx].y = 0.0;
}
// Evaluates the RHS for one RK stage: first forms the nonlinear convolution
// products for fields 3/4 via FFT (mask_2_3_d is presumably the 2/3-rule
// dealiasing mask -- TODO confirm), then assembles the full dt-scaled RHS
// on the device.
void RightHandSide(dim3 dimGridD, dim3 dimBlockD, dim3 dimGridI, dim3 dimBlockI, hipfftHandle planR2C, hipfftHandle planC2R, int N, int M, real dt, real g, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat, cudaComplex *RHS1, cudaComplex *RHS2, cudaComplex *RHS3, cudaComplex *RHS4, cudaComplex *x3_hat_cut, cudaComplex *x4_hat_cut, real *x3_c, real *x4_c, real *Q3_mul, real *Q4_mul, cudaComplex *Q3_hat_mul, cudaComplex *Q4_hat_mul, real *mask_2_3_d, real *k_laplace_d){
    calculate_convolution_2p3(dimGridD, dimBlockD, dimGridI, dimBlockI, planR2C, planC2R, N, M, x3_hat, x4_hat,x3_hat_cut, x4_hat_cut, x3_c, x4_c, Q3_mul, Q4_mul, Q3_hat_mul, Q4_hat_mul, mask_2_3_d);
    hipLaunchKernelGGL(( assemble_NLS_device), dim3(dimGridI), dim3(dimBlockI), 0, 0, M, dt, g, x1_hat, x2_hat, x3_hat, x4_hat, RHS1, RHS2, RHS3, RHS4, Q3_hat_mul, Q4_hat_mul, k_laplace_d);
}
// One classic RK4 step in spectral space: evaluates stages K1..K4, forming
// the intermediate states x_p = x_hat + w*K with weights w = 0.5, 0.5, 1.0,
// then combines the stages into x_hat with single_RK4_step_device.
// All stage buffers (RHS*_*) and temporaries are caller-provided device memory.
void RK4_single_step(dim3 dimGridD, dim3 dimBlockD, dim3 dimGridI, dim3 dimBlockI, hipfftHandle planR2C, hipfftHandle planC2R, int N, int M, real dt, real g, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat, cudaComplex *x1_p, cudaComplex *x2_p, cudaComplex *x3_p, cudaComplex *x4_p, cudaComplex *x3_hat_cut, cudaComplex *x4_hat_cut, real *x3_c, real *x4_c, real *Q3_mul, real *Q4_mul, cudaComplex *Q3_hat_mul, cudaComplex *Q4_hat_mul, real *mask_2_3_d, real *k_laplace_d, cudaComplex *RHS1_1, cudaComplex *RHS2_1, cudaComplex *RHS3_1, cudaComplex *RHS4_1, cudaComplex *RHS1_2, cudaComplex *RHS2_2, cudaComplex *RHS3_2, cudaComplex *RHS4_2, cudaComplex *RHS1_3, cudaComplex *RHS2_3, cudaComplex *RHS3_3, cudaComplex *RHS4_3, cudaComplex *RHS1_4, cudaComplex *RHS2_4, cudaComplex *RHS3_4, cudaComplex *RHS4_4){
    //select_sin_transfer<<<dimGridI, dimBlockI>>>(M, x1_hat, x2_hat, x3_hat, x4_hat);
    //K1:
    RightHandSide(dimGridD, dimBlockD, dimGridI, dimBlockI, planR2C, planC2R, N, M, dt, g, x1_hat, x2_hat, x3_hat, x4_hat, RHS1_1, RHS2_1, RHS3_1, RHS4_1, x3_hat_cut, x4_hat_cut, x3_c, x4_c, Q3_mul, Q4_mul, Q3_hat_mul, Q4_hat_mul, mask_2_3_d, k_laplace_d);
    hipLaunchKernelGGL(( intermediate_device), dim3(dimGridI), dim3(dimBlockI), 0, 0, M, 0.5, x1_hat, x2_hat, x3_hat, x4_hat, RHS1_1, RHS2_1, RHS3_1, RHS4_1, x1_p, x2_p, x3_p, x4_p);
    //K2:
    RightHandSide(dimGridD, dimBlockD, dimGridI, dimBlockI, planR2C, planC2R, N, M, dt, g, x1_p, x2_p, x3_p, x4_p, RHS1_2, RHS2_2, RHS3_2, RHS4_2, x3_hat_cut, x4_hat_cut, x3_c, x4_c, Q3_mul, Q4_mul, Q3_hat_mul, Q4_hat_mul, mask_2_3_d, k_laplace_d);
    hipLaunchKernelGGL(( intermediate_device), dim3(dimGridI), dim3(dimBlockI), 0, 0, M, 0.5, x1_hat, x2_hat, x3_hat, x4_hat, RHS1_2, RHS2_2, RHS3_2, RHS4_2, x1_p, x2_p, x3_p, x4_p);
    //K3:
    RightHandSide(dimGridD, dimBlockD, dimGridI, dimBlockI, planR2C, planC2R, N, M, dt, g, x1_p, x2_p, x3_p, x4_p, RHS1_3, RHS2_3, RHS3_3, RHS4_3, x3_hat_cut, x4_hat_cut, x3_c, x4_c, Q3_mul, Q4_mul, Q3_hat_mul, Q4_hat_mul, mask_2_3_d, k_laplace_d);
    hipLaunchKernelGGL(( intermediate_device), dim3(dimGridI), dim3(dimBlockI), 0, 0, M, 1.0, x1_hat, x2_hat, x3_hat, x4_hat, RHS1_3, RHS2_3, RHS3_3, RHS4_3, x1_p, x2_p, x3_p, x4_p);
    //K4:
    RightHandSide(dimGridD, dimBlockD, dimGridI, dimBlockI, planR2C, planC2R, N, M, dt, g, x1_p, x2_p, x3_p, x4_p, RHS1_4, RHS2_4, RHS3_4, RHS4_4, x3_hat_cut, x4_hat_cut, x3_c, x4_c, Q3_mul, Q4_mul, Q3_hat_mul, Q4_hat_mul, mask_2_3_d, k_laplace_d);
    //RK-4 assembly:
    hipLaunchKernelGGL(( single_RK4_step_device), dim3(dimGridI), dim3(dimBlockI), 0, 0, M, RHS1_1, RHS2_1, RHS3_1, RHS4_1, RHS1_2, RHS2_2, RHS3_2, RHS4_2, RHS1_3, RHS2_3, RHS3_3, RHS4_3, RHS1_4, RHS2_4, RHS3_4, RHS4_4, x1_hat, x2_hat, x3_hat, x4_hat);
}
| 3024b36cfba1c34095fc5799692f23a877a29ded.cu | #include "RK_time_step.h"
// Element-wise copy of three complex fields, sourceK -> destinationK.
// NOTE(review): source4/destination4 are accepted but never copied --
// confirm this is intentional.
__global__ void copy_arrays_device(int N, cudaComplex *source1, cudaComplex *source2, cudaComplex *source3, cudaComplex *source4, cudaComplex *destination1, cudaComplex *destination2, cudaComplex *destination3, cudaComplex *destination4){
    unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
    if(j<N){
        destination1[j].x=source1[j].x;
        destination1[j].y=source1[j].y;
        destination2[j].x=source2[j].x;
        destination2[j].y=source2[j].y;
        destination3[j].x=source3[j].x;
        destination3[j].y=source3[j].y;
    }
}
// Final RK4 combination: x += (K1 + 2*K2 + 2*K3 + K4)/6, applied
// independently to the real (.x) and imaginary (.y) parts of all four
// spectral fields. The K's already include the dt factor (folded in by
// assemble_NLS_device).
__global__ void single_RK4_step_device(int N, cudaComplex *RHS1_1, cudaComplex *RHS2_1, cudaComplex *RHS3_1, cudaComplex *RHS4_1, cudaComplex *RHS1_2, cudaComplex *RHS2_2, cudaComplex *RHS3_2, cudaComplex *RHS4_2, cudaComplex *RHS1_3, cudaComplex *RHS2_3, cudaComplex *RHS3_3, cudaComplex *RHS4_3, cudaComplex *RHS1_4, cudaComplex *RHS2_4, cudaComplex *RHS3_4, cudaComplex *RHS4_4, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat){
    unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
    if(j<N){
        x1_hat[j].x+=(RHS1_1[j].x+2.0*RHS1_2[j].x+2.0*RHS1_3[j].x+RHS1_4[j].x)/6.0;
        x1_hat[j].y+=(RHS1_1[j].y+2.0*RHS1_2[j].y+2.0*RHS1_3[j].y+RHS1_4[j].y)/6.0;
        x2_hat[j].x+=(RHS2_1[j].x+2.0*RHS2_2[j].x+2.0*RHS2_3[j].x+RHS2_4[j].x)/6.0;
        x2_hat[j].y+=(RHS2_1[j].y+2.0*RHS2_2[j].y+2.0*RHS2_3[j].y+RHS2_4[j].y)/6.0;
        x3_hat[j].x+=(RHS3_1[j].x+2.0*RHS3_2[j].x+2.0*RHS3_3[j].x+RHS3_4[j].x)/6.0;
        x3_hat[j].y+=(RHS3_1[j].y+2.0*RHS3_2[j].y+2.0*RHS3_3[j].y+RHS3_4[j].y)/6.0;
        x4_hat[j].x+=(RHS4_1[j].x+2.0*RHS4_2[j].x+2.0*RHS4_3[j].x+RHS4_4[j].x)/6.0;
        x4_hat[j].y+=(RHS4_1[j].y+2.0*RHS4_2[j].y+2.0*RHS4_3[j].y+RHS4_4[j].y)/6.0;
    }
}
// Assembles the dt-scaled right-hand side of the coupled spectral system:
// dispersion (k_laplace_d/betta), linear coupling/damping terms, and -- for
// fields 3 and 4 only -- the precomputed nonlinear convolution products
// Q3_hat_mul/Q4_hat_mul and the gain g. Constants betta, lka, delta_betta,
// kappa are presumably defined in RK_time_step.h -- TODO confirm.
__global__ void assemble_NLS_device(int N, real dt, real g, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat, cudaComplex *RHS1, cudaComplex *RHS2, cudaComplex *RHS3, cudaComplex *RHS4, cudaComplex *Q3_hat_mul, cudaComplex *Q4_hat_mul, real *k_laplace_d)
{
    unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (!(j < N)) return;
    RHS1[j].x=dt*(-0.5*k_laplace_d[j]/betta*x2_hat[j].x-(lka*x1_hat[j].x-delta_betta*x2_hat[j].x+kappa*x4_hat[j].x));
    RHS1[j].y=dt*(-0.5*k_laplace_d[j]/betta*x2_hat[j].y-(lka*x1_hat[j].y-delta_betta*x2_hat[j].y+kappa*x4_hat[j].y));
    RHS2[j].x=dt*(0.5*k_laplace_d[j]/betta*x1_hat[j].x-(delta_betta*x1_hat[j].x+lka*x2_hat[j].x-kappa*x3_hat[j].x));
    RHS2[j].y=dt*(0.5*k_laplace_d[j]/betta*x1_hat[j].y-(delta_betta*x1_hat[j].y+lka*x2_hat[j].y-kappa*x3_hat[j].y));
    RHS3[j].x=dt*(-0.5*k_laplace_d[j]/betta*x4_hat[j].x-(kappa*x2_hat[j].x+(lka-g)*x3_hat[j].x+delta_betta*x4_hat[j].x)-Q3_hat_mul[j].x);
    RHS3[j].y=dt*(-0.5*k_laplace_d[j]/betta*x4_hat[j].y-(kappa*x2_hat[j].y+(lka-g)*x3_hat[j].y+delta_betta*x4_hat[j].y)-Q3_hat_mul[j].y);
    RHS4[j].x=dt*(0.5*k_laplace_d[j]/betta*x3_hat[j].x-(-kappa*x1_hat[j].x+(lka-g)*x4_hat[j].x-delta_betta*x3_hat[j].x)-Q4_hat_mul[j].x);
    RHS4[j].y=dt*(0.5*k_laplace_d[j]/betta*x3_hat[j].y-(-kappa*x1_hat[j].y+(lka-g)*x4_hat[j].y-delta_betta*x3_hat[j].y)-Q4_hat_mul[j].y);
}
// Builds the intermediate RK4 state x_p = x_hat + wight * RHS for all four
// complex fields; real and imaginary components are updated separately.
__global__ void intermediate_device(int N, real wight, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat, cudaComplex *RHS1, cudaComplex *RHS2, cudaComplex *RHS3, cudaComplex *RHS4, cudaComplex *x1_p, cudaComplex *x2_p, cudaComplex *x3_p, cudaComplex *x4_p){
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;
    x1_p[idx].x = x1_hat[idx].x + wight * RHS1[idx].x;
    x1_p[idx].y = x1_hat[idx].y + wight * RHS1[idx].y;
    x2_p[idx].x = x2_hat[idx].x + wight * RHS2[idx].x;
    x2_p[idx].y = x2_hat[idx].y + wight * RHS2[idx].y;
    x3_p[idx].x = x3_hat[idx].x + wight * RHS3[idx].x;
    x3_p[idx].y = x3_hat[idx].y + wight * RHS3[idx].y;
    x4_p[idx].x = x4_hat[idx].x + wight * RHS4[idx].x;
    x4_p[idx].y = x4_hat[idx].y + wight * RHS4[idx].y;
}
// Zeroes the .x component of all four spectral fields, leaving only the .y
// component (per the name, this selects the sine part of the transform).
__global__ void select_sin_transfer(int N, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat){
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;
    x1_hat[idx].x = 0.0;
    x2_hat[idx].x = 0.0;
    x3_hat[idx].x = 0.0;
    x4_hat[idx].x = 0.0;
}
// Zeroes the .y component of all four spectral fields, leaving only the .x
// component (per the name, this selects the cosine part of the transform).
__global__ void select_cos_transfer(int N, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat){
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;
    x1_hat[idx].y = 0.0;
    x2_hat[idx].y = 0.0;
    x3_hat[idx].y = 0.0;
    x4_hat[idx].y = 0.0;
}
// Evaluates the RHS for one RK stage: first forms the nonlinear convolution
// products for fields 3/4 via FFT (mask_2_3_d is presumably the 2/3-rule
// dealiasing mask -- TODO confirm), then assembles the full dt-scaled RHS
// on the device.
void RightHandSide(dim3 dimGridD, dim3 dimBlockD, dim3 dimGridI, dim3 dimBlockI, cufftHandle planR2C, cufftHandle planC2R, int N, int M, real dt, real g, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat, cudaComplex *RHS1, cudaComplex *RHS2, cudaComplex *RHS3, cudaComplex *RHS4, cudaComplex *x3_hat_cut, cudaComplex *x4_hat_cut, real *x3_c, real *x4_c, real *Q3_mul, real *Q4_mul, cudaComplex *Q3_hat_mul, cudaComplex *Q4_hat_mul, real *mask_2_3_d, real *k_laplace_d){
    calculate_convolution_2p3(dimGridD, dimBlockD, dimGridI, dimBlockI, planR2C, planC2R, N, M, x3_hat, x4_hat,x3_hat_cut, x4_hat_cut, x3_c, x4_c, Q3_mul, Q4_mul, Q3_hat_mul, Q4_hat_mul, mask_2_3_d);
    assemble_NLS_device<<<dimGridI, dimBlockI>>>(M, dt, g, x1_hat, x2_hat, x3_hat, x4_hat, RHS1, RHS2, RHS3, RHS4, Q3_hat_mul, Q4_hat_mul, k_laplace_d);
}
// One classic RK4 step in spectral space: evaluates stages K1..K4, forming
// the intermediate states x_p = x_hat + w*K with weights w = 0.5, 0.5, 1.0,
// then combines the stages into x_hat with single_RK4_step_device.
// All stage buffers (RHS*_*) and temporaries are caller-provided device memory.
void RK4_single_step(dim3 dimGridD, dim3 dimBlockD, dim3 dimGridI, dim3 dimBlockI, cufftHandle planR2C, cufftHandle planC2R, int N, int M, real dt, real g, cudaComplex *x1_hat, cudaComplex *x2_hat, cudaComplex *x3_hat, cudaComplex *x4_hat, cudaComplex *x1_p, cudaComplex *x2_p, cudaComplex *x3_p, cudaComplex *x4_p, cudaComplex *x3_hat_cut, cudaComplex *x4_hat_cut, real *x3_c, real *x4_c, real *Q3_mul, real *Q4_mul, cudaComplex *Q3_hat_mul, cudaComplex *Q4_hat_mul, real *mask_2_3_d, real *k_laplace_d, cudaComplex *RHS1_1, cudaComplex *RHS2_1, cudaComplex *RHS3_1, cudaComplex *RHS4_1, cudaComplex *RHS1_2, cudaComplex *RHS2_2, cudaComplex *RHS3_2, cudaComplex *RHS4_2, cudaComplex *RHS1_3, cudaComplex *RHS2_3, cudaComplex *RHS3_3, cudaComplex *RHS4_3, cudaComplex *RHS1_4, cudaComplex *RHS2_4, cudaComplex *RHS3_4, cudaComplex *RHS4_4){
    //select_sin_transfer<<<dimGridI, dimBlockI>>>(M, x1_hat, x2_hat, x3_hat, x4_hat);
    //K1:
    RightHandSide(dimGridD, dimBlockD, dimGridI, dimBlockI, planR2C, planC2R, N, M, dt, g, x1_hat, x2_hat, x3_hat, x4_hat, RHS1_1, RHS2_1, RHS3_1, RHS4_1, x3_hat_cut, x4_hat_cut, x3_c, x4_c, Q3_mul, Q4_mul, Q3_hat_mul, Q4_hat_mul, mask_2_3_d, k_laplace_d);
    intermediate_device<<<dimGridI, dimBlockI>>>(M, 0.5, x1_hat, x2_hat, x3_hat, x4_hat, RHS1_1, RHS2_1, RHS3_1, RHS4_1, x1_p, x2_p, x3_p, x4_p);
    //K2:
    RightHandSide(dimGridD, dimBlockD, dimGridI, dimBlockI, planR2C, planC2R, N, M, dt, g, x1_p, x2_p, x3_p, x4_p, RHS1_2, RHS2_2, RHS3_2, RHS4_2, x3_hat_cut, x4_hat_cut, x3_c, x4_c, Q3_mul, Q4_mul, Q3_hat_mul, Q4_hat_mul, mask_2_3_d, k_laplace_d);
    intermediate_device<<<dimGridI, dimBlockI>>>(M, 0.5, x1_hat, x2_hat, x3_hat, x4_hat, RHS1_2, RHS2_2, RHS3_2, RHS4_2, x1_p, x2_p, x3_p, x4_p);
    //K3:
    RightHandSide(dimGridD, dimBlockD, dimGridI, dimBlockI, planR2C, planC2R, N, M, dt, g, x1_p, x2_p, x3_p, x4_p, RHS1_3, RHS2_3, RHS3_3, RHS4_3, x3_hat_cut, x4_hat_cut, x3_c, x4_c, Q3_mul, Q4_mul, Q3_hat_mul, Q4_hat_mul, mask_2_3_d, k_laplace_d);
    intermediate_device<<<dimGridI, dimBlockI>>>(M, 1.0, x1_hat, x2_hat, x3_hat, x4_hat, RHS1_3, RHS2_3, RHS3_3, RHS4_3, x1_p, x2_p, x3_p, x4_p);
    //K4:
    RightHandSide(dimGridD, dimBlockD, dimGridI, dimBlockI, planR2C, planC2R, N, M, dt, g, x1_p, x2_p, x3_p, x4_p, RHS1_4, RHS2_4, RHS3_4, RHS4_4, x3_hat_cut, x4_hat_cut, x3_c, x4_c, Q3_mul, Q4_mul, Q3_hat_mul, Q4_hat_mul, mask_2_3_d, k_laplace_d);
    //RK-4 assembly:
    single_RK4_step_device<<<dimGridI, dimBlockI>>>(M, RHS1_1, RHS2_1, RHS3_1, RHS4_1, RHS1_2, RHS2_2, RHS3_2, RHS4_2, RHS1_3, RHS2_3, RHS3_3, RHS4_3, RHS1_4, RHS2_4, RHS3_4, RHS4_4, x1_hat, x2_hat, x3_hat, x4_hat);
}
|
55c6ac130c4268f8c064094419e31ce3b9ccaf3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Librerias
#include <stdio.h>
#include <stdlib.h>
// Tamao del bloque
#define TILE_I 16
#define TILE_J 16
// Punteros en el CPU
float *t0;
// Punteros, arreglos y texturas en el GPU
float *t_data;
float *t_data_old;
// Escalares globales
float dx,dy,dp,dt,kcond;
int ni,nj,paso;
size_t pitch;
// Definicion del CUDA kernel
__global__ void solveheat_kernel (int ni, int nj, int pitch, float kcond, float dt, float dx, float dy, float *t_data, float *t_data_old);
// Definicion C wrappers
void solveheat(void);
void Imprimir(void);
///////////////////////////////////////////////////////////////////
// Driver: sets up an ni x nj grid, plants a single hot cell near the centre,
// runs 10000 explicit heat-diffusion steps on the GPU and dumps the result.
int main(void)
{
    int i;
    int totpoints;
    dt = 0.01f;
    dx = 0.1f;
    dy = 0.1f;
    kcond = 0.01f;
    ni=800;  // must be a multiple of TILE_I: the kernel grid divides exactly
    nj=800;  // must be a multiple of TILE_J
    totpoints = ni*nj;
    printf ("ni = %d\n", ni);
    printf ("nj = %d\n", nj);
    printf ("Numero de puntos = %d\n", totpoints);
    // Allocate host memory
    t0 = (float *)malloc(ni*nj*sizeof(float));
    // Allocate pitched device memory; both allocations have the same row
    // width, so the single global `pitch` is valid for both.
    hipMallocPitch((void **)&t_data, &pitch, sizeof(float)*ni, nj);
    hipMallocPitch((void **)&t_data_old, &pitch, sizeof(float)*ni, nj);
    // Initial condition: zero field ...
    for (i=0; i<totpoints; i++) {
        t0[i] = 0.f;
    }
    // ... with one hot cell near the grid centre
    t0[totpoints/2 + ni/2] = 1000.f;
    // Copy initial values to the GPU (host rows packed, device rows pitched)
    hipMemcpy2D((void *)t_data, pitch, (void *)t0,sizeof(float)*ni,sizeof(float)*ni, nj,
                hipMemcpyHostToDevice);
    paso = 0;
    for (i=1;i<=10000;i++){
        paso = paso + 1;
        solveheat();
        if (paso%1000 == 0) printf ("Iteracion: %d\n", paso);
    }
    Imprimir();
    // Report any accumulated GPU error at the end of the run
    printf("CUDA: %s\n", hipGetErrorString(hipGetLastError()));
    // NOTE(review): t0 / t_data / t_data_old are never freed -- harmless for
    // a one-shot program, but worth cleaning up.
    return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Advances the temperature field one explicit time step: snapshots t_data
// into t_data_old, then launches the stencil kernel that reads the old
// field and writes the new one. Assumes ni % TILE_I == 0 and nj % TILE_J == 0.
void solveheat(void)
{
    // BUGFIX: t_data is a pitched allocation, so its source pitch is `pitch`,
    // not sizeof(float)*ni; passing the unpadded width as spitch misreads
    // every row after the first whenever the pitch is padded.
    hipMemcpy2D((void *)t_data_old, pitch, (void *)t_data, pitch, sizeof(float)*ni, nj,
                hipMemcpyDeviceToDevice);
    dim3 grid = dim3(ni/TILE_I, nj/TILE_J);
    dim3 block = dim3(TILE_I, TILE_J);
    hipLaunchKernelGGL(( solveheat_kernel), dim3(grid), dim3(block), 0, 0, ni,nj,pitch,kcond,dt,dx,dy,t_data, t_data_old);
}
// One explicit Euler step of the 2D heat equation on a pitched field:
//   t_new = t_old + dt*kcond*(d2T/dx2 + d2T/dy2), 5-point stencil.
// Out-of-range neighbour indices are clamped to the boundary cell, i.e. a
// replicated-edge (zero-gradient) boundary. No interior bounds check: the
// launch grid must cover exactly ni x nj.
__global__ void solveheat_kernel (int ni,int nj,int pitch, float kcond, float dt,
                                  float dx, float dy, float *t_data, float *t_data_old)
{
    int i, j, i2d, i2d2, i2d3, i2d4, i2d5;
    float told,tnow,tip1,tim1,tjp1,tjm1;
    i = blockIdx.x*TILE_I + threadIdx.x;
    j = blockIdx.y*TILE_J + threadIdx.y;
    // pitch is in bytes; dividing by sizeof(float) gives the element stride
    i2d = i + j*pitch/sizeof(float);
    i2d2= (i+1) + (j)*pitch/sizeof(float);   // east neighbour
    i2d3= (i-1) + (j)*pitch/sizeof(float);   // west neighbour
    i2d4= (i) + (j+1)*pitch/sizeof(float);   // north neighbour
    i2d5= (i) + (j-1)*pitch/sizeof(float);   // south neighbour
    // clamp the stencil at the domain edges
    if (i ==ni-1) i2d2= ni-1 + (j)*pitch/sizeof(float);
    if (i == 0) i2d3= 0 + (j)*pitch/sizeof(float);
    if (j ==nj-1) i2d4= i + (nj-1)*pitch/sizeof(float);
    if (j == 0) i2d5= i + (0)*pitch/sizeof(float);
    told = t_data_old[i2d];
    tip1 = t_data_old[i2d2];
    tim1 = t_data_old[i2d3];
    tjp1 = t_data_old[i2d4];
    tjm1 = t_data_old[i2d5];
    tnow = told + dt*kcond*((tip1-2.0f*told+tim1)/(dx*dx)
                            + (tjp1-2.0f*told+tjm1)/(dy*dy));
    t_data[i2d] = tnow;
}
////////////////////////////////////////////////////////////////////////////////
// Copies the final temperature field back to the host and writes it to
// "Datos_sintex.dat" as "x y t" triplets, with a blank line between rows
// (gnuplot splot format).
void Imprimir(void)
{
    int i, j, i2d;
    float t;
    FILE *fp;
    // BUGFIX: t_data was allocated with hipMallocPitch, so a flat hipMemcpy
    // ignores the row padding and scrambles the output whenever
    // pitch != sizeof(float)*ni; hipMemcpy2D de-pitches into the packed
    // host buffer.
    hipMemcpy2D((void *)t0, sizeof(float)*ni, (void *)t_data, pitch,
                sizeof(float)*ni, nj, hipMemcpyDeviceToHost);
    fp = fopen ( "Datos_sintex.dat", "w+" );
    if (fp == NULL) return;  // nothing we can do if the file cannot be opened
    for (i=0;i<ni;++i){
        for (j=0;j<nj;++j){
            i2d = i + ni*j;
            t = t0[i2d];
            fprintf(fp, "%f\t %f\t %f\n" , i*dx,j*dy,t);
        }
        fprintf(fp, "\n");
    }
    fclose ( fp );
}
////////////////////////////////////////////////////////////////////////////////
| 55c6ac130c4268f8c064094419e31ce3b9ccaf3c.cu | // Librerias
#include <stdio.h>
#include <stdlib.h>
// Tamaño del bloque
#define TILE_I 16
#define TILE_J 16
// Punteros en el CPU
float *t0;
// Punteros, arreglos y texturas en el GPU
float *t_data;
float *t_data_old;
// Escalares globales
float dx,dy,dp,dt,kcond;
int ni,nj,paso;
size_t pitch;
// Definicion del CUDA kernel
__global__ void solveheat_kernel (int ni, int nj, int pitch, float kcond, float dt, float dx, float dy, float *t_data, float *t_data_old);
// Definicion C wrappers
void solveheat(void);
void Imprimir(void);
///////////////////////////////////////////////////////////////////
// Driver: sets up an ni x nj grid, plants a single hot cell near the centre,
// runs 10000 explicit heat-diffusion steps on the GPU and dumps the result.
int main(void)
{
    int i;
    int totpoints;
    dt = 0.01f;
    dx = 0.1f;
    dy = 0.1f;
    kcond = 0.01f;
    ni=800;  // must be a multiple of TILE_I: the kernel grid divides exactly
    nj=800;  // must be a multiple of TILE_J
    totpoints = ni*nj;
    printf ("ni = %d\n", ni);
    printf ("nj = %d\n", nj);
    printf ("Numero de puntos = %d\n", totpoints);
    // Allocate host memory
    t0 = (float *)malloc(ni*nj*sizeof(float));
    // Allocate pitched device memory; both allocations have the same row
    // width, so the single global `pitch` is valid for both.
    cudaMallocPitch((void **)&t_data, &pitch, sizeof(float)*ni, nj);
    cudaMallocPitch((void **)&t_data_old, &pitch, sizeof(float)*ni, nj);
    // Initial condition: zero field ...
    for (i=0; i<totpoints; i++) {
        t0[i] = 0.f;
    }
    // ... with one hot cell near the grid centre
    t0[totpoints/2 + ni/2] = 1000.f;
    // Copy initial values to the GPU (host rows packed, device rows pitched)
    cudaMemcpy2D((void *)t_data, pitch, (void *)t0,sizeof(float)*ni,sizeof(float)*ni, nj,
                 cudaMemcpyHostToDevice);
    paso = 0;
    for (i=1;i<=10000;i++){
        paso = paso + 1;
        solveheat();
        if (paso%1000 == 0) printf ("Iteracion: %d\n", paso);
    }
    Imprimir();
    // Report any accumulated GPU error at the end of the run
    printf("CUDA: %s\n", cudaGetErrorString(cudaGetLastError()));
    // NOTE(review): t0 / t_data / t_data_old are never freed -- harmless for
    // a one-shot program, but worth cleaning up.
    return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Advances the temperature field one explicit time step: snapshots t_data
// into t_data_old, then launches the stencil kernel that reads the old
// field and writes the new one. Assumes ni % TILE_I == 0 and nj % TILE_J == 0.
void solveheat(void)
{
    // BUGFIX: t_data is a pitched allocation, so its source pitch is `pitch`,
    // not sizeof(float)*ni; passing the unpadded width as spitch misreads
    // every row after the first whenever the pitch is padded.
    cudaMemcpy2D((void *)t_data_old, pitch, (void *)t_data, pitch, sizeof(float)*ni, nj,
                 cudaMemcpyDeviceToDevice);
    dim3 grid = dim3(ni/TILE_I, nj/TILE_J);
    dim3 block = dim3(TILE_I, TILE_J);
    solveheat_kernel<<<grid, block>>>(ni,nj,pitch,kcond,dt,dx,dy,t_data, t_data_old);
}
// One explicit Euler step of the 2D heat equation on a pitched field:
//   t_new = t_old + dt*kcond*(d2T/dx2 + d2T/dy2), 5-point stencil.
// Out-of-range neighbour indices are clamped to the boundary cell, i.e. a
// replicated-edge (zero-gradient) boundary. No interior bounds check: the
// launch grid must cover exactly ni x nj.
__global__ void solveheat_kernel (int ni,int nj,int pitch, float kcond, float dt,
                                  float dx, float dy, float *t_data, float *t_data_old)
{
    int i, j, i2d, i2d2, i2d3, i2d4, i2d5;
    float told,tnow,tip1,tim1,tjp1,tjm1;
    i = blockIdx.x*TILE_I + threadIdx.x;
    j = blockIdx.y*TILE_J + threadIdx.y;
    // pitch is in bytes; dividing by sizeof(float) gives the element stride
    i2d = i + j*pitch/sizeof(float);
    i2d2= (i+1) + (j)*pitch/sizeof(float);   // east neighbour
    i2d3= (i-1) + (j)*pitch/sizeof(float);   // west neighbour
    i2d4= (i) + (j+1)*pitch/sizeof(float);   // north neighbour
    i2d5= (i) + (j-1)*pitch/sizeof(float);   // south neighbour
    // clamp the stencil at the domain edges
    if (i ==ni-1) i2d2= ni-1 + (j)*pitch/sizeof(float);
    if (i == 0) i2d3= 0 + (j)*pitch/sizeof(float);
    if (j ==nj-1) i2d4= i + (nj-1)*pitch/sizeof(float);
    if (j == 0) i2d5= i + (0)*pitch/sizeof(float);
    told = t_data_old[i2d];
    tip1 = t_data_old[i2d2];
    tim1 = t_data_old[i2d3];
    tjp1 = t_data_old[i2d4];
    tjm1 = t_data_old[i2d5];
    tnow = told + dt*kcond*((tip1-2.0f*told+tim1)/(dx*dx)
                            + (tjp1-2.0f*told+tjm1)/(dy*dy));
    t_data[i2d] = tnow;
}
////////////////////////////////////////////////////////////////////////////////
// Copies the final temperature field back to the host and writes it to
// "Datos_sintex.dat" as "x y t" triplets, with a blank line between rows
// (gnuplot splot format).
void Imprimir(void)
{
    int i, j, i2d;
    float t;
    FILE *fp;
    // BUGFIX: t_data was allocated with cudaMallocPitch, so a flat cudaMemcpy
    // ignores the row padding and scrambles the output whenever
    // pitch != sizeof(float)*ni; cudaMemcpy2D de-pitches into the packed
    // host buffer.
    cudaMemcpy2D((void *)t0, sizeof(float)*ni, (void *)t_data, pitch,
                 sizeof(float)*ni, nj, cudaMemcpyDeviceToHost);
    fp = fopen ( "Datos_sintex.dat", "w+" );
    if (fp == NULL) return;  // nothing we can do if the file cannot be opened
    for (i=0;i<ni;++i){
        for (j=0;j<nj;++j){
            i2d = i + ni*j;
            t = t0[i2d];
            fprintf(fp, "%f\t %f\t %f\n" , i*dx,j*dy,t);
        }
        fprintf(fp, "\n");
    }
    fclose ( fp );
}
////////////////////////////////////////////////////////////////////////////////
|
e80fc4201e6878ff804514e99b46f3b883c725a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------
#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#include <cmath>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
// Intersection-over-union of two boxes a, b laid out as
// [x1, y1, x2, y2, theta, ...], using the +1 pixel-inclusive width/height
// convention. Returns 0 when the boxes' angles differ by more than
// 0.3927 rad (~= pi/8), so only similarly oriented boxes suppress each
// other; the overlap itself is computed on the axis-aligned extents.
__device__ inline float devIoU(float const * const a, float const * const b) {
  float left = max(a[0], b[0]), right = min(a[2], b[2]);
  float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  float theta_a = a[4];
  float theta_b = b[4];
  float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
  float interS = width * height;
  float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return fabs(theta_a-theta_b)>0.3927 ? 0 : interS / (Sa + Sb - interS);
}
// Bitmask NMS (Fast R-CNN style). Boxes are tiled in groups of
// threadsPerBlock (= 64). Block (col_start, row_start) caches its column
// tile of boxes in shared memory; each thread then compares one row box
// against the tile and sets bit i of its 64-bit word when the IoU exceeds
// the threshold. Boxes are 6 floats apart: x1, y1, x2, y2, theta plus one
// more field (presumably the score -- unused here; TODO confirm).
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
                           const float *dev_boxes, unsigned long long *dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  // if (row_start > col_start) return;
  const int row_size =
        min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size =
        min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
  __shared__ float block_boxes[threadsPerBlock * 6];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 6 + 0] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0];
    block_boxes[threadIdx.x * 6 + 1] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1];
    block_boxes[threadIdx.x * 6 + 2] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2];
    block_boxes[threadIdx.x * 6 + 3] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3];
    block_boxes[threadIdx.x * 6 + 4] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4];
    block_boxes[threadIdx.x * 6 + 5] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5];
  }
  __syncthreads();
  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * 6;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    // on the diagonal tile, only compare against later boxes
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 6) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
// Makes device_id the current device, skipping the call when it already is.
void _set_device(int device_id) {
  int current_device;
  CUDA_CHECK(hipGetDevice(&current_device));
  if (current_device == device_id) {
    return;
  }
  // The call to hipSetDevice must come before any calls to Get, which
  // may perform initialization using the GPU.
  CUDA_CHECK(hipSetDevice(device_id));
}
// Host wrapper: runs bitmask NMS on the GPU for boxes_num boxes of
// boxes_dim floats each (the kernel assumes boxes_dim == 6), then performs
// the greedy suppression sweep on the host. keep_out receives the indices
// of surviving boxes in input order (callers typically pre-sort by score);
// *num_out receives the number kept.
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
          int boxes_dim, float nms_overlap_thresh, int device_id) {
  _set_device(device_id);
  float* boxes_dev = NULL;
  unsigned long long* mask_dev = NULL;
  const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
  CUDA_CHECK(hipMalloc(&boxes_dev,
                       boxes_num * boxes_dim * sizeof(float)));
  CUDA_CHECK(hipMemcpy(boxes_dev,
                       boxes_host,
                       boxes_num * boxes_dim * sizeof(float),
                       hipMemcpyHostToDevice));
  CUDA_CHECK(hipMalloc(&mask_dev,
                       boxes_num * col_blocks * sizeof(unsigned long long)));
  dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
              DIVUP(boxes_num, threadsPerBlock));
  dim3 threads(threadsPerBlock);
  hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
                      nms_overlap_thresh,
                      boxes_dev,
                      mask_dev);
  // BUGFIX: surface launch-configuration errors immediately; execution
  // errors are caught by the synchronous hipMemcpy below.
  CUDA_CHECK(hipGetLastError());
  std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
  CUDA_CHECK(hipMemcpy(&mask_host[0],
                       mask_dev,
                       sizeof(unsigned long long) * boxes_num * col_blocks,
                       hipMemcpyDeviceToHost));
  // Greedy sweep: keep box i unless an earlier kept box suppressed it,
  // then OR its suppression mask into the running set.
  std::vector<unsigned long long> remv(col_blocks);
  memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / threadsPerBlock;
    int inblock = i % threadsPerBlock;
    if (!(remv[nblock] & (1ULL << inblock))) {
      keep_out[num_to_keep++] = i;
      unsigned long long *p = &mask_host[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }
  *num_out = num_to_keep;
  CUDA_CHECK(hipFree(boxes_dev));
  CUDA_CHECK(hipFree(mask_dev));
}
| e80fc4201e6878ff804514e99b46f3b883c725a9.cu | // ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------
#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#include <cmath>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
// IoU of two boxes stored as [x1, y1, x2, y2, theta, ...]; pairs whose
// orientation angles differ by more than 0.3927 rad (~22.5 deg) are
// treated as non-overlapping (IoU 0). Areas use the +1 pixel convention.
__device__ inline float devIoU(float const * const a, float const * const b) {
  const float ix1 = max(a[0], b[0]);
  const float iy1 = max(a[1], b[1]);
  const float ix2 = min(a[2], b[2]);
  const float iy2 = min(a[3], b[3]);
  const float iw = max(ix2 - ix1 + 1, 0.f);
  const float ih = max(iy2 - iy1 + 1, 0.f);
  const float inter = iw * ih;
  const float area_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  const float area_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  if (fabs(a[4] - b[4]) > 0.3927) {
    return 0;
  }
  return inter / (area_a + area_b - inter);
}
// Pairwise-overlap kernel for NMS.
// Launch: grid (DIVUP(n_boxes,64), DIVUP(n_boxes,64)), 64 threads per block
// (threadsPerBlock == 8 * sizeof(unsigned long long) == 64, one mask bit per
// column box). Block (col_start, row_start) compares up to 64 "row" boxes
// against up to 64 "col" boxes; each row box writes one 64-bit mask whose
// bit i is set when col box i overlaps it above nms_overlap_thresh.
// Boxes are 6 consecutive floats each (stride-6 indexing below); devIoU
// reads fields [0..4] as x1,y1,x2,y2,theta — field 5 is staged but unused.
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
                           const float *dev_boxes, unsigned long long *dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  // if (row_start > col_start) return;
  // Sizes of the (possibly partial) last tiles.
  const int row_size =
      min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size =
      min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
  // Stage this tile's column boxes in shared memory (6 floats per box).
  __shared__ float block_boxes[threadsPerBlock * 6];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 6 + 0] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0];
    block_boxes[threadIdx.x * 6 + 1] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1];
    block_boxes[threadIdx.x * 6 + 2] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2];
    block_boxes[threadIdx.x * 6 + 3] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3];
    block_boxes[threadIdx.x * 6 + 4] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4];
    block_boxes[threadIdx.x * 6 + 5] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5];
  }
  __syncthreads();  // shared tile fully written before any thread reads it
  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * 6;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    // On the diagonal tile, compare only against later boxes so a box never
    // suppresses itself or an earlier box.
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 6) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
// Make `device_id` the active CUDA device for the calling host thread.
// No-op if it is already the current device.
void _set_device(int device_id) {
  int current_device;
  // Fixed: the argument had been mangled to "¤t_device" (HTML-entity
  // corruption of "&current_device"), which does not compile.
  CUDA_CHECK(cudaGetDevice(&current_device));
  if (current_device == device_id) {
    return;
  }
  // The call to cudaSetDevice must come before any calls to Get, which
  // may perform initialization using the GPU.
  CUDA_CHECK(cudaSetDevice(device_id));
}
// Greedy non-maximum suppression on the GPU.
//
// boxes_host: row-major [boxes_num x boxes_dim] floats in host memory,
//             assumed ordered so that earlier boxes win ties (the greedy
//             scan below keeps earlier boxes and suppresses later ones).
// keep_out:   receives indices of surviving boxes; *num_out is their count.
// device_id:  GPU to run on.
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
          int boxes_dim, float nms_overlap_thresh, int device_id) {
  _set_device(device_id);
  float* boxes_dev = NULL;
  unsigned long long* mask_dev = NULL;
  // One 64-bit mask word per (box, block-of-64-candidate-boxes) pair.
  const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
  CUDA_CHECK(cudaMalloc(&boxes_dev,
                        boxes_num * boxes_dim * sizeof(float)));
  CUDA_CHECK(cudaMemcpy(boxes_dev,
                        boxes_host,
                        boxes_num * boxes_dim * sizeof(float),
                        cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMalloc(&mask_dev,
                        boxes_num * col_blocks * sizeof(unsigned long long)));
  dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
              DIVUP(boxes_num, threadsPerBlock));
  dim3 threads(threadsPerBlock);
  nms_kernel<<<blocks, threads>>>(boxes_num,
                                  nms_overlap_thresh,
                                  boxes_dev,
                                  mask_dev);
  // Fixed: kernel launches return no status; without this check a bad
  // launch configuration would go unnoticed.
  CUDA_CHECK(cudaGetLastError());
  std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
  // This blocking copy also synchronizes with the kernel above.
  CUDA_CHECK(cudaMemcpy(&mask_host[0],
                        mask_dev,
                        sizeof(unsigned long long) * boxes_num * col_blocks,
                        cudaMemcpyDeviceToHost));
  std::vector<unsigned long long> remv(col_blocks);
  memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / threadsPerBlock;
    int inblock = i % threadsPerBlock;
    // Box i survives unless some earlier kept box suppressed it.
    if (!(remv[nblock] & (1ULL << inblock))) {
      keep_out[num_to_keep++] = i;
      // Fold box i's suppression mask into the running removal set.
      unsigned long long* p = &mask_host[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }
  *num_out = num_to_keep;
  CUDA_CHECK(cudaFree(boxes_dev));
  CUDA_CHECK(cudaFree(mask_dev));
}
|
13a524e938de0c54b36c175892cad29500fcb3ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define N 32
// Experiment probing intra-warp write ordering in shared memory: each thread
// copies its input element into its own shared slot, then even lanes write 1
// to their own slot while odd lanes write 0 to the slot of the even lane to
// their left — conflicting writes to the same slots with no synchronization
// before the read-back. This is an intentional race; the observed output
// depends on the hardware's warp scheduling (with N == 32 and a <<<1, N>>>
// launch everything stays within a single warp). `volatile` prevents the
// compiler from caching the shared/global accesses in registers.
__global__ void iwarp(int* out)
{
  __shared__ volatile int smem[32];
  volatile int* vout = out;
  int idx = threadIdx.x;
  smem[idx] = vout[idx];
  if(idx % 2 == 0)
    smem[idx] = 1;      // even lane writes its own slot
  else
    smem[idx-1] = 0;    // odd lane writes its left neighbor's slot
  vout[idx] = smem[idx]; // odd slots are never written: they keep the input copy
}
/*
int main()
{
int* din;
hipMalloc((void**)&din, N*sizeof(int));
int in[N];
for(int i = 0; i < N; i++)
in[i] = 0;
hipMemcpy(din, &in, N*sizeof(int), hipMemcpyHostToDevice);
iwarp<<<1,N>>>(din);
int output[N];
hipMemcpy(&output, din, N*sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < N; i++)
printf("%d ", output[i]);
printf("\n");
}
*/
| 13a524e938de0c54b36c175892cad29500fcb3ba.cu |
#define N 32
// Experiment probing intra-warp write ordering in shared memory: each thread
// copies its input element into its own shared slot, then even lanes write 1
// to their own slot while odd lanes write 0 to the slot of the even lane to
// their left — conflicting writes to the same slots with no synchronization
// before the read-back. This is an intentional race; the observed output
// depends on the hardware's warp scheduling (with N == 32 and a <<<1, N>>>
// launch everything stays within a single warp). `volatile` prevents the
// compiler from caching the shared/global accesses in registers.
__global__ void iwarp(int* out)
{
  __shared__ volatile int smem[32];
  volatile int* vout = out;
  int idx = threadIdx.x;
  smem[idx] = vout[idx];
  if(idx % 2 == 0)
    smem[idx] = 1;      // even lane writes its own slot
  else
    smem[idx-1] = 0;    // odd lane writes its left neighbor's slot
  vout[idx] = smem[idx]; // odd slots are never written: they keep the input copy
}
/*
int main()
{
int* din;
cudaMalloc((void**)&din, N*sizeof(int));
int in[N];
for(int i = 0; i < N; i++)
in[i] = 0;
cudaMemcpy(din, &in, N*sizeof(int), cudaMemcpyHostToDevice);
iwarp<<<1,N>>>(din);
int output[N];
cudaMemcpy(&output, din, N*sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < N; i++)
printf("%d ", output[i]);
printf("\n");
}
*/
|
f4748adb1b96b8911d8820d79f6bfc4b18a42c2d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "dense_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: for each matrix size in matrices_ and each
// of the 20 block shapes in blocks_, times 1000 launches of dense_kernel and
// prints [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// argv[1] = number of entries of matrices_ to sweep (no argc validation, as
// in the original generated code).
int main(int argc, char **argv) {
  hipSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      int num_input = 1;
      int num_output = 1;
      // Fixed: buffers were allocated as XSIZE*YSIZE *bytes*; the kernel
      // arguments are double*, so the element size must be included.
      double* gpu_in = NULL;
      hipMalloc(&gpu_in, XSIZE * YSIZE * sizeof(double));
      double* weights = NULL;
      hipMalloc(&weights, XSIZE * YSIZE * sizeof(double));
      double* biases = NULL;
      hipMalloc(&biases, XSIZE * YSIZE * sizeof(double));
      double* gpu_out = NULL;
      hipMalloc(&gpu_out, XSIZE * YSIZE * sizeof(double));
      int num_classes = 1;
      // Round the launch extents up to multiples of the block shape.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0) {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);  // force lazy context creation before timing
      hipLaunchKernelGGL((dense_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0,
                         num_input, num_output, gpu_in, weights, biases, gpu_out, num_classes);
      hipDeviceSynchronize();
      // Warm-up launches.
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL((dense_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0,
                           num_input, num_output, gpu_in, weights, biases, gpu_out, num_classes);
      }
      // Fixed: drain the warm-up launches so they are not charged to the
      // timed region below.
      hipDeviceSynchronize();
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL((dense_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0,
                           num_input, num_output, gpu_in, weights, biases, gpu_out, num_classes);
      }
      // Fixed: launches are asynchronous; without this sync the loop only
      // measured enqueue time, not kernel execution time.
      hipDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
      // Fixed: device buffers were leaked across iterations.
      hipFree(gpu_in);
      hipFree(weights);
      hipFree(biases);
      hipFree(gpu_out);
    }
  }
} | f4748adb1b96b8911d8820d79f6bfc4b18a42c2d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "dense_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: for each matrix size in matrices_ and each
// of the 20 block shapes in blocks_, times 1000 launches of dense_kernel and
// prints [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]. argv[1] = number of
// entries of matrices_ to sweep.
// NOTE(review): the cudaMalloc sizes below omit sizeof(double) — only
// XSIZE*YSIZE bytes are allocated for double* buffers; confirm and fix.
// NOTE(review): kernel launches are asynchronous and there is no
// cudaDeviceSynchronize() before `end`, so the timed region appears to
// measure enqueue time only; the per-iteration buffers are also never freed.
int main(int argc, char **argv) {
  cudaSetDevice(0);
  char* p;
  int matrix_len=strtol(argv[1], &p, 10);
  for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
      int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
      int num_input = 1;
      int num_output = 1;
      double *gpu_in = NULL;
      cudaMalloc(&gpu_in, XSIZE*YSIZE);
      double *weights = NULL;
      cudaMalloc(&weights, XSIZE*YSIZE);
      double *biases = NULL;
      cudaMalloc(&biases, XSIZE*YSIZE);
      double *gpu_out = NULL;
      cudaMalloc(&gpu_out, XSIZE*YSIZE);
      int num_classes = 1;
      // Round the launch extents up to multiples of the block shape.
      int iXSIZE= XSIZE;
      int iYSIZE= YSIZE;
      while(iXSIZE%BLOCKX!=0)
      {
        iXSIZE++;
      }
      while(iYSIZE%BLOCKY!=0)
      {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);  // forces lazy context creation before timing
      dense_kernel<<<gridBlock,threadBlock>>>(num_input,num_output,gpu_in,weights,biases,gpu_out,num_classes);
      cudaDeviceSynchronize();
      // Warm-up launches (not synchronized before timing starts).
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        dense_kernel<<<gridBlock,threadBlock>>>(num_input,num_output,gpu_in,weights,biases,gpu_out,num_classes);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        dense_kernel<<<gridBlock,threadBlock>>>(num_input,num_output,gpu_in,weights,biases,gpu_out,num_classes);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
  }
}} |
1cb1bcaeec87c475db128ba6acb930338824054c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/operators/sequence_ops/sequence_expand_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using LoDTensor = framework::LoDTensor;
// Expands LoD interval `blockIdx.x` of x into out: the interval's rows
// (x_lod[bid] .. x_lod[bid+1]) are copied (ref_lod[bid+1] - ref_lod[bid])
// times, starting at output row offset[bid]. Thread-block axes tile
// (x: element-in-row, y: row-in-interval, z: repeat).
// `lod_size` is the number of LoD boundaries, i.e. intervals + 1.
template <typename T>
__global__ void sequence_expand_kernel(const T* x_data, const size_t* x_lod,
                                       const size_t* ref_lod,
                                       const size_t* offset,
                                       const size_t lod_size,
                                       /* default=1,
                                          the instance length*/
                                       const int x_item_length, T* out_data) {
  int bid = blockIdx.x;
  if (bid >= lod_size - 1) return;  // only lod_size-1 intervals exist
  int x_item_count = x_lod[bid + 1] - x_lod[bid];  // rows in this interval
  int repeats = ref_lod[bid + 1] - ref_lod[bid];   // how many copies to emit
  int out_offset = static_cast<int>(offset[bid]);  // first output row
  int x_offset = x_lod[bid];                       // first source row
  for (int tid_z = threadIdx.z; tid_z < repeats; tid_z += blockDim.z) {
    for (int tid_y = threadIdx.y; tid_y < x_item_count; tid_y += blockDim.y) {
      for (int tid_x = threadIdx.x; tid_x < x_item_length;
           tid_x += blockDim.x) {
        out_data[(out_offset + tid_z * x_item_count + tid_y) * x_item_length +
                 tid_x] = x_data[(x_offset + tid_y) * x_item_length + tid_x];
      }
    }
  }
}
// Backward of sequence_expand: each source row's gradient is the sum of the
// gradients of all its expanded copies, accumulated with atomicAdd because
// several (tid_z) repeats write into the same dx row concurrently.
// Same launch layout as sequence_expand_kernel: one block per LoD interval.
template <typename T>
__global__ void sequence_expand_grad_kernel(
    const T* dout_data, const size_t* ref_lod, const size_t* dx_lod,
    const size_t* offset, const size_t lod_size,
    /* default=1,
       the instance length*/
    const int x_item_length, T* dx_data) {
  int bid = blockIdx.x;
  if (bid >= lod_size - 1) return;  // only lod_size-1 intervals exist
  int x_item_count = dx_lod[bid + 1] - dx_lod[bid];  // rows in this interval
  int repeats = ref_lod[bid + 1] - ref_lod[bid];     // copies that were emitted
  int out_offset = static_cast<int>(offset[bid]);    // first dout row
  int x_offset = dx_lod[bid];                        // first dx row
  for (int tid_z = threadIdx.z; tid_z < repeats; tid_z += blockDim.z) {
    for (int tid_y = threadIdx.y; tid_y < x_item_count; tid_y += blockDim.y) {
      for (int tid_x = threadIdx.x; tid_x < x_item_length;
           tid_x += blockDim.x) {
        platform::CudaAtomicAdd(
            &dx_data[(x_offset + tid_y) * x_item_length + tid_x],
            dout_data[(out_offset + tid_z * x_item_count + tid_y) *
                          x_item_length +
                      tid_x]);
      }
    }
  }
}
// For each LoD interval i, computes the output row offset at which its
// expanded copies start: a running sum over preceding intervals of
// repeats(i) * interval_width(i), where repeats comes from ref_lod and
// interval widths from x_lod. out_offset must already have x_lod.size()
// entries.
void GetOutputOffset(const framework::Vector<size_t>& x_lod,
                     const framework::Vector<size_t>& ref_lod,
                     framework::Vector<size_t>* out_offset) {
  const int n = static_cast<int>(x_lod.size());
  size_t running = 0;
  for (int i = 0; i + 1 < n; ++i) {
    (*out_offset)[i] = running;
    running += (ref_lod[i + 1] - ref_lod[i]) * (x_lod[i + 1] - x_lod[i]);
  }
  if (n > 0) {
    (*out_offset)[n - 1] = running;  // last boundary gets the total
  }
}
// GPU forward of SequenceExpand: launches one block per LoD interval.
// Block dims tile (feature, row, repeat); thread_x is clamped to [16, 32]
// and thread_z = 1024 / thread_x / thread_y keeps the block at <= 1024
// threads. Per-interval output offsets are precomputed on the host.
template <typename T>
struct SequenceExpandFunctor<platform::CUDADeviceContext, T> {
  void operator()(
      const platform::CUDADeviceContext& context, const LoDTensor& x,
      const framework::Vector<size_t>& x_lod, /*expand source lod*/
      const framework::Vector<size_t>& ref_lod, /*expand referenced lod*/
      LoDTensor* out) {
    // Elements per row of x.
    int x_item_length = x.numel() / x.dims()[0];
    framework::Vector<size_t> out_offset(x_lod.size());
    GetOutputOffset(x_lod, ref_lod, &out_offset);
    int thread_x = ::min(32, ::max(static_cast<int>(ref_lod.size()), 16));
    int thread_y = 16;
    int thread_z = 1024 / thread_x / thread_y;
    int block_x = static_cast<int>(ref_lod.size());  // one block per interval
    dim3 block_size(thread_x, thread_y, thread_z);
    dim3 grid_size(block_x, 1);
    hipLaunchKernelGGL(( sequence_expand_kernel), dim3(grid_size), dim3(block_size), 0, context.stream(),
        x.data<T>(), x_lod.CUDAData(context.GetPlace()),
        ref_lod.CUDAData(context.GetPlace()),
        out_offset.CUDAData(context.GetPlace()), x_lod.size(), x_item_length,
        out->mutable_data<T>(context.GetPlace()));
  }
};
// GPU backward of SequenceExpand: same launch layout as the forward functor
// (one block per LoD interval, block capped at 1024 threads); the kernel
// accumulates dout into dx with atomics. Note the kernel is passed
// ref_lod.size() as its lod_size argument.
template <typename T>
struct SequenceExpandGradFunctor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& context,
                  const LoDTensor& dout,
                  const framework::Vector<size_t>& x_lod, /*expand source lod*/
                  const framework::Vector<size_t>& ref_lod, /*expand based lod*/
                  LoDTensor* dx) {
    // Elements per row of dx.
    int x_item_length = framework::product(dx->dims()) / dx->dims()[0];
    framework::Vector<size_t> out_offset(x_lod.size());
    GetOutputOffset(x_lod, ref_lod, &out_offset);
    int thread_x = ::min(32, ::max(static_cast<int>(ref_lod.size()), 16));
    int thread_y = 16;
    int thread_z = 1024 / thread_x / thread_y;
    int block_x = static_cast<int>(ref_lod.size());  // one block per interval
    dim3 block_size(thread_x, thread_y, thread_z);
    dim3 grid_size(block_x, 1);
    hipLaunchKernelGGL(( sequence_expand_grad_kernel), dim3(grid_size), dim3(block_size), 0, context.stream(),
        dout.data<T>(), ref_lod.CUDAData(context.GetPlace()),
        x_lod.CUDAData(context.GetPlace()),
        out_offset.CUDAData(context.GetPlace()), ref_lod.size(), x_item_length,
        dx->mutable_data<T>(context.GetPlace()));
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
sequence_expand,
ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, float>,
ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, double>,
ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, int>,
ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
sequence_expand_grad,
ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, double>,
ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, int>,
ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext,
int64_t>);
| 1cb1bcaeec87c475db128ba6acb930338824054c.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/operators/sequence_ops/sequence_expand_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using LoDTensor = framework::LoDTensor;
// Expands LoD interval `blockIdx.x` of x into out: the interval's rows
// (x_lod[bid] .. x_lod[bid+1]) are copied (ref_lod[bid+1] - ref_lod[bid])
// times, starting at output row offset[bid]. Thread-block axes tile
// (x: element-in-row, y: row-in-interval, z: repeat).
// `lod_size` is the number of LoD boundaries, i.e. intervals + 1.
template <typename T>
__global__ void sequence_expand_kernel(const T* x_data, const size_t* x_lod,
                                       const size_t* ref_lod,
                                       const size_t* offset,
                                       const size_t lod_size,
                                       /* default=1,
                                          the instance length*/
                                       const int x_item_length, T* out_data) {
  int bid = blockIdx.x;
  if (bid >= lod_size - 1) return;  // only lod_size-1 intervals exist
  int x_item_count = x_lod[bid + 1] - x_lod[bid];  // rows in this interval
  int repeats = ref_lod[bid + 1] - ref_lod[bid];   // how many copies to emit
  int out_offset = static_cast<int>(offset[bid]);  // first output row
  int x_offset = x_lod[bid];                       // first source row
  for (int tid_z = threadIdx.z; tid_z < repeats; tid_z += blockDim.z) {
    for (int tid_y = threadIdx.y; tid_y < x_item_count; tid_y += blockDim.y) {
      for (int tid_x = threadIdx.x; tid_x < x_item_length;
           tid_x += blockDim.x) {
        out_data[(out_offset + tid_z * x_item_count + tid_y) * x_item_length +
                 tid_x] = x_data[(x_offset + tid_y) * x_item_length + tid_x];
      }
    }
  }
}
// Backward of sequence_expand: each source row's gradient is the sum of the
// gradients of all its expanded copies, accumulated with atomicAdd because
// several (tid_z) repeats write into the same dx row concurrently.
// Same launch layout as sequence_expand_kernel: one block per LoD interval.
template <typename T>
__global__ void sequence_expand_grad_kernel(
    const T* dout_data, const size_t* ref_lod, const size_t* dx_lod,
    const size_t* offset, const size_t lod_size,
    /* default=1,
       the instance length*/
    const int x_item_length, T* dx_data) {
  int bid = blockIdx.x;
  if (bid >= lod_size - 1) return;  // only lod_size-1 intervals exist
  int x_item_count = dx_lod[bid + 1] - dx_lod[bid];  // rows in this interval
  int repeats = ref_lod[bid + 1] - ref_lod[bid];     // copies that were emitted
  int out_offset = static_cast<int>(offset[bid]);    // first dout row
  int x_offset = dx_lod[bid];                        // first dx row
  for (int tid_z = threadIdx.z; tid_z < repeats; tid_z += blockDim.z) {
    for (int tid_y = threadIdx.y; tid_y < x_item_count; tid_y += blockDim.y) {
      for (int tid_x = threadIdx.x; tid_x < x_item_length;
           tid_x += blockDim.x) {
        platform::CudaAtomicAdd(
            &dx_data[(x_offset + tid_y) * x_item_length + tid_x],
            dout_data[(out_offset + tid_z * x_item_count + tid_y) *
                          x_item_length +
                      tid_x]);
      }
    }
  }
}
// For each LoD interval i, computes the output row offset at which its
// expanded copies start: a running sum over preceding intervals of
// repeats(i) * interval_width(i), where repeats comes from ref_lod and
// interval widths from x_lod. out_offset must already have x_lod.size()
// entries.
void GetOutputOffset(const framework::Vector<size_t>& x_lod,
                     const framework::Vector<size_t>& ref_lod,
                     framework::Vector<size_t>* out_offset) {
  const int n = static_cast<int>(x_lod.size());
  size_t running = 0;
  for (int i = 0; i + 1 < n; ++i) {
    (*out_offset)[i] = running;
    running += (ref_lod[i + 1] - ref_lod[i]) * (x_lod[i + 1] - x_lod[i]);
  }
  if (n > 0) {
    (*out_offset)[n - 1] = running;  // last boundary gets the total
  }
}
// GPU forward of SequenceExpand: launches one block per LoD interval.
// Block dims tile (feature, row, repeat); thread_x is clamped to [16, 32]
// and thread_z = 1024 / thread_x / thread_y keeps the block at <= 1024
// threads. Per-interval output offsets are precomputed on the host.
template <typename T>
struct SequenceExpandFunctor<platform::CUDADeviceContext, T> {
  void operator()(
      const platform::CUDADeviceContext& context, const LoDTensor& x,
      const framework::Vector<size_t>& x_lod, /*expand source lod*/
      const framework::Vector<size_t>& ref_lod, /*expand referenced lod*/
      LoDTensor* out) {
    // Elements per row of x.
    int x_item_length = x.numel() / x.dims()[0];
    framework::Vector<size_t> out_offset(x_lod.size());
    GetOutputOffset(x_lod, ref_lod, &out_offset);
    int thread_x = std::min(32, std::max(static_cast<int>(ref_lod.size()), 16));
    int thread_y = 16;
    int thread_z = 1024 / thread_x / thread_y;
    int block_x = static_cast<int>(ref_lod.size());  // one block per interval
    dim3 block_size(thread_x, thread_y, thread_z);
    dim3 grid_size(block_x, 1);
    sequence_expand_kernel<<<grid_size, block_size, 0, context.stream()>>>(
        x.data<T>(), x_lod.CUDAData(context.GetPlace()),
        ref_lod.CUDAData(context.GetPlace()),
        out_offset.CUDAData(context.GetPlace()), x_lod.size(), x_item_length,
        out->mutable_data<T>(context.GetPlace()));
  }
};
// GPU backward of SequenceExpand: same launch layout as the forward functor
// (one block per LoD interval, block capped at 1024 threads); the kernel
// accumulates dout into dx with atomics. Note the kernel is passed
// ref_lod.size() as its lod_size argument.
template <typename T>
struct SequenceExpandGradFunctor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& context,
                  const LoDTensor& dout,
                  const framework::Vector<size_t>& x_lod, /*expand source lod*/
                  const framework::Vector<size_t>& ref_lod, /*expand based lod*/
                  LoDTensor* dx) {
    // Elements per row of dx.
    int x_item_length = framework::product(dx->dims()) / dx->dims()[0];
    framework::Vector<size_t> out_offset(x_lod.size());
    GetOutputOffset(x_lod, ref_lod, &out_offset);
    int thread_x = std::min(32, std::max(static_cast<int>(ref_lod.size()), 16));
    int thread_y = 16;
    int thread_z = 1024 / thread_x / thread_y;
    int block_x = static_cast<int>(ref_lod.size());  // one block per interval
    dim3 block_size(thread_x, thread_y, thread_z);
    dim3 grid_size(block_x, 1);
    sequence_expand_grad_kernel<<<grid_size, block_size, 0, context.stream()>>>(
        dout.data<T>(), ref_lod.CUDAData(context.GetPlace()),
        x_lod.CUDAData(context.GetPlace()),
        out_offset.CUDAData(context.GetPlace()), ref_lod.size(), x_item_length,
        dx->mutable_data<T>(context.GetPlace()));
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
sequence_expand,
ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, float>,
ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, double>,
ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, int>,
ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
sequence_expand_grad,
ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, double>,
ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, int>,
ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext,
int64_t>);
|
0e93497ac93d30441d2c736611f1895177349501.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/operators/gather.cu.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/segment_pooling.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_launch_config.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
// Counts, for every segment id, how many rows of the (sorted) segment_ids
// array carry that id, writing the counts (as T) into summed_ids.
// Decomposition: each "stripe" covers DimTileSize consecutive rows. A
// segment fully inside a stripe is written with a plain store; the stripe's
// first segment may continue from the previous stripe, so it is merged with
// atomicAdd. Ids that never occur are zero-filled.
template <typename T, typename Index, int DimTileSize>
__global__ void SegmentSumIdsKernel(const Index* segment_ids, T* summed_ids,
                                    const Index input_length_size,
                                    const Index total_stripe_count) {
  CUDA_KERNEL_LOOP(stripe_index, total_stripe_count) {
    const Index segment_offset = stripe_index;
    const Index dim_index_base = stripe_index * Index(DimTileSize);
    // Rows actually covered (last stripe may be partial).
    const Index actual_height =
        min(Index(DimTileSize), input_length_size - dim_index_base);
    Index first_segment_id = segment_ids[dim_index_base];
    // -1 marks "no previous id" for the very first stripe.
    Index last_segment_id = -1;
    if (dim_index_base > 0) {
      last_segment_id = segment_ids[dim_index_base - 1];
    }
    T sum = T(0);
    for (Index j = 0; j < actual_height; j++) {
      Index current_segment_id = segment_ids[dim_index_base + j];
      // Device-side enforcement of the sorted-ids precondition.
      PADDLE_ENFORCE(current_segment_id >= last_segment_id,
                     "the segment ids should be sorted, but got "
                     "segment_ids[%d]:%d > segment_ids[%d]:%d.",
                     dim_index_base + j - 1, dim_index_base + j,
                     last_segment_id, current_segment_id);
      if (current_segment_id > last_segment_id) {
        // Zero-fill ids with no rows at all.
        for (Index interval_id = last_segment_id + 1;
             interval_id < current_segment_id; ++interval_id) {
          *(summed_ids + interval_id) = 0;
        }
        if (j > 0) {
          if (last_segment_id == first_segment_id) {
            // Segment may span the previous stripe: merge atomically.
            platform::CudaAtomicAdd(summed_ids + last_segment_id, sum);
          } else {
            *(summed_ids + last_segment_id) = sum;
          }
          sum = T(0);
        }
      }
      sum += T(1);
      last_segment_id = current_segment_id;
    }
    // The stripe's final segment may continue into the next stripe.
    platform::CudaAtomicAdd(summed_ids + last_segment_id, sum);
  }
}
// Mean segment pooling: like the sum kernel but each per-segment partial sum
// is divided by the segment's row count (summed_ids, precomputed by
// SegmentSumIdsKernel). Stripes cover DimTileSize rows of one inner-dim
// column each; segments that may span stripe boundaries (the stripe's first
// segment) are merged with atomicAdd, interior ones with a plain store.
// segment_ids must be sorted ascending; absent ids get 0 output.
template <typename T, typename Index, int DimTileSize>
__global__ void SegmentMeanKernel(const Index* segment_ids, const T* input,
                                  T* output, T* summed_ids,
                                  const Index input_length_size,
                                  const Index inner_dim_size,
                                  const Index output_length_size,
                                  const Index total_stripe_count) {
  CUDA_KERNEL_LOOP(stripe_index, total_stripe_count) {
    // Column handled by this stripe.
    const Index segment_offset = stripe_index % inner_dim_size;
    // First input row of this stripe.
    const Index dim_index_base =
        stripe_index / inner_dim_size * Index(DimTileSize);
    const Index actual_height =
        min(Index(DimTileSize), input_length_size - dim_index_base);
    Index first_segment_id = segment_ids[dim_index_base];
    Index last_segment_id = -1;  // -1 = no previous id (very first stripe)
    if (dim_index_base > 0) {
      last_segment_id = segment_ids[dim_index_base - 1];
    }
    T sum = T(0);
    for (Index j = 0; j < actual_height; j++) {
      Index current_segment_id = segment_ids[dim_index_base + j];
      if (current_segment_id > last_segment_id) {
        // reset the interval value which do not have corresponding ids.
        for (Index interval_id = last_segment_id + 1;
             interval_id < current_segment_id; ++interval_id) {
          *(output + interval_id * inner_dim_size + segment_offset) = T(0);
        }
        if (j > 0) {
          Index output_index =
              last_segment_id * inner_dim_size + segment_offset;
          if (last_segment_id == first_segment_id) {
            // Segment may span the previous stripe: merge atomically.
            platform::CudaAtomicAdd(output + output_index,
                                    sum / *(summed_ids + last_segment_id));
          } else {
            *(output + output_index) = sum / *(summed_ids + last_segment_id);
          }
          sum = T(0);
        }
      }
      sum += input[(dim_index_base + j) * inner_dim_size + segment_offset];
      last_segment_id = current_segment_id;
    }
    // Final segment may continue into the next stripe.
    Index output_index = last_segment_id * inner_dim_size + segment_offset;
    platform::CudaAtomicAdd(output + output_index,
                            sum / *(summed_ids + last_segment_id));
  }
}
// Generic segment pooling kernel parameterized by a Pool policy
// (SumPool / MaxPool / MinPool): pool.initial() is the identity element,
// pool.compute() folds one input value, pool.atomic() merges a partial
// result that may race with another stripe. Helper h maps a flat stripe
// index to (column, first row, row count). segment_ids must be sorted;
// ids with no rows get 0 written (note: 0, not pool.initial()).
template <typename T, typename Index, typename Helper, typename Pool>
__global__ void SegmentOpsKernel(const Index* segment_ids, const T* input,
                                 T* output, Helper h, Pool pool) {
  CUDA_KERNEL_LOOP(stripe_index, h.total_stripe_count) {
    Index segment_offset, dim_index_base, actual_height;
    Index inner_dim_size = h.inner_dim_size;
    h.calculate(stripe_index, &segment_offset, &dim_index_base, &actual_height);
    T minmax = pool.initial();  // running pooled value for current segment
    Index first_segment_id = segment_ids[dim_index_base];
    // -1 is for the start value when interval_id = 0
    Index last_segment_id = -1;
    if (dim_index_base > 0) {
      last_segment_id = segment_ids[dim_index_base - 1];
    }
    for (Index j = 0; j < actual_height; j++) {
      Index current_segment_id = segment_ids[dim_index_base + j];
      // ensure the segment_ids is sorted.
      PADDLE_ENFORCE(current_segment_id >= last_segment_id,
                     "The segment ids should be sorted, but got "
                     "segment_ids[%d]:%d > segment_ids[%d]:%d.",
                     dim_index_base + j - 1, dim_index_base + j,
                     last_segment_id, current_segment_id);
      if (current_segment_id > last_segment_id) {
        // reset the interval value which do not have corresponding ids.
        for (Index interval_id = last_segment_id + 1;
             interval_id < current_segment_id; ++interval_id) {
          *(output + interval_id * inner_dim_size + segment_offset) = T(0);
        }
        // don't update result when j=0
        if (j > 0) {
          const Index output_index =
              last_segment_id * inner_dim_size + segment_offset;
          if (last_segment_id == first_segment_id) {
            // Segment may span the previous stripe: merge atomically.
            pool.atomic(output + output_index, minmax);
          } else {
            *(output + output_index) = minmax;
          }
          minmax = pool.initial();
        }
      }
      pool.compute(
          input[(dim_index_base + j) * inner_dim_size + segment_offset],
          &minmax);
      last_segment_id = current_segment_id;
    }
    // Final segment may continue into the next stripe.
    const Index output_index =
        last_segment_id * inner_dim_size + segment_offset;
    pool.atomic(output + output_index, minmax);
  }
}
// Gradient for MAX/MIN segment pooling: an input position receives the
// segment's output gradient only where the input equals the pooled output,
// i.e. it was the arg-extremum for its segment. Note that on exact ties
// every tying element receives the gradient. in_grad is assumed
// pre-zeroed by the caller — TODO(review) confirm.
template <typename T, typename Index, typename Helper>
__global__ void SegmentIndexGradKernel(const Index* segment_ids, const T* input,
                                       const T* output, const T* out_grad,
                                       T* in_grad, Helper h) {
  CUDA_KERNEL_LOOP(stripe_index, h.total_stripe_count) {
    Index segment_offset, dim_index_base, actual_height;
    h.calculate(stripe_index, &segment_offset, &dim_index_base, &actual_height);
    for (Index j = 0; j < actual_height; j++) {
      Index current_segment_id = segment_ids[dim_index_base + j];
      Index input_index =
          (dim_index_base + j) * h.inner_dim_size + segment_offset;
      Index output_index =
          current_segment_id * h.inner_dim_size + segment_offset;
      if (input[input_index] == output[output_index]) {
        in_grad[input_index] = out_grad[output_index];
      }
    }
  }
}
// Max-pooling policy for SegmentOpsKernel: identity is -FLT_MAX, values are
// folded with max, and cross-stripe partials are merged with an atomic max.
template <class T>
class MaxPool {
 public:
  DEVICE inline T initial() { return static_cast<T>(-FLT_MAX); }
  DEVICE inline void compute(const T& x, T* y) { *y = *y > x ? *y : x; }
  DEVICE inline T atomic(T* address, const T val) {
    return platform::CudaAtomicMax(address, val);
  }
};
// Min-pooling policy for SegmentOpsKernel: identity is FLT_MAX, values are
// folded with min, and cross-stripe partials are merged with an atomic min.
template <class T>
class MinPool {
 public:
  DEVICE inline T initial() { return static_cast<T>(FLT_MAX); }
  DEVICE inline void compute(const T& x, T* y) { *y = *y < x ? *y : x; }
  DEVICE inline T atomic(T* address, const T val) {
    return platform::CudaAtomicMin(address, val);
  }
};
// Sum-pooling policy for SegmentOpsKernel: identity is 0, values are folded
// with +, and cross-stripe partials are merged with an atomic add.
template <class T>
class SumPool {
 public:
  DEVICE inline T initial() { return static_cast<T>(0); }
  DEVICE inline void compute(const T& x, T* y) { *y = *y + x; }
  DEVICE inline T atomic(T* address, const T val) {
    return platform::CudaAtomicAdd(address, val);
  }
};
// Maps a logically 2-D problem of input_length_size rows x inner_dim_size
// columns onto flat "stripes" of DimTileSize (= 8) consecutive rows in a
// single column. total_stripe_count = columns * ceil(rows / 8) is the grid
// work-item count; calculate() inverts a flat stripe index back to
// (column, first row, rows actually covered by this stripe).
template <class T>
class ArrangeHelper {
 public:
  const T input_total_size;    // rows * columns
  const T input_length_size;   // number of rows (outer dim)
  const T output_length_size;  // number of output segments
  T inner_dim_size;            // columns (elements per row)
  T total_stripe_count;
  const T DimTileSize = 8;     // rows per stripe
  ArrangeHelper(T a, T b, T c)
      : input_total_size(a), input_length_size(b), output_length_size(c) {
    // ceil(rows / DimTileSize)
    T input_outer_dim_num_stripe =
        (input_length_size + DimTileSize - 1) / DimTileSize;
    inner_dim_size = input_total_size / input_length_size;
    total_stripe_count = inner_dim_size * input_outer_dim_num_stripe;
  }
  // Decode stripe_index into (column, base row, covered row count);
  // the last stripe of a column may be shorter than DimTileSize.
  DEVICE inline void calculate(T stripe_index, T* segment_offset,
                               T* dim_index_base, T* actual_height) {
    *segment_offset = stripe_index % inner_dim_size;
    *dim_index_base = stripe_index / inner_dim_size * DimTileSize;
    *actual_height = min(DimTileSize, input_length_size - *dim_index_base);
  }
};
// Launches the MAX/MIN segment-pooling gradient kernel on the context's
// stream; one thread per stripe (see ArrangeHelper). Throws for any other
// pooltype — SUM/MEAN gradients are handled elsewhere.
template <typename T, typename Index>
void SegmentPoolCUDAGradFunctor(const platform::CUDADeviceContext& ctx,
                                const framework::Tensor& input,
                                const framework::Tensor& segment_ids,
                                const framework::Tensor& output,
                                const framework::Tensor& out_grad,
                                framework::Tensor* in_grad,
                                const std::string pooltype = "SUM") {
  auto h = ArrangeHelper<Index>(input.numel(), segment_ids.dims()[0],
                                output.dims()[0]);
  auto config = platform::GetGpuLaunchConfig1D(ctx, h.total_stripe_count);
  if (pooltype == "MAX" || pooltype == "MIN") {
    hipLaunchKernelGGL(( SegmentIndexGradKernel<T, Index, ArrangeHelper<Index>>),
        dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, ctx.stream(),
        segment_ids.data<Index>(), input.data<T>(), output.data<T>(),
        out_grad.data<T>(), in_grad->data<T>(), h);
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Unsupported segment pooling grad operation, Only MAX, MIN "
        "available, but got %s.",
        pooltype));
  }
}
// Divides each row x[i*dim .. i*dim+dim) by y[i], one thread block per row
// with a grid-stride loop over rows. y[i] is staged into shared memory by
// thread 0 and broadcast to the block.
template <typename T>
__global__ void SimpleDiv(T* x, const T* y, const int len, const int dim) {
  __shared__ T y_i;  // divisor for the row currently being processed
  for (int i = blockIdx.x; i < len; i += gridDim.x) {
    auto base = i * dim;
    if (threadIdx.x == 0) {
      y_i = y[i];
    }
    __syncthreads();  // make y_i visible to the whole block before use
    for (int j = threadIdx.x; j < dim; j += blockDim.x) {
      x[base + j] /= y_i;
    }
    // Fixed: without this barrier thread 0 could overwrite y_i for the next
    // row while other threads are still dividing by the current value.
    __syncthreads();
  }
}
// Forward segment pooling on GPU (HIP launch syntax, generated by hipify).
//   input:       rows to pool; first dimension indexed by segment_ids.
//   segment_ids: non-decreasing segment index for every input row.
//   output:      one pooled row per segment.
//   summed_ids:  per-segment row counts; written and consumed only when
//                pooltype == "MEAN".
// Throws InvalidArgument for any pooltype other than MEAN/SUM/MAX/MIN.
template <typename T, typename IndexT>
class SegmentPoolFunctor<platform::CUDADeviceContext, T, IndexT> {
 public:
  void operator()(const platform::CUDADeviceContext& ctx,
                  const framework::Tensor& input,
                  const framework::Tensor& segment_ids,
                  framework::Tensor* output,
                  framework::Tensor* summed_ids = nullptr,
                  const std::string pooltype = "SUM") {
    if (pooltype == "MEAN") {
      // Sum the segment id num first
      // NOTE(review): DimTileSize is declared with the value type T (possibly
      // float/double), so the ceil-division below runs in floating point —
      // consider IndexT; verify against the CUDA copy of this functor.
      T DimTileSize = 8;
      auto input_length_size = segment_ids.numel();
      auto total_stripe_count =
          (input_length_size + DimTileSize - 1) / DimTileSize;
      auto config = platform::GetGpuLaunchConfig1D(ctx, total_stripe_count);
      hipLaunchKernelGGL(( SegmentSumIdsKernel<
          T, IndexT, IndexT(8)>), dim3(config.block_per_grid.x),
                                   dim3(config.thread_per_block.x), 0, ctx.stream(),
          segment_ids.data<IndexT>(), summed_ids->data<T>(), input_length_size,
          total_stripe_count);
    }
    // Stripe layout over the whole input; shared by all pooling kernels below.
    auto h = ArrangeHelper<IndexT>(input.numel(), segment_ids.dims()[0],
                                   output->dims()[0]);
    auto config = platform::GetGpuLaunchConfig1D(ctx, h.total_stripe_count);
    if (pooltype == "MEAN") {
      // Sum each segment, then divide by the counts computed above.
      hipLaunchKernelGGL(( SegmentMeanKernel<
          T, IndexT, IndexT(8)>), dim3(config.block_per_grid.x),
                                   dim3(config.thread_per_block.x), 0, ctx.stream(),
          segment_ids.data<IndexT>(), input.data<T>(), output->data<T>(),
          summed_ids->data<T>(), h.input_length_size, h.inner_dim_size,
          h.output_length_size, h.total_stripe_count);
    } else if (pooltype == "SUM") {
      SumPool<T> pool;
      hipLaunchKernelGGL(( SegmentOpsKernel<
          T, IndexT, ArrangeHelper<IndexT>,
          SumPool<T>>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0,
                    ctx.stream(), segment_ids.data<IndexT>(),
                                input.data<T>(), output->data<T>(), h,
                                pool);
    } else if (pooltype == "MAX") {
      MaxPool<T> pool;
      hipLaunchKernelGGL(( SegmentOpsKernel<
          T, IndexT, ArrangeHelper<IndexT>,
          MaxPool<T>>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0,
                    ctx.stream(), segment_ids.data<IndexT>(),
                                input.data<T>(), output->data<T>(), h,
                                pool);
    } else if (pooltype == "MIN") {
      MinPool<T> pool;
      hipLaunchKernelGGL(( SegmentOpsKernel<
          T, IndexT, ArrangeHelper<IndexT>,
          MinPool<T>>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0,
                    ctx.stream(), segment_ids.data<IndexT>(),
                                input.data<T>(), output->data<T>(), h,
                                pool);
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Unsupported segment pooling operation, Only MEAN, SUM, MAX, MIN "
          "available, but got %s.",
          pooltype));
    }
  }
};
// Backward pass for segment pooling (HIP copy).
//   MAX/MIN: route gradients via SegmentPoolCUDAGradFunctor (value matching).
//   MEAN:    divide out_grad rows by the per-segment counts (summed_ids),
//            then gather the rows back to input positions.
//   SUM:     gather out_grad rows directly to input positions.
// Throws InvalidArgument for any other pooltype.
template <typename T, typename IndexT>
class SegmentPoolGradFunctor<platform::CUDADeviceContext, T, IndexT> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& out_grad,
                  const framework::Tensor& segments, framework::Tensor* in_grad,
                  const framework::Tensor* summed_ids = nullptr,
                  const std::string pooltype = "SUM") {
    if (pooltype == "MAX" || pooltype == "MIN") {
      SegmentPoolCUDAGradFunctor<T, IndexT>(context, input, segments, output,
                                            out_grad, in_grad, pooltype);
    } else if (pooltype == "MEAN") {
      // Work on a copy of out_grad so the incoming gradient is not modified.
      framework::Tensor mean_grad;
      mean_grad.mutable_data<T>(input.dims(), context.GetPlace());
      framework::TensorCopy(out_grad, context.GetPlace(), context, &mean_grad);
      // NOTE(review): len == 0 would divide by zero below — presumably the
      // caller guarantees a non-empty output; verify upstream.
      int len = output.dims()[0];
      int dim = output.numel() / len;
      auto config = platform::GetGpuLaunchConfig1D(context, len);
      hipLaunchKernelGGL(( SimpleDiv<T>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0,
                     context.stream(), mean_grad.data<T>(),
                                         summed_ids->data<T>(), len, dim);
      GPUGather<T, IndexT>(context, mean_grad, segments, in_grad);
    } else if (pooltype == "SUM") {
      GPUGather<T, IndexT>(context, out_grad, segments, in_grad);
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Unsupported segment pooling operation, Only MEAN, SUM, MAX, MIN "
          "available, but got %s.",
          pooltype));
    }
  }
};
using CUDA = paddle::platform::CUDADeviceContext;
template class SegmentPoolFunctor<CUDA, float, int>;
template class SegmentPoolFunctor<CUDA, float, int64_t>;
template class SegmentPoolFunctor<CUDA, double, int>;
template class SegmentPoolFunctor<CUDA, double, int64_t>;
template class SegmentPoolGradFunctor<CUDA, float, int>;
template class SegmentPoolGradFunctor<CUDA, float, int64_t>;
template class SegmentPoolGradFunctor<CUDA, double, int>;
template class SegmentPoolGradFunctor<CUDA, double, int64_t>;
} // namespace operators
} // namespace paddle
| 0e93497ac93d30441d2c736611f1895177349501.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/operators/gather.cu.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/segment_pooling.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_launch_config.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
// Counts how many input rows fall into each segment, writing the counts into
// summed_ids. Each "stripe" (one CUDA_KERNEL_LOOP iteration) scans up to
// DimTileSize consecutive ids. Segments fully inside a stripe are stored
// directly; segments that may extend across a stripe boundary (the stripe's
// first segment, and the final flush) are merged with atomicAdd. Segment ids
// with no rows are zero-filled. Requires segment_ids to be sorted
// non-decreasing (enforced below).
template <typename T, typename Index, int DimTileSize>
__global__ void SegmentSumIdsKernel(const Index* segment_ids, T* summed_ids,
                                    const Index input_length_size,
                                    const Index total_stripe_count) {
  CUDA_KERNEL_LOOP(stripe_index, total_stripe_count) {
    const Index segment_offset = stripe_index;
    const Index dim_index_base = stripe_index * Index(DimTileSize);
    const Index actual_height =
        min(Index(DimTileSize), input_length_size - dim_index_base);
    Index first_segment_id = segment_ids[dim_index_base];
    // last_segment_id starts at the id just before this stripe (or -1), so the
    // gap-filling loop also covers ids skipped at the stripe's left edge.
    Index last_segment_id = -1;
    if (dim_index_base > 0) {
      last_segment_id = segment_ids[dim_index_base - 1];
    }
    T sum = T(0);
    for (Index j = 0; j < actual_height; j++) {
      Index current_segment_id = segment_ids[dim_index_base + j];
      PADDLE_ENFORCE(current_segment_id >= last_segment_id,
                     "the segment ids should be sorted, but got "
                     "segment_ids[%d]:%d > segment_ids[%d]:%d.",
                     dim_index_base + j - 1, dim_index_base + j,
                     last_segment_id, current_segment_id);
      if (current_segment_id > last_segment_id) {
        // Zero the counts of segment ids that have no rows at all.
        for (Index interval_id = last_segment_id + 1;
             interval_id < current_segment_id; ++interval_id) {
          *(summed_ids + interval_id) = 0;
        }
        if (j > 0) {
          // Flush the finished segment. The stripe's first segment may be
          // shared with the previous stripe, so it must be merged atomically.
          if (last_segment_id == first_segment_id) {
            platform::CudaAtomicAdd(summed_ids + last_segment_id, sum);
          } else {
            *(summed_ids + last_segment_id) = sum;
          }
          sum = T(0);
        }
      }
      sum += T(1);
      last_segment_id = current_segment_id;
    }
    // The stripe's last segment may continue into the next stripe.
    platform::CudaAtomicAdd(summed_ids + last_segment_id, sum);
  }
}
// Mean pooling over sorted segments: accumulates each segment's values for one
// inner-dimension column, divides by the per-segment row count in summed_ids,
// and writes the result to output. Stripe/tile layout and the atomic handling
// of segments that cross stripe borders mirror SegmentSumIdsKernel. Output
// rows whose segment id has no input rows are zero-filled.
template <typename T, typename Index, int DimTileSize>
__global__ void SegmentMeanKernel(const Index* segment_ids, const T* input,
                                  T* output, T* summed_ids,
                                  const Index input_length_size,
                                  const Index inner_dim_size,
                                  const Index output_length_size,
                                  const Index total_stripe_count) {
  CUDA_KERNEL_LOOP(stripe_index, total_stripe_count) {
    const Index segment_offset = stripe_index % inner_dim_size;
    const Index dim_index_base =
        stripe_index / inner_dim_size * Index(DimTileSize);
    const Index actual_height =
        min(Index(DimTileSize), input_length_size - dim_index_base);
    Index first_segment_id = segment_ids[dim_index_base];
    Index last_segment_id = -1;
    if (dim_index_base > 0) {
      last_segment_id = segment_ids[dim_index_base - 1];
    }
    T sum = T(0);
    for (Index j = 0; j < actual_height; j++) {
      Index current_segment_id = segment_ids[dim_index_base + j];
      if (current_segment_id > last_segment_id) {
        // reset the interval value which do not have corresponding ids.
        for (Index interval_id = last_segment_id + 1;
             interval_id < current_segment_id; ++interval_id) {
          *(output + interval_id * inner_dim_size + segment_offset) = T(0);
        }
        if (j > 0) {
          Index output_index =
              last_segment_id * inner_dim_size + segment_offset;
          // Divide by the segment's row count; atomic when the segment may be
          // shared with the previous stripe.
          if (last_segment_id == first_segment_id) {
            platform::CudaAtomicAdd(output + output_index,
                                    sum / *(summed_ids + last_segment_id));
          } else {
            *(output + output_index) = sum / *(summed_ids + last_segment_id);
          }
          sum = T(0);
        }
      }
      sum += input[(dim_index_base + j) * inner_dim_size + segment_offset];
      last_segment_id = current_segment_id;
    }
    // Flush the stripe's trailing (possibly border-crossing) segment.
    Index output_index = last_segment_id * inner_dim_size + segment_offset;
    platform::CudaAtomicAdd(output + output_index,
                            sum / *(summed_ids + last_segment_id));
  }
}
// Generic segment pooling kernel parameterized by a Pool policy (SumPool /
// MaxPool / MinPool below): `initial()` gives the reduction identity,
// `compute()` folds one value into the accumulator, and `atomic()` merges
// accumulators for segments that may span stripe borders. Helper (an
// ArrangeHelper) maps each stripe to its column/tile coordinates. Output rows
// of empty segments are zero-filled. Requires sorted segment_ids.
template <typename T, typename Index, typename Helper, typename Pool>
__global__ void SegmentOpsKernel(const Index* segment_ids, const T* input,
                                 T* output, Helper h, Pool pool) {
  CUDA_KERNEL_LOOP(stripe_index, h.total_stripe_count) {
    Index segment_offset, dim_index_base, actual_height;
    Index inner_dim_size = h.inner_dim_size;
    h.calculate(stripe_index, &segment_offset, &dim_index_base, &actual_height);
    T minmax = pool.initial();
    Index first_segment_id = segment_ids[dim_index_base];
    // -1 is for the start value when interval_id = 0
    Index last_segment_id = -1;
    if (dim_index_base > 0) {
      last_segment_id = segment_ids[dim_index_base - 1];
    }
    for (Index j = 0; j < actual_height; j++) {
      Index current_segment_id = segment_ids[dim_index_base + j];
      // ensure the segment_ids is sorted.
      PADDLE_ENFORCE(current_segment_id >= last_segment_id,
                     "The segment ids should be sorted, but got "
                     "segment_ids[%d]:%d > segment_ids[%d]:%d.",
                     dim_index_base + j - 1, dim_index_base + j,
                     last_segment_id, current_segment_id);
      if (current_segment_id > last_segment_id) {
        // reset the interval value which do not have corresponding ids.
        for (Index interval_id = last_segment_id + 1;
             interval_id < current_segment_id; ++interval_id) {
          *(output + interval_id * inner_dim_size + segment_offset) = T(0);
        }
        // don't update result when j=0
        if (j > 0) {
          const Index output_index =
              last_segment_id * inner_dim_size + segment_offset;
          // The stripe's first segment may be shared with the previous stripe,
          // so it is merged atomically; interior segments are stored directly.
          if (last_segment_id == first_segment_id) {
            pool.atomic(output + output_index, minmax);
          } else {
            *(output + output_index) = minmax;
          }
          minmax = pool.initial();
        }
      }
      pool.compute(
          input[(dim_index_base + j) * inner_dim_size + segment_offset],
          &minmax);
      last_segment_id = current_segment_id;
    }
    // Trailing segment may continue into the next stripe — merge atomically.
    const Index output_index =
        last_segment_id * inner_dim_size + segment_offset;
    pool.atomic(output + output_index, minmax);
  }
}
// Gradient routing for MAX/MIN segment pooling: an input row receives its
// segment's output gradient iff its value equals the pooled output. The exact
// float equality is intentional — the pooled output was copied verbatim from
// one of the segment's input values. Note that if several rows tie for the
// extremum, each matching row receives the full gradient.
template <typename T, typename Index, typename Helper>
__global__ void SegmentIndexGradKernel(const Index* segment_ids, const T* input,
                                       const T* output, const T* out_grad,
                                       T* in_grad, Helper h) {
  CUDA_KERNEL_LOOP(stripe_index, h.total_stripe_count) {
    Index segment_offset, dim_index_base, actual_height;
    h.calculate(stripe_index, &segment_offset, &dim_index_base, &actual_height);
    for (Index j = 0; j < actual_height; j++) {
      Index current_segment_id = segment_ids[dim_index_base + j];
      Index input_index =
          (dim_index_base + j) * h.inner_dim_size + segment_offset;
      Index output_index =
          current_segment_id * h.inner_dim_size + segment_offset;
      if (input[input_index] == output[output_index]) {
        in_grad[input_index] = out_grad[output_index];
      }
    }
  }
}
// Reduction policy for max pooling: identity element, pairwise combine, and
// the atomic used when several stripes contribute to one segment.
template <class T>
class MaxPool {
 public:
  // Identity for max: the lowest value any input can exceed.
  DEVICE inline T initial() { return static_cast<T>(-FLT_MAX); }
  // Fold x into the running maximum *y.
  DEVICE inline void compute(const T& x, T* y) {
    const T running = *y;
    *y = running > x ? running : x;
  }
  // Atomically merge val into *address across thread blocks.
  DEVICE inline T atomic(T* address, const T val) {
    return platform::CudaAtomicMax(address, val);
  }
};
// Reduction policy for min pooling: identity element, pairwise combine, and
// the atomic used when several stripes contribute to one segment.
template <class T>
class MinPool {
 public:
  // Identity for min: the highest value any input can undercut.
  DEVICE inline T initial() { return static_cast<T>(FLT_MAX); }
  // Fold x into the running minimum *y.
  DEVICE inline void compute(const T& x, T* y) {
    const T running = *y;
    *y = running < x ? running : x;
  }
  // Atomically merge val into *address across thread blocks.
  DEVICE inline T atomic(T* address, const T val) {
    return platform::CudaAtomicMin(address, val);
  }
};
// Reduction policy for sum pooling: identity element, pairwise combine, and
// the atomic used when several stripes contribute to one segment.
template <class T>
class SumPool {
 public:
  // Identity for addition.
  DEVICE inline T initial() { return static_cast<T>(0); }
  // Fold x into the running sum *y.
  DEVICE inline void compute(const T& x, T* y) { *y += x; }
  // Atomically merge val into *address across thread blocks.
  DEVICE inline T atomic(T* address, const T val) {
    return platform::CudaAtomicAdd(address, val);
  }
};
// Maps a flat "stripe" index onto the coordinates used by the segment pooling
// kernels. A stripe covers up to DimTileSize consecutive rows of one
// inner-dimension column, so the whole input is tiled by
// inner_dim_size * ceil(rows / DimTileSize) stripes.
template <class T>
class ArrangeHelper {
 public:
  const T input_total_size;    // total number of input elements
  const T input_length_size;   // rows along the segmented (outer) dimension
  const T output_length_size;  // number of output segments
  T inner_dim_size;            // elements per row
  T total_stripe_count;        // stripes needed to cover the whole input
  const T DimTileSize = 8;     // rows handled per stripe
  ArrangeHelper(T total, T length, T out_length)
      : input_total_size(total),
        input_length_size(length),
        output_length_size(out_length) {
    inner_dim_size = input_total_size / input_length_size;
    const T outer_stripes =
        (input_length_size + DimTileSize - 1) / DimTileSize;
    total_stripe_count = inner_dim_size * outer_stripes;
  }
  // Decompose stripe_index into the column it serves, the first row of its
  // tile, and the number of rows covered (the last tile may be short).
  DEVICE inline void calculate(T stripe_index, T* segment_offset,
                               T* dim_index_base, T* actual_height) {
    *segment_offset = stripe_index % inner_dim_size;
    *dim_index_base = stripe_index / inner_dim_size * DimTileSize;
    *actual_height = min(DimTileSize, input_length_size - *dim_index_base);
  }
};
// Backward pass for MAX/MIN segment pooling: scatter each segment's output
// gradient back to the input rows whose value equals the pooled result.
// Throws InvalidArgument for any pooltype other than "MAX"/"MIN".
template <typename T, typename Index>
void SegmentPoolCUDAGradFunctor(const platform::CUDADeviceContext& ctx,
                                const framework::Tensor& input,
                                const framework::Tensor& segment_ids,
                                const framework::Tensor& output,
                                const framework::Tensor& out_grad,
                                framework::Tensor* in_grad,
                                const std::string pooltype = "SUM") {
  // Stripe decomposition of the input: one GPU thread handles a small tile of
  // consecutive rows within a single inner-dimension column.
  auto helper = ArrangeHelper<Index>(input.numel(), segment_ids.dims()[0],
                                     output.dims()[0]);
  auto launch_cfg =
      platform::GetGpuLaunchConfig1D(ctx, helper.total_stripe_count);
  if (pooltype == "MAX" || pooltype == "MIN") {
    SegmentIndexGradKernel<
        T, Index,
        ArrangeHelper<Index>><<<launch_cfg.block_per_grid.x,
                                launch_cfg.thread_per_block.x, 0,
                                ctx.stream()>>>(
        segment_ids.data<Index>(), input.data<T>(), output.data<T>(),
        out_grad.data<T>(), in_grad->data<T>(), helper);
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Unsupported segment pooling grad operation, Only MAX, MIN "
        "available, but got %s.",
        pooltype));
  }
}
// Divide each of `len` rows of x (each `dim` elements long) by the per-row
// scalar y[i]: x[i][j] /= y[i]. One block works on one row at a time
// (grid-stride over rows); the threads of the block stride over the columns.
template <typename T>
__global__ void SimpleDiv(T* x, const T* y, const int len, const int dim) {
  for (int i = blockIdx.x; i < len; i += gridDim.x) {
    __shared__ T y_i;  // per-block broadcast slot for the row divisor
    auto base = i * dim;
    if (threadIdx.x == 0) {
      y_i = y[i];
    }
    __syncthreads();  // make y_i visible to the whole block before dividing
    for (int j = threadIdx.x; j < dim; j += blockDim.x) {
      x[base + j] /= y_i;
    }
    // Fix: barrier before the next row iteration. Without it, thread 0 could
    // overwrite y_i with the next row's divisor while other threads of the
    // block are still reading it for the current row (shared-memory race).
    // The loop condition is uniform across the block (it depends only on
    // blockIdx.x), so this barrier is reached by all threads together.
    __syncthreads();
  }
}
// Forward segment pooling on GPU.
//   input:       rows to pool; first dimension indexed by segment_ids.
//   segment_ids: non-decreasing segment index for every input row.
//   output:      one pooled row per segment.
//   summed_ids:  per-segment row counts; written and consumed only when
//                pooltype == "MEAN".
// Throws InvalidArgument for any pooltype other than MEAN/SUM/MAX/MIN.
template <typename T, typename IndexT>
class SegmentPoolFunctor<platform::CUDADeviceContext, T, IndexT> {
 public:
  void operator()(const platform::CUDADeviceContext& ctx,
                  const framework::Tensor& input,
                  const framework::Tensor& segment_ids,
                  framework::Tensor* output,
                  framework::Tensor* summed_ids = nullptr,
                  const std::string pooltype = "SUM") {
    if (pooltype == "MEAN") {
      // Sum the segment id num first; SegmentMeanKernel divides by the counts.
      // Fix: the tile size participates in an integer ceil-division, so it
      // must be an integer type. The previous `T DimTileSize = 8;` performed
      // the division in floating point when T was float/double, which can
      // lose precision for large inputs.
      const IndexT DimTileSize = 8;
      auto input_length_size = segment_ids.numel();
      auto total_stripe_count =
          (input_length_size + DimTileSize - 1) / DimTileSize;
      auto config = platform::GetGpuLaunchConfig1D(ctx, total_stripe_count);
      SegmentSumIdsKernel<
          T, IndexT, IndexT(8)><<<config.block_per_grid.x,
                                  config.thread_per_block.x, 0, ctx.stream()>>>(
          segment_ids.data<IndexT>(), summed_ids->data<T>(), input_length_size,
          total_stripe_count);
    }
    // Stripe layout over the whole input; shared by all pooling kernels below.
    auto h = ArrangeHelper<IndexT>(input.numel(), segment_ids.dims()[0],
                                   output->dims()[0]);
    auto config = platform::GetGpuLaunchConfig1D(ctx, h.total_stripe_count);
    if (pooltype == "MEAN") {
      // Sum each segment, then divide by the counts computed above.
      SegmentMeanKernel<
          T, IndexT, IndexT(8)><<<config.block_per_grid.x,
                                  config.thread_per_block.x, 0, ctx.stream()>>>(
          segment_ids.data<IndexT>(), input.data<T>(), output->data<T>(),
          summed_ids->data<T>(), h.input_length_size, h.inner_dim_size,
          h.output_length_size, h.total_stripe_count);
    } else if (pooltype == "SUM") {
      SumPool<T> pool;
      SegmentOpsKernel<
          T, IndexT, ArrangeHelper<IndexT>,
          SumPool<T>><<<config.block_per_grid.x, config.thread_per_block.x, 0,
                        ctx.stream()>>>(segment_ids.data<IndexT>(),
                                        input.data<T>(), output->data<T>(), h,
                                        pool);
    } else if (pooltype == "MAX") {
      MaxPool<T> pool;
      SegmentOpsKernel<
          T, IndexT, ArrangeHelper<IndexT>,
          MaxPool<T>><<<config.block_per_grid.x, config.thread_per_block.x, 0,
                        ctx.stream()>>>(segment_ids.data<IndexT>(),
                                        input.data<T>(), output->data<T>(), h,
                                        pool);
    } else if (pooltype == "MIN") {
      MinPool<T> pool;
      SegmentOpsKernel<
          T, IndexT, ArrangeHelper<IndexT>,
          MinPool<T>><<<config.block_per_grid.x, config.thread_per_block.x, 0,
                        ctx.stream()>>>(segment_ids.data<IndexT>(),
                                        input.data<T>(), output->data<T>(), h,
                                        pool);
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Unsupported segment pooling operation, Only MEAN, SUM, MAX, MIN "
          "available, but got %s.",
          pooltype));
    }
  }
};
// Backward pass for segment pooling.
//   MAX/MIN: route gradients via SegmentPoolCUDAGradFunctor (value matching).
//   MEAN:    divide out_grad rows by the per-segment counts (summed_ids),
//            then gather the rows back to input positions.
//   SUM:     gather out_grad rows directly to input positions.
// Throws InvalidArgument for any other pooltype.
template <typename T, typename IndexT>
class SegmentPoolGradFunctor<platform::CUDADeviceContext, T, IndexT> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& input,
                  const framework::Tensor& output,
                  const framework::Tensor& out_grad,
                  const framework::Tensor& segments, framework::Tensor* in_grad,
                  const framework::Tensor* summed_ids = nullptr,
                  const std::string pooltype = "SUM") {
    if (pooltype == "MAX" || pooltype == "MIN") {
      SegmentPoolCUDAGradFunctor<T, IndexT>(context, input, segments, output,
                                            out_grad, in_grad, pooltype);
    } else if (pooltype == "MEAN") {
      // Work on a copy of out_grad so the incoming gradient is not modified.
      framework::Tensor mean_grad;
      mean_grad.mutable_data<T>(input.dims(), context.GetPlace());
      framework::TensorCopy(out_grad, context.GetPlace(), context, &mean_grad);
      // NOTE(review): len == 0 would divide by zero below — presumably the
      // caller guarantees a non-empty output; verify upstream.
      int len = output.dims()[0];
      int dim = output.numel() / len;
      auto config = platform::GetGpuLaunchConfig1D(context, len);
      SimpleDiv<T><<<config.block_per_grid.x, config.thread_per_block.x, 0,
                     context.stream()>>>(mean_grad.data<T>(),
                                         summed_ids->data<T>(), len, dim);
      GPUGather<T, IndexT>(context, mean_grad, segments, in_grad);
    } else if (pooltype == "SUM") {
      GPUGather<T, IndexT>(context, out_grad, segments, in_grad);
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Unsupported segment pooling operation, Only MEAN, SUM, MAX, MIN "
          "available, but got %s.",
          pooltype));
    }
  }
};
using CUDA = paddle::platform::CUDADeviceContext;
template class SegmentPoolFunctor<CUDA, float, int>;
template class SegmentPoolFunctor<CUDA, float, int64_t>;
template class SegmentPoolFunctor<CUDA, double, int>;
template class SegmentPoolFunctor<CUDA, double, int64_t>;
template class SegmentPoolGradFunctor<CUDA, float, int>;
template class SegmentPoolGradFunctor<CUDA, float, int64_t>;
template class SegmentPoolGradFunctor<CUDA, double, int>;
template class SegmentPoolGradFunctor<CUDA, double, int64_t>;
} // namespace operators
} // namespace paddle
|
27920dbf22d3dabcf84ed3eceab825112e121551.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Perform the first step of Langevin integration (HIP copy): update all
 * velocities with the Langevin drag/force/noise terms and record the tentative
 * position change for the step in posDelta. "Normal" particles are integrated
 * directly; Drude pairs are split into center-of-mass motion (integrated with
 * vscale/fscale/noisescale) and relative motion (integrated with the Drude
 * scales). Both loops are grid-stride; particles with velm[i].w == 0 (infinite
 * mass) are skipped.
 * NOTE(review): force is a long long accumulator — fscale presumably folds in
 * the fixed-point-to-real conversion; confirm against the host-side setup.
 */
extern "C" __global__ void integrateDrudeLangevinPart1(mixed4* __restrict__ velm, const long long* __restrict__ force, mixed4* __restrict__ posDelta,
        const int* __restrict__ normalParticles, const int2* __restrict__ pairParticles, const mixed2* __restrict__ dt, mixed vscale, mixed fscale,
        mixed noisescale, mixed vscaleDrude, mixed fscaleDrude, mixed noisescaleDrude, const float4* __restrict__ random, unsigned int randomIndex) {
    mixed stepSize = dt[0].y;
    // Update normal particles.
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_NORMAL_PARTICLES; i += blockDim.x*gridDim.x) {
        int index = normalParticles[i];
        mixed4 velocity = velm[index];
        if (velocity.w != 0) {
            // velocity.w stores the inverse mass.
            mixed sqrtInvMass = SQRT(velocity.w);
            float4 rand = random[randomIndex+index];
            velocity.x = vscale*velocity.x + fscale*velocity.w*force[index] + noisescale*sqrtInvMass*rand.x;
            velocity.y = vscale*velocity.y + fscale*velocity.w*force[index+PADDED_NUM_ATOMS] + noisescale*sqrtInvMass*rand.y;
            velocity.z = vscale*velocity.z + fscale*velocity.w*force[index+PADDED_NUM_ATOMS*2] + noisescale*sqrtInvMass*rand.z;
            velm[index] = velocity;
            posDelta[index] = make_mixed4(stepSize*velocity.x, stepSize*velocity.y, stepSize*velocity.z, 0);
        }
    }
    // Update Drude particle pairs.
    randomIndex += NUM_NORMAL_PARTICLES;
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_PAIRS; i += blockDim.x*gridDim.x) {
        int2 particles = pairParticles[i];
        mixed4 velocity1 = velm[particles.x];
        mixed4 velocity2 = velm[particles.y];
        mixed mass1 = RECIP(velocity1.w);
        mixed mass2 = RECIP(velocity2.w);
        mixed invTotalMass = RECIP(mass1+mass2);
        mixed invReducedMass = (mass1+mass2)*velocity1.w*velocity2.w;
        mixed mass1fract = invTotalMass*mass1;
        mixed mass2fract = invTotalMass*mass2;
        mixed sqrtInvTotalMass = SQRT(invTotalMass);
        mixed sqrtInvReducedMass = SQRT(invReducedMass);
        // Split the pair into center-of-mass and relative components.
        mixed4 cmVel = velocity1*mass1fract+velocity2*mass2fract;
        mixed4 relVel = velocity2-velocity1;
        mixed3 force1 = make_mixed3(force[particles.x], force[particles.x+PADDED_NUM_ATOMS], force[particles.x+PADDED_NUM_ATOMS*2]);
        mixed3 force2 = make_mixed3(force[particles.y], force[particles.y+PADDED_NUM_ATOMS], force[particles.y+PADDED_NUM_ATOMS*2]);
        mixed3 cmForce = force1+force2;
        mixed3 relForce = force2*mass1fract - force1*mass2fract;
        float4 rand1 = random[randomIndex+2*i];
        float4 rand2 = random[randomIndex+2*i+1];
        // Center-of-mass motion uses the normal Langevin scales; relative
        // (Drude) motion uses the Drude-specific scales.
        cmVel.x = vscale*cmVel.x + fscale*invTotalMass*cmForce.x + noisescale*sqrtInvTotalMass*rand1.x;
        cmVel.y = vscale*cmVel.y + fscale*invTotalMass*cmForce.y + noisescale*sqrtInvTotalMass*rand1.y;
        cmVel.z = vscale*cmVel.z + fscale*invTotalMass*cmForce.z + noisescale*sqrtInvTotalMass*rand1.z;
        relVel.x = vscaleDrude*relVel.x + fscaleDrude*invReducedMass*relForce.x + noisescaleDrude*sqrtInvReducedMass*rand2.x;
        relVel.y = vscaleDrude*relVel.y + fscaleDrude*invReducedMass*relForce.y + noisescaleDrude*sqrtInvReducedMass*rand2.y;
        relVel.z = vscaleDrude*relVel.z + fscaleDrude*invReducedMass*relForce.z + noisescaleDrude*sqrtInvReducedMass*rand2.z;
        // Recombine into per-particle velocities.
        velocity1.x = cmVel.x-relVel.x*mass2fract;
        velocity1.y = cmVel.y-relVel.y*mass2fract;
        velocity1.z = cmVel.z-relVel.z*mass2fract;
        velocity2.x = cmVel.x+relVel.x*mass1fract;
        velocity2.y = cmVel.y+relVel.y*mass1fract;
        velocity2.z = cmVel.z+relVel.z*mass1fract;
        velm[particles.x] = velocity1;
        velm[particles.y] = velocity2;
        posDelta[particles.x] = make_mixed4(stepSize*velocity1.x, stepSize*velocity1.y, stepSize*velocity1.z, 0);
        posDelta[particles.y] = make_mixed4(stepSize*velocity2.x, stepSize*velocity2.y, stepSize*velocity2.z, 0);
    }
}
/**
 * Perform the second step of Langevin integration (HIP copy): apply the
 * (possibly constraint-adjusted) position deltas to the positions and set each
 * particle's velocity to delta/stepSize. With USE_MIXED_PRECISION, positions
 * are stored split as posq + posqCorrection and recombined/resplit here.
 * Particles with velm[i].w == 0 (infinite mass) are skipped.
 */
extern "C" __global__ void integrateDrudeLangevinPart2(real4* __restrict__ posq, real4* __restrict__ posqCorrection, const mixed4* __restrict__ posDelta, mixed4* __restrict__ velm, const mixed2* __restrict__ dt) {
    double invStepSize = 1.0/dt[0].y;
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    while (index < NUM_ATOMS) {
        mixed4 vel = velm[index];
        if (vel.w != 0) {
#ifdef USE_MIXED_PRECISION
            // Reassemble the high-precision position from its two halves.
            real4 pos1 = posq[index];
            real4 pos2 = posqCorrection[index];
            mixed4 pos = make_mixed4(pos1.x+(mixed)pos2.x, pos1.y+(mixed)pos2.y, pos1.z+(mixed)pos2.z, pos1.w);
#else
            real4 pos = posq[index];
#endif
            mixed4 delta = posDelta[index];
            pos.x += delta.x;
            pos.y += delta.y;
            pos.z += delta.z;
            // Velocity consistent with the actual displacement this step.
            vel.x = (mixed) (invStepSize*delta.x);
            vel.y = (mixed) (invStepSize*delta.y);
            vel.z = (mixed) (invStepSize*delta.z);
#ifdef USE_MIXED_PRECISION
            // Split back: posq holds the rounded value, posqCorrection the
            // residual.
            posq[index] = make_real4((real) pos.x, (real) pos.y, (real) pos.z, (real) pos.w);
            posqCorrection[index] = make_real4(pos.x-(real) pos.x, pos.y-(real) pos.y, pos.z-(real) pos.z, 0);
#else
            posq[index] = pos;
#endif
            velm[index] = vel;
        }
        index += blockDim.x*gridDim.x;
    }
}
/**
 * Apply hard wall constraints (HIP copy): if a Drude particle has drifted
 * farther than maxDrudeDistance from its parent, make the pair separation
 * "bounce" off a hard wall at that distance — positions are reflected back
 * inside the wall and the bond-direction velocity components are reset with
 * magnitude derived from hardwallscaleDrude. Two cases: massless parent (only
 * the Drude particle moves) and both particles massive (both move about their
 * center of mass). Grid-stride over pairs.
 */
extern "C" __global__ void applyHardWallConstraints(real4* __restrict__ posq, real4* __restrict__ posqCorrection, mixed4* __restrict__ velm,
        const int2* __restrict__ pairParticles, const mixed2* __restrict__ dt, mixed maxDrudeDistance, mixed hardwallscaleDrude) {
    mixed stepSize = dt[0].y;
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_PAIRS; i += blockDim.x*gridDim.x) {
        int2 particles = pairParticles[i];
#ifdef USE_MIXED_PRECISION
        // Reassemble high-precision positions from their two halves.
        real4 posReal1 = posq[particles.x];
        real4 posReal2 = posq[particles.y];
        real4 posCorr1 = posqCorrection[particles.x];
        real4 posCorr2 = posqCorrection[particles.y];
        mixed4 pos1 = make_mixed4(posReal1.x+(mixed)posCorr1.x, posReal1.y+(mixed)posCorr1.y, posReal1.z+(mixed)posCorr1.z, posReal1.w);
        mixed4 pos2 = make_mixed4(posReal2.x+(mixed)posCorr2.x, posReal2.y+(mixed)posCorr2.y, posReal2.z+(mixed)posCorr2.z, posReal2.w);
#else
        mixed4 pos1 = posq[particles.x];
        mixed4 pos2 = posq[particles.y];
#endif
        mixed4 delta = pos1-pos2;
        mixed r = SQRT(delta.x*delta.x + delta.y*delta.y + delta.z*delta.z);
        mixed rInv = RECIP(r);
        if (rInv*maxDrudeDistance < 1) {
            // The constraint has been violated, so make the inter-particle distance "bounce"
            // off the hard wall.
            mixed4 bondDir = delta*rInv;
            mixed4 vel1 = velm[particles.x];
            mixed4 vel2 = velm[particles.y];
            mixed mass1 = RECIP(vel1.w);
            mixed mass2 = RECIP(vel2.w);
            mixed deltaR = r-maxDrudeDistance;  // overshoot beyond the wall
            mixed deltaT = stepSize;
            // Split vel1 into components along (vb1) and perpendicular to
            // (vp1) the bond direction.
            mixed dotvr1 = vel1.x*bondDir.x + vel1.y*bondDir.y + vel1.z*bondDir.z;
            mixed4 vb1 = bondDir*dotvr1;
            mixed4 vp1 = vel1-vb1;
            if (vel2.w == 0) {
                // The parent particle is massless, so move only the Drude particle.
                if (dotvr1 != 0)
                    deltaT = deltaR/fabs(dotvr1);
                if (deltaT > stepSize)
                    deltaT = stepSize;
                // Reverse the bond-direction velocity with thermalized
                // magnitude.
                dotvr1 = -dotvr1*hardwallscaleDrude/(fabs(dotvr1)*SQRT(mass1));
                mixed dr = -deltaR + deltaT*dotvr1;
                pos1.x += bondDir.x*dr;
                pos1.y += bondDir.y*dr;
                pos1.z += bondDir.z*dr;
#ifdef USE_MIXED_PRECISION
                posq[particles.x] = make_real4((real) pos1.x, (real) pos1.y, (real) pos1.z, (real) pos1.w);
                posqCorrection[particles.x] = make_real4(pos1.x-(real) pos1.x, pos1.y-(real) pos1.y, pos1.z-(real) pos1.z, 0);
#else
                posq[particles.x] = pos1;
#endif
                vel1.x = vp1.x + bondDir.x*dotvr1;
                vel1.y = vp1.y + bondDir.y*dotvr1;
                vel1.z = vp1.z + bondDir.z*dotvr1;
                velm[particles.x] = vel1;
            }
            else {
                // Move both particles.
                mixed invTotalMass = RECIP(mass1+mass2);
                mixed dotvr2 = vel2.x*bondDir.x + vel2.y*bondDir.y + vel2.z*bondDir.z;
                mixed4 vb2 = bondDir*dotvr2;
                mixed4 vp2 = vel2-vb2;
                // Work in the pair's center-of-mass frame along the bond.
                mixed vbCMass = (mass1*dotvr1 + mass2*dotvr2)*invTotalMass;
                dotvr1 -= vbCMass;
                dotvr2 -= vbCMass;
                if (dotvr1 != dotvr2)
                    deltaT = deltaR/fabs(dotvr1-dotvr2);
                if (deltaT > stepSize)
                    deltaT = stepSize;
                mixed vBond = hardwallscaleDrude/SQRT(mass1);
                // Reverse the relative bond-direction velocities, mass-
                // weighted so total momentum along the bond is conserved.
                dotvr1 = -dotvr1*vBond*mass2*invTotalMass/fabs(dotvr1);
                dotvr2 = -dotvr2*vBond*mass1*invTotalMass/fabs(dotvr2);
                mixed dr1 = -deltaR*mass2*invTotalMass + deltaT*dotvr1;
                mixed dr2 = deltaR*mass1*invTotalMass + deltaT*dotvr2;
                dotvr1 += vbCMass;
                dotvr2 += vbCMass;
                pos1.x += bondDir.x*dr1;
                pos1.y += bondDir.y*dr1;
                pos1.z += bondDir.z*dr1;
                pos2.x += bondDir.x*dr2;
                pos2.y += bondDir.y*dr2;
                pos2.z += bondDir.z*dr2;
#ifdef USE_MIXED_PRECISION
                posq[particles.x] = make_real4((real) pos1.x, (real) pos1.y, (real) pos1.z, (real) pos1.w);
                posq[particles.y] = make_real4((real) pos2.x, (real) pos2.y, (real) pos2.z, (real) pos2.w);
                posqCorrection[particles.x] = make_real4(pos1.x-(real) pos1.x, pos1.y-(real) pos1.y, pos1.z-(real) pos1.z, 0);
                posqCorrection[particles.y] = make_real4(pos2.x-(real) pos2.x, pos2.y-(real) pos2.y, pos2.z-(real) pos2.z, 0);
#else
                posq[particles.x] = pos1;
                posq[particles.y] = pos2;
#endif
                vel1.x = vp1.x + bondDir.x*dotvr1;
                vel1.y = vp1.y + bondDir.y*dotvr1;
                vel1.z = vp1.z + bondDir.z*dotvr1;
                vel2.x = vp2.x + bondDir.x*dotvr2;
                vel2.y = vp2.y + bondDir.y*dotvr2;
                vel2.z = vp2.z + bondDir.z*dotvr2;
                velm[particles.x] = vel1;
                velm[particles.y] = vel2;
            }
        }
    }
}
| 27920dbf22d3dabcf84ed3eceab825112e121551.cu | /**
* Perform the first step of Langevin integration.
*/
// First step of Drude Langevin integration (CUDA copy): update all velocities
// with the Langevin drag/force/noise terms and record the tentative position
// change in posDelta. Normal particles are integrated directly; Drude pairs
// are split into center-of-mass motion (vscale/fscale/noisescale) and relative
// motion (the Drude-specific scales). Grid-stride loops; particles with
// velm[i].w == 0 (infinite mass) are skipped.
// NOTE(review): force is a long long accumulator — fscale presumably folds in
// the fixed-point-to-real conversion; confirm against the host-side setup.
extern "C" __global__ void integrateDrudeLangevinPart1(mixed4* __restrict__ velm, const long long* __restrict__ force, mixed4* __restrict__ posDelta,
        const int* __restrict__ normalParticles, const int2* __restrict__ pairParticles, const mixed2* __restrict__ dt, mixed vscale, mixed fscale,
        mixed noisescale, mixed vscaleDrude, mixed fscaleDrude, mixed noisescaleDrude, const float4* __restrict__ random, unsigned int randomIndex) {
    mixed stepSize = dt[0].y;
    // Update normal particles.
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_NORMAL_PARTICLES; i += blockDim.x*gridDim.x) {
        int index = normalParticles[i];
        mixed4 velocity = velm[index];
        if (velocity.w != 0) {
            // velocity.w stores the inverse mass.
            mixed sqrtInvMass = SQRT(velocity.w);
            float4 rand = random[randomIndex+index];
            velocity.x = vscale*velocity.x + fscale*velocity.w*force[index] + noisescale*sqrtInvMass*rand.x;
            velocity.y = vscale*velocity.y + fscale*velocity.w*force[index+PADDED_NUM_ATOMS] + noisescale*sqrtInvMass*rand.y;
            velocity.z = vscale*velocity.z + fscale*velocity.w*force[index+PADDED_NUM_ATOMS*2] + noisescale*sqrtInvMass*rand.z;
            velm[index] = velocity;
            posDelta[index] = make_mixed4(stepSize*velocity.x, stepSize*velocity.y, stepSize*velocity.z, 0);
        }
    }
    // Update Drude particle pairs.
    randomIndex += NUM_NORMAL_PARTICLES;
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_PAIRS; i += blockDim.x*gridDim.x) {
        int2 particles = pairParticles[i];
        mixed4 velocity1 = velm[particles.x];
        mixed4 velocity2 = velm[particles.y];
        mixed mass1 = RECIP(velocity1.w);
        mixed mass2 = RECIP(velocity2.w);
        mixed invTotalMass = RECIP(mass1+mass2);
        mixed invReducedMass = (mass1+mass2)*velocity1.w*velocity2.w;
        mixed mass1fract = invTotalMass*mass1;
        mixed mass2fract = invTotalMass*mass2;
        mixed sqrtInvTotalMass = SQRT(invTotalMass);
        mixed sqrtInvReducedMass = SQRT(invReducedMass);
        // Split the pair into center-of-mass and relative components.
        mixed4 cmVel = velocity1*mass1fract+velocity2*mass2fract;
        mixed4 relVel = velocity2-velocity1;
        mixed3 force1 = make_mixed3(force[particles.x], force[particles.x+PADDED_NUM_ATOMS], force[particles.x+PADDED_NUM_ATOMS*2]);
        mixed3 force2 = make_mixed3(force[particles.y], force[particles.y+PADDED_NUM_ATOMS], force[particles.y+PADDED_NUM_ATOMS*2]);
        mixed3 cmForce = force1+force2;
        mixed3 relForce = force2*mass1fract - force1*mass2fract;
        float4 rand1 = random[randomIndex+2*i];
        float4 rand2 = random[randomIndex+2*i+1];
        // Center-of-mass motion uses the normal Langevin scales; relative
        // (Drude) motion uses the Drude-specific scales.
        cmVel.x = vscale*cmVel.x + fscale*invTotalMass*cmForce.x + noisescale*sqrtInvTotalMass*rand1.x;
        cmVel.y = vscale*cmVel.y + fscale*invTotalMass*cmForce.y + noisescale*sqrtInvTotalMass*rand1.y;
        cmVel.z = vscale*cmVel.z + fscale*invTotalMass*cmForce.z + noisescale*sqrtInvTotalMass*rand1.z;
        relVel.x = vscaleDrude*relVel.x + fscaleDrude*invReducedMass*relForce.x + noisescaleDrude*sqrtInvReducedMass*rand2.x;
        relVel.y = vscaleDrude*relVel.y + fscaleDrude*invReducedMass*relForce.y + noisescaleDrude*sqrtInvReducedMass*rand2.y;
        relVel.z = vscaleDrude*relVel.z + fscaleDrude*invReducedMass*relForce.z + noisescaleDrude*sqrtInvReducedMass*rand2.z;
        // Recombine into per-particle velocities.
        velocity1.x = cmVel.x-relVel.x*mass2fract;
        velocity1.y = cmVel.y-relVel.y*mass2fract;
        velocity1.z = cmVel.z-relVel.z*mass2fract;
        velocity2.x = cmVel.x+relVel.x*mass1fract;
        velocity2.y = cmVel.y+relVel.y*mass1fract;
        velocity2.z = cmVel.z+relVel.z*mass1fract;
        velm[particles.x] = velocity1;
        velm[particles.y] = velocity2;
        posDelta[particles.x] = make_mixed4(stepSize*velocity1.x, stepSize*velocity1.y, stepSize*velocity1.z, 0);
        posDelta[particles.y] = make_mixed4(stepSize*velocity2.x, stepSize*velocity2.y, stepSize*velocity2.z, 0);
    }
}
/**
 * Perform the second step of Langevin integration: apply the (possibly
 * constraint-adjusted) position deltas to the positions and set each
 * particle's velocity to delta/stepSize. With USE_MIXED_PRECISION, positions
 * are stored split as posq + posqCorrection and recombined/resplit here.
 * Particles with velm[i].w == 0 (infinite mass) are skipped.
 */
extern "C" __global__ void integrateDrudeLangevinPart2(real4* __restrict__ posq, real4* __restrict__ posqCorrection, const mixed4* __restrict__ posDelta, mixed4* __restrict__ velm, const mixed2* __restrict__ dt) {
    double invStepSize = 1.0/dt[0].y;
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    while (index < NUM_ATOMS) {
        mixed4 vel = velm[index];
        if (vel.w != 0) {
#ifdef USE_MIXED_PRECISION
            // Reassemble the high-precision position from its two halves.
            real4 pos1 = posq[index];
            real4 pos2 = posqCorrection[index];
            mixed4 pos = make_mixed4(pos1.x+(mixed)pos2.x, pos1.y+(mixed)pos2.y, pos1.z+(mixed)pos2.z, pos1.w);
#else
            real4 pos = posq[index];
#endif
            mixed4 delta = posDelta[index];
            pos.x += delta.x;
            pos.y += delta.y;
            pos.z += delta.z;
            // Velocity consistent with the actual displacement this step.
            vel.x = (mixed) (invStepSize*delta.x);
            vel.y = (mixed) (invStepSize*delta.y);
            vel.z = (mixed) (invStepSize*delta.z);
#ifdef USE_MIXED_PRECISION
            // Split back: posq holds the rounded value, posqCorrection the
            // residual.
            posq[index] = make_real4((real) pos.x, (real) pos.y, (real) pos.z, (real) pos.w);
            posqCorrection[index] = make_real4(pos.x-(real) pos.x, pos.y-(real) pos.y, pos.z-(real) pos.z, 0);
#else
            posq[index] = pos;
#endif
            velm[index] = vel;
        }
        index += blockDim.x*gridDim.x;
    }
}
/**
* Apply hard wall constraints
*/
/**
 * Enforce a hard-wall constraint on each Drude pair: if the two particles of a
 * pair have drifted farther apart than maxDrudeDistance, their separation is
 * reflected ("bounces") back inside the wall, and the velocity component along
 * the bond is replaced by a value scaled with hardwallscaleDrude / sqrt(mass).
 * Pairs are processed with a grid-stride loop over NUM_PAIRS; dt[0].y is the
 * current step size.
 */
extern "C" __global__ void applyHardWallConstraints(real4* __restrict__ posq, real4* __restrict__ posqCorrection, mixed4* __restrict__ velm,
        const int2* __restrict__ pairParticles, const mixed2* __restrict__ dt, mixed maxDrudeDistance, mixed hardwallscaleDrude) {
    mixed stepSize = dt[0].y;
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_PAIRS; i += blockDim.x*gridDim.x) {
        // particles.x appears to be the Drude particle and particles.y its parent
        // (only .x is moved when .y is massless) -- confirm against host setup code.
        int2 particles = pairParticles[i];
#ifdef USE_MIXED_PRECISION
        // Reconstruct full-precision positions from base + correction terms.
        real4 posReal1 = posq[particles.x];
        real4 posReal2 = posq[particles.y];
        real4 posCorr1 = posqCorrection[particles.x];
        real4 posCorr2 = posqCorrection[particles.y];
        mixed4 pos1 = make_mixed4(posReal1.x+(mixed)posCorr1.x, posReal1.y+(mixed)posCorr1.y, posReal1.z+(mixed)posCorr1.z, posReal1.w);
        mixed4 pos2 = make_mixed4(posReal2.x+(mixed)posCorr2.x, posReal2.y+(mixed)posCorr2.y, posReal2.z+(mixed)posCorr2.z, posReal2.w);
#else
        mixed4 pos1 = posq[particles.x];
        mixed4 pos2 = posq[particles.y];
#endif
        mixed4 delta = pos1-pos2;
        mixed r = SQRT(delta.x*delta.x + delta.y*delta.y + delta.z*delta.z);
        mixed rInv = RECIP(r);
        if (rInv*maxDrudeDistance < 1) { // i.e. r > maxDrudeDistance
            // The constraint has been violated, so make the inter-particle distance "bounce"
            // off the hard wall.
            mixed4 bondDir = delta*rInv; // unit vector along the bond
            mixed4 vel1 = velm[particles.x];
            mixed4 vel2 = velm[particles.y];
            mixed mass1 = RECIP(vel1.w); // velm.w stores the inverse mass
            mixed mass2 = RECIP(vel2.w);
            mixed deltaR = r-maxDrudeDistance; // overshoot beyond the wall
            mixed deltaT = stepSize;
            // Decompose vel1 into a component along the bond (vb1) and the rest (vp1).
            mixed dotvr1 = vel1.x*bondDir.x + vel1.y*bondDir.y + vel1.z*bondDir.z;
            mixed4 vb1 = bondDir*dotvr1;
            mixed4 vp1 = vel1-vb1;
            if (vel2.w == 0) {
                // The parent particle is massless, so move only the Drude particle.
                // Estimate the time since the wall was crossed (capped at the step size).
                if (dotvr1 != 0)
                    deltaT = deltaR/fabs(dotvr1);
                if (deltaT > stepSize)
                    deltaT = stepSize;
                // Reverse the bond-direction velocity and rescale its magnitude.
                dotvr1 = -dotvr1*hardwallscaleDrude/(fabs(dotvr1)*SQRT(mass1));
                mixed dr = -deltaR + deltaT*dotvr1; // pull back inside, then advance with the new velocity
                pos1.x += bondDir.x*dr;
                pos1.y += bondDir.y*dr;
                pos1.z += bondDir.z*dr;
#ifdef USE_MIXED_PRECISION
                posq[particles.x] = make_real4((real) pos1.x, (real) pos1.y, (real) pos1.z, (real) pos1.w);
                posqCorrection[particles.x] = make_real4(pos1.x-(real) pos1.x, pos1.y-(real) pos1.y, pos1.z-(real) pos1.z, 0);
#else
                posq[particles.x] = pos1;
#endif
                // Recombine the unchanged perpendicular part with the new bond velocity.
                vel1.x = vp1.x + bondDir.x*dotvr1;
                vel1.y = vp1.y + bondDir.y*dotvr1;
                vel1.z = vp1.z + bondDir.z*dotvr1;
                velm[particles.x] = vel1;
            }
            else {
                // Move both particles.
                mixed invTotalMass = RECIP(mass1+mass2);
                mixed dotvr2 = vel2.x*bondDir.x + vel2.y*bondDir.y + vel2.z*bondDir.z;
                mixed4 vb2 = bondDir*dotvr2;
                mixed4 vp2 = vel2-vb2;
                // Work in the center-of-mass frame along the bond.
                mixed vbCMass = (mass1*dotvr1 + mass2*dotvr2)*invTotalMass;
                dotvr1 -= vbCMass;
                dotvr2 -= vbCMass;
                // Time since the wall was crossed, from the relative bond velocity.
                if (dotvr1 != dotvr2)
                    deltaT = deltaR/fabs(dotvr1-dotvr2);
                if (deltaT > stepSize)
                    deltaT = stepSize;
                // Reverse both bond-direction velocities, scaled by the reduced-mass split.
                mixed vBond = hardwallscaleDrude/SQRT(mass1);
                dotvr1 = -dotvr1*vBond*mass2*invTotalMass/fabs(dotvr1);
                dotvr2 = -dotvr2*vBond*mass1*invTotalMass/fabs(dotvr2);
                // Displacements: undo the overshoot (split by mass) and advance with the new velocities.
                mixed dr1 = -deltaR*mass2*invTotalMass + deltaT*dotvr1;
                mixed dr2 = deltaR*mass1*invTotalMass + deltaT*dotvr2;
                // Transform back out of the center-of-mass frame.
                dotvr1 += vbCMass;
                dotvr2 += vbCMass;
                pos1.x += bondDir.x*dr1;
                pos1.y += bondDir.y*dr1;
                pos1.z += bondDir.z*dr1;
                pos2.x += bondDir.x*dr2;
                pos2.y += bondDir.y*dr2;
                pos2.z += bondDir.z*dr2;
#ifdef USE_MIXED_PRECISION
                posq[particles.x] = make_real4((real) pos1.x, (real) pos1.y, (real) pos1.z, (real) pos1.w);
                posq[particles.y] = make_real4((real) pos2.x, (real) pos2.y, (real) pos2.z, (real) pos2.w);
                posqCorrection[particles.x] = make_real4(pos1.x-(real) pos1.x, pos1.y-(real) pos1.y, pos1.z-(real) pos1.z, 0);
                posqCorrection[particles.y] = make_real4(pos2.x-(real) pos2.x, pos2.y-(real) pos2.y, pos2.z-(real) pos2.z, 0);
#else
                posq[particles.x] = pos1;
                posq[particles.y] = pos2;
#endif
                vel1.x = vp1.x + bondDir.x*dotvr1;
                vel1.y = vp1.y + bondDir.y*dotvr1;
                vel1.z = vp1.z + bondDir.z*dotvr1;
                vel2.x = vp2.x + bondDir.x*dotvr2;
                vel2.y = vp2.y + bondDir.y*dotvr2;
                vel2.z = vp2.z + bondDir.z*dotvr2;
                velm[particles.x] = vel1;
                velm[particles.y] = vel2;
            }
        }
    }
}
|
0e2fe71b4e3d05808ab28491d23e26bad470325d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include<cmath>
using namespace std;
#define MASK_WIDTH 5
__constant__ int d_const_Gaussian[MASK_WIDTH * MASK_WIDTH]; //Allocate constant memory
//Gaussian filtering
// 5x5 Gaussian smoothing of an 8-bit grayscale image, one thread per pixel.
// d_const_Gaussian must hold the 5x5 integer mask whose entries sum to 273
// (the normalization constant used below).
// NOTE(review): the mask lookup (i + 2) * 5 + j + 2 hard-codes a 5x5 window,
// so erodeElement is expected to be cv::Size(5, 5).
__global__ void GaussianFiltInCuda(unsigned char* dataIn, unsigned char* dataOut, cv::Size erodeElement, int imgWidth, int imgHeight)
{
    int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
    int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
    // Bug fix: the grid is sized with ceil-division, so threads beyond the
    // image edge exist; without this guard they read and wrote out of bounds
    // through dataOut[Index] = dataIn[Index] below.
    if (xIndex >= imgWidth || yIndex >= imgHeight)
        return;
    int Index = xIndex + yIndex * imgWidth;
    int halfEW = erodeElement.width / 2;
    int halfEH = erodeElement.height / 2;
    // Border pixels are passed through unfiltered.
    dataOut[Index] = dataIn[Index];
    // Interior pixels only: the full window must fit inside the image.
    if (xIndex > halfEW && xIndex < imgWidth - halfEW && yIndex > halfEH && yIndex < imgHeight - halfEH)
    {
        int sum = 0;
        for (int i = -halfEH; i <= halfEH; i++)
            for (int j = -halfEW; j <= halfEW; j++)
                sum += dataIn[(i + yIndex) * imgWidth + xIndex + j] * d_const_Gaussian[(i + 2) * 5 + j + 2];
        // Normalize by the mask sum and clamp to the valid 8-bit range.
        int result = sum / 273;
        dataOut[Index] = (unsigned char)min(max(result, 0), 255);
    }
}
//denoising (bilateral filtering)
// Gaussian-weighted range filter (bilateral-style) for 8-bit grayscale.
// Spatial weights come from the 5x5 d_const_Gaussian mask; each tap is further
// scaled by a Gaussian of the intensity difference to the center pixel
// (sigma = 50), which preserves edges.
__global__ void bilateralInCuda(unsigned char* dataIn, unsigned char* dataOut, cv::Size dilateElement, int imgWidth, int imgHeight)
{
    int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
    int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
    // Bug fix: guard against out-of-range threads created by the ceil-divided
    // grid; they previously read/wrote past the end of the image buffers.
    if (xIndex >= imgWidth || yIndex >= imgHeight)
        return;
    int Index = xIndex + yIndex * imgWidth;
    int halfEW = dilateElement.width / 2;
    int halfEH = dilateElement.height / 2;
    // Border pixels are passed through unfiltered.
    dataOut[Index] = dataIn[Index];
    if (xIndex > halfEW && xIndex < imgWidth - halfEW && yIndex > halfEH && yIndex < imgHeight - halfEH)
    {
        const int center = dataIn[Index];
        const double sigma = 50.0;
        // Bug fix: accumulate in double. The original accumulated the
        // double-valued weighted taps into an int, truncating every term, and
        // computed the range-weight exponent with integer division
        // (diff*diff / sigm / sigm), collapsing most weights to exp(0).
        double sum = 0.0;
        for (int i = -halfEH; i <= halfEH; i++)
        {
            for (int j = -halfEW; j <= halfEW; j++)
            {
                int tap = dataIn[(i + yIndex) * imgWidth + xIndex + j];
                double diff = (double)(tap - center);
                double rangeWeight = exp(-(diff * diff) / (sigma * sigma) / 2.0);
                sum += tap * d_const_Gaussian[(i + 2) * 5 + j + 2] * rangeWeight;
            }
        }
        // NOTE(review): dividing by the fixed spatial-mask sum (273) instead of
        // the actual accumulated weight darkens the result near strong edges;
        // kept to preserve the established output, modulo the fixes above.
        int result = (int)(sum / 273.0);
        dataOut[Index] = (unsigned char)min(max(result, 0), 255);
    }
}
// Demo driver: print basic device properties, load "1.jpg" as grayscale, run
// the Gaussian and bilateral filter kernels on the GPU, and display the
// original plus both results. Returns 0 on success, -1 if the image is missing.
int main()
{
    int dev = 0;
    hipDeviceProp_t devProp;
    hipGetDeviceProperties(&devProp, dev);
    std::cout << "Use GPU device " << dev << ": " << devProp.name << std::endl;
    std::cout << "Number of SM" << devProp.multiProcessorCount << std::endl;
    std::cout << "The shared memory size of each thread block" << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
    std::cout << "Maximum number of threads per thread block" << devProp.maxThreadsPerBlock << std::endl;
    std::cout << "Maximum number of threads per EM" << devProp.maxThreadsPerMultiProcessor << std::endl;
    std::cout << "Maximum number of warps per EM" << devProp.maxThreadsPerMultiProcessor / 32 << std::endl;
    cv::Mat grayImg = cv::imread("1.jpg", 0);
    // Robustness fix: cv::imread returns an empty Mat on failure; the original
    // proceeded to allocate size-0 buffers and dereference null image data.
    if (grayImg.empty())
    {
        std::cerr << "Failed to load input image 1.jpg" << std::endl;
        return -1;
    }
    unsigned char* d_in;
    unsigned char* d_out1;
    unsigned char* d_out2;
    int imgWidth = grayImg.cols;
    int imgHeight = grayImg.rows;
    cv::Mat GAUImg(imgHeight, imgWidth, CV_8UC1, cv::Scalar(0)); //Define empty image to store Gaussian filtering results
    cv::Mat BILAImg(imgHeight, imgWidth, CV_8UC1, cv::Scalar(0)); //Define an empty image to store the results of bilateral filtering
    //Allocate GPU memory for GPU variable pointers
    hipMalloc((void**)&d_in, imgWidth * imgHeight * sizeof(unsigned char));
    hipMalloc((void**)&d_out1, imgWidth * imgHeight * sizeof(unsigned char));
    hipMalloc((void**)&d_out2, imgWidth * imgHeight * sizeof(unsigned char));
    //Copy CPU image data to GPU memory pointer variable
    hipMemcpy(d_in, grayImg.data, imgWidth * imgHeight * sizeof(unsigned char), hipMemcpyHostToDevice);
    dim3 threadsPerBlock(32, 32); //Define 32*32 dimension block thread block to improve the calculation speed as much as possible
    dim3 blocksPerGrid((imgWidth + threadsPerBlock.x - 1) / threadsPerBlock.x,
        (imgHeight + threadsPerBlock.y - 1) / threadsPerBlock.y); // ceil-division grid
    cv::Size Element(5, 5);//Operator size (must stay 5x5, matching MASK_WIDTH)
    int Gaussian[25] = { 1,4,7,4,1,
        4,16,26,16,4,
        7,26,41,26,7,
        4,16,26,16,4,
        1,4,7,4,1 };//sum is 273
    hipMemcpyToSymbol(d_const_Gaussian, Gaussian, 25 * sizeof(int));
    //cuda Gaussian filter
    GaussianFiltInCuda << <blocksPerGrid, threadsPerBlock >> > (d_in, d_out1, Element, imgWidth, imgHeight);
    //cuda bilateral filtering
    bilateralInCuda << <blocksPerGrid, threadsPerBlock >> > (d_in, d_out2, Element, imgWidth, imgHeight);
    //Assign the GPU calculation result variable back to the host CPU(Device to host)
    // (the blocking hipMemcpy also synchronizes with the kernels above)
    hipMemcpy(GAUImg.data, d_out1, imgWidth * imgHeight * sizeof(unsigned char), hipMemcpyDeviceToHost);
    hipMemcpy(BILAImg.data, d_out2, imgWidth * imgHeight * sizeof(unsigned char), hipMemcpyDeviceToHost);
    cv::imshow("orgin", grayImg);
    cv::imshow("Gaussian", GAUImg);
    cv::imshow("bilateralFilter", BILAImg);
    cv::waitKey(100000);
    //Free
    hipFree(d_in);
    hipFree(d_out1);
    hipFree(d_out2);
    return 0;
} | 0e2fe71b4e3d05808ab28491d23e26bad470325d.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include<cmath>
using namespace std;
#define MASK_WIDTH 5
__constant__ int d_const_Gaussian[MASK_WIDTH * MASK_WIDTH]; //Allocate constant memory
//Gaussian filtering
// 5x5 Gaussian smoothing of an 8-bit grayscale image, one thread per pixel.
// d_const_Gaussian must hold the 5x5 integer mask whose entries sum to 273
// (the normalization constant used below).
// NOTE(review): the mask lookup (i + 2) * 5 + j + 2 hard-codes a 5x5 window,
// so erodeElement is expected to be cv::Size(5, 5).
__global__ void GaussianFiltInCuda(unsigned char* dataIn, unsigned char* dataOut, cv::Size erodeElement, int imgWidth, int imgHeight)
{
    int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
    int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
    // Bug fix: the grid is sized with ceil-division, so threads beyond the
    // image edge exist; without this guard they read and wrote out of bounds
    // through dataOut[Index] = dataIn[Index] below.
    if (xIndex >= imgWidth || yIndex >= imgHeight)
        return;
    int Index = xIndex + yIndex * imgWidth;
    int halfEW = erodeElement.width / 2;
    int halfEH = erodeElement.height / 2;
    // Border pixels are passed through unfiltered.
    dataOut[Index] = dataIn[Index];
    // Interior pixels only: the full window must fit inside the image.
    if (xIndex > halfEW && xIndex < imgWidth - halfEW && yIndex > halfEH && yIndex < imgHeight - halfEH)
    {
        int sum = 0;
        for (int i = -halfEH; i <= halfEH; i++)
            for (int j = -halfEW; j <= halfEW; j++)
                sum += dataIn[(i + yIndex) * imgWidth + xIndex + j] * d_const_Gaussian[(i + 2) * 5 + j + 2];
        // Normalize by the mask sum and clamp to the valid 8-bit range.
        int result = sum / 273;
        dataOut[Index] = (unsigned char)min(max(result, 0), 255);
    }
}
//denoising (bilateral filtering)
__global__ void bilateralInCuda(unsigned char* dataIn, unsigned char* dataOut, cv::Size dilateElement, int imgWidth, int imgHeight)
{
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int elementWidth = dilateElement.width;
int elementHeight = dilateElement.height;
int halfEW = elementWidth / 2;
int halfEH = elementHeight / 2;
//Initialization output
dataOut[yIndex * imgWidth + xIndex] = dataIn[yIndex * imgWidth + xIndex];
//Prevent cross-border
if (xIndex > halfEW && xIndex < imgWidth - halfEW && yIndex > halfEH && yIndex < imgHeight - halfEH)
{
int sum = 0;
double num = 0;
int sigm = 50;
for (int i = -halfEH; i < halfEH + 1; i++)
{
for (int j = -halfEW; j < halfEW + 1; j++)
{
/*if (dataIn[(i + yIndex) * imgWidth + xIndex + j] > dataOut[yIndex * imgWidth + xIndex])
{
dataOut[yIndex * imgWidth + xIndex] = dataIn[(i + yIndex) * imgWidth + xIndex + j];
}*/
num = exp(-(double)((dataIn[(i + yIndex) * imgWidth + xIndex + j] - dataIn[yIndex * imgWidth + xIndex])* (dataIn[(i + yIndex) * imgWidth + xIndex + j] - dataIn[yIndex * imgWidth + xIndex]) / sigm / sigm) / 2);
sum += (int)dataIn[(i + yIndex) * imgWidth + xIndex + j] * d_const_Gaussian[(i + 2) * 5 + j + 2] * num;
}
}
if (sum / 273 < 0)
dataOut[yIndex * imgWidth + xIndex] = 0;
else if (sum / 273 > 255)
dataOut[yIndex * imgWidth + xIndex] = 255;
else
dataOut[yIndex * imgWidth + xIndex] = sum / 273;
}
}
// Demo driver: print basic device properties, load "1.jpg" as grayscale, run
// the Gaussian and bilateral filter kernels, and display the results.
// NOTE(review): the "£º" sequences in the cout strings look like mojibake of a
// full-width colon from a non-UTF-8 source file — confirm the intended encoding.
// NOTE(review): no CUDA API return codes are checked, and a missing "1.jpg"
// yields an empty Mat that is used without validation.
int main()
{
    int dev = 0;
    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp, dev);
    std::cout << "Use GPU device " << dev << ": " << devProp.name << std::endl;
    std::cout << "Number of SM£º" << devProp.multiProcessorCount << std::endl;
    std::cout << "The shared memory size of each thread block£º" << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
    std::cout << "Maximum number of threads per thread block£º" << devProp.maxThreadsPerBlock << std::endl;
    std::cout << "Maximum number of threads per EM£º" << devProp.maxThreadsPerMultiProcessor << std::endl;
    std::cout << "Maximum number of warps per EM£º" << devProp.maxThreadsPerMultiProcessor / 32 << std::endl;
    cv::Mat grayImg = cv::imread("1.jpg", 0);
    unsigned char* d_in;
    unsigned char* d_out1;
    unsigned char* d_out2;
    int imgWidth = grayImg.cols;
    int imgHeight = grayImg.rows;
    cv::Mat GAUImg(imgHeight, imgWidth, CV_8UC1, cv::Scalar(0)); //Define empty image to store Gaussian filtering results
    cv::Mat BILAImg(imgHeight, imgWidth, CV_8UC1, cv::Scalar(0)); //Define an empty image to store the results of bilateral filtering
    //Allocate GPU memory for GPU variable pointers
    cudaMalloc((void**)&d_in, imgWidth * imgHeight * sizeof(unsigned char));
    cudaMalloc((void**)&d_out1, imgWidth * imgHeight * sizeof(unsigned char));
    cudaMalloc((void**)&d_out2, imgWidth * imgHeight * sizeof(unsigned char));
    //Copy CPU image data to GPU memory pointer variable
    cudaMemcpy(d_in, grayImg.data, imgWidth * imgHeight * sizeof(unsigned char), cudaMemcpyHostToDevice);
    dim3 threadsPerBlock(32, 32); //Define 32*32 dimension block thread block to improve the calculation speed as much as possible
    // Ceil-division grid: covers the whole image even when its size is not a
    // multiple of 32 (the kernels must therefore bounds-check).
    dim3 blocksPerGrid((imgWidth + threadsPerBlock.x - 1) / threadsPerBlock.x,
        (imgHeight + threadsPerBlock.y - 1) / threadsPerBlock.y);
    cv::Size Element(5, 5);//Operator size
    int Gaussian[25] = { 1,4,7,4,1,
        4,16,26,16,4,
        7,26,41,26,7,
        4,16,26,16,4,
        1,4,7,4,1 };//sum is 273
    cudaMemcpyToSymbol(d_const_Gaussian, Gaussian, 25 * sizeof(int));
    //cuda Gaussian filter
    GaussianFiltInCuda << <blocksPerGrid, threadsPerBlock >> > (d_in, d_out1, Element, imgWidth, imgHeight);
    //cuda bilateral filtering
    bilateralInCuda << <blocksPerGrid, threadsPerBlock >> > (d_in, d_out2, Element, imgWidth, imgHeight);
    //Assign the GPU calculation result variable back to the host CPU(Device to host)
    // (the blocking cudaMemcpy also synchronizes with the kernels above)
    cudaMemcpy(GAUImg.data, d_out1, imgWidth * imgHeight * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    cudaMemcpy(BILAImg.data, d_out2, imgWidth * imgHeight * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    cv::imshow("orgin", grayImg);
    cv::imshow("Gaussian", GAUImg);
    cv::imshow("bilateralFilter", BILAImg);
    cv::waitKey(100000);
    //Free
    cudaFree(d_in);
    cudaFree(d_out1);
    cudaFree(d_out2);
    return 0;
}
51ebc2278078ee415620795cc28ed112dac9d8d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Prerequisites.cuh"
#include "Angles.cuh"
#include "CubicInterp.cuh"
#include "DeviceFunctions.cuh"
#include "FFT.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "Transformation.cuh"
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate3DKernel(cudaTex t_input, tfloat* d_output, int3 dims, glm::mat4* d_transforms, uint nangles);
template<bool cubicinterp> __global__ void Rotate3DExtractAt(cudaTex t_input, int3 dimvolume, tfloat* d_proj, int3 dimproj, size_t elementsproj, glm::mat3* d_rotations, tfloat3* d_positions);
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate2DKernel(cudaTex* t_input, tfloat* d_output, int2 dims, glm::mat2* d_transforms);
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate2DFTKernel(cudaTex t_Re, cudaTex t_Im, tcomplex* d_output, int3 dims, glm::mat2 transform, tfloat maxfreq);
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate3DFTKernel(cudaTex t_Re, cudaTex t_Im, tcomplex* d_output, int3 dims, glm::mat4* d_transform, float maxfreq2);
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate3DFTKernel(cudaTex t_Re, tfloat* d_output, int3 dims, glm::mat4* d_transform, float maxfreq2);
////////////////////
//Rotate 3D volume//
////////////////////
// Rotate a 3-D volume by one or more Euler-angle triplets.
// Binds the data to a 3-D texture (after B-spline prefiltering a copy when
// cubic interpolation is requested, so the caller's volume stays untouched)
// and delegates to the texture-based overload.
void d_Rotate3D(tfloat* d_volume, tfloat* d_output, int3 dims, tfloat3* h_angles, uint nangles, T_INTERP_MODE mode, bool iszerocentered)
{
    tfloat* d_temp;
    if (mode == T_INTERP_CUBIC)
        hipMalloc((void**)&d_temp, Elements(dims) * sizeof(tfloat));
    hipArray* a_input;
    cudaTex t_input;
    if (mode == T_INTERP_LINEAR)
        d_BindTextureTo3DArray(d_volume, a_input, t_input, dims, hipFilterModeLinear, false);
    else
    {
        // Prefilter a copy: cubic texture sampling expects B-spline coefficients.
        hipMemcpy(d_temp, d_volume, Elements(dims) * sizeof(tfloat), hipMemcpyDeviceToDevice);
        d_CubicBSplinePrefilter3D(d_temp, dims);
        d_BindTextureTo3DArray(d_temp, a_input, t_input, dims, hipFilterModeLinear, false);
    }
    d_Rotate3D(t_input, d_output, dims, h_angles, nangles, mode, iszerocentered);
    hipDestroyTextureObject(t_input);
    hipFreeArray(a_input);
    if (mode == T_INTERP_CUBIC)
        hipFree(d_temp);
}
// Texture-based 3-D rotation: build one voxel-space transform per angle set
// (center the volume at the origin, apply the transposed Euler rotation, shift
// back with a +0.5 offset for texel-center sampling) and launch one combined
// kernel over all rotations.
void d_Rotate3D(cudaTex t_volume, tfloat* d_output, int3 dims, tfloat3* h_angles, uint nangles, T_INTERP_MODE mode, bool iszerocentered)
{
    glm::mat4* hostTransforms = (glm::mat4*)malloc(nangles * sizeof(glm::mat4));
    for (uint i = 0; i < nangles; i++)
    {
        glm::mat4 toCenter = Matrix4Translation(tfloat3(-dims.x / 2, -dims.y / 2, -dims.z / 2));
        glm::mat4 rotate = glm::transpose(Matrix4Euler(h_angles[i]));
        glm::mat4 fromCenter = Matrix4Translation(tfloat3(dims.x / 2 + 0.5f, dims.y / 2 + 0.5f, dims.z / 2 + 0.5f));
        hostTransforms[i] = fromCenter * rotate * toCenter;
    }
    glm::mat4* deviceTransforms = (glm::mat4*)CudaMallocFromHostArray(hostTransforms, nangles * sizeof(glm::mat4));
    free(hostTransforms);

    dim3 threads(16, 16);
    dim3 blocks((dims.x + 15) / 16, (dims.y + 15) / 16, dims.z);

    if (mode == T_INTERP_LINEAR)
    {
        if (iszerocentered)
            Rotate3DKernel<false, true> << <blocks, threads >> > (t_volume, d_output, dims, deviceTransforms, nangles);
        else
            Rotate3DKernel<false, false> << <blocks, threads >> > (t_volume, d_output, dims, deviceTransforms, nangles);
    }
    else if (mode == T_INTERP_CUBIC)
    {
        if (iszerocentered)
            Rotate3DKernel<true, true> << <blocks, threads >> > (t_volume, d_output, dims, deviceTransforms, nangles);
        else
            Rotate3DKernel<true, false> << <blocks, threads >> > (t_volume, d_output, dims, deviceTransforms, nangles);
    }

    hipFree(deviceTransforms);
}
////////////////////////////
//Rotate 3D and extract at//
////////////////////////////
// Convenience overload: convert per-item Euler angles into transposed rotation
// matrices on the host, upload them together with the sampling positions, and
// delegate to the device-matrix overload.
void d_Rotate3DExtractAt(cudaTex t_volume, int3 dimsvolume, tfloat* d_proj, int3 dimsproj, tfloat3* h_angles, tfloat3* h_positions, T_INTERP_MODE mode, uint batch)
{
    glm::mat3* h_rotations = (glm::mat3*)malloc(sizeof(glm::mat3) * batch);
    for (uint b = 0; b < batch; b++)
        h_rotations[b] = glm::transpose(Matrix3Euler(h_angles[b]));

    glm::mat3* d_rotations = (glm::mat3*)CudaMallocFromHostArray(h_rotations, sizeof(glm::mat3) * batch);
    free(h_rotations);
    tfloat3* d_positions = (tfloat3*)CudaMallocFromHostArray(h_positions, batch * sizeof(tfloat3));

    d_Rotate3DExtractAt(t_volume, dimsvolume, d_proj, dimsproj, d_rotations, d_positions, mode, batch);

    hipFree(d_rotations);
    hipFree(d_positions);
}
// Extract rotated/translated sub-volumes ("projections") from a larger volume
// sampled through the given texture. d_matrices/d_positions hold one rotation
// and one sampling center per batch item.
// NOTE(review): a bare `throw;` outside of a catch block calls std::terminate;
// consider throwing a concrete exception type instead.
void d_Rotate3DExtractAt(cudaTex t_volume, int3 dimsvolume, tfloat* d_proj, int3 dimsproj, glm::mat3* d_matrices, tfloat3* d_positions, T_INTERP_MODE mode, uint batch)
{
    uint ndimsvolume = DimensionCount(dimsvolume);
    uint ndimsproj = DimensionCount(dimsproj);
    // The output cannot have more dimensions than the source volume.
    if (ndimsvolume < ndimsproj)
        throw;
    uint elements = Elements(dimsproj);
    // One grid row (blockIdx.y) per batch item; x covers the output elements
    // with at most 128 blocks (the kernel uses a grid-stride loop).
    dim3 grid = dim3(tmin(128, (elements + 127) / 128), batch, 1);
    if (ndimsproj >= 2)
    {
        if (mode == T_INTERP_CUBIC)
            Rotate3DExtractAt<true> << <grid, 128 >> > (t_volume, dimsvolume, d_proj, dimsproj, elements, d_matrices, d_positions);
        else
            Rotate3DExtractAt<false> << <grid, 128 >> > (t_volume, dimsvolume, d_proj, dimsproj, elements, d_matrices, d_positions);
    }
    else
        throw;
}
/////////////
//Rotate 2D//
/////////////
// Rotate a batch of 2-D images, one angle per image.
// Each image gets its own texture (bound to a prefiltered copy for cubic
// interpolation); the array of texture handles is uploaded to the device and
// passed to the texture-based overload.
void d_Rotate2D(tfloat* d_input, tfloat* d_output, int2 dims, tfloat* h_angles, T_INTERP_MODE mode, bool isoutputzerocentered, uint batch)
{
    tfloat* d_temp;
    if (mode == T_INTERP_CUBIC)
        hipMalloc((void**)&d_temp, Elements2(dims) * batch * sizeof(tfloat));
    hipArray_t* a_input = (hipArray_t*)malloc(batch * sizeof(hipArray_t));
    cudaTex* t_input = (cudaTex*)malloc(batch * sizeof(cudaTex));
    if (mode == T_INTERP_LINEAR)
        d_BindTextureToArray(d_input, a_input, t_input, dims, hipFilterModeLinear, false, batch);
    else
    {
        // Prefilter a copy so the caller's input is left unmodified.
        hipMemcpy(d_temp, d_input, Elements2(dims) * batch * sizeof(tfloat), hipMemcpyDeviceToDevice);
        d_CubicBSplinePrefilter2D(d_temp, dims, batch);
        d_BindTextureToArray(d_temp, a_input, t_input, dims, hipFilterModeLinear, false, batch);
    }
    // The kernel indexes texture handles per image, so they must live in device memory.
    cudaTex* dt_input = (cudaTex*)CudaMallocFromHostArray(t_input, batch * sizeof(cudaTex));
    d_Rotate2D(dt_input, d_output, dims, h_angles, mode, isoutputzerocentered, batch);
    hipFree(dt_input);
    for (uint b = 0; b < batch; b++)
    {
        hipDestroyTextureObject(t_input[b]);
        hipFreeArray(a_input[b]);
    }
    free(t_input);
    free(a_input);
    if (mode == T_INTERP_CUBIC)
        hipFree(d_temp);
}
// Texture-based batched 2-D rotation. The negated angle builds the inverse
// rotation for gather-style sampling (each output pixel is mapped back onto
// the source image).
void d_Rotate2D(cudaTex* t_input, tfloat* d_output, int2 dims, tfloat* h_angles, T_INTERP_MODE mode, bool isoutputzerocentered, uint batch)
{
    glm::mat2* hostMatrices = (glm::mat2*)malloc(batch * sizeof(glm::mat2));
    for (uint i = 0; i < batch; i++)
        hostMatrices[i] = Matrix2Rotation(-h_angles[i]);

    glm::mat2* deviceMatrices = (glm::mat2*)CudaMallocFromHostArray(hostMatrices, batch * sizeof(glm::mat2));
    free(hostMatrices);

    dim3 threads(16, 16);
    dim3 blocks((dims.x + 15) / 16, (dims.y + 15) / 16, batch);

    if (mode == T_INTERP_LINEAR)
    {
        if (isoutputzerocentered)
            Rotate2DKernel<false, true> << <blocks, threads >> > (t_input, d_output, dims, deviceMatrices);
        else
            Rotate2DKernel<false, false> << <blocks, threads >> > (t_input, d_output, dims, deviceMatrices);
    }
    else if (mode == T_INTERP_CUBIC)
    {
        if (isoutputzerocentered)
            Rotate2DKernel<true, true> << <blocks, threads >> > (t_input, d_output, dims, deviceMatrices);
        else
            Rotate2DKernel<true, false> << <blocks, threads >> > (t_input, d_output, dims, deviceMatrices);
    }

    hipFree(deviceMatrices);
}
//////////////////////////////
//Rotate 2D in Fourier space//
//////////////////////////////
// Rotate batched 2-D half-spectra (interleaved tcomplex layout) in Fourier
// space. Each spectrum is split into separate real/imaginary planes, optionally
// B-spline prefiltered, bound to textures and resampled by Rotate2DFTKernel.
// NOTE(review): textures and arrays are created and destroyed once per batch
// item; this could be hoisted out of the loop if it shows up in profiles.
void d_Rotate2DFT(tcomplex* d_input, tcomplex* d_output, int3 dims, tfloat* angles, tfloat maxfreq, T_INTERP_MODE mode, bool isoutputzerocentered, int batch)
{
    tfloat* d_real;
    hipMalloc((void**)&d_real, ElementsFFT(dims) * sizeof(tfloat));
    tfloat* d_imag;
    hipMalloc((void**)&d_imag, ElementsFFT(dims) * sizeof(tfloat));
    for (int b = 0; b < batch; b++)
    {
        d_ConvertTComplexToSplitComplex(d_input + ElementsFFT(dims) * b, d_real, d_imag, ElementsFFT(dims));
        if (mode == T_INTERP_CUBIC)
        {
            // Cubic texture sampling expects B-spline coefficients.
            d_CubicBSplinePrefilter2D(d_real, toInt2(dims.x / 2 + 1, dims.y));
            d_CubicBSplinePrefilter2D(d_imag, toInt2(dims.x / 2 + 1, dims.y));
        }
        hipArray* a_Re;
        hipArray* a_Im;
        cudaTex t_Re, t_Im;
        d_BindTextureToArray(d_real, a_Re, t_Re, toInt2(dims.x / 2 + 1, dims.y), hipFilterModeLinear, false);
        d_BindTextureToArray(d_imag, a_Im, t_Im, toInt2(dims.x / 2 + 1, dims.y), hipFilterModeLinear, false);
        d_Rotate2DFT(t_Re, t_Im, d_output + ElementsFFT(dims) * b, dims, angles[b], maxfreq, mode, isoutputzerocentered);
        hipDestroyTextureObject(t_Re);
        hipDestroyTextureObject(t_Im);
        hipFreeArray(a_Re);
        hipFreeArray(a_Im);
    }
    hipFree(d_imag);
    hipFree(d_real);
}
// Texture-based 2-D Fourier-space rotation of one half-spectrum. The negated
// angle yields the inverse rotation so each output frequency is mapped back
// onto the source spectrum; frequencies beyond maxfreq are zeroed in-kernel.
void d_Rotate2DFT(cudaTex t_inputRe, cudaTex t_inputIm, tcomplex* d_output, int3 dims, tfloat angle, tfloat maxfreq, T_INTERP_MODE mode, bool isoutputzerocentered)
{
    glm::mat2 backRotation = Matrix2Rotation(-angle);

    dim3 threads(16, 16);
    dim3 blocks((dims.x / 2 + 1 + 15) / 16, (dims.y + 15) / 16);

    if (mode == T_INTERP_LINEAR)
    {
        if (isoutputzerocentered)
            Rotate2DFTKernel<false, true> << <blocks, threads >> > (t_inputRe, t_inputIm, d_output, dims, backRotation, maxfreq);
        else
            Rotate2DFTKernel<false, false> << <blocks, threads >> > (t_inputRe, t_inputIm, d_output, dims, backRotation, maxfreq);
    }
    else if (mode == T_INTERP_CUBIC)
    {
        if (isoutputzerocentered)
            Rotate2DFTKernel<true, true> << <blocks, threads >> > (t_inputRe, t_inputIm, d_output, dims, backRotation, maxfreq);
        else
            Rotate2DFTKernel<true, false> << <blocks, threads >> > (t_inputRe, t_inputIm, d_output, dims, backRotation, maxfreq);
    }
}
//////////////////////////////
//Rotate 3D in Fourier space//
//////////////////////////////
// Rotate a 3-D half-spectrum (interleaved tcomplex) by Euler angles.
// Splits the spectrum into real/imaginary volumes, optionally prefilters them
// for cubic sampling, binds them to 3-D textures and delegates to the
// texture-based overload.
void d_Rotate3DFT(tcomplex* d_volume, tcomplex* d_output, int3 dims, tfloat3* h_angles, int nangles, T_INTERP_MODE mode, bool outputzerocentered)
{
    int3 dimsfft = toInt3(dims.x / 2 + 1, dims.y, dims.z); // half-spectrum dimensions
    tfloat* d_tempRe;
    hipMalloc((void**)&d_tempRe, ElementsFFT(dims) * sizeof(tfloat));
    tfloat* d_tempIm;
    hipMalloc((void**)&d_tempIm, ElementsFFT(dims) * sizeof(tfloat));
    hipArray* a_Re, *a_Im;
    cudaTex t_Re, t_Im;
    d_ConvertTComplexToSplitComplex(d_volume, d_tempRe, d_tempIm, ElementsFFT(dims));
    if (mode == T_INTERP_CUBIC)
    {
        d_CubicBSplinePrefilter3D(d_tempRe, dimsfft);
        d_CubicBSplinePrefilter3D(d_tempIm, dimsfft);
    }
    d_BindTextureTo3DArray(d_tempRe, a_Re, t_Re, dimsfft, hipFilterModeLinear, false);
    d_BindTextureTo3DArray(d_tempIm, a_Im, t_Im, dimsfft, hipFilterModeLinear, false);
    // The temporaries are freed right after binding, which implies the bind
    // copies the data into the hipArray -- confirm in d_BindTextureTo3DArray.
    hipFree(d_tempRe);
    hipFree(d_tempIm);
    d_Rotate3DFT(t_Re, t_Im, d_output, dims, h_angles, nangles, mode, outputzerocentered);
    hipDestroyTextureObject(t_Re);
    hipDestroyTextureObject(t_Im);
    hipFreeArray(a_Re);
    hipFreeArray(a_Im);
}
// Texture-based 3-D Fourier-space rotation, complex output.
// Builds one transform per angle set: the transposed (inverse) Euler rotation
// composed with a translation that re-centers output indices around zero
// frequency; frequencies outside the Nyquist sphere are zeroed in-kernel.
void d_Rotate3DFT(cudaTex t_Re, cudaTex t_Im, tcomplex* d_output, int3 dims, tfloat3* h_angles, int nangles, T_INTERP_MODE mode, bool outputzerocentered)
{
    glm::mat4* h_transform = (glm::mat4*)malloc(nangles * sizeof(glm::mat4));
    for (int b = 0; b < nangles; b++)
        h_transform[b] = glm::transpose(Matrix4Euler(h_angles[b])) *
            Matrix4Translation(tfloat3(-dims.x / 2, -dims.y / 2, -dims.z / 2));
    glm::mat4* d_transform = (glm::mat4*)CudaMallocFromHostArray(h_transform, nangles * sizeof(glm::mat4));
    float maxfreq2 = (float)(dims.x * dims.x / 4); // squared frequency cutoff (samples)
    dim3 TpB = dim3(16, 16);
    // grid.z packs the z slices of all angle sets; the kernel decodes the batch index.
    dim3 grid = dim3((dims.x / 2 + 1 + 15) / 16, (dims.y + 15) / 16, dims.z * nangles);
    if (outputzerocentered)
    {
        if (mode == T_INTERP_LINEAR)
            Rotate3DFTKernel<false, true> << <grid, TpB >> > (t_Re, t_Im, d_output, dims, d_transform, maxfreq2);
        if (mode == T_INTERP_CUBIC)
            Rotate3DFTKernel<true, true> << <grid, TpB >> > (t_Re, t_Im, d_output, dims, d_transform, maxfreq2);
    }
    else
    {
        if (mode == T_INTERP_LINEAR)
            Rotate3DFTKernel<false, false> << <grid, TpB >> > (t_Re, t_Im, d_output, dims, d_transform, maxfreq2);
        if (mode == T_INTERP_CUBIC)
            Rotate3DFTKernel<true, false> << <grid, TpB >> > (t_Re, t_Im, d_output, dims, d_transform, maxfreq2);
    }
    hipFree(d_transform);
    free(h_transform);
}
// Real-valued variant: rotate a 3-D half-spectrum magnitude/real volume.
// Copies the input (so the optional cubic prefilter does not modify it),
// binds it to a 3-D texture and delegates to the texture-based overload.
void d_Rotate3DFT(tfloat* d_volume, tfloat* d_output, int3 dims, tfloat3* h_angles, int nangles, T_INTERP_MODE mode, bool outputzerocentered)
{
    int3 dimsfft = toInt3(dims.x / 2 + 1, dims.y, dims.z); // half-spectrum dimensions
    tfloat* d_tempRe;
    hipMalloc((void**)&d_tempRe, ElementsFFT(dims) * sizeof(tfloat));
    hipArray* a_Re;
    cudaTex t_Re;
    hipMemcpy(d_tempRe, d_volume, ElementsFFT(dims) * sizeof(tfloat), hipMemcpyDeviceToDevice);
    if (mode == T_INTERP_CUBIC)
        d_CubicBSplinePrefilter3D(d_tempRe, dimsfft);
    d_BindTextureTo3DArray(d_tempRe, a_Re, t_Re, dimsfft, hipFilterModeLinear, false);
    // Freed right after binding, which implies the bind copies into the
    // hipArray -- confirm in d_BindTextureTo3DArray.
    hipFree(d_tempRe);
    d_Rotate3DFT(t_Re, d_output, dims, h_angles, nangles, mode, outputzerocentered);
    hipDestroyTextureObject(t_Re);
    hipFreeArray(a_Re);
}
// Texture-based 3-D Fourier-space rotation, real-valued output.
// Same transform construction as the complex overload: transposed Euler
// rotation composed with a re-centering translation, one transform per angle
// set; out-of-band frequencies are zeroed in-kernel.
void d_Rotate3DFT(cudaTex t_volume, tfloat* d_output, int3 dims, tfloat3* h_angles, int nangles, T_INTERP_MODE mode, bool outputzerocentered)
{
    glm::mat4* h_transform = (glm::mat4*)malloc(nangles * sizeof(glm::mat4));
    for (int b = 0; b < nangles; b++)
        h_transform[b] = glm::transpose(Matrix4Euler(h_angles[b])) *
            Matrix4Translation(tfloat3(-dims.x / 2, -dims.y / 2, -dims.z / 2));
    glm::mat4* d_transform = (glm::mat4*)CudaMallocFromHostArray(h_transform, nangles * sizeof(glm::mat4));
    float maxfreq2 = (float)(dims.x * dims.x / 4); // squared frequency cutoff (samples)
    dim3 TpB = dim3(16, 16);
    // grid.z packs the z slices of all angle sets; the kernel decodes the batch index.
    dim3 grid = dim3((dims.x / 2 + 1 + 15) / 16, (dims.y + 15) / 16, dims.z * nangles);
    if (outputzerocentered)
    {
        if (mode == T_INTERP_LINEAR)
            Rotate3DFTKernel<false, true> << <grid, TpB >> > (t_volume, d_output, dims, d_transform, maxfreq2);
        if (mode == T_INTERP_CUBIC)
            Rotate3DFTKernel<true, true> << <grid, TpB >> > (t_volume, d_output, dims, d_transform, maxfreq2);
    }
    else
    {
        if (mode == T_INTERP_LINEAR)
            Rotate3DFTKernel<false, false> << <grid, TpB >> > (t_volume, d_output, dims, d_transform, maxfreq2);
        if (mode == T_INTERP_CUBIC)
            Rotate3DFTKernel<true, false> << <grid, TpB >> > (t_volume, d_output, dims, d_transform, maxfreq2);
    }
    hipFree(d_transform);
    free(h_transform);
}
////////////////
//CUDA kernels//
////////////////
// Resample the source texture at transformed coordinates to produce nangles
// rotated copies of the volume. One thread per (x, y); blockIdx.z supplies z.
// The angle loop keeps each output voxel's coordinate fixed and applies every
// transform to it. When outputzerocentered is false, the coordinate fed into
// the transform is FFTShift-ed, so the output comes out in non-centered order.
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate3DKernel(cudaTex t_input, tfloat* d_output, int3 dims, glm::mat4* d_transforms, uint nangles)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= dims.x)
        return;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (idy >= dims.y)
        return;
    int idz = blockIdx.z;
    int x, y, z;
    if (outputzerocentered)
    {
        x = idx;
        y = idy;
        z = idz;
    }
    else
    {
        x = FFTShift(idx, dims.x);
        y = FFTShift(idy, dims.y);
        z = FFTShift(idz, dims.z);
    }
    for (uint b = 0; b < nangles; b++)
    {
        glm::vec4 pos = d_transforms[b] * glm::vec4(x, y, z, 1); // No need to center pos, done by transform
        tfloat value;
        if (cubicinterp)
            value = cubicTex3DSimple<tfloat>(t_input, pos.x, pos.y, pos.z);
        else
            value = tex3D<tfloat>(t_input, pos.x, pos.y, pos.z);
        // Output layout: nangles consecutive volumes.
        d_output[(b * dims.z + (idz * dims.y + idy)) * dims.x + idx] = value;
    }
}
// Extract one rotated/translated sub-volume per blockIdx.y from the source
// texture. Each output element's coordinate is taken relative to the output
// center, rotated, offset by the per-batch position, and sampled with a +0.5
// shift for texel centers. Elements are covered with a grid-stride loop.
template<bool cubicinterp> __global__ void Rotate3DExtractAt(cudaTex t_input, int3 dimvolume, tfloat* d_proj, int3 dimproj, size_t elementsproj, glm::mat3* d_rotations, tfloat3* d_positions)
{
    d_proj += elementsproj * blockIdx.y; // each grid row handles one batch item
    uint line = dimproj.x;
    uint slice = Elements2(dimproj);
    glm::mat3 rotation = d_rotations[blockIdx.y];
    glm::vec3 position = glm::vec3(d_positions[blockIdx.y].x, d_positions[blockIdx.y].y, d_positions[blockIdx.y].z);
    // NOTE(review): despite its name, this is the center of the *output*
    // (dimproj), not of the source volume (dimvolume).
    int3 centervolume = dimproj / 2;
    for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < elementsproj; id += gridDim.x * blockDim.x)
    {
        // Decompose the flat element index into (x, y, z) within the output.
        uint idx = id % line;
        uint idy = (id % slice) / line;
        uint idz = id / slice;
        int x = idx;
        int y = idy;
        int z = idz;
        glm::vec3 pos = glm::vec3(x - centervolume.x, y - centervolume.y, z - centervolume.z);
        pos = rotation * pos;
        pos += position;
        pos += 0.5f; // sample at texel centers
        if (cubicinterp)
            d_proj[id] = cubicTex3DSimple<tfloat>(t_input, pos.x, pos.y, pos.z);
        else
            d_proj[id] = tex3D<tfloat>(t_input, pos.x, pos.y, pos.z);
    }
}
// Rotate one 2-D image per blockIdx.z, each with its own texture handle and
// 2x2 matrix. The output pixel is mapped back into the source around the image
// center (+0.5 for texel-center addressing). When outputzerocentered is false
// the coordinate fed into the transform is FFTShift-ed.
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate2DKernel(cudaTex* t_input, tfloat* d_output, int2 dims, glm::mat2* d_transforms)
{
    uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= dims.x)
        return;
    uint idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (idy >= dims.y)
        return;
    int x, y;
    if (outputzerocentered)
    {
        x = idx;
        y = idy;
    }
    else
    {
        x = FFTShift(idx, dims.x);
        y = FFTShift(idy, dims.y);
    }
    glm::vec2 pos = d_transforms[blockIdx.z] * glm::vec2(x - dims.x / 2, y - dims.y / 2) + glm::vec2(dims.x / 2 + 0.5f, dims.y / 2 + 0.5f);
    tfloat val;
    if (!cubicinterp)
        val = tex2D<tfloat>(t_input[blockIdx.z], pos.x, pos.y);
    else
        val = cubicTex2D(t_input[blockIdx.z], pos.x, pos.y);
    d_output[(blockIdx.z * dims.y + idy) * dims.x + idx] = val;
}
// Rotate a 2-D half-spectrum. Frequencies beyond maxfreq are zeroed. Because
// only the non-redundant half of a real-data spectrum is stored, samples that
// fall on the positive-x side are fetched at the negated frequency and
// conjugated (Hermitian symmetry).
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate2DFTKernel(cudaTex t_Re, cudaTex t_Im, tcomplex* d_output, int3 dims, glm::mat2 transform, tfloat maxfreq)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx > dims.x / 2) // half-spectrum: x runs over dims.x / 2 + 1 samples
        return;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (idy >= dims.y)
        return;
    int x, y;
    if (outputzerocentered)
    {
        x = idx;
        y = idy;
    }
    else
    {
        // Non-centered output: mirror/shift the *write* position into FFT order.
        x = dims.x / 2 - idx;
        y = dims.y - 1 - ((idy + dims.y / 2 - 1) % dims.y);
    }
    // Rotated frequency coordinate of this output sample (centered).
    glm::vec2 pos = transform * glm::vec2(idx - dims.x / 2, idy - dims.y / 2);
    if (glm::length(pos) > maxfreq)
    {
        d_output[y * (dims.x / 2 + 1) + x] = make_cuComplex(0.0f, 0.0f);
        return;
    }
    bool isnegative = false;
    if (pos.x > 0.00001f)
    {
        // Mirror onto the stored half; remember to conjugate the sample.
        pos = -pos;
        isnegative = true;
    }
    pos += glm::vec2((float)(dims.x / 2) + 0.5f, (float)(dims.y / 2) + 0.5f); // to texture coordinates
    tfloat valre, valim;
    if (!cubicinterp)
    {
        valre = tex2D<tfloat>(t_Re, pos.x, pos.y);
        valim = tex2D<tfloat>(t_Im, pos.x, pos.y);
    }
    else
    {
        valre = cubicTex2D(t_Re, pos.x, pos.y);
        valim = cubicTex2D(t_Im, pos.x, pos.y);
    }
    if (isnegative)
        valim = -valim; // complex conjugate for the mirrored sample
    d_output[y * (dims.x / 2 + 1) + x] = make_cuComplex(valre, valim);
}
// Rotates a batch of 3D Fourier transforms in half-complex layout
// ((dims.x/2+1) x dims.y x dims.z, split Re/Im textures).
// blockIdx.z packs both the z-slice (blockIdx.z % dims.z) and the batch index
// (blockIdx.z / dims.z); d_transform holds one voxel->frequency matrix per
// batch item. Frequencies with radius^2 >= maxfreq2 are zeroed; samples in the
// non-stored half are fetched via Hermitian symmetry and conjugated.
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate3DFTKernel(cudaTex t_Re, cudaTex t_Im, tcomplex* d_output, int3 dims, glm::mat4* d_transform, float maxfreq2)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > dims.x / 2)
return;
uint idglobal = blockIdx.z / dims.z;
d_output += ElementsFFT(dims) * idglobal;
d_transform += idglobal;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idy >= dims.y)
return;
int idz = blockIdx.z % dims.z;
int x, y, z;
if (outputzerocentered)
{
x = idx;
y = idy;
z = idz;
}
else
{
// Map zero-centered output coordinates back to FFT storage order.
x = dims.x / 2 - idx;
y = dims.y - 1 - ((idy + dims.y / 2 - 1) % dims.y);
z = dims.z - 1 - ((idz + dims.z / 2 - 1) % dims.z);
}
// The transform already includes the shift to the volume center (set on host).
glm::vec4 pos = *d_transform * glm::vec4(x, y, z, 1);
float radiussq = pos.x * pos.x + pos.y * pos.y + pos.z * pos.z;
if (radiussq >= maxfreq2)
{
d_output[(idz * dims.y + idy) * (dims.x / 2 + 1) + idx] = make_cuComplex(0, 0);
return;
}
bool isnegative = false;
// Hermitian symmetry: fetch the point-symmetric mate and conjugate.
if (pos.x > 1e-6f)
{
pos = -pos;
isnegative = true;
}
pos += (float)(dims.x / 2) + 0.5f;
tfloat valre, valim;
if (!cubicinterp)
{
valre = tex3D<tfloat>(t_Re, pos.x, pos.y, pos.z);
valim = tex3D<tfloat>(t_Im, pos.x, pos.y, pos.z);
}
else
{
valre = cubicTex3D(t_Re, pos.x, pos.y, pos.z);
valim = cubicTex3D(t_Im, pos.x, pos.y, pos.z);
}
if (isnegative)
valim = -valim;
d_output[(idz * dims.y + idy) * (dims.x / 2 + 1) + idx] = make_cuComplex(valre, valim);
}
// Real-valued variant of Rotate3DFTKernel: rotates a single real component
// stored in half-complex layout. Same indexing/batching scheme as the complex
// version; Hermitian mapping still flips the sampling position, but no
// conjugation is needed since the output is real.
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate3DFTKernel(cudaTex t_Re, tfloat* d_output, int3 dims, glm::mat4* d_transform, float maxfreq2)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > dims.x / 2)
return;
uint idglobal = blockIdx.z / dims.z;
d_output += ElementsFFT(dims) * idglobal;
d_transform += idglobal;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idy >= dims.y)
return;
int idz = blockIdx.z % dims.z;
int x, y, z;
if (outputzerocentered)
{
x = idx;
y = idy;
z = idz;
}
else
{
x = dims.x / 2 - idx;
y = dims.y - 1 - ((idy + dims.y / 2 - 1) % dims.y);
z = dims.z - 1 - ((idz + dims.z / 2 - 1) % dims.z);
}
glm::vec4 pos = *d_transform * glm::vec4(x, y, z, 1);
float radiussq = pos.x * pos.x + pos.y * pos.y + pos.z * pos.z;
if (radiussq >= maxfreq2)
{
d_output[(idz * dims.y + idy) * (dims.x / 2 + 1) + idx] = 0;
return;
}
if (pos.x > 1e-6f)
pos = -pos;
pos += (float)(dims.x / 2) + 0.5f;
tfloat valre;
if (!cubicinterp)
valre = tex3D<tfloat>(t_Re, pos.x, pos.y, pos.z);
else
valre = cubicTex3D(t_Re, pos.x, pos.y, pos.z);
d_output[(idz * dims.y + idy) * (dims.x / 2 + 1) + idx] = valre;
}
} | 51ebc2278078ee415620795cc28ed112dac9d8d2.cu | #include "Prerequisites.cuh"
#include "Angles.cuh"
#include "CubicInterp.cuh"
#include "DeviceFunctions.cuh"
#include "FFT.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "Transformation.cuh"
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate3DKernel(cudaTex t_input, tfloat* d_output, int3 dims, glm::mat4* d_transforms, uint nangles);
template<bool cubicinterp> __global__ void Rotate3DExtractAt(cudaTex t_input, int3 dimvolume, tfloat* d_proj, int3 dimproj, size_t elementsproj, glm::mat3* d_rotations, tfloat3* d_positions);
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate2DKernel(cudaTex* t_input, tfloat* d_output, int2 dims, glm::mat2* d_transforms);
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate2DFTKernel(cudaTex t_Re, cudaTex t_Im, tcomplex* d_output, int3 dims, glm::mat2 transform, tfloat maxfreq);
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate3DFTKernel(cudaTex t_Re, cudaTex t_Im, tcomplex* d_output, int3 dims, glm::mat4* d_transform, float maxfreq2);
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate3DFTKernel(cudaTex t_Re, tfloat* d_output, int3 dims, glm::mat4* d_transform, float maxfreq2);
////////////////////
//Rotate 3D volume//
////////////////////
// Rotates a 3D volume into nangles output volumes. For cubic interpolation the
// input is copied and B-spline-prefiltered first; either way the data ends up
// bound to a 3D texture and the texture-based overload does the actual work.
void d_Rotate3D(tfloat* d_volume, tfloat* d_output, int3 dims, tfloat3* h_angles, uint nangles, T_INTERP_MODE mode, bool iszerocentered)
{
tfloat* d_temp;
if (mode == T_INTERP_CUBIC)
cudaMalloc((void**)&d_temp, Elements(dims) * sizeof(tfloat));
cudaArray* a_input;
cudaTex t_input;
if (mode == T_INTERP_LINEAR)
d_BindTextureTo3DArray(d_volume, a_input, t_input, dims, cudaFilterModeLinear, false);
else
{
// NOTE(review): this branch uses d_temp, which is only allocated for
// T_INTERP_CUBIC — confirm no other non-linear mode can reach here.
cudaMemcpy(d_temp, d_volume, Elements(dims) * sizeof(tfloat), cudaMemcpyDeviceToDevice);
d_CubicBSplinePrefilter3D(d_temp, dims);
d_BindTextureTo3DArray(d_temp, a_input, t_input, dims, cudaFilterModeLinear, false);
}
d_Rotate3D(t_input, d_output, dims, h_angles, nangles, mode, iszerocentered);
cudaDestroyTextureObject(t_input);
cudaFreeArray(a_input);
if (mode == T_INTERP_CUBIC)
cudaFree(d_temp);
}
// Texture-based 3D rotation: builds one 4x4 matrix per angle mapping an output
// voxel to its input sampling position (shift to center, transposed = inverse
// Euler rotation, shift back with a +0.5 texel-center offset), then dispatches
// the kernel specialization matching interpolation mode and output centering.
void d_Rotate3D(cudaTex t_volume, tfloat* d_output, int3 dims, tfloat3* h_angles, uint nangles, T_INTERP_MODE mode, bool iszerocentered)
{
glm::mat4* h_transforms = (glm::mat4*)malloc(nangles * sizeof(glm::mat4));
for (uint n = 0; n < nangles; n++)
h_transforms[n] = Matrix4Translation(tfloat3(dims.x / 2 + 0.5f, dims.y / 2 + 0.5f, dims.z / 2 + 0.5f)) *
glm::transpose(Matrix4Euler(h_angles[n])) *
Matrix4Translation(tfloat3(-dims.x / 2, -dims.y / 2, -dims.z / 2));
glm::mat4* d_transforms = (glm::mat4*)CudaMallocFromHostArray(h_transforms, nangles * sizeof(glm::mat4));
free(h_transforms);
// One z-slice of the grid per volume slice; the kernel loops over angles.
dim3 TpB = dim3(16, 16);
dim3 grid = dim3((dims.x + 15) / 16, (dims.y + 15) / 16, dims.z);
if (iszerocentered)
{
if (mode == T_INTERP_LINEAR)
Rotate3DKernel<false, true> << <grid, TpB >> > (t_volume, d_output, dims, d_transforms, nangles);
else if (mode == T_INTERP_CUBIC)
Rotate3DKernel<true, true> << <grid, TpB >> > (t_volume, d_output, dims, d_transforms, nangles);
}
else
{
if (mode == T_INTERP_LINEAR)
Rotate3DKernel<false, false> << <grid, TpB >> > (t_volume, d_output, dims, d_transforms, nangles);
else if (mode == T_INTERP_CUBIC)
Rotate3DKernel<true, false> << <grid, TpB >> > (t_volume, d_output, dims, d_transforms, nangles);
}
cudaFree(d_transforms);
}
////////////////////////////
//Rotate 3D and extract at//
////////////////////////////
// Host-side convenience overload: converts Euler angles to transposed
// (= inverse) rotation matrices, uploads matrices and positions to the device,
// and delegates to the matrix-based overload.
void d_Rotate3DExtractAt(cudaTex t_volume, int3 dimsvolume, tfloat* d_proj, int3 dimsproj, tfloat3* h_angles, tfloat3* h_positions, T_INTERP_MODE mode, uint batch)
{
glm::mat3* h_matrices = (glm::mat3*)malloc(sizeof(glm::mat3) * batch);
for (int i = 0; i < batch; i++)
h_matrices[i] = glm::transpose(Matrix3Euler(h_angles[i]));
glm::mat3* d_matrices = (glm::mat3*)CudaMallocFromHostArray(h_matrices, sizeof(glm::mat3) * batch);
free(h_matrices);
tfloat3* d_positions = (tfloat3*)CudaMallocFromHostArray(h_positions, batch * sizeof(tfloat3));
d_Rotate3DExtractAt(t_volume, dimsvolume, d_proj, dimsproj, d_matrices, d_positions, mode, batch);
cudaFree(d_matrices);
cudaFree(d_positions);
}
// Extracts rotated patches ("projections") from a texture-bound volume at the
// given positions, one patch per batch item (grid y dimension).
// Only extraction from an equal- or higher-dimensional volume is supported.
void d_Rotate3DExtractAt(cudaTex t_volume, int3 dimsvolume, tfloat* d_proj, int3 dimsproj, glm::mat3* d_matrices, tfloat3* d_positions, T_INTERP_MODE mode, uint batch)
{
uint ndimsvolume = DimensionCount(dimsvolume);
uint ndimsproj = DimensionCount(dimsproj);
if (ndimsvolume < ndimsproj)
throw; // NOTE(review): bare throw outside a catch calls std::terminate — intended as a hard abort?
uint elements = Elements(dimsproj);
// Grid-stride kernel: cap the grid at 128 blocks per patch.
dim3 grid = dim3(tmin(128, (elements + 127) / 128), batch, 1);
if (ndimsproj >= 2)
{
if (mode == T_INTERP_CUBIC)
Rotate3DExtractAt<true> << <grid, 128 >> > (t_volume, dimsvolume, d_proj, dimsproj, elements, d_matrices, d_positions);
else
Rotate3DExtractAt<false> << <grid, 128 >> > (t_volume, dimsvolume, d_proj, dimsproj, elements, d_matrices, d_positions);
}
else
throw;
}
/////////////
//Rotate 2D//
/////////////
// Rotates a batch of 2D images. Each image gets its own 2D texture (with cubic
// B-spline prefiltering when requested); the per-image texture handles are
// uploaded to the device and the texture-based overload is called.
void d_Rotate2D(tfloat* d_input, tfloat* d_output, int2 dims, tfloat* h_angles, T_INTERP_MODE mode, bool isoutputzerocentered, uint batch)
{
tfloat* d_temp;
if (mode == T_INTERP_CUBIC)
cudaMalloc((void**)&d_temp, Elements2(dims) * batch * sizeof(tfloat));
cudaArray_t* a_input = (cudaArray_t*)malloc(batch * sizeof(cudaArray_t));
cudaTex* t_input = (cudaTex*)malloc(batch * sizeof(cudaTex));
if (mode == T_INTERP_LINEAR)
d_BindTextureToArray(d_input, a_input, t_input, dims, cudaFilterModeLinear, false, batch);
else
{
// NOTE(review): uses d_temp, allocated only for T_INTERP_CUBIC — confirm
// no other non-linear mode can reach here.
cudaMemcpy(d_temp, d_input, Elements2(dims) * batch * sizeof(tfloat), cudaMemcpyDeviceToDevice);
d_CubicBSplinePrefilter2D(d_temp, dims, batch);
d_BindTextureToArray(d_temp, a_input, t_input, dims, cudaFilterModeLinear, false, batch);
}
cudaTex* dt_input = (cudaTex*)CudaMallocFromHostArray(t_input, batch * sizeof(cudaTex));
d_Rotate2D(dt_input, d_output, dims, h_angles, mode, isoutputzerocentered, batch);
cudaFree(dt_input);
for (uint b = 0; b < batch; b++)
{
cudaDestroyTextureObject(t_input[b]);
cudaFreeArray(a_input[b]);
}
free(t_input);
free(a_input);
if (mode == T_INTERP_CUBIC)
cudaFree(d_temp);
}
// Texture-based batched 2D rotation: builds one 2x2 matrix per image using the
// negated angle (inverse mapping from output to input) and dispatches the
// kernel variant matching interpolation mode and output centering.
// Grid: one z-slice per image.
void d_Rotate2D(cudaTex* t_input, tfloat* d_output, int2 dims, tfloat* h_angles, T_INTERP_MODE mode, bool isoutputzerocentered, uint batch)
{
glm::mat2* h_transforms = (glm::mat2*)malloc(batch * sizeof(glm::mat2));
for (uint b = 0; b < batch; b++)
h_transforms[b] = Matrix2Rotation(-h_angles[b]);
glm::mat2* d_transforms = (glm::mat2*)CudaMallocFromHostArray(h_transforms, batch * sizeof(glm::mat2));
free(h_transforms);
dim3 TpB = dim3(16, 16);
dim3 grid = dim3((dims.x + 15) / 16, (dims.y + 15) / 16, batch);
if (isoutputzerocentered)
{
if (mode == T_INTERP_LINEAR)
Rotate2DKernel<false, true> << <grid, TpB >> > (t_input, d_output, dims, d_transforms);
else if (mode == T_INTERP_CUBIC)
Rotate2DKernel<true, true> << <grid, TpB >> > (t_input, d_output, dims, d_transforms);
}
else
{
if (mode == T_INTERP_LINEAR)
Rotate2DKernel<false, false> << <grid, TpB >> > (t_input, d_output, dims, d_transforms);
else if (mode == T_INTERP_CUBIC)
Rotate2DKernel<true, false> << <grid, TpB >> > (t_input, d_output, dims, d_transforms);
}
cudaFree(d_transforms);
}
//////////////////////////////
//Rotate 2D in Fourier space//
//////////////////////////////
// Rotates batched 2D Fourier transforms (half-complex layout). Per image the
// complex data is split into planar Re/Im, optionally B-spline-prefiltered,
// bound to two 2D textures of size (dims.x/2+1) x dims.y, and rotated by the
// texture-based overload.
void d_Rotate2DFT(tcomplex* d_input, tcomplex* d_output, int3 dims, tfloat* angles, tfloat maxfreq, T_INTERP_MODE mode, bool isoutputzerocentered, int batch)
{
tfloat* d_real;
cudaMalloc((void**)&d_real, ElementsFFT(dims) * sizeof(tfloat));
tfloat* d_imag;
cudaMalloc((void**)&d_imag, ElementsFFT(dims) * sizeof(tfloat));
for (int b = 0; b < batch; b++)
{
d_ConvertTComplexToSplitComplex(d_input + ElementsFFT(dims) * b, d_real, d_imag, ElementsFFT(dims));
if (mode == T_INTERP_CUBIC)
{
d_CubicBSplinePrefilter2D(d_real, toInt2(dims.x / 2 + 1, dims.y));
d_CubicBSplinePrefilter2D(d_imag, toInt2(dims.x / 2 + 1, dims.y));
}
cudaArray* a_Re;
cudaArray* a_Im;
cudaTex t_Re, t_Im;
d_BindTextureToArray(d_real, a_Re, t_Re, toInt2(dims.x / 2 + 1, dims.y), cudaFilterModeLinear, false);
d_BindTextureToArray(d_imag, a_Im, t_Im, toInt2(dims.x / 2 + 1, dims.y), cudaFilterModeLinear, false);
d_Rotate2DFT(t_Re, t_Im, d_output + ElementsFFT(dims) * b, dims, angles[b], maxfreq, mode, isoutputzerocentered);
cudaDestroyTextureObject(t_Re);
cudaDestroyTextureObject(t_Im);
cudaFreeArray(a_Re);
cudaFreeArray(a_Im);
}
cudaFree(d_imag);
cudaFree(d_real);
}
// Texture-based single-image 2D-FT rotation: the rotation matrix uses the
// negated angle (inverse mapping from output frequency to input frequency)
// and the kernel specialization is picked by interpolation mode and centering.
void d_Rotate2DFT(cudaTex t_inputRe, cudaTex t_inputIm, tcomplex* d_output, int3 dims, tfloat angle, tfloat maxfreq, T_INTERP_MODE mode, bool isoutputzerocentered)
{
glm::mat2 rotation = Matrix2Rotation(-angle);
dim3 TpB = dim3(16, 16);
dim3 grid = dim3((dims.x / 2 + 1 + 15) / 16, (dims.y + 15) / 16);
if (isoutputzerocentered)
{
if (mode == T_INTERP_LINEAR)
Rotate2DFTKernel<false, true> << <grid, TpB >> > (t_inputRe, t_inputIm, d_output, dims, rotation, maxfreq);
else if (mode == T_INTERP_CUBIC)
Rotate2DFTKernel<true, true> << <grid, TpB >> > (t_inputRe, t_inputIm, d_output, dims, rotation, maxfreq);
}
else
{
if (mode == T_INTERP_LINEAR)
Rotate2DFTKernel<false, false> << <grid, TpB >> > (t_inputRe, t_inputIm, d_output, dims, rotation, maxfreq);
else if (mode == T_INTERP_CUBIC)
Rotate2DFTKernel<true, false> << <grid, TpB >> > (t_inputRe, t_inputIm, d_output, dims, rotation, maxfreq);
}
}
//////////////////////////////
//Rotate 3D in Fourier space//
//////////////////////////////
// Rotates a 3D Fourier transform (half-complex layout). Splits the complex
// volume into planar Re/Im, optionally prefilters, binds both to 3D textures,
// then delegates to the texture-based overload. The linear staging buffers are
// freed right after binding — d_BindTextureTo3DArray evidently copies the data
// into the cudaArrays.
void d_Rotate3DFT(tcomplex* d_volume, tcomplex* d_output, int3 dims, tfloat3* h_angles, int nangles, T_INTERP_MODE mode, bool outputzerocentered)
{
int3 dimsfft = toInt3(dims.x / 2 + 1, dims.y, dims.z);
tfloat* d_tempRe;
cudaMalloc((void**)&d_tempRe, ElementsFFT(dims) * sizeof(tfloat));
tfloat* d_tempIm;
cudaMalloc((void**)&d_tempIm, ElementsFFT(dims) * sizeof(tfloat));
cudaArray* a_Re, *a_Im;
cudaTex t_Re, t_Im;
d_ConvertTComplexToSplitComplex(d_volume, d_tempRe, d_tempIm, ElementsFFT(dims));
if (mode == T_INTERP_CUBIC)
{
d_CubicBSplinePrefilter3D(d_tempRe, dimsfft);
d_CubicBSplinePrefilter3D(d_tempIm, dimsfft);
}
d_BindTextureTo3DArray(d_tempRe, a_Re, t_Re, dimsfft, cudaFilterModeLinear, false);
d_BindTextureTo3DArray(d_tempIm, a_Im, t_Im, dimsfft, cudaFilterModeLinear, false);
cudaFree(d_tempRe);
cudaFree(d_tempIm);
d_Rotate3DFT(t_Re, t_Im, d_output, dims, h_angles, nangles, mode, outputzerocentered);
cudaDestroyTextureObject(t_Re);
cudaDestroyTextureObject(t_Im);
cudaFreeArray(a_Re);
cudaFreeArray(a_Im);
}
// Texture-based 3D-FT rotation: builds one voxel->frequency transform per
// orientation (transposed Euler = inverse rotation, preceded by a shift to the
// volume center) and launches the kernel. Frequencies with radius^2 >=
// (dims.x/2)^2 are zeroed by the kernel. Grid z packs slice and orientation.
void d_Rotate3DFT(cudaTex t_Re, cudaTex t_Im, tcomplex* d_output, int3 dims, tfloat3* h_angles, int nangles, T_INTERP_MODE mode, bool outputzerocentered)
{
glm::mat4* h_transform = (glm::mat4*)malloc(nangles * sizeof(glm::mat4));
for (int b = 0; b < nangles; b++)
h_transform[b] = glm::transpose(Matrix4Euler(h_angles[b])) *
Matrix4Translation(tfloat3(-dims.x / 2, -dims.y / 2, -dims.z / 2));
glm::mat4* d_transform = (glm::mat4*)CudaMallocFromHostArray(h_transform, nangles * sizeof(glm::mat4));
float maxfreq2 = (float)(dims.x * dims.x / 4);
dim3 TpB = dim3(16, 16);
dim3 grid = dim3((dims.x / 2 + 1 + 15) / 16, (dims.y + 15) / 16, dims.z * nangles);
if (outputzerocentered)
{
if (mode == T_INTERP_LINEAR)
Rotate3DFTKernel<false, true> << <grid, TpB >> > (t_Re, t_Im, d_output, dims, d_transform, maxfreq2);
if (mode == T_INTERP_CUBIC)
Rotate3DFTKernel<true, true> << <grid, TpB >> > (t_Re, t_Im, d_output, dims, d_transform, maxfreq2);
}
else
{
if (mode == T_INTERP_LINEAR)
Rotate3DFTKernel<false, false> << <grid, TpB >> > (t_Re, t_Im, d_output, dims, d_transform, maxfreq2);
if (mode == T_INTERP_CUBIC)
Rotate3DFTKernel<true, false> << <grid, TpB >> > (t_Re, t_Im, d_output, dims, d_transform, maxfreq2);
}
cudaFree(d_transform);
free(h_transform);
}
// Real-valued variant: rotates a single real component stored in half-complex
// FT layout. Prefilters for cubic mode, binds one 3D texture (staging buffer
// freed after binding) and delegates to the texture-based overload.
void d_Rotate3DFT(tfloat* d_volume, tfloat* d_output, int3 dims, tfloat3* h_angles, int nangles, T_INTERP_MODE mode, bool outputzerocentered)
{
int3 dimsfft = toInt3(dims.x / 2 + 1, dims.y, dims.z);
tfloat* d_tempRe;
cudaMalloc((void**)&d_tempRe, ElementsFFT(dims) * sizeof(tfloat));
cudaArray* a_Re;
cudaTex t_Re;
cudaMemcpy(d_tempRe, d_volume, ElementsFFT(dims) * sizeof(tfloat), cudaMemcpyDeviceToDevice);
if (mode == T_INTERP_CUBIC)
d_CubicBSplinePrefilter3D(d_tempRe, dimsfft);
d_BindTextureTo3DArray(d_tempRe, a_Re, t_Re, dimsfft, cudaFilterModeLinear, false);
cudaFree(d_tempRe);
d_Rotate3DFT(t_Re, d_output, dims, h_angles, nangles, mode, outputzerocentered);
cudaDestroyTextureObject(t_Re);
cudaFreeArray(a_Re);
}
// Texture-based real-valued 3D-FT rotation; mirrors the complex overload:
// per-orientation inverse transforms, cutoff at (dims.x/2)^2, grid z packs
// slice index and orientation.
void d_Rotate3DFT(cudaTex t_volume, tfloat* d_output, int3 dims, tfloat3* h_angles, int nangles, T_INTERP_MODE mode, bool outputzerocentered)
{
glm::mat4* h_transform = (glm::mat4*)malloc(nangles * sizeof(glm::mat4));
for (int b = 0; b < nangles; b++)
h_transform[b] = glm::transpose(Matrix4Euler(h_angles[b])) *
Matrix4Translation(tfloat3(-dims.x / 2, -dims.y / 2, -dims.z / 2));
glm::mat4* d_transform = (glm::mat4*)CudaMallocFromHostArray(h_transform, nangles * sizeof(glm::mat4));
float maxfreq2 = (float)(dims.x * dims.x / 4);
dim3 TpB = dim3(16, 16);
dim3 grid = dim3((dims.x / 2 + 1 + 15) / 16, (dims.y + 15) / 16, dims.z * nangles);
if (outputzerocentered)
{
if (mode == T_INTERP_LINEAR)
Rotate3DFTKernel<false, true> << <grid, TpB >> > (t_volume, d_output, dims, d_transform, maxfreq2);
if (mode == T_INTERP_CUBIC)
Rotate3DFTKernel<true, true> << <grid, TpB >> > (t_volume, d_output, dims, d_transform, maxfreq2);
}
else
{
if (mode == T_INTERP_LINEAR)
Rotate3DFTKernel<false, false> << <grid, TpB >> > (t_volume, d_output, dims, d_transform, maxfreq2);
if (mode == T_INTERP_CUBIC)
Rotate3DFTKernel<true, false> << <grid, TpB >> > (t_volume, d_output, dims, d_transform, maxfreq2);
}
cudaFree(d_transform);
free(h_transform);
}
////////////////
//CUDA kernels//
////////////////
// Samples one output voxel per thread for each of nangles orientations.
// d_transforms maps output voxel indices directly to input texture coordinates
// (centering and half-texel offset are baked into the matrices on the host).
// Output volumes for the nangles orientations are stacked along b.
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate3DKernel(cudaTex t_input, tfloat* d_output, int3 dims, glm::mat4* d_transforms, uint nangles)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= dims.x)
return;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idy >= dims.y)
return;
int idz = blockIdx.z;
int x, y, z;
if (outputzerocentered)
{
x = idx;
y = idy;
z = idz;
}
else
{
x = FFTShift(idx, dims.x);
y = FFTShift(idy, dims.y);
z = FFTShift(idz, dims.z);
}
for (uint b = 0; b < nangles; b++)
{
glm::vec4 pos = d_transforms[b] * glm::vec4(x, y, z, 1); // No need to center pos, done by transform
tfloat value;
if (cubicinterp)
value = cubicTex3DSimple<tfloat>(t_input, pos.x, pos.y, pos.z);
else
value = tex3D<tfloat>(t_input, pos.x, pos.y, pos.z);
d_output[(b * dims.z + (idz * dims.y + idy)) * dims.x + idx] = value;
}
}
// Extracts one rotated patch per blockIdx.y: a grid-stride loop walks the
// patch's elements; each element is centered on the patch, rotated, offset by
// the patch position in the volume, and sampled from the volume texture.
// NOTE(review): dimvolume is unused and the centering uses dimproj — confirm
// this is intended.
template<bool cubicinterp> __global__ void Rotate3DExtractAt(cudaTex t_input, int3 dimvolume, tfloat* d_proj, int3 dimproj, size_t elementsproj, glm::mat3* d_rotations, tfloat3* d_positions)
{
d_proj += elementsproj * blockIdx.y;
uint line = dimproj.x;
uint slice = Elements2(dimproj);
glm::mat3 rotation = d_rotations[blockIdx.y];
glm::vec3 position = glm::vec3(d_positions[blockIdx.y].x, d_positions[blockIdx.y].y, d_positions[blockIdx.y].z);
int3 centervolume = dimproj / 2;
for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < elementsproj; id += gridDim.x * blockDim.x)
{
// Decompose the flat index into patch coordinates.
uint idx = id % line;
uint idy = (id % slice) / line;
uint idz = id / slice;
int x = idx;
int y = idy;
int z = idz;
glm::vec3 pos = glm::vec3(x - centervolume.x, y - centervolume.y, z - centervolume.z);
pos = rotation * pos;
pos += position;
pos += 0.5f;
if (cubicinterp)
d_proj[id] = cubicTex3DSimple<tfloat>(t_input, pos.x, pos.y, pos.z);
else
d_proj[id] = tex3D<tfloat>(t_input, pos.x, pos.y, pos.z);
}
}
// Rotates a batch of 2D images: one thread per output pixel, blockIdx.z selects
// the image and its transform. The transform maps the output pixel (centered on
// the image) back to an input sampling position.
// cubicinterp: cubic B-spline vs. hardware bilinear texture lookup.
// outputzerocentered: if false, output coordinates are FFT-shifted first.
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate2DKernel(cudaTex* t_input, tfloat* d_output, int2 dims, glm::mat2* d_transforms)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= dims.x)
return;
uint idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idy >= dims.y)
return;
int x, y;
if (outputzerocentered)
{
x = idx;
y = idy;
}
else
{
x = FFTShift(idx, dims.x);
y = FFTShift(idy, dims.y);
}
// Rotate about the image center; the +0.5f offset addresses texel centers.
glm::vec2 pos = d_transforms[blockIdx.z] * glm::vec2(x - dims.x / 2, y - dims.y / 2) + glm::vec2(dims.x / 2 + 0.5f, dims.y / 2 + 0.5f);
tfloat val;
if (!cubicinterp)
val = tex2D<tfloat>(t_input[blockIdx.z], pos.x, pos.y);
else
val = cubicTex2D(t_input[blockIdx.z], pos.x, pos.y);
d_output[(blockIdx.z * dims.y + idy) * dims.x + idx] = val;
}
// Rotates a 2D Fourier transform stored in half-complex layout
// ((dims.x/2+1) x dims.y, split into separate Re/Im textures).
// Frequencies with |pos| > maxfreq are zeroed. Since only the non-redundant
// half-spectrum is stored, samples falling into the missing half are fetched
// from their Hermitian mates and conjugated.
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate2DFTKernel(cudaTex t_Re, cudaTex t_Im, tcomplex* d_output, int3 dims, glm::mat2 transform, tfloat maxfreq)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > dims.x / 2)
return;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idy >= dims.y)
return;
int x, y;
if (outputzerocentered)
{
x = idx;
y = idy;
}
else
{
// Map zero-centered output coordinates back to FFT storage order.
x = dims.x / 2 - idx;
y = dims.y - 1 - ((idy + dims.y / 2 - 1) % dims.y);
}
glm::vec2 pos = transform * glm::vec2(idx - dims.x / 2, idy - dims.y / 2);
if (glm::length(pos) > maxfreq)
{
d_output[y * (dims.x / 2 + 1) + x] = make_cuComplex(0.0f, 0.0f);
return;
}
bool isnegative = false;
// Position lies in the half of the spectrum that is not stored:
// sample the point-symmetric mate and conjugate afterwards.
if (pos.x > 0.00001f)
{
pos = -pos;
isnegative = true;
}
pos += glm::vec2((float)(dims.x / 2) + 0.5f, (float)(dims.y / 2) + 0.5f);
tfloat valre, valim;
if (!cubicinterp)
{
valre = tex2D<tfloat>(t_Re, pos.x, pos.y);
valim = tex2D<tfloat>(t_Im, pos.x, pos.y);
}
else
{
valre = cubicTex2D(t_Re, pos.x, pos.y);
valim = cubicTex2D(t_Im, pos.x, pos.y);
}
if (isnegative)
valim = -valim;
d_output[y * (dims.x / 2 + 1) + x] = make_cuComplex(valre, valim);
}
// Rotates a batch of 3D Fourier transforms in half-complex layout
// ((dims.x/2+1) x dims.y x dims.z, split Re/Im textures).
// blockIdx.z packs both the z-slice (blockIdx.z % dims.z) and the batch index
// (blockIdx.z / dims.z); d_transform holds one voxel->frequency matrix per
// batch item. Frequencies with radius^2 >= maxfreq2 are zeroed; samples in the
// non-stored half are fetched via Hermitian symmetry and conjugated.
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate3DFTKernel(cudaTex t_Re, cudaTex t_Im, tcomplex* d_output, int3 dims, glm::mat4* d_transform, float maxfreq2)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > dims.x / 2)
return;
uint idglobal = blockIdx.z / dims.z;
d_output += ElementsFFT(dims) * idglobal;
d_transform += idglobal;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idy >= dims.y)
return;
int idz = blockIdx.z % dims.z;
int x, y, z;
if (outputzerocentered)
{
x = idx;
y = idy;
z = idz;
}
else
{
// Map zero-centered output coordinates back to FFT storage order.
x = dims.x / 2 - idx;
y = dims.y - 1 - ((idy + dims.y / 2 - 1) % dims.y);
z = dims.z - 1 - ((idz + dims.z / 2 - 1) % dims.z);
}
// The transform already includes the shift to the volume center (set on host).
glm::vec4 pos = *d_transform * glm::vec4(x, y, z, 1);
float radiussq = pos.x * pos.x + pos.y * pos.y + pos.z * pos.z;
if (radiussq >= maxfreq2)
{
d_output[(idz * dims.y + idy) * (dims.x / 2 + 1) + idx] = make_cuComplex(0, 0);
return;
}
bool isnegative = false;
// Hermitian symmetry: fetch the point-symmetric mate and conjugate.
if (pos.x > 1e-6f)
{
pos = -pos;
isnegative = true;
}
pos += (float)(dims.x / 2) + 0.5f;
tfloat valre, valim;
if (!cubicinterp)
{
valre = tex3D<tfloat>(t_Re, pos.x, pos.y, pos.z);
valim = tex3D<tfloat>(t_Im, pos.x, pos.y, pos.z);
}
else
{
valre = cubicTex3D(t_Re, pos.x, pos.y, pos.z);
valim = cubicTex3D(t_Im, pos.x, pos.y, pos.z);
}
if (isnegative)
valim = -valim;
d_output[(idz * dims.y + idy) * (dims.x / 2 + 1) + idx] = make_cuComplex(valre, valim);
}
// Real-valued variant of Rotate3DFTKernel: rotates a single real component
// stored in half-complex layout. Same indexing/batching scheme as the complex
// version; Hermitian mapping still flips the sampling position, but no
// conjugation is needed since the output is real.
template<bool cubicinterp, bool outputzerocentered> __global__ void Rotate3DFTKernel(cudaTex t_Re, tfloat* d_output, int3 dims, glm::mat4* d_transform, float maxfreq2)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > dims.x / 2)
return;
uint idglobal = blockIdx.z / dims.z;
d_output += ElementsFFT(dims) * idglobal;
d_transform += idglobal;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idy >= dims.y)
return;
int idz = blockIdx.z % dims.z;
int x, y, z;
if (outputzerocentered)
{
x = idx;
y = idy;
z = idz;
}
else
{
x = dims.x / 2 - idx;
y = dims.y - 1 - ((idy + dims.y / 2 - 1) % dims.y);
z = dims.z - 1 - ((idz + dims.z / 2 - 1) % dims.z);
}
glm::vec4 pos = *d_transform * glm::vec4(x, y, z, 1);
float radiussq = pos.x * pos.x + pos.y * pos.y + pos.z * pos.z;
if (radiussq >= maxfreq2)
{
d_output[(idz * dims.y + idy) * (dims.x / 2 + 1) + idx] = 0;
return;
}
if (pos.x > 1e-6f)
pos = -pos;
pos += (float)(dims.x / 2) + 0.5f;
tfloat valre;
if (!cubicinterp)
valre = tex3D<tfloat>(t_Re, pos.x, pos.y, pos.z);
else
valre = cubicTex3D(t_Re, pos.x, pos.y, pos.z);
d_output[(idz * dims.y + idy) * (dims.x / 2 + 1) + idx] = valre;
}
} |
c8fad318a63cb97c9d79aa9eec6524e5d20ac819.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 26.04.2019
//
#include<ops/declarable/helpers/polyGamma.h>
#include<ops/declarable/helpers/zeta.h>
#include <NDArrayFactory.h>
namespace nd4j {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// Element-wise polygamma function psi^(n)(x) = (-1)^(n+1) * n! * zeta(n+1, x).
// One element per thread via a grid-stride loop; each of n, x, z may have its
// own strides, but all lengths are assumed equal (len is taken from n's shape).
template<typename T>
__global__ static void polyGammaCuda(const void *vn, const Nd4jLong *nShapeInfo,
                                    const void *vx, const Nd4jLong *xShapeInfo,
                                            void *vz, const Nd4jLong *zShapeInfo) {
const auto n = reinterpret_cast<const T*>(vn);
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
if (threadIdx.x == 0)
len = shape::length(nShapeInfo);
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto totalThreads = gridDim.x * blockDim.x;
for (int i = tid; i < len; i += totalThreads) {
const auto nOffset = shape::getIndexOffset(i, nShapeInfo, len);
const auto xOffset = shape::getIndexOffset(i, xShapeInfo, len);
const auto zOffset = shape::getIndexOffset(i, zShapeInfo, len);
const T nVal = n[nOffset];
// sign = (-1)^(n+1): negative for even orders, positive for odd ones.
int sign = (static_cast<int>(nVal) + 1) % 2 ? -1 : 1;
// factorial = n!; loop variable renamed — the original shadowed the outer i.
T factorial = 1;
if(nVal != 0 && nVal != 1)
for(int k = 2; k <= nVal; ++k)
factorial *= k;
z[zOffset] = sign * factorial * zetaScalar<T>(nVal + 1, x[xOffset]);
}
}
///////////////////////////////////////////////////////////////////
// Host-side launcher: instantiates polyGammaCuda<T> on the given stream.
// NOTE(review): 1024 bytes of dynamic shared memory are requested although the
// kernel only declares static __shared__ storage — confirm this is intentional.
template<typename T>
static void polyGammaCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void *vn, const Nd4jLong *nShapeInfo, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) {
hipLaunchKernelGGL(( polyGammaCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, vn, nShapeInfo, vx, xShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
// Element-wise polygamma: z = psi^(n)(x). Syncs inputs to the device, launches
// the float-type-dispatched kernel and updates the buffer tick counters.
void polyGamma(nd4j::LaunchContext * context, const NDArray& n, const NDArray& x, NDArray& z) {
if(!n.isActualOnDeviceSide()) n.syncToDevice();
if(!x.isActualOnDeviceSide()) x.syncToDevice();
int threadsPerBlock = MAX_NUM_THREADS;
int blocksPerGrid = (z.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
BUILD_SINGLE_SELECTOR(n.dataType(), polyGammaCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), n.getSpecialBuffer(), n.getSpecialShapeInfo(), x.getSpecialBuffer(), x.getSpecialShapeInfo(), z.getSpecialBuffer(), z.getSpecialShapeInfo()), FLOAT_TYPES);
// NOTE(review): inputs are ticked as read on host although the kernel reads
// the device buffers — confirm tickReadDevice() was not intended here.
n.tickReadHost();
x.tickReadHost();
z.tickWriteDevice();
}
BUILD_SINGLE_TEMPLATE(template void polyGammaCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void *vn, const Nd4jLong *nShapeInfo, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo), FLOAT_TYPES);
}
}
}
| c8fad318a63cb97c9d79aa9eec6524e5d20ac819.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 26.04.2019
//
#include<ops/declarable/helpers/polyGamma.h>
#include<ops/declarable/helpers/zeta.h>
#include <NDArrayFactory.h>
namespace nd4j {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// Element-wise polygamma function psi^(n)(x) = (-1)^(n+1) * n! * zeta(n+1, x).
// One element per thread via a grid-stride loop; each of n, x, z may have its
// own strides, but all lengths are assumed equal (len is taken from n's shape).
template<typename T>
__global__ static void polyGammaCuda(const void *vn, const Nd4jLong *nShapeInfo,
                                    const void *vx, const Nd4jLong *xShapeInfo,
                                            void *vz, const Nd4jLong *zShapeInfo) {
const auto n = reinterpret_cast<const T*>(vn);
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
if (threadIdx.x == 0)
len = shape::length(nShapeInfo);
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto totalThreads = gridDim.x * blockDim.x;
for (int i = tid; i < len; i += totalThreads) {
const auto nOffset = shape::getIndexOffset(i, nShapeInfo, len);
const auto xOffset = shape::getIndexOffset(i, xShapeInfo, len);
const auto zOffset = shape::getIndexOffset(i, zShapeInfo, len);
const T nVal = n[nOffset];
// sign = (-1)^(n+1): negative for even orders, positive for odd ones.
int sign = (static_cast<int>(nVal) + 1) % 2 ? -1 : 1;
// factorial = n!; loop variable renamed — the original shadowed the outer i.
T factorial = 1;
if(nVal != 0 && nVal != 1)
for(int k = 2; k <= nVal; ++k)
factorial *= k;
z[zOffset] = sign * factorial * zetaScalar<T>(nVal + 1, x[xOffset]);
}
}
///////////////////////////////////////////////////////////////////
// Host-side launcher: instantiates polyGammaCuda<T> on the given stream.
// NOTE(review): 1024 bytes of dynamic shared memory are requested although the
// kernel only declares static __shared__ storage — confirm this is intentional.
template<typename T>
static void polyGammaCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void *vn, const Nd4jLong *nShapeInfo, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) {
polyGammaCuda<T><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vn, nShapeInfo, vx, xShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
// Element-wise polygamma: z = psi^(n)(x). Syncs inputs to the device, launches
// the float-type-dispatched kernel and updates the buffer tick counters.
void polyGamma(nd4j::LaunchContext * context, const NDArray& n, const NDArray& x, NDArray& z) {
if(!n.isActualOnDeviceSide()) n.syncToDevice();
if(!x.isActualOnDeviceSide()) x.syncToDevice();
int threadsPerBlock = MAX_NUM_THREADS;
int blocksPerGrid = (z.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
BUILD_SINGLE_SELECTOR(n.dataType(), polyGammaCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), n.getSpecialBuffer(), n.getSpecialShapeInfo(), x.getSpecialBuffer(), x.getSpecialShapeInfo(), z.getSpecialBuffer(), z.getSpecialShapeInfo()), FLOAT_TYPES);
// NOTE(review): inputs are ticked as read on host although the kernel reads
// the device buffers — confirm tickReadDevice() was not intended here.
n.tickReadHost();
x.tickReadHost();
z.tickWriteDevice();
}
BUILD_SINGLE_TEMPLATE(template void polyGammaCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void *vn, const Nd4jLong *nShapeInfo, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo), FLOAT_TYPES);
}
}
}
|
2e5b4f4f9122e106849eed8a3b306c5acf366009.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mat_transpose.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps matrix sizes and thread-block shapes for the
// mat_transpose kernel, timing 1000 launches per configuration after a
// 10-launch warm-up, printing [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// argv[1]: number of matrix sizes from matrices_ to test (1..7).
int main(int argc, char **argv) {
hipSetDevice(0);
// Guard against a missing argument (original dereferenced argv[1] blindly).
if (argc < 2) { fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]); return 1; }
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// Allocate n floats, not n bytes: the original under-allocated by a
// factor of sizeof(float), letting the kernel read/write out of bounds.
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE*sizeof(float));
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE*sizeof(float));
int n = XSIZE*YSIZE;
int m = 2;
// Round the grid up so it fully covers the matrix.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((mat_transpose), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, n, m);
hipDeviceSynchronize();
// Warm-up launches.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((mat_transpose), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, n, m);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((mat_transpose), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, n, m);
}
// Wait for the timed launches to finish: the original stopped the clock
// while kernels were still in flight, measuring mostly launch overhead.
hipDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Release per-configuration buffers (original leaked them every iteration).
hipFree(a);
hipFree(b);
}
}}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mat_transpose.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each requested matrix size and each of the 20 block
// shapes above, launches mat_transpose and reports the wall time of 1000
// back-to-back launches as [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
int main(int argc, char **argv) {
cudaSetDevice(0);
// Guard against a missing argument (the original dereferenced argv[1] blindly).
if (argc < 2) {
fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]);
return 1;
}
// argv[1] = number of entries of matrices_ to sweep (at most 7 defined).
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
for (int block_looper = 0; block_looper < 20; block_looper++) {
int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
// Device buffers hold XSIZE*YSIZE floats; the original passed XSIZE*YSIZE
// (bytes) to cudaMalloc, under-allocating float buffers by 4x.
const float *a = NULL;
cudaMalloc(&a, XSIZE * YSIZE * sizeof(float));
float *b = NULL;
cudaMalloc(&b, XSIZE * YSIZE * sizeof(float));
int n = XSIZE * YSIZE;
int m = 2;
// Round the launch extents up to multiples of the block shape.
int iXSIZE = XSIZE;
int iYSIZE = YSIZE;
while (iXSIZE % BLOCKX != 0) {
iXSIZE++;
}
while (iYSIZE % BLOCKY != 0) {
iYSIZE++;
}
dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0); // no-op that forces context creation before timing
mat_transpose<<<gridBlock,threadBlock>>>(a, b, n, m);
cudaDeviceSynchronize();
// 10 untimed warm-up launches; drain them before starting the clock.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mat_transpose<<<gridBlock,threadBlock>>>(a, b, n, m);
}
cudaDeviceSynchronize();
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mat_transpose<<<gridBlock,threadBlock>>>(a, b, n, m);
}
// Kernel launches are asynchronous: without this sync the interval would
// measure launch overhead only, not kernel execution time.
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Release the per-configuration buffers (the original leaked them).
cudaFree((void*)a);
cudaFree(b);
}
}}
2d0f60f92675f5b488fa111ac05bfe328689dd73.hip | // !!! This is a file automatically generated by hipify!!!
/*
autor fredy m
uaem
desonses@gmail.com para mas comentarios
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
/*
ejemplo que llena un vector que invierte sus valores
*/
#define N 30 //tamano de los vectores
// Kernel: writes into b the reverse of vector a, i.e. b[i] = a[N-1-i].
// Launched as one block of N threads; the guard keeps extra threads inert.
__global__ void invierte(float *a, float *b) {
	int id = threadIdx.x;
	//int id = threadIdx.x + blockDim.x * blockIdx.x;// for n blocks of 1 thread
	if (id < N)
	{
		// The original read a[N - id], which for id == 0 accesses a[N] —
		// one element past the end of the buffer — and never copies a[0].
		b[id] = a[N - 1 - id];
	}
}
// Waits for all pending device work, then reports (but does not abort on)
// the sticky error state left by the most recent HIP calls, tagging the
// message with the caller-supplied label.
__host__ void check_CUDA_Error(const char *mensaje) {
	hipDeviceSynchronize();
	const hipError_t error = hipGetLastError();
	if (error == hipSuccess) {
		return;
	}
	printf("ERROR %d: %s (%s)\n", error, hipGetErrorString(error), mensaje);
}
// Demo driver: fills a random host vector, reverses it on the GPU with the
// `invierte` kernel, times the kernel with HIP events, and prints both the
// input and the result along with the elapsed time in milliseconds.
int main(int argc, char** argv)
{
	float *vector1, *resultado;
	float *dev_vector1, *dev_resultado;
	// Host allocations.
	vector1 = (float*)malloc(N * sizeof(float));
	resultado = (float*)malloc(N * sizeof(float));
	// Device allocations.
	hipMalloc((void**)&dev_vector1, N * sizeof(float));
	check_CUDA_Error("Error Malloc dev_vector");
	hipMalloc((void**)&dev_resultado, N * sizeof(float));
	check_CUDA_Error("Error Malloc dev_resultado");
	// Fill the input with random values in [0, 1] and print it.
	printf("vector inicial: \n");
	for (int i = 0; i < N; i++) {
		vector1[i] = (float)rand() / RAND_MAX;
		printf("%.2f, ", vector1[i]);
	}
	// Copy the input to the device.
	hipMemcpy(dev_vector1, vector1, N * sizeof(float), hipMemcpyHostToDevice);
	check_CUDA_Error("Error CudaMemcpy");
	// GPU timing: a pair of events brackets the kernel launch.
	hipEvent_t start;
	hipEvent_t stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start, 0);
	hipLaunchKernelGGL(( invierte), dim3(1), dim3(N) , 0, 0, dev_vector1, dev_resultado);
	// switch to (N, 1) for N blocks of 1 thread
	check_CUDA_Error("Error Kernel");
	hipEventRecord(stop, 0);
	// Block the host until the stop event has been reached on the GPU.
	hipEventSynchronize(stop);
	// Elapsed time in milliseconds.
	float elapsedTime;
	hipEventElapsedTime(&elapsedTime, start, stop);
	// Copy the result back and print it.
	printf("\n");
	printf("vector de regreso:\n");
	hipMemcpy(resultado, dev_resultado, N * sizeof(float), hipMemcpyDeviceToHost);
	check_CUDA_Error("Error CudaMemcpy2");
	for (int i = 0; i < N; i++) {
		printf("%.2f, ", resultado[i]);
	}
	printf("\n");
	printf("> Tiempo de ejecucion: %f ms\n", elapsedTime);
	// The original returned before releasing resources, making every free
	// below unreachable; clean up (including the events) before exiting.
	hipEventDestroy(start);
	hipEventDestroy(stop);
	hipFree(dev_vector1);
	hipFree(dev_resultado);
	free(vector1);
	free(resultado);
	return 0;
}
| 2d0f60f92675f5b488fa111ac05bfe328689dd73.cu | /*
autor fredy m
uaem
desonses@gmail.com para mas comentarios
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
/*
ejemplo que llena un vector que invierte sus valores
*/
#define N 30 //tamano de los vectores
// Kernel: writes into b the reverse of vector a, i.e. b[i] = a[N-1-i].
// Launched as one block of N threads; the guard keeps extra threads inert.
__global__ void invierte(float *a, float *b) {
	int id = threadIdx.x;
	//int id = threadIdx.x + blockDim.x * blockIdx.x;// for n blocks of 1 thread
	if (id < N)
	{
		// The original read a[N - id], which for id == 0 accesses a[N] —
		// one element past the end of the buffer — and never copies a[0].
		b[id] = a[N - 1 - id];
	}
}
// Waits for all pending device work, then reports (but does not abort on)
// the sticky error state left by the most recent CUDA calls, tagging the
// message with the caller-supplied label.
__host__ void check_CUDA_Error(const char *mensaje) {
	cudaDeviceSynchronize();
	const cudaError_t error = cudaGetLastError();
	if (error == cudaSuccess) {
		return;
	}
	printf("ERROR %d: %s (%s)\n", error, cudaGetErrorString(error), mensaje);
}
// Demo driver: fills a random host vector, reverses it on the GPU with the
// `invierte` kernel, times the kernel with CUDA events, and prints both the
// input and the result along with the elapsed time in milliseconds.
int main(int argc, char** argv)
{
	float *vector1, *resultado;
	float *dev_vector1, *dev_resultado;
	// Host allocations.
	vector1 = (float*)malloc(N * sizeof(float));
	resultado = (float*)malloc(N * sizeof(float));
	// Device allocations.
	cudaMalloc((void**)&dev_vector1, N * sizeof(float));
	check_CUDA_Error("Error Malloc dev_vector");
	cudaMalloc((void**)&dev_resultado, N * sizeof(float));
	check_CUDA_Error("Error Malloc dev_resultado");
	// Fill the input with random values in [0, 1] and print it.
	printf("vector inicial: \n");
	for (int i = 0; i < N; i++) {
		vector1[i] = (float)rand() / RAND_MAX;
		printf("%.2f, ", vector1[i]);
	}
	// Copy the input to the device.
	cudaMemcpy(dev_vector1, vector1, N * sizeof(float), cudaMemcpyHostToDevice);
	check_CUDA_Error("Error CudaMemcpy");
	// GPU timing: a pair of events brackets the kernel launch.
	cudaEvent_t start;
	cudaEvent_t stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	invierte<<<1, N >>>(dev_vector1, dev_resultado);
	// switch to (N, 1) for N blocks of 1 thread
	check_CUDA_Error("Error Kernel");
	cudaEventRecord(stop, 0);
	// Block the host until the stop event has been reached on the GPU.
	cudaEventSynchronize(stop);
	// Elapsed time in milliseconds.
	float elapsedTime;
	cudaEventElapsedTime(&elapsedTime, start, stop);
	// Copy the result back and print it.
	printf("\n");
	printf("vector de regreso:\n");
	cudaMemcpy(resultado, dev_resultado, N * sizeof(float), cudaMemcpyDeviceToHost);
	check_CUDA_Error("Error CudaMemcpy2");
	for (int i = 0; i < N; i++) {
		printf("%.2f, ", resultado[i]);
	}
	printf("\n");
	printf("> Tiempo de ejecucion: %f ms\n", elapsedTime);
	// The original returned before releasing resources, making every free
	// below unreachable; clean up (including the events) before exiting.
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree(dev_vector1);
	cudaFree(dev_resultado);
	free(vector1);
	free(resultado);
	return 0;
}
|
3e4a3f446657e673bbd48a2602bcde2ae4debc0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "SamplingKernel.cuh"
#define gpuErr(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Error helper behind the gpuErr() macro: prints the HIP error string with
// the call site (file:line) and, unless abort==false, terminates the process
// using the error code as the exit status.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
   if (code != hipSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}
// Host-side launcher: zeroes the per-stream block counter and then launches
// the LDAKernelTrainD kernel on `stream`, wiring up the per-slot
// (argStreamId) device buffers held by argDoc, argDT and argWT.
// No device sync happens here (it is commented out) — presumably callers
// synchronize the stream before consuming results; verify at call sites.
void SampleKernelD(WTAll &argWT, DTChunk &argDT, Document &argDoc, hiprandState_t* randState, int argStreamId, hipStream_t& stream)
{
	////unsigned int blockCounter = 0;
	//unsigned int* deviceCounter;
	//hipMalloc(&deviceCounter, sizeof(unsigned int));
	// Reset the counter that is passed to the kernel below; issued async on
	// the same stream so it is ordered before the launch.
	hipMemsetAsync(argDoc.deviceCounterSampleKernelD[argStreamId], 0, sizeof(unsigned int), stream);
	/*hipMemcpyAsync(argDoc.deviceCounterSampleKernelD[argStreamId], &argDoc.counterSampleKernelD, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/
	// srand(time(NULL));
	// hiprandState_t* randState;
	// hipMalloc(&randState, sizeof(hiprandState_t)*GridDim*BlockDim);
	// H_ERR(hipDeviceSynchronize());
	// gpuErr(hipPeekAtLastError());
	//initRandState << <GridDim, BlockDim, 0, stream >> >(randState);
	/*H_ERR(hipDeviceSynchronize());*/
	// for (int i = 0; i < iterWT; i++) {
	//hipMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), hipMemcpyHostToDevice);
	// Single asynchronous launch of the training kernel for this chunk.
	LDAKernelTrainD << <GridDim, BlockDim, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterSampleKernelD[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWT.wordLength, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceTimeRecord[argStreamId], argDoc.tokenSegment, argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDT.deviceDTIndexValue[argStreamId],argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId]);
	/*H_ERR(hipDeviceSynchronize());*/
}
//(double alpha, double beta, int* d_Index, int* d_TopicIndex, int* d_SparseDTCount, int* d_SparseDTIndex, int* d_SparseDTValue, int* d_TokenCountDT, int* d_TokenOffsetDT, int* d_DocListCount, int* d_DocListOffset, int* d_WTDense, int* d_WTDenseCopy, int* d_TokenCount, int* d_TokenOffset, int* d_WordListCount, int* d_WordListOffset, int* d_WTRowSum, int* d_blockCounter, int*d_DocIndex, int D, int W, double* d_Perplexity, hiprandState_t *randState, double *WTHeadDense, int numOfWordD);
//void SampleKernel(WTAll &argWT, DTChunk &argDT, Document &argDoc, hiprandState_t* randState, int argStreamId, hipStream_t& stream) {
//
// int numOfWordD = argWT.wordLength - argWT.numOfWordS;
// /*unsigned int* deviceCounter;
// hipMalloc(&deviceCounter, sizeof(unsigned int));
// hipMemset(deviceCounter, 0, sizeof(unsigned int));*/
// hipMemsetAsync(argDoc.deviceCounterSampleKernelS[argStreamId], 0, sizeof(unsigned int), stream);
//
// /*hipMemcpyAsync(argDoc.deviceCounterSampleKernelS[argStreamId], &argDoc.counterSampleKernelS, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/
//
// //initRandState << <GridDim, BlockDim, 0, stream>> >(randState);
// /*H_ERR(hipDeviceSynchronize());*/
//
// LDAKernelTrain << <GridDim, BlockDim, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterSampleKernelS[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], numOfWordD, argWT.numOfWordS, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDT.deviceDTIndexValue[argStreamId]);
//
// //H_ERR(hipDeviceSynchronize());
//
//
//
//
//}
// Host-side launcher: zeroes the per-stream block counter and launches the
// sparse max-topic update kernel on `stream`. The dense variant is retained
// below as commented-out history. No sync here — callers own stream
// synchronization (the H_ERR sync is commented out); verify at call sites.
void MaxTopicKernel(WTAll &argWT, Document &argDoc, int argStreamId, hipStream_t& stream) {
	// int numOfWordD = argWT.wordLength - argWT.numOfWordS;
	// /*unsigned int* deviceCounter;
	// hipMalloc(&deviceCounter, sizeof(unsigned int));*/
	// hipMemsetAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], 0, sizeof(unsigned int),stream);
	// /*hipMemcpyAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], &argDoc.counterMaxTopicKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/
	// /*hipMemcpyAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], &argDoc.counterMaxTopicKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/
	//
	// MaxTopicDense_Update_Kernel << <GridDim, BlockDim, 0, stream >> >(argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argWTDen.deviceWTDense, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTOffset, numOfWordD, argDoc.deviceCounterMaxTopicKernel[argStreamId], argWT.deviceWTRowSum, argWT.wordLength, beta, argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId]);
	// //H_ERR(hipDeviceSynchronize());
	// /*hipMemcpyAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], &argDoc.counterMaxTopicKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/
	// Reset the counter consumed by the kernel below, ordered on the stream.
	hipMemsetAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], 0, sizeof(unsigned int),stream);
	MaxTopicSparse_Update_Kernel << <GridDim, BlockDim, 0, stream>> >(argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTOffset, argWT.wordLength, argDoc.deviceCounterMaxTopicKernel[argStreamId], argWT.deviceWTRowSum, argWT.wordLength, argWT.numOfWordS, argWT.deviceWTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, beta, argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], alpha);
	/*H_ERR(hipDeviceSynchronize());*/
}
//void UpdateProbKernelD(WTD &argWTDen, WTAll &argWT, DTChunk &argDT, Document &argDoc, hiprandState_t* randState, int argStreamId, hipStream_t& stream)
//{
//
// //unsigned int blockCounter = 0;
// //unsigned int* deviceCounter;
// //hipMalloc(&deviceCounter, sizeof(unsigned int));
// hipMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream);
// /*hipMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/
//
// initRandState << <GridDim, BlockDim, 0, stream >> >(randState);
// /*H_ERR(hipDeviceSynchronize());*/
//
// // for (int i = 0; i < iterWT; i++) {
//
// //hipMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), hipMemcpyHostToDevice);
//
// UpdateProbKernelTrainD << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment);
//
// /*H_ERR(hipDeviceSynchronize());
//*/
//}
// Host-side launcher: zeroes the per-stream block counter, re-seeds the
// per-thread RNG states, and launches UpdateProbKernelTrainD1 on `stream`.
// The D0/D2 variants are kept below as commented-out history. No sync here —
// presumably callers synchronize the stream; verify at call sites.
void UpdateProbKernelD(WTAll &argWT, DTChunk &argDT, Document &argDoc, hiprandState_t* randState, int argStreamId, hipStream_t& stream)
{
	//hipMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream);
	/*hipMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/
	/*H_ERR(hipDeviceSynchronize());*/
	// for (int i = 0; i < iterWT; i++) {
	//hipMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), hipMemcpyHostToDevice);
	//UpdateProbKernelTrainD0 << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment, argDoc.deviceTotalTokenCount[argStreamId]);
	// Reset the counter consumed by the kernel below, ordered on the stream.
	hipMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream);
	/*hipMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/
	// Re-initialize the hiprand states used by the kernel launch below.
	initRandState << <GridDim, BlockDim, 0, stream >> >(randState);
	/*H_ERR(hipDeviceSynchronize());*/
	// for (int i = 0; i < iterWT; i++) {
	//hipMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), hipMemcpyHostToDevice);
	UpdateProbKernelTrainD1 << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWT.wordLength, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argWT.deviceWTRowSum);
	/*H_ERR(hipDeviceSynchronize());
	*
	*/
	//unsigned int blockCounter = 0;
	//unsigned int* deviceCounter;
	//hipMalloc(&deviceCounter, sizeof(unsigned int));
	//hipMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream);
	/*hipMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/
	//initRandState << <GridDim, BlockDim, 0, stream >> >(randState);
	/*H_ERR(hipDeviceSynchronize());*/
	// for (int i = 0; i < iterWT; i++) {
	//hipMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), hipMemcpyHostToDevice);
	//UpdateProbKernelTrainD2 << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment);
	/*H_ERR(hipDeviceSynchronize());*/
}
// Host-side launcher: runs the single-block perplexity reduction on `stream`
// and starts an async device-to-host copy of the averaged result.
// NOTE(review): there is no stream sync after the async copy, so
// argDoc.perplexityAve is not guaranteed valid when this returns — confirm
// callers synchronize the stream before reading it.
void PerplexityKernel(Document &argDoc, int argStreamId, hipStream_t& stream) {
	LDATrainPerplexityReduce << <1, BlockDim, 0, stream >> > (argDoc.devicePerplexityMid[argStreamId], argDoc.totalNumOfTokens, argDoc.devicePerplexityAve[argStreamId]);
	// Async D2H copy of the single float produced by the reduction above.
	hipMemcpyAsync(argDoc.perplexityAve, argDoc.devicePerplexityAve[argStreamId],sizeof(float), hipMemcpyDeviceToHost, stream);
	/*H_ERR(hipDeviceSynchronize());*/
}
//
//void SampleKernel(WTAll &argWT, DTChunk &argDT, Document &argDoc, hiprandState_t* randState) {
//
// int blockCounter = 0;
// int iterWT = (argWT.numOfWordS - 1) / GridDim + 1;
// float Perplexity = 0.0;
// int numOfWordD = argWT.wordLength - argWT.numOfWordS;
// // srand(time(NULL));
//
// // hiprandState_t* randState;
// // hipMalloc(&randState, sizeof(hiprandState_t)*GridDim*BlockDim);
// // H_ERR(hipDeviceSynchronize());
// // gpuErr(hipPeekAtLastError());
//
// initRandState << <GridDim, BlockDim >> >(randState);
// H_ERR(hipDeviceSynchronize());
//
// for (int i = 0; i < iterWT; i++) {
//
// hipMemcpy(argDoc.d_blockCounter, &blockCounter, sizeof(int), hipMemcpyHostToDevice);
//
// LDAKernelTrain << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.d_blockCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexity, randState, argDoc.deviceWTHeadDense, numOfWordD, argWT.numOfWordS);
// H_ERR(hipDeviceSynchronize());
// blockCounter++;
//
// }
// LDATrainPerplexityReduce1 << <GridDim, BlockDim >> > (argDoc.devicePerplexity, argDoc.devicePerplexityMid, argDoc.TLLengthVec[argDT.chunkId]);
//
// H_ERR(hipDeviceSynchronize());
//
//
//}
//
//
//
| 3e4a3f446657e673bbd48a2602bcde2ae4debc0a.cu |
#include "SamplingKernel.cuh"
#define gpuErr(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Error helper behind the gpuErr() macro: prints the CUDA error string with
// the call site (file:line) and, unless abort==false, terminates the process
// using the error code as the exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}
// Host-side launcher: zeroes the per-stream block counter and then launches
// the LDAKernelTrainD kernel on `stream`, wiring up the per-slot
// (argStreamId) device buffers held by argDoc, argDT and argWT.
// No device sync happens here (it is commented out) — presumably callers
// synchronize the stream before consuming results; verify at call sites.
void SampleKernelD(WTAll &argWT, DTChunk &argDT, Document &argDoc, curandState* randState, int argStreamId, cudaStream_t& stream)
{
	////unsigned int blockCounter = 0;
	//unsigned int* deviceCounter;
	//cudaMalloc(&deviceCounter, sizeof(unsigned int));
	// Reset the counter that is passed to the kernel below; issued async on
	// the same stream so it is ordered before the launch.
	cudaMemsetAsync(argDoc.deviceCounterSampleKernelD[argStreamId], 0, sizeof(unsigned int), stream);
	/*cudaMemcpyAsync(argDoc.deviceCounterSampleKernelD[argStreamId], &argDoc.counterSampleKernelD, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/
	// srand(time(NULL));
	// curandState* randState;
	// cudaMalloc(&randState, sizeof(curandState)*GridDim*BlockDim);
	// H_ERR(cudaDeviceSynchronize());
	// gpuErr(cudaPeekAtLastError());
	//initRandState << <GridDim, BlockDim, 0, stream >> >(randState);
	/*H_ERR(cudaDeviceSynchronize());*/
	// for (int i = 0; i < iterWT; i++) {
	//cudaMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), cudaMemcpyHostToDevice);
	// Single asynchronous launch of the training kernel for this chunk.
	LDAKernelTrainD << <GridDim, BlockDim, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterSampleKernelD[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWT.wordLength, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceTimeRecord[argStreamId], argDoc.tokenSegment, argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDT.deviceDTIndexValue[argStreamId],argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId]);
	/*H_ERR(cudaDeviceSynchronize());*/
}
//(double alpha, double beta, int* d_Index, int* d_TopicIndex, int* d_SparseDTCount, int* d_SparseDTIndex, int* d_SparseDTValue, int* d_TokenCountDT, int* d_TokenOffsetDT, int* d_DocListCount, int* d_DocListOffset, int* d_WTDense, int* d_WTDenseCopy, int* d_TokenCount, int* d_TokenOffset, int* d_WordListCount, int* d_WordListOffset, int* d_WTRowSum, int* d_blockCounter, int*d_DocIndex, int D, int W, double* d_Perplexity, curandState *randState, double *WTHeadDense, int numOfWordD);
//void SampleKernel(WTAll &argWT, DTChunk &argDT, Document &argDoc, curandState* randState, int argStreamId, cudaStream_t& stream) {
//
// int numOfWordD = argWT.wordLength - argWT.numOfWordS;
// /*unsigned int* deviceCounter;
// cudaMalloc(&deviceCounter, sizeof(unsigned int));
// cudaMemset(deviceCounter, 0, sizeof(unsigned int));*/
// cudaMemsetAsync(argDoc.deviceCounterSampleKernelS[argStreamId], 0, sizeof(unsigned int), stream);
//
// /*cudaMemcpyAsync(argDoc.deviceCounterSampleKernelS[argStreamId], &argDoc.counterSampleKernelS, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/
//
// //initRandState << <GridDim, BlockDim, 0, stream>> >(randState);
// /*H_ERR(cudaDeviceSynchronize());*/
//
// LDAKernelTrain << <GridDim, BlockDim, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterSampleKernelS[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], numOfWordD, argWT.numOfWordS, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDT.deviceDTIndexValue[argStreamId]);
//
// //H_ERR(cudaDeviceSynchronize());
//
//
//
//
//}
// Host-side launcher: zeroes the per-stream block counter and launches the
// sparse max-topic update kernel on `stream`. The dense variant is retained
// below as commented-out history. No sync here — callers own stream
// synchronization (the H_ERR sync is commented out); verify at call sites.
void MaxTopicKernel(WTAll &argWT, Document &argDoc, int argStreamId, cudaStream_t& stream) {
	// int numOfWordD = argWT.wordLength - argWT.numOfWordS;
	// /*unsigned int* deviceCounter;
	// cudaMalloc(&deviceCounter, sizeof(unsigned int));*/
	// cudaMemsetAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], 0, sizeof(unsigned int),stream);
	// /*cudaMemcpyAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], &argDoc.counterMaxTopicKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/
	// /*cudaMemcpyAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], &argDoc.counterMaxTopicKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/
	//
	// MaxTopicDense_Update_Kernel << <GridDim, BlockDim, 0, stream >> >(argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argWTDen.deviceWTDense, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTOffset, numOfWordD, argDoc.deviceCounterMaxTopicKernel[argStreamId], argWT.deviceWTRowSum, argWT.wordLength, beta, argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId]);
	// //H_ERR(cudaDeviceSynchronize());
	// /*cudaMemcpyAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], &argDoc.counterMaxTopicKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/
	// Reset the counter consumed by the kernel below, ordered on the stream.
	cudaMemsetAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], 0, sizeof(unsigned int),stream);
	MaxTopicSparse_Update_Kernel << <GridDim, BlockDim, 0, stream>> >(argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTOffset, argWT.wordLength, argDoc.deviceCounterMaxTopicKernel[argStreamId], argWT.deviceWTRowSum, argWT.wordLength, argWT.numOfWordS, argWT.deviceWTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, beta, argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], alpha);
	/*H_ERR(cudaDeviceSynchronize());*/
}
//void UpdateProbKernelD(WTD &argWTDen, WTAll &argWT, DTChunk &argDT, Document &argDoc, curandState* randState, int argStreamId, cudaStream_t& stream)
//{
//
// //unsigned int blockCounter = 0;
// //unsigned int* deviceCounter;
// //cudaMalloc(&deviceCounter, sizeof(unsigned int));
// cudaMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream);
// /*cudaMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/
//
// initRandState << <GridDim, BlockDim, 0, stream >> >(randState);
// /*H_ERR(cudaDeviceSynchronize());*/
//
// // for (int i = 0; i < iterWT; i++) {
//
// //cudaMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), cudaMemcpyHostToDevice);
//
// UpdateProbKernelTrainD << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment);
//
// /*H_ERR(cudaDeviceSynchronize());
//*/
//}
// Runs one training pass of the sparse sampling path for a chunk/stream pair:
// zeroes the per-launch block counter, re-seeds the per-thread curand states,
// then launches UpdateProbKernelTrainD1 over the chunk's token list.
// NOTE(review): GridDim/BlockDim and alpha/beta appear to be file-scope
// globals defined elsewhere in this file — confirm they are initialized
// before the first call.
// All work is enqueued on `stream`; nothing here synchronizes with the host.
void UpdateProbKernelD(WTAll &argWT, DTChunk &argDT, Document &argDoc, curandState* randState, int argStreamId, cudaStream_t& stream)
{
	//cudaMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream);
	/*cudaMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/
	/*H_ERR(cudaDeviceSynchronize());*/
	//	for (int i = 0; i < iterWT; i++) {
	//cudaMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), cudaMemcpyHostToDevice);
	//UpdateProbKernelTrainD0 << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment, argDoc.deviceTotalTokenCount[argStreamId]);
	// Reset the cross-block work counter read by the kernel (async, on-stream).
	cudaMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream);
	/*cudaMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/
	// Re-initialize curand states for this launch's <<<GridDim, BlockDim>>> threads.
	initRandState << <GridDim, BlockDim, 0, stream >> >(randState);
	/*H_ERR(cudaDeviceSynchronize());*/
	//	for (int i = 0; i < iterWT; i++) {
	//cudaMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), cudaMemcpyHostToDevice);
	// Main sampling kernel; fixed 256x256 launch (not GridDim/BlockDim) —
	// NOTE(review): confirm this launch shape is intentional.
	UpdateProbKernelTrainD1 << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWT.wordLength, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argWT.deviceWTRowSum);
	/*H_ERR(cudaDeviceSynchronize());
	*
	*/
	//unsigned int blockCounter = 0;
	//unsigned int* deviceCounter;
	//cudaMalloc(&deviceCounter, sizeof(unsigned int));
	//cudaMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream);
	/*cudaMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/
	//initRandState << <GridDim, BlockDim, 0, stream >> >(randState);
	/*H_ERR(cudaDeviceSynchronize());*/
	//	for (int i = 0; i < iterWT; i++) {
	//cudaMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), cudaMemcpyHostToDevice);
	//UpdateProbKernelTrainD2 << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment);
	/*H_ERR(cudaDeviceSynchronize());*/
}
// Reduces the per-token perplexity partials into a single average on the
// device, then starts an asynchronous copy of that float back to the host.
// NOTE(review): the host-side argDoc.perplexityAve is only valid after
// `stream` has been synchronized, and the copy is only truly async if the
// destination is pinned memory — confirm both at the call sites.
void PerplexityKernel(Document &argDoc, int argStreamId, cudaStream_t& stream) {
	// Single-block reduction over devicePerplexityMid into devicePerplexityAve.
	LDATrainPerplexityReduce << <1, BlockDim, 0, stream >> > (argDoc.devicePerplexityMid[argStreamId], argDoc.totalNumOfTokens, argDoc.devicePerplexityAve[argStreamId]);
	cudaMemcpyAsync(argDoc.perplexityAve, argDoc.devicePerplexityAve[argStreamId],sizeof(float), cudaMemcpyDeviceToHost, stream);
	/*H_ERR(cudaDeviceSynchronize());*/
}
//
//void SampleKernel(WTAll &argWT, DTChunk &argDT, Document &argDoc, curandState* randState) {
//
// int blockCounter = 0;
// int iterWT = (argWT.numOfWordS - 1) / GridDim + 1;
// float Perplexity = 0.0;
// int numOfWordD = argWT.wordLength - argWT.numOfWordS;
// // srand(time(NULL));
//
// // curandState* randState;
// // cudaMalloc(&randState, sizeof(curandState)*GridDim*BlockDim);
// // H_ERR(cudaDeviceSynchronize());
// // gpuErr(cudaPeekAtLastError());
//
// initRandState << <GridDim, BlockDim >> >(randState);
// H_ERR(cudaDeviceSynchronize());
//
// for (int i = 0; i < iterWT; i++) {
//
// cudaMemcpy(argDoc.d_blockCounter, &blockCounter, sizeof(int), cudaMemcpyHostToDevice);
//
// LDAKernelTrain << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.d_blockCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexity, randState, argDoc.deviceWTHeadDense, numOfWordD, argWT.numOfWordS);
// H_ERR(cudaDeviceSynchronize());
// blockCounter++;
//
// }
// LDATrainPerplexityReduce1 << <GridDim, BlockDim >> > (argDoc.devicePerplexity, argDoc.devicePerplexityMid, argDoc.TLLengthVec[argDT.chunkId]);
//
// H_ERR(cudaDeviceSynchronize());
//
//
//}
//
//
//
|
11fac4df04cf8fc7c3a645a49ad279261fd5afaa.hip | // !!! This is a file automatically generated by hipify!!!
/*****************************************************************************/
/* Copyright (c) 2015, Karl Pauwels */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* */
/* 1. Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in the */
/* documentation and/or other materials provided with the distribution. */
/* */
/* 3. Neither the name of the copyright holder nor the names of its */
/* contributors may be used to endorse or promote products derived from */
/* this software without specific prior written permission. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR */
/* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT */
/* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */
/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */
/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */
/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY */
/* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE */
/* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/*****************************************************************************/
#include <hipcub/hipcub.hpp>
#include <cub_radix_sorter_kernels.h>
namespace util {
// Returns the number of temporary-storage bytes DeviceRadixSort::SortPairs
// requires to sort `num_items` (Key, Value) pairs over the key bit range
// [begin_bit, end_bit). Passing a NULL temp-storage pointer puts SortPairs
// into size-query mode: it performs no sorting and only writes the required
// byte count into `temp_storage_bytes`.
template <class Key, class Value>
size_t GetTempStorageSize(int num_items, int begin_bit, int end_bit) {
  // Initialize to 0 so a failed size query cannot return an indeterminate
  // (uninitialized) byte count to the caller.
  size_t temp_storage_bytes = 0;
  Key *d_key_buf = 0;
  Key *d_key_alt_buf = 0;
  Value *d_value_buf = 0;
  Value *d_value_alt_buf = 0;
  // Use the hipcub:: namespace throughout: the plain cub:: namespace only
  // exists when hipCUB is backed by NVIDIA CUB, not on ROCm/rocPRIM builds.
  hipcub::DoubleBuffer<Key> d_keys(d_key_buf, d_key_alt_buf);
  hipcub::DoubleBuffer<Value> d_values(d_value_buf, d_value_alt_buf);
  hipcub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_keys, d_values,
                                     num_items, begin_bit, end_bit);
  return (temp_storage_bytes);
}
// Sorts (key, value) pairs resident in device memory with a radix sort on the
// given stream. The primary/alternate buffers are wrapped in DoubleBuffers;
// the sort ping-pongs between them, so the sorted output may land in either
// buffer. d_key_sorted / d_value_sorted are set to whichever buffer holds the
// result. d_temp_storage must provide the byte count reported by
// GetTempStorageSize for the same parameters.
template <class Key, class Value>
void CubSort(Key *&d_key_sorted, Value *&d_value_sorted, Key *d_key_buf,
             Key *d_key_alt_buf, Value *d_value_buf, Value *d_value_alt_buf,
             int num_items, int begin_bit, int end_bit, void *d_temp_storage,
             size_t temp_storage_bytes, hipStream_t stream) {
  // Create a set of DoubleBuffers to wrap pairs of device pointers.
  // hipcub:: (not cub::) so this also compiles on ROCm, where the cub
  // namespace does not exist.
  hipcub::DoubleBuffer<Key> d_keys(d_key_buf, d_key_alt_buf);
  hipcub::DoubleBuffer<Value> d_values(d_value_buf, d_value_alt_buf);
  hipcub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys,
                                     d_values, num_items, begin_bit, end_bit,
                                     stream);
  // Set output pointers to the currently active half of each double buffer.
  d_key_sorted = d_keys.Current();
  d_value_sorted = d_values.Current();
}
// Explicit instantiations for the key/value type combinations used by callers.
// NOTE(review): the instantiated type sets differ between GetTempStorageSize
// (<int, float> but no <int, int>) and CubSort (<int, int> but no
// <int, float>) — confirm every CubSort call site can obtain its scratch size
// through one of the GetTempStorageSize instantiations below.
template size_t GetTempStorageSize<unsigned int, unsigned int>(int num_items,
                                                               int begin_bit,
                                                               int end_bit);
template size_t GetTempStorageSize<unsigned int, int>(int num_items,
                                                      int begin_bit,
                                                      int end_bit);
template size_t GetTempStorageSize<int, float>(int num_items, int begin_bit,
                                               int end_bit);
template void CubSort<int, int>(int *&d_key_sorted, int *&d_value_sorted,
                                int *d_key_buf, int *d_key_alt_buf,
                                int *d_value_buf, int *d_value_alt_buf,
                                int num_items, int begin_bit, int end_bit,
                                void *d_temp_storage, size_t temp_storage_bytes,
                                hipStream_t stream);
template void CubSort<unsigned int, int>(
    unsigned int *&d_key_sorted, int *&d_value_sorted, unsigned int *d_key_buf,
    unsigned int *d_key_alt_buf, int *d_value_buf, int *d_value_alt_buf,
    int num_items, int begin_bit, int end_bit, void *d_temp_storage,
    size_t temp_storage_bytes, hipStream_t stream);
template void CubSort<unsigned int, unsigned int>(
    unsigned int *&d_key_sorted, unsigned int *&d_value_sorted,
    unsigned int *d_key_buf, unsigned int *d_key_alt_buf,
    unsigned int *d_value_buf, unsigned int *d_value_alt_buf, int num_items,
    int begin_bit, int end_bit, void *d_temp_storage, size_t temp_storage_bytes,
    hipStream_t stream);
}
| 11fac4df04cf8fc7c3a645a49ad279261fd5afaa.cu | /*****************************************************************************/
/* Copyright (c) 2015, Karl Pauwels */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* */
/* 1. Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in the */
/* documentation and/or other materials provided with the distribution. */
/* */
/* 3. Neither the name of the copyright holder nor the names of its */
/* contributors may be used to endorse or promote products derived from */
/* this software without specific prior written permission. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR */
/* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT */
/* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */
/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */
/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */
/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY */
/* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE */
/* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/*****************************************************************************/
#include <cub/cub.cuh>
#include <cub_radix_sorter_kernels.h>
namespace util {
// Returns the number of temporary-storage bytes cub::DeviceRadixSort::SortPairs
// requires to sort `num_items` (Key, Value) pairs over the key bit range
// [begin_bit, end_bit). Passing a NULL temp-storage pointer puts SortPairs
// into size-query mode: it performs no sorting and only writes the required
// byte count into `temp_storage_bytes`.
template <class Key, class Value>
size_t GetTempStorageSize(int num_items, int begin_bit, int end_bit) {
  // Initialize to 0 so a failed size query cannot return an indeterminate
  // (uninitialized) byte count to the caller.
  size_t temp_storage_bytes = 0;
  Key *d_key_buf = 0;
  Key *d_key_alt_buf = 0;
  Value *d_value_buf = 0;
  Value *d_value_alt_buf = 0;
  cub::DoubleBuffer<Key> d_keys(d_key_buf, d_key_alt_buf);
  cub::DoubleBuffer<Value> d_values(d_value_buf, d_value_alt_buf);
  cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_keys, d_values,
                                  num_items, begin_bit, end_bit);
  return (temp_storage_bytes);
}
// Sorts (key, value) pairs resident in device memory with CUB's radix sort on
// the given stream. The primary/alternate buffers form a double buffer that
// CUB ping-pongs between, so the sorted data may end up in either half;
// d_key_sorted / d_value_sorted are pointed at whichever half holds the
// result. d_temp_storage must provide the byte count reported by
// GetTempStorageSize for the same parameters.
template <class Key, class Value>
void CubSort(Key *&d_key_sorted, Value *&d_value_sorted, Key *d_key_buf,
             Key *d_key_alt_buf, Value *d_value_buf, Value *d_value_alt_buf,
             int num_items, int begin_bit, int end_bit, void *d_temp_storage,
             size_t temp_storage_bytes, cudaStream_t stream) {
  // Wrap each primary/alternate pointer pair in a CUB double buffer.
  cub::DoubleBuffer<Key> keyBuffers(d_key_buf, d_key_alt_buf);
  cub::DoubleBuffer<Value> valueBuffers(d_value_buf, d_value_alt_buf);

  cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes,
                                  keyBuffers, valueBuffers, num_items,
                                  begin_bit, end_bit, stream);

  // Report which half of each double buffer now holds the sorted data.
  d_key_sorted = keyBuffers.Current();
  d_value_sorted = valueBuffers.Current();
}
// Explicit instantiations for the key/value type combinations used by callers.
// NOTE(review): the instantiated type sets differ between GetTempStorageSize
// (<int, float> but no <int, int>) and CubSort (<int, int> but no
// <int, float>) — confirm every CubSort call site can obtain its scratch size
// through one of the GetTempStorageSize instantiations below.
template size_t GetTempStorageSize<unsigned int, unsigned int>(int num_items,
                                                               int begin_bit,
                                                               int end_bit);
template size_t GetTempStorageSize<unsigned int, int>(int num_items,
                                                      int begin_bit,
                                                      int end_bit);
template size_t GetTempStorageSize<int, float>(int num_items, int begin_bit,
                                               int end_bit);
template void CubSort<int, int>(int *&d_key_sorted, int *&d_value_sorted,
                                int *d_key_buf, int *d_key_alt_buf,
                                int *d_value_buf, int *d_value_alt_buf,
                                int num_items, int begin_bit, int end_bit,
                                void *d_temp_storage, size_t temp_storage_bytes,
                                cudaStream_t stream);
template void CubSort<unsigned int, int>(
    unsigned int *&d_key_sorted, int *&d_value_sorted, unsigned int *d_key_buf,
    unsigned int *d_key_alt_buf, int *d_value_buf, int *d_value_alt_buf,
    int num_items, int begin_bit, int end_bit, void *d_temp_storage,
    size_t temp_storage_bytes, cudaStream_t stream);
template void CubSort<unsigned int, unsigned int>(
    unsigned int *&d_key_sorted, unsigned int *&d_value_sorted,
    unsigned int *d_key_buf, unsigned int *d_key_alt_buf,
    unsigned int *d_value_buf, unsigned int *d_value_alt_buf, int num_items,
    int begin_bit, int end_bit, void *d_temp_storage, size_t temp_storage_bytes,
    cudaStream_t stream);
}
|
2563b2c376e7c2123ecc25b00a11ccb64390583a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHApply.cuh"
// Compute the offsets into the given tensors for a linear index. For the 't2'
// tensor, dimension 'dim' is skipped. The tensors are assumed to have the same
// size (with the exception of 't2' in dimension 'dim').
// This version uses a static number of dimensions.
// Decomposes a flat element index over `index` into per-dimension coordinates
// and accumulates the corresponding strided offsets into *indexOffset,
// *t1Offset and *t2Offset. Dimension `dim` is skipped for `t2`: its coordinate
// along `dim` is supplied by the index tensor at the call site. The tensors
// are assumed to have the same size except `t2` in dimension `dim`.
// `Dims` is a compile-time dimension count, allowing the loop to be unrolled.
template <typename IndexType, int Dims>
struct IndexToScatterGatherOffsets {
  static __device__ void compute(
      IndexType linearId, const int dim,
      const TensorInfo<IndexType>& index, IndexType* indexOffset,
      const TensorInfo<IndexType>& t1, IndexType* t1Offset,
      const TensorInfo<IndexType>& t2, IndexType* t2Offset) {
    // Peel coordinates off linearId from the innermost dimension outwards.
    for (int d = Dims - 1; d >= 0; d--) {
      const IndexType dimSize = index.sizes[d];
      const IndexType coord = linearId % dimSize;
      linearId /= dimSize;
      *indexOffset += coord * index.strides[d];
      *t1Offset += coord * t1.strides[d];
      if (d != dim) {
        *t2Offset += coord * t2.strides[d];
      }
    }
  }

  // Two-tensor variant: identical decomposition, with no `t1` offset tracked.
  static __device__ void compute(
      IndexType linearId, const int dim,
      const TensorInfo<IndexType>& index, IndexType* indexOffset,
      const TensorInfo<IndexType>& t2, IndexType* t2Offset) {
    for (int d = Dims - 1; d >= 0; d--) {
      const IndexType dimSize = index.sizes[d];
      const IndexType coord = linearId % dimSize;
      linearId /= dimSize;
      *indexOffset += coord * index.strides[d];
      if (d != dim) {
        *t2Offset += coord * t2.strides[d];
      }
    }
  }
};
// Specialization for Dims == -1: identical to the primary template, but the
// number of dimensions is read from index.dims at runtime instead of being a
// compile-time constant.
template <typename IndexType>
struct IndexToScatterGatherOffsets<IndexType, -1> {
  static __device__ void compute(
      IndexType linearId, const int dim,
      const TensorInfo<IndexType>& index, IndexType* indexOffset,
      const TensorInfo<IndexType>& t1, IndexType* t1Offset,
      const TensorInfo<IndexType>& t2, IndexType* t2Offset) {
    // Peel coordinates off linearId from the innermost dimension outwards.
    for (int d = index.dims - 1; d >= 0; d--) {
      const IndexType dimSize = index.sizes[d];
      const IndexType coord = linearId % dimSize;
      linearId /= dimSize;
      *indexOffset += coord * index.strides[d];
      *t1Offset += coord * t1.strides[d];
      if (d != dim) {
        *t2Offset += coord * t2.strides[d];
      }
    }
  }

  // Two-tensor variant: identical decomposition, with no `t1` offset tracked.
  static __device__ void compute(
      IndexType linearId, const int dim,
      const TensorInfo<IndexType>& index, IndexType* indexOffset,
      const TensorInfo<IndexType>& t2, IndexType* t2Offset) {
    for (int d = index.dims - 1; d >= 0; d--) {
      const IndexType dimSize = index.sizes[d];
      const IndexType coord = linearId % dimSize;
      linearId /= dimSize;
      *indexOffset += coord * index.strides[d];
      if (d != dim) {
        *t2Offset += coord * t2.strides[d];
      }
    }
  }
};
// Gather kernel: tensor[elem] = src[elem with its `dim` coordinate replaced by
// index[elem] - 1]. One grid-stride loop iteration per element of `index`.
// Index values follow the Lua 1-based convention and are converted to 0-based
// here; out-of-range index values are not bounds-checked.
template <typename IndexType, int Dims>
__global__ void THCudaTensor_gatherKernel(
    TensorInfo<IndexType> tensor,
    TensorInfo<IndexType> src,
    TensorInfo<IndexType> index,
    const int dim,
    const IndexType totalElements) {
  for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
       linearId < totalElements;
       linearId += gridDim.x * blockDim.x) {
    IndexType tensorOffset = 0;
    IndexType srcOffset = 0;
    IndexType indexOffset = 0;
    // `src` skips `dim` in the decomposition: its coordinate along `dim`
    // comes from the index tensor below.
    IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim,
                                                          index, &indexOffset,
                                                          tensor, &tensorOffset,
                                                          src, &srcOffset);
    // 1-based -> 0-based.
    IndexType indexValue = (IndexType)index.data[indexOffset] - 1;
    srcOffset += indexValue * src.strides[dim];
    tensor.data[tensorOffset] = src.data[srcOffset];
  }
}
// Launch helper: instantiates the gather kernel for index type TYPE and
// compile-time dimension count DIMS and launches it on the current THC stream.
// Relies on grid/block/tensorInfo/srcInfo/indexInfo/dim/totalElements being in
// scope at the expansion site.
#define RUN(TYPE, DIMS) \
  hipLaunchKernelGGL(( THCudaTensor_gatherKernel<TYPE, DIMS>) \
      , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
      tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);

// tensor = gather(src, dim, index): selects values from src along dimension
// `dim` according to the (1-based) index tensor. tensor and index must have
// identical sizes; src must match tensor in every dimension except `dim`;
// all three must have the same number of dimensions (at most
// MAX_CUTORCH_DIMS).
void THCudaTensor_gather(THCState* state, THCudaTensor *tensor, THCudaTensor *src, int dim, THCudaTensor *index) {
  THAssert(THCudaTensor_checkGPU(state, 3, tensor, src, index));
  THArgCheck(THCudaTensor_nDimension(state, src) == THCudaTensor_nDimension(state, tensor), 2,
             "Input tensor must have same dimensions as output tensor");
  THArgCheck(dim >= 0 && dim < THCudaTensor_nDimension(state, tensor), 3,
             "Index dimension is out of bounds");
  THArgCheck(THCudaTensor_nDimension(state, index) == THCudaTensor_nDimension(state, src), 4,
             "Index tensor must have same dimensions as input tensor");
  THArgCheck(THCudaTensor_isSameSizeAs(state, tensor, index), 4,
             "Index tensor must have the same size as output tensor.");
  for (int d = 0; d < THCudaTensor_nDimension(state, tensor); d++) {
    if (d != dim) {
      THArgCheck(THCudaTensor_size(state, tensor, d) == THCudaTensor_size(state, src, d), 2,
                 "Input tensor must have same size as output tensor apart from the specified dimension");
    }
  }

  if (THCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) {
    return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
  }

  const long totalElements = THCudaTensor_nElement(state, index);
  const dim3 block = getApplyBlock();
  dim3 grid;
  if (!getApplyGrid(state, totalElements, grid)) {
    return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
  }

  // Writing through a tensor with overlapping indices is not well-defined;
  // gather into a contiguous temporary and copy back afterwards.
  THCudaTensor* oldTensor = NULL;
  if (THC_overlappingIndices(state, tensor)) {
    oldTensor = tensor;
    tensor = THCudaTensor_newContiguous(state, tensor);
  }

  // Use 32-bit indexing when every tensor fits, for cheaper address math.
  if (THC_canUse32BitIndexMath(state, tensor) &&
      THC_canUse32BitIndexMath(state, src) &&
      THC_canUse32BitIndexMath(state, index)) {
    TensorInfo<unsigned int> tensorInfo(state, tensor);
    TensorInfo<unsigned int> srcInfo(state, src);
    TensorInfo<unsigned int> indexInfo(state, index);

    // Specialize for a small number of dimensions.
    switch (indexInfo.dims) {
      case 1:
        RUN(unsigned int, 1);
        break;
      case 2:
        RUN(unsigned int, 2);
        break;
      case 3:
        RUN(unsigned int, 3);
        break;
      default:
        RUN(unsigned int, -1);
        break;
    }
  } else {
    TensorInfo<unsigned long> tensorInfo(state, tensor);
    TensorInfo<unsigned long> srcInfo(state, src);
    TensorInfo<unsigned long> indexInfo(state, index);
    RUN(unsigned long, -1)
  }

  if (oldTensor) {
    // Copy the gathered result back into the original (overlapping) tensor.
    THCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor);
    THCudaTensor_free(state, tensor);
    tensor = oldTensor;
  }
}
#undef RUN
// Scatter kernel: tensor[elem with its `dim` coordinate replaced by
// index[elem] - 1] = src[elem]. One grid-stride loop iteration per element of
// `index`. Index values are 1-based (Lua convention) and converted to 0-based
// here; out-of-range values are not bounds-checked, and duplicate index values
// make the result order-dependent (nondeterministic).
template <typename IndexType, int Dims>
__global__ void THCudaTensor_scatterKernel(
    TensorInfo<IndexType> tensor,
    TensorInfo<IndexType> src,
    TensorInfo<IndexType> index,
    const int dim,
    const IndexType totalElements) {
  for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
       linearId < totalElements;
       linearId += gridDim.x * blockDim.x) {
    IndexType tensorOffset = 0;
    IndexType srcOffset = 0;
    IndexType indexOffset = 0;
    // Note: relative to the gather kernel, `tensor` is the one whose `dim`
    // coordinate comes from the index tensor here.
    IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim,
                                                          index, &indexOffset,
                                                          src, &srcOffset,
                                                          tensor, &tensorOffset);
    // 1-based -> 0-based.
    IndexType indexValue = (IndexType)index.data[indexOffset] - 1;
    tensorOffset += indexValue * tensor.strides[dim];
    tensor.data[tensorOffset] = src.data[srcOffset];
  }
}
// Launch helper: instantiates the scatter kernel for index type TYPE and
// compile-time dimension count DIMS and launches it on the current THC stream.
// Relies on grid/block/tensorInfo/srcInfo/indexInfo/dim/totalElements being in
// scope at the expansion site.
#define RUN(TYPE, DIMS) \
  hipLaunchKernelGGL(( THCudaTensor_scatterKernel<TYPE, DIMS>) \
      , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
      tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);

// tensor.scatter(dim, index, src): writes src values into tensor along
// dimension `dim` at positions given by the (1-based) index tensor. src and
// index must have identical sizes; tensor must match src in every dimension
// except `dim`; all three must have the same number of dimensions (at most
// MAX_CUTORCH_DIMS).
void THCudaTensor_scatter(THCState* state, THCudaTensor *tensor, int dim, THCudaTensor *index, THCudaTensor *src) {
  THAssert(THCudaTensor_checkGPU(state, 3, tensor, src, index));
  THArgCheck(dim >= 0 && dim < THCudaTensor_nDimension(state, tensor), 2,
             "Index dimension is out of bounds");
  THArgCheck(THCudaTensor_nDimension(state, index) == THCudaTensor_nDimension(state, src), 3,
             "Index tensor must have same dimensions as input tensor");
  THArgCheck(THCudaTensor_nDimension(state, src) == THCudaTensor_nDimension(state, tensor), 4,
             "Input tensor must have same dimensions as output tensor");
  THArgCheck(THCudaTensor_isSameSizeAs(state, src, index), 3,
             "Index tensor must have the same size as input tensor.");
  for (int d = 0; d < THCudaTensor_nDimension(state, tensor); d++) {
    if (d != dim) {
      THArgCheck(THCudaTensor_size(state, tensor, d) == THCudaTensor_size(state, src, d), 4,
                 "Input tensor must have same size as output tensor apart from the specified dimension");
    }
  }

  if (THCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) {
    return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
  }

  const long totalElements = THCudaTensor_nElement(state, index);
  const dim3 block = getApplyBlock();
  dim3 grid;
  if (!getApplyGrid(state, totalElements, grid)) {
    return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
  }

  // Writing through a tensor with overlapping indices is not well-defined;
  // scatter into a contiguous temporary and copy back afterwards.
  THCudaTensor* oldTensor = NULL;
  if (THC_overlappingIndices(state, tensor)) {
    oldTensor = tensor;
    tensor = THCudaTensor_newContiguous(state, tensor);
  }

  // Use 32-bit indexing when every tensor fits, for cheaper address math.
  if (THC_canUse32BitIndexMath(state, tensor) &&
      THC_canUse32BitIndexMath(state, src) &&
      THC_canUse32BitIndexMath(state, index)) {
    TensorInfo<unsigned int> tensorInfo(state, tensor);
    TensorInfo<unsigned int> srcInfo(state, src);
    TensorInfo<unsigned int> indexInfo(state, index);

    // Specialize for a small number of dimensions.
    switch (indexInfo.dims) {
      case 1:
        RUN(unsigned int, 1);
        break;
      case 2:
        RUN(unsigned int, 2);
        break;
      case 3:
        RUN(unsigned int, 3);
        break;
      default:
        RUN(unsigned int, -1);
        break;
    }
  } else {
    TensorInfo<unsigned long> tensorInfo(state, tensor);
    TensorInfo<unsigned long> srcInfo(state, src);
    TensorInfo<unsigned long> indexInfo(state, index);
    RUN(unsigned long, -1)
  }

  if (oldTensor) {
    // Copy the scattered result back into the original (overlapping) tensor.
    THCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor);
    THCudaTensor_free(state, tensor);
    tensor = oldTensor;
  }
}
#undef RUN
// Scatter-fill kernel: tensor[elem with its `dim` coordinate replaced by
// index[elem] - 1] = value. One grid-stride loop iteration per element of
// `index`. Index values are 1-based (Lua convention) and converted to 0-based
// here; out-of-range values are not bounds-checked.
template <typename IndexType, int Dims>
__global__ void THCudaTensor_scatterFillKernel(
    TensorInfo<IndexType> tensor,
    TensorInfo<IndexType> index,
    float value,
    const int dim,
    const IndexType totalElements) {
  for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
       linearId < totalElements;
       linearId += gridDim.x * blockDim.x) {
    IndexType tensorOffset = 0;
    IndexType indexOffset = 0;
    // Two-tensor variant: `tensor`'s `dim` coordinate comes from the index
    // tensor below.
    IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim,
                                                          index, &indexOffset,
                                                          tensor, &tensorOffset);
    // 1-based -> 0-based.
    IndexType indexValue = (IndexType)index.data[indexOffset] - 1;
    tensorOffset += indexValue * tensor.strides[dim];
    tensor.data[tensorOffset] = value;
  }
}
// Launch helper: instantiates the scatter-fill kernel for index type TYPE and
// compile-time dimension count DIMS and launches it on the current THC stream.
// Relies on grid/block/tensorInfo/indexInfo/value/dim/totalElements being in
// scope at the expansion site.
#define RUN(TYPE, DIMS) \
  hipLaunchKernelGGL(( THCudaTensor_scatterFillKernel<TYPE, DIMS>) \
      , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
      tensorInfo, indexInfo, value, dim, (TYPE)totalElements);

// tensor.scatterFill(dim, index, value): writes the scalar `value` into
// tensor along dimension `dim` at positions given by the (1-based) index
// tensor. index must match tensor in every dimension except `dim`, and both
// must have the same number of dimensions (at most MAX_CUTORCH_DIMS).
void THCudaTensor_scatterFill(THCState* state, THCudaTensor *tensor, int dim, THCudaTensor *index, float value) {
  THAssert(THCudaTensor_checkGPU(state, 2, tensor, index));
  THArgCheck(dim >= 0 && dim < THCudaTensor_nDimension(state, tensor), 2,
             "Index dimension is out of bounds");
  THArgCheck(THCudaTensor_nDimension(state, index) == THCudaTensor_nDimension(state, tensor), 3,
             "Index tensor must have same dimensions as output tensor");
  for (int d = 0; d < THCudaTensor_nDimension(state, tensor); d++) {
    if (d != dim) {
      // NOTE(review): argument number 4 here refers to `value`, but the check
      // is about `index` (argument 3) — presumably should be 3; confirm.
      THArgCheck(THCudaTensor_size(state, tensor, d) == THCudaTensor_size(state, index, d), 4,
                 "Index tensor must have same size as output tensor apart from the specified dimension");
    }
  }

  if (THCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) {
    return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
  }

  const long totalElements = THCudaTensor_nElement(state, index);
  const dim3 block = getApplyBlock();
  dim3 grid;
  if (!getApplyGrid(state, totalElements, grid)) {
    return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
  }

  // Writing through a tensor with overlapping indices is not well-defined;
  // fill a contiguous temporary and copy back afterwards.
  THCudaTensor* oldTensor = NULL;
  if (THC_overlappingIndices(state, tensor)) {
    oldTensor = tensor;
    tensor = THCudaTensor_newContiguous(state, tensor);
  }

  // Use 32-bit indexing when every tensor fits, for cheaper address math.
  if (THC_canUse32BitIndexMath(state, tensor) &&
      THC_canUse32BitIndexMath(state, index)) {
    TensorInfo<unsigned int> tensorInfo(state, tensor);
    TensorInfo<unsigned int> indexInfo(state, index);

    // Specialize for a small number of dimensions.
    switch (indexInfo.dims) {
      case 1:
        RUN(unsigned int, 1);
        break;
      case 2:
        RUN(unsigned int, 2);
        break;
      case 3:
        RUN(unsigned int, 3);
        break;
      default:
        RUN(unsigned int, -1);
        break;
    }
  } else {
    TensorInfo<unsigned long> tensorInfo(state, tensor);
    TensorInfo<unsigned long> indexInfo(state, index);
    RUN(unsigned long, -1);
  }

  if (oldTensor) {
    // Copy the filled result back into the original (overlapping) tensor.
    THCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor);
    THCudaTensor_free(state, tensor);
    tensor = oldTensor;
  }
}
#undef RUN
| 2563b2c376e7c2123ecc25b00a11ccb64390583a.cu | #include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCApply.cuh"
// Compute the offsets into the given tensors for a linear index. For the 't2'
// tensor, dimension 'dim' is skipped. The tensors are assumed to have the same
// size (with the exception of 't2' in dimension 'dim').
// This version uses a static number of dimensions.
// Decomposes a flat element index over `index` into per-dimension coordinates
// and accumulates the corresponding strided offsets for `index`, `t1` and
// `t2`. Dimension `dim` is skipped for `t2`: its coordinate along `dim` is
// supplied by the index tensor at the call site. `Dims` is a compile-time
// dimension count.
template <typename IndexType, int Dims>
struct IndexToScatterGatherOffsets {
  static __device__ void compute(
      IndexType linearId, const int dim,
      const TensorInfo<IndexType>& index, IndexType* indexOffset,
      const TensorInfo<IndexType>& t1, IndexType* t1Offset,
      const TensorInfo<IndexType>& t2, IndexType* t2Offset) {
    // Peel coordinates off linearId from the innermost dimension outwards.
    for (int d = Dims - 1; d >= 0; d--) {
      IndexType curDimIndex = linearId % index.sizes[d];
      *indexOffset += curDimIndex * index.strides[d];
      *t1Offset += curDimIndex * t1.strides[d];
      if (d != dim) {
        *t2Offset += curDimIndex * t2.strides[d];
      }
      linearId /= index.sizes[d];
    }
  }

  // Two-tensor variant: identical decomposition, with no `t1` offset tracked.
  static __device__ void compute(
      IndexType linearId, const int dim,
      const TensorInfo<IndexType>& index, IndexType* indexOffset,
      const TensorInfo<IndexType>& t2, IndexType* t2Offset) {
    for (int d = Dims - 1; d >= 0; d--) {
      IndexType curDimIndex = linearId % index.sizes[d];
      *indexOffset += curDimIndex * index.strides[d];
      if (d != dim) {
        *t2Offset += curDimIndex * t2.strides[d];
      }
      linearId /= index.sizes[d];
    }
  }
};
// Same as above but using a dynamic number of dimensions.
// Specialization for Dims == -1: identical to the primary template, but the
// number of dimensions is read from index.dims at runtime instead of being a
// compile-time constant.
template <typename IndexType>
struct IndexToScatterGatherOffsets<IndexType, -1> {
  static __device__ void compute(
      IndexType linearId, const int dim,
      const TensorInfo<IndexType>& index, IndexType* indexOffset,
      const TensorInfo<IndexType>& t1, IndexType* t1Offset,
      const TensorInfo<IndexType>& t2, IndexType* t2Offset) {
    // Peel coordinates off linearId from the innermost dimension outwards.
    for (int d = index.dims - 1; d >= 0; d--) {
      IndexType curDimIndex = linearId % index.sizes[d];
      *indexOffset += curDimIndex * index.strides[d];
      *t1Offset += curDimIndex * t1.strides[d];
      if (d != dim) {
        *t2Offset += curDimIndex * t2.strides[d];
      }
      linearId /= index.sizes[d];
    }
  }

  // Two-tensor variant: identical decomposition, with no `t1` offset tracked.
  static __device__ void compute(
      IndexType linearId, const int dim,
      const TensorInfo<IndexType>& index, IndexType* indexOffset,
      const TensorInfo<IndexType>& t2, IndexType* t2Offset) {
    for (int d = index.dims - 1; d >= 0; d--) {
      IndexType curDimIndex = linearId % index.sizes[d];
      *indexOffset += curDimIndex * index.strides[d];
      if (d != dim) {
        *t2Offset += curDimIndex * t2.strides[d];
      }
      linearId /= index.sizes[d];
    }
  }
};
// Gather along `dim`: for every position p of `index`,
//   tensor[p] = src[p with p[dim] replaced by index[p] - 1].
// Uses a grid-stride loop so any launch configuration covers all
// `totalElements` elements.
template <typename IndexType, int Dims>
__global__ void THCudaTensor_gatherKernel(
    TensorInfo<IndexType> tensor,
    TensorInfo<IndexType> src,
    TensorInfo<IndexType> index,
    const int dim,
    const IndexType totalElements) {
  const IndexType step = gridDim.x * blockDim.x;
  for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
       linearId < totalElements;
       linearId += step) {
    IndexType outOffset = 0;
    IndexType srcOffset = 0;
    IndexType idxOffset = 0;
    IndexToScatterGatherOffsets<IndexType, Dims>::compute(
        linearId, dim,
        index, &idxOffset,
        tensor, &outOffset,
        src, &srcOffset);
    // Stored indices are 1-based; shift to a 0-based coordinate along `dim`.
    const IndexType pick = (IndexType)index.data[idxOffset] - 1;
    tensor.data[outOffset] = src.data[srcOffset + pick * src.strides[dim]];
  }
}
// Launches the gather kernel on the current stream, instantiated for the
// given index type and compile-time dimensionality (-1 = dynamic dims).
// Relies on grid/block/tensorInfo/srcInfo/indexInfo/dim/totalElements being
// in scope at the expansion site.
#define RUN(TYPE, DIMS) \
THCudaTensor_gatherKernel<TYPE, DIMS> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
// Host entry point for gather: fills `tensor` from `src`, selecting along
// `dim` by the (1-based) positions stored in `index`. Validates arguments,
// chooses 32- vs 64-bit index math, and dispatches a dimension-specialized
// kernel.
void THCudaTensor_gather(THCState* state, THCudaTensor *tensor, THCudaTensor *src, int dim, THCudaTensor *index) {
THAssert(THCudaTensor_checkGPU(state, 3, tensor, src, index));
THArgCheck(THCudaTensor_nDimension(state, src) == THCudaTensor_nDimension(state, tensor), 2,
"Input tensor must have same dimensions as output tensor");
THArgCheck(dim >= 0 && dim < THCudaTensor_nDimension(state, tensor), 3,
"Index dimension is out of bounds");
THArgCheck(THCudaTensor_nDimension(state, index) == THCudaTensor_nDimension(state, src), 4,
"Index tensor must have same dimensions as input tensor");
THArgCheck(THCudaTensor_isSameSizeAs(state, tensor, index), 4,
"Index tensor must have the same size as output tensor.");
// All dimensions except `dim` must agree between input and output.
for (int d = 0; d < THCudaTensor_nDimension(state, tensor); d++) {
if (d != dim) {
THArgCheck(THCudaTensor_size(state, tensor, d) == THCudaTensor_size(state, src, d), 2,
"Input tensor must have same size as output tensor apart from the specified dimension");
}
}
if (THCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) {
return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
// One logical thread per element of `index`.
const long totalElements = THCudaTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
if (!getApplyGrid(state, totalElements, grid)) {
return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
// If output elements alias each other (overlapping strides), gather into a
// contiguous copy first and write back afterwards.
THCudaTensor* oldTensor = NULL;
if (THC_overlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCudaTensor_newContiguous(state, tensor);
}
// Prefer 32-bit index arithmetic when every tensor is small enough.
if (THC_canUse32BitIndexMath(state, tensor) &&
THC_canUse32BitIndexMath(state, src) &&
THC_canUse32BitIndexMath(state, index)) {
TensorInfo<unsigned int> tensorInfo(state, tensor);
TensorInfo<unsigned int> srcInfo(state, src);
TensorInfo<unsigned int> indexInfo(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1);
break;
case 2:
RUN(unsigned int, 2);
break;
case 3:
RUN(unsigned int, 3);
break;
default:
RUN(unsigned int, -1);
break;
}
} else {
TensorInfo<unsigned long> tensorInfo(state, tensor);
TensorInfo<unsigned long> srcInfo(state, src);
TensorInfo<unsigned long> indexInfo(state, index);
RUN(unsigned long, -1)
}
if (oldTensor) {
// Propagate results back into the original (overlapping) tensor.
THCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor);
THCudaTensor_free(state, tensor);
tensor = oldTensor;
}
}
#undef RUN
// Scatter along `dim`: for every position p of `index`,
//   tensor[p with p[dim] replaced by index[p] - 1] = src[p].
// Uses a grid-stride loop so any launch configuration covers all
// `totalElements` elements.
template <typename IndexType, int Dims>
__global__ void THCudaTensor_scatterKernel(
    TensorInfo<IndexType> tensor,
    TensorInfo<IndexType> src,
    TensorInfo<IndexType> index,
    const int dim,
    const IndexType totalElements) {
  const IndexType step = gridDim.x * blockDim.x;
  for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
       linearId < totalElements;
       linearId += step) {
    IndexType outOffset = 0;
    IndexType srcOffset = 0;
    IndexType idxOffset = 0;
    IndexToScatterGatherOffsets<IndexType, Dims>::compute(
        linearId, dim,
        index, &idxOffset,
        src, &srcOffset,
        tensor, &outOffset);
    // Stored indices are 1-based; shift to a 0-based coordinate along `dim`.
    const IndexType dest = (IndexType)index.data[idxOffset] - 1;
    tensor.data[outOffset + dest * tensor.strides[dim]] = src.data[srcOffset];
  }
}
// Launches the scatter kernel on the current stream, instantiated for the
// given index type and compile-time dimensionality (-1 = dynamic dims).
// Relies on grid/block/tensorInfo/srcInfo/indexInfo/dim/totalElements being
// in scope at the expansion site.
#define RUN(TYPE, DIMS) \
THCudaTensor_scatterKernel<TYPE, DIMS> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
// Host entry point for scatter: writes elements of `src` into `tensor`,
// placed along `dim` by the (1-based) positions stored in `index`.
// Validates arguments, chooses 32- vs 64-bit index math, and dispatches a
// dimension-specialized kernel.
void THCudaTensor_scatter(THCState* state, THCudaTensor *tensor, int dim, THCudaTensor *index, THCudaTensor *src) {
THAssert(THCudaTensor_checkGPU(state, 3, tensor, src, index));
THArgCheck(dim >= 0 && dim < THCudaTensor_nDimension(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(THCudaTensor_nDimension(state, index) == THCudaTensor_nDimension(state, src), 3,
"Index tensor must have same dimensions as input tensor");
THArgCheck(THCudaTensor_nDimension(state, src) == THCudaTensor_nDimension(state, tensor), 4,
"Input tensor must have same dimensions as output tensor");
THArgCheck(THCudaTensor_isSameSizeAs(state, src, index), 3,
"Index tensor must have the same size as input tensor.");
// All dimensions except `dim` must agree between input and output.
for (int d = 0; d < THCudaTensor_nDimension(state, tensor); d++) {
if (d != dim) {
THArgCheck(THCudaTensor_size(state, tensor, d) == THCudaTensor_size(state, src, d), 4,
"Input tensor must have same size as output tensor apart from the specified dimension");
}
}
if (THCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) {
return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
// One logical thread per element of `index`.
const long totalElements = THCudaTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
if (!getApplyGrid(state, totalElements, grid)) {
return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
// If output elements alias each other (overlapping strides), scatter into a
// contiguous copy first and write back afterwards.
THCudaTensor* oldTensor = NULL;
if (THC_overlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCudaTensor_newContiguous(state, tensor);
}
// Prefer 32-bit index arithmetic when every tensor is small enough.
if (THC_canUse32BitIndexMath(state, tensor) &&
THC_canUse32BitIndexMath(state, src) &&
THC_canUse32BitIndexMath(state, index)) {
TensorInfo<unsigned int> tensorInfo(state, tensor);
TensorInfo<unsigned int> srcInfo(state, src);
TensorInfo<unsigned int> indexInfo(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1);
break;
case 2:
RUN(unsigned int, 2);
break;
case 3:
RUN(unsigned int, 3);
break;
default:
RUN(unsigned int, -1);
break;
}
} else {
TensorInfo<unsigned long> tensorInfo(state, tensor);
TensorInfo<unsigned long> srcInfo(state, src);
TensorInfo<unsigned long> indexInfo(state, index);
RUN(unsigned long, -1)
}
if (oldTensor) {
// Propagate results back into the original (overlapping) tensor.
THCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor);
THCudaTensor_free(state, tensor);
tensor = oldTensor;
}
}
#undef RUN
// Scatter-fill along `dim`: for every position p of `index`,
//   tensor[p with p[dim] replaced by index[p] - 1] = value.
// Uses a grid-stride loop so any launch configuration covers all
// `totalElements` elements.
template <typename IndexType, int Dims>
__global__ void THCudaTensor_scatterFillKernel(
    TensorInfo<IndexType> tensor,
    TensorInfo<IndexType> index,
    float value,
    const int dim,
    const IndexType totalElements) {
  const IndexType step = gridDim.x * blockDim.x;
  for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
       linearId < totalElements;
       linearId += step) {
    IndexType outOffset = 0;
    IndexType idxOffset = 0;
    IndexToScatterGatherOffsets<IndexType, Dims>::compute(
        linearId, dim,
        index, &idxOffset,
        tensor, &outOffset);
    // Stored indices are 1-based; shift to a 0-based coordinate along `dim`.
    const IndexType dest = (IndexType)index.data[idxOffset] - 1;
    tensor.data[outOffset + dest * tensor.strides[dim]] = value;
  }
}
// Launches the scatter-fill kernel on the current stream, instantiated for
// the given index type and compile-time dimensionality (-1 = dynamic dims).
// Relies on grid/block/tensorInfo/indexInfo/value/dim/totalElements being in
// scope at the expansion site.
#define RUN(TYPE, DIMS) \
THCudaTensor_scatterFillKernel<TYPE, DIMS> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
tensorInfo, indexInfo, value, dim, (TYPE)totalElements);
// Host entry point for scatter-fill: writes the scalar `value` into
// `tensor` at positions selected along `dim` by the (1-based) entries of
// `index`. Validates arguments, chooses 32- vs 64-bit index math, and
// dispatches a dimension-specialized kernel.
void THCudaTensor_scatterFill(THCState* state, THCudaTensor *tensor, int dim, THCudaTensor *index, float value) {
THAssert(THCudaTensor_checkGPU(state, 2, tensor, index));
THArgCheck(dim >= 0 && dim < THCudaTensor_nDimension(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(THCudaTensor_nDimension(state, index) == THCudaTensor_nDimension(state, tensor), 3,
"Index tensor must have same dimensions as output tensor");
// All dimensions except `dim` must agree between index and output.
for (int d = 0; d < THCudaTensor_nDimension(state, tensor); d++) {
if (d != dim) {
THArgCheck(THCudaTensor_size(state, tensor, d) == THCudaTensor_size(state, index, d), 4,
"Index tensor must have same size as output tensor apart from the specified dimension");
}
}
if (THCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) {
return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
// One logical thread per element of `index`.
const long totalElements = THCudaTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
if (!getApplyGrid(state, totalElements, grid)) {
return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
// If output elements alias each other (overlapping strides), fill a
// contiguous copy first and write back afterwards.
THCudaTensor* oldTensor = NULL;
if (THC_overlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCudaTensor_newContiguous(state, tensor);
}
// Prefer 32-bit index arithmetic when both tensors are small enough.
if (THC_canUse32BitIndexMath(state, tensor) &&
THC_canUse32BitIndexMath(state, index)) {
TensorInfo<unsigned int> tensorInfo(state, tensor);
TensorInfo<unsigned int> indexInfo(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1);
break;
case 2:
RUN(unsigned int, 2);
break;
case 3:
RUN(unsigned int, 3);
break;
default:
RUN(unsigned int, -1);
break;
}
} else {
TensorInfo<unsigned long> tensorInfo(state, tensor);
TensorInfo<unsigned long> indexInfo(state, index);
RUN(unsigned long, -1);
}
if (oldTensor) {
// Propagate results back into the original (overlapping) tensor.
THCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor);
THCudaTensor_free(state, tensor);
tensor = oldTensor;
}
}
#undef RUN
|
a7e8eeaba03912279dff1be53b386b1bf83655d3.hip | // !!! This is a file automatically generated by hipify!!!
/*
*/
#include<iostream>
#include <stdio.h>
#include <fstream>
#include <math.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <iomanip>
#include "include/params.cuh"
#include "include/init.cuh"
#include "include/Matrix.cuh"
#include "include/DataStructure.cuh"
#include "include/MCMPC.cuh"
#include "include/NewtonLikeMethod.cuh"
#include "include/optimum_conditions.cuh"
#include "include/dataToFile.cuh"
// #include "include/cudaErrorCheck.cuh"
// Abort with file/line context when a HIP runtime call does not return
// hipSuccess. Wrap every hipMalloc/hipMemcpy/etc. call site with this.
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
// Abort when a BLAS (hipBLAS) call does not return HIPBLAS_STATUS_SUCCESS;
// `str` is a caller-supplied description of the failing operation.
#define CHECK_CUBLAS(call,str) \
{ \
if ( call != HIPBLAS_STATUS_SUCCESS) \
{ \
printf("CUBLAS Error: %s : %s %d\n", str, __FILE__, __LINE__); \
exit(1); \
} \
}
// Abort when a cuSOLVER call does not return CUSOLVER_STATUS_SUCCESS;
// `str` is a caller-supplied description of the failing operation.
// Fix: the message previously said "CUBLAS Error" (copy-paste from
// CHECK_CUBLAS), misattributing cuSOLVER failures to cuBLAS.
#define CHECK_CUSOLVER(call,str) \
{ \
if ( call != CUSOLVER_STATUS_SUCCESS) \
{ \
printf("CUSOLVER Error: %s : %s %d\n", str, __FILE__, __LINE__); \
exit(1); \
} \
}
int main(int argc, char **argv)
{
/* */
hipsolverDnHandle_t cusolverH = NULL;
// hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_VECTOR;
hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_LOWER;
CHECK_CUSOLVER( hipsolverDnCreate(&cusolverH),"Failed to Create cusolver handle");
hipblasHandle_t handle_cublas = 0;
hipblasCreate(&handle_cublas);
/* */
FILE *fp, *opco;
time_t timeValue;
struct tm *timeObject;
time( &timeValue );
timeObject = localtime( &timeValue );
char filename1[35], filename2[40];
sprintf(filename1,"data_system_%d%d_%d%d.txt",timeObject->tm_mon + 1, timeObject->tm_mday, timeObject->tm_hour,timeObject->tm_min);
sprintf(filename2,"optimum_condition_%d%d_%d%d.txt", timeObject->tm_mon + 1, timeObject->tm_mday, timeObject->tm_hour, timeObject->tm_min);
fp = fopen(filename1,"w");
opco = fopen(filename2,"w");
/* */
// float hostParams[DIM_OF_PARAMETERS], hostState[DIM_OF_STATES], hostConstraint[NUM_OF_CONSTRAINTS], hostWeightMatrix[DIM_OF_WEIGHT_MATRIX];
SystemControlVariable *hostSCV, *deviceSCV;
hostSCV = (SystemControlVariable*)malloc(sizeof(SystemControlVariable));
init_variables( hostSCV );
CHECK( hipMalloc(&deviceSCV, sizeof(SystemControlVariable)) );
CHECK( hipMemcpy(deviceSCV, hostSCV, sizeof(SystemControlVariable), hipMemcpyHostToDevice) );
/* GPU */
unsigned int numBlocks, /*randomBlocks,*/ randomNums, /*Blocks,*/ dimHessian, numUnknownParamQHP, numUnknownParamHessian;
unsigned int paramsSizeQuadHyperPlane;
randomNums = NUM_OF_SAMPLES * (DIM_OF_INPUT + 1) * HORIZON;
// randomBlocks = countBlocks(randomNums, THREAD_PER_BLOCKS);
numBlocks = countBlocks(NUM_OF_SAMPLES, THREAD_PER_BLOCKS);
// Blocks = numBlocks;
dimHessian = HORIZON * HORIZON;
numUnknownParamQHP = NUM_OF_PARABOLOID_COEFFICIENT;
numUnknownParamHessian = numUnknownParamQHP - (HORIZON + 1);
paramsSizeQuadHyperPlane = numUnknownParamQHP; //
paramsSizeQuadHyperPlane = paramsSizeQuadHyperPlane + addTermForLSM;
// dim3 block(MAX_DIVISOR,1);
dim3 block(1,1);
dim3 grid((numUnknownParamQHP + block.x - 1)/ block.x, (numUnknownParamQHP + block.y -1) / block.y);
printf("#NumBlocks = %d\n", numBlocks);
printf("#NumBlocks = %d\n", numUnknownParamQHP);
#ifdef WRITE_MATRIX_INFORMATION
float *WriteHessian, *WriteRegular;
WriteHessian = (float *)malloc(sizeof(float)*dimHessian);
WriteRegular = (float *)malloc(sizeof(float)* NUM_OF_PARABOLOID_COEFFICIENT * NUM_OF_PARABOLOID_COEFFICIENT);
int timerParam[5] = { };
dataName *name;
name = (dataName*)malloc(sizeof(dataName)*3);
#endif
/* MCMPCseed */
hiprandState_t *deviceRandomSeed;
hipMalloc((void **)&deviceRandomSeed, randomNums * sizeof(hiprandState_t));
hipLaunchKernelGGL(( setup_kernel), dim3(NUM_OF_SAMPLES), dim3((DIM_OF_INPUT + 1) * HORIZON), 0, 0, deviceRandomSeed, rand());
hipDeviceSynchronize();
/* */
SampleInfo *deviceSampleInfo, *hostSampleInfo, *hostEliteSampleInfo, *deviceEliteSampleInfo;
hostSampleInfo = (SampleInfo *)malloc(sizeof(SampleInfo) * NUM_OF_SAMPLES);
hostEliteSampleInfo = (SampleInfo*)malloc(sizeof(SampleInfo) * NUM_OF_ELITES);
hipMalloc(&deviceSampleInfo, sizeof(SampleInfo) * NUM_OF_SAMPLES);
hipMalloc(&deviceEliteSampleInfo, sizeof(SampleInfo) * NUM_OF_ELITES);
/*SampleInfo *TemporarySampleInfo, *deviceTempSampleInfo;
TemporarySampleInfo = (SampleInfo *)malloc(sizeof(SampleInfo) * paramsSizeQuadHyperPlane);
CHECK(hipMalloc(&deviceTempSampleInfo, sizeof(SampleInfo) * paramsSizeQuadHyperPlane) );*/
Tolerance *hostTol;
hostTol = (Tolerance*)malloc(sizeof(Tolerance)*HORIZON+1);
/* <---*/
float *Hessian, *invHessian, *lowerHessian, *HessianElements;
float *Gradient;
CHECK( hipMalloc(&Hessian, sizeof(float) * dimHessian) );
CHECK( hipMalloc(&invHessian, sizeof(float) * dimHessian) );
CHECK( hipMalloc(&lowerHessian, sizeof(float) * dimHessian) );
CHECK( hipMalloc(&HessianElements, sizeof(float) * numUnknownParamQHP) );
CHECK( hipMalloc(&Gradient, sizeof(float) * HORIZON) );
/* */
float *Gmatrix, *invGmatrix, *CVector, *ansCVector;
CHECK( hipMalloc(&CVector, sizeof(float) * numUnknownParamQHP) );
CHECK( hipMalloc(&ansCVector, sizeof(float) * numUnknownParamQHP) );
CHECK( hipMalloc(&Gmatrix, sizeof(float) * numUnknownParamQHP * numUnknownParamQHP) );
CHECK( hipMalloc(&invGmatrix, sizeof(float) * numUnknownParamQHP * numUnknownParamQHP) );
QHP *deviceQHP;
CHECK( hipMalloc(&deviceQHP, sizeof(QHP) * paramsSizeQuadHyperPlane) );
unsigned int qhpBlocks;
qhpBlocks = countBlocks(paramsSizeQuadHyperPlane, THREAD_PER_BLOCKS);
printf("#qhpBlocks = %d\n", qhpBlocks);
//
const int m_Rmatrix = numUnknownParamQHP;
int work_size, w_si_hessian;
float *work_space, *w_sp_hessian;
int *devInfo;
CHECK( hipMalloc((void**)&devInfo, sizeof(int) ) );
/* thrust/ */
thrust::host_vector<int> indices_host_vec( NUM_OF_SAMPLES );
thrust::device_vector<int> indices_device_vec = indices_host_vec;
thrust::host_vector<float> sort_key_host_vec( NUM_OF_SAMPLES );
thrust::device_vector<float> sort_key_device_vec = sort_key_host_vec;
/* */
float *hostData, *deviceData, *hostTempData, *deviceTempData;
hostData = (float *)malloc(sizeof(float) * HORIZON);
hostTempData = (float *)malloc(sizeof(float) * HORIZON);
CHECK(hipMalloc(&deviceData, sizeof(float) * HORIZON));
hipMalloc(&deviceTempData, sizeof(float) * HORIZON);
for(int i = 0; i < HORIZON; i++){
hostData[i] = 0.0f;
}
CHECK( hipMemcpy(deviceData, hostData, sizeof(float) * HORIZON, hipMemcpyHostToDevice));
/* */
float F_input = 0.0f;
float MCMPC_F, Proposed_F;
// float costFromMCMPC, costFromProposed, toleranceFromMCMPC, toleranceFromProposed;
float cost_now;
float optimumConditions[2] = { };
float optimumCondition_p[2] = { };
float var;
float process_gpu_time, procedure_all_time;
clock_t start_t, stop_t;
hipEvent_t start, stop;
dim3 inverseGmatrix(numUnknownParamQHP, numUnknownParamQHP);
dim3 grid_inverse(HORIZON, HORIZON);
dim3 threads((HORIZON + grid_inverse.x -1) / grid_inverse.x, (HORIZON + grid_inverse.y -1) / grid_inverse.y);
#ifdef USING_QR_DECOMPOSITION
// float *QR_work_space = NULL;
float *ws_QR_operation = NULL;
int geqrf_work_size = 0;
int ormqr_work_size = 0;
int QR_work_size = 0;
const int nrhs = 1;
float *QR_tau = NULL;
hipblasSideMode_t side = HIPBLAS_SIDE_LEFT;
hipblasOperation_t trans = HIPBLAS_OP_T;
hipblasOperation_t trans_N = HIPBLAS_OP_N;
hipblasFillMode_t uplo_QR = HIPBLAS_FILL_MODE_UPPER;
hipblasDiagType_t cub_diag = HIPBLAS_DIAG_NON_UNIT;
CHECK(hipMalloc((void**)&QR_tau, sizeof(float) * numUnknownParamQHP));
#endif
for(int t = 0; t < SIM_TIME; t++)
{
shift_Input_vec( hostData );
CHECK( hipMemcpy(deviceData, hostData, sizeof(float) * HORIZON, hipMemcpyHostToDevice) );
start_t = clock();
if(t == 0)
{
start_t = clock();
for(int iter = 0; iter < ITERATIONS_MAX; iter++)
{
var = variance / sqrt(iter + 1);
// var = variance / 2;
hipLaunchKernelGGL(( MCMPC_Cart_and_SinglePole), dim3(numBlocks), dim3(THREAD_PER_BLOCKS), 0, 0, deviceSCV, var, deviceRandomSeed, deviceData, deviceSampleInfo, thrust::raw_pointer_cast( sort_key_device_vec.data() ));
hipDeviceSynchronize();
thrust::sequence(indices_device_vec.begin(), indices_device_vec.end());
thrust::sort_by_key(sort_key_device_vec.begin(), sort_key_device_vec.end(), indices_device_vec.begin());
hipLaunchKernelGGL(( getEliteSampleInfo), dim3(NUM_OF_ELITES), dim3(1), 0, 0, deviceEliteSampleInfo, deviceSampleInfo, thrust::raw_pointer_cast( indices_device_vec.data() ));
CHECK( hipMemcpy(hostEliteSampleInfo, deviceEliteSampleInfo, sizeof(SampleInfo) * NUM_OF_ELITES, hipMemcpyDeviceToHost) );
// weighted_mean(hostData, NUM_OF_ELITES, hostSampleInfo);
weighted_mean(hostData, NUM_OF_ELITES, hostEliteSampleInfo);
MCMPC_F = hostData[0];
/*if(iter == 0)
{
sprintf(name[2].inputfile, "initSolution.txt");
name[2].dimSize = HORIZON;
resd_InitSolution_Input(hostData, &name[2]);
}*/
CHECK( hipMemcpy(deviceData, hostData, sizeof(float) * HORIZON, hipMemcpyHostToDevice) );
calc_OC_for_Cart_and_SinglePole_hostF(optimumConditions, hostData, hostSCV, hostTol);
printf("cost :: %f KKT_Error :: %f\n", optimumConditions[0], optimumConditions[1]);
}
name[1].dimSize = HORIZON;
sprintf(name[1].name,"InitInputData.txt");
write_Vector_Information(hostData, &name[1]);
stop_t = clock();
procedure_all_time = stop_t - start_t;
printf("Geometrical cooling MCMPC computation time :: %f\n", procedure_all_time / CLOCKS_PER_SEC);
}else{
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
start_t = clock();
for(int iter = 0; iter < ITERATIONS; iter++)
{
var = variance / 2.0f;
hipLaunchKernelGGL(( MCMPC_Cart_and_SinglePole), dim3(numBlocks), dim3(THREAD_PER_BLOCKS), 0, 0, deviceSCV, var, deviceRandomSeed, deviceData, deviceSampleInfo, thrust::raw_pointer_cast( sort_key_device_vec.data() ));
hipDeviceSynchronize();
thrust::sequence(indices_device_vec.begin(), indices_device_vec.end());
thrust::sort_by_key(sort_key_device_vec.begin(), sort_key_device_vec.end(), indices_device_vec.begin());
// CHECK( hipMemcpy(hostSampleInfo, deviceSampleInfo, sizeof(SampleInfo) * NUM_OF_SAMPLES, hipMemcpyDeviceToHost) );
hipLaunchKernelGGL(( getEliteSampleInfo), dim3(NUM_OF_ELITES), dim3(1), 0, 0, deviceEliteSampleInfo, deviceSampleInfo, thrust::raw_pointer_cast( indices_device_vec.data() ));
CHECK( hipMemcpy(hostEliteSampleInfo, deviceEliteSampleInfo, sizeof(SampleInfo) * NUM_OF_ELITES, hipMemcpyDeviceToHost) );
weighted_mean(hostData, NUM_OF_ELITES, hostEliteSampleInfo);
MCMPC_F = hostData[0];
CHECK( hipMemcpy(deviceData, hostData, sizeof(float) * HORIZON, hipMemcpyHostToDevice) );
var = neighborVar;
hipLaunchKernelGGL(( MCMPC_Cart_and_SinglePole), dim3(numBlocks), dim3(THREAD_PER_BLOCKS), 0, 0, deviceSCV, var, deviceRandomSeed, deviceData, deviceSampleInfo, thrust::raw_pointer_cast( sort_key_device_vec.data() ));
hipDeviceSynchronize();
thrust::sequence(indices_device_vec.begin(), indices_device_vec.end());
thrust::sort_by_key(sort_key_device_vec.begin(), sort_key_device_vec.end(), indices_device_vec.begin());
hipLaunchKernelGGL(( NewtonLikeMethodGetTensorVector), dim3(qhpBlocks), dim3(THREAD_PER_BLOCKS), 0, 0, deviceQHP, deviceSampleInfo, thrust::raw_pointer_cast( indices_device_vec.data() ));
hipDeviceSynchronize();
// 1024"NUM_OF_PARABOLOID_COEFFICIENT"(thread / block)
//
// NewtonLikeMethodGenNormalizationMatrix<<<grid, block>>>(Gmatrix, deviceQHP, paramsSizeQuadHyperPlane, NUM_OF_PARABOLOID_COEFFICIENT);
/*-----------------Error detect 2021.07.20----------------------------*/
// Following Function has any Error (ThreadId or BlockId) --> it is required to modify original mode.
// NewtonLikeMethodGenNormalEquation<<<grid, block>>>(Gmatrix, CVector, deviceQHP, paramsSizeQuadHyperPlane, NUM_OF_PARABOLOID_COEFFICIENT);
hipLaunchKernelGGL(( NewtonLikeMethodGetRegularMatrix), dim3(NUM_OF_PARABOLOID_COEFFICIENT), dim3(NUM_OF_PARABOLOID_COEFFICIENT), 0, 0, Gmatrix, deviceQHP, paramsSizeQuadHyperPlane);
hipLaunchKernelGGL(( NewtonLikeMethodGetRegularVector), dim3(NUM_OF_PARABOLOID_COEFFICIENT), dim3(1), 0, 0, CVector, deviceQHP, paramsSizeQuadHyperPlane);
hipDeviceSynchronize();
#ifdef WRITE_MATRIX_INFORMATION
if(t<300){
if(t % 50 == 0){
get_timeParam(timerParam, timeObject->tm_mon+1, timeObject->tm_mday, timeObject->tm_hour, timeObject->tm_min, t);
sprintf(name[0].name, "RegularMatrix");
name[0].dimSize = NUM_OF_PARABOLOID_COEFFICIENT;
CHECK(hipMemcpy(WriteRegular, Gmatrix, sizeof(float) * NUM_OF_PARABOLOID_COEFFICIENT * NUM_OF_PARABOLOID_COEFFICIENT, hipMemcpyDeviceToHost));
write_Matrix_Information(WriteRegular, &name[0], timerParam);
}
}else{
if(t % 250 == 0){
get_timeParam(timerParam, timeObject->tm_mon+1, timeObject->tm_mday, timeObject->tm_hour, timeObject->tm_min, t);
sprintf(name[0].name, "RegularMatrix");
name[0].dimSize = NUM_OF_PARABOLOID_COEFFICIENT;
CHECK(hipMemcpy(WriteRegular, Gmatrix, sizeof(float) * NUM_OF_PARABOLOID_COEFFICIENT * NUM_OF_PARABOLOID_COEFFICIENT, hipMemcpyDeviceToHost));
write_Matrix_Information(WriteRegular, &name[0], timerParam);
}
}
#endif
#ifndef USING_QR_DECOMPOSITION
//(Gx = v v)
// NewtonLikeMethodGenNormalizationVector<<<NUM_OF_PARABOLOID_COEFFICIENT, 1>>>(CVector, deviceQHP, paramsSizeQuadHyperPlane);
// hipDeviceSynchronize();
CHECK_CUSOLVER( hipsolverDnSpotrf_bufferSize(cusolverH, uplo, m_Rmatrix, Gmatrix, m_Rmatrix, &work_size), "Failed to get bufferSize");
CHECK(hipMalloc((void**)&work_space, sizeof(float) * work_size));
CHECK_CUSOLVER( hipsolverDnSpotrf(cusolverH, uplo, m_Rmatrix, Gmatrix, m_Rmatrix, work_space, work_size, devInfo), "Failed to inverse operation for G");
hipLaunchKernelGGL(( MatrixSetUpLargeIdentityMatrix), dim3(grid), dim3(block), 0, 0, invGmatrix, NUM_OF_PARABOLOID_COEFFICIENT);
hipDeviceSynchronize();
CHECK_CUSOLVER( hipsolverDnSpotrs(cusolverH, uplo, m_Rmatrix, m_Rmatrix, Gmatrix, m_Rmatrix, invGmatrix, m_Rmatrix, devInfo), "Failed to get inverse Matrix G");
// cuBlas
CHECK_CUBLAS( hipblasSgemv(handle_cublas, HIPBLAS_OP_N, m_Rmatrix, m_Rmatrix, &alpha, invGmatrix, m_Rmatrix, CVector, 1, &beta, ansCVector, 1),"Failed to get Estimate Input Sequences");
#else
if(t==1){
CHECK_CUSOLVER( hipsolverDnSgeqrf_bufferSize(cusolverH, m_Rmatrix, m_Rmatrix, Gmatrix, m_Rmatrix, &geqrf_work_size), "Failed to get buffersize for QR decom [1]" );
CHECK_CUSOLVER( hipsolverDnSormqr_bufferSize(cusolverH, side, trans, m_Rmatrix, nrhs, m_Rmatrix, Gmatrix, m_Rmatrix, QR_tau, CVector, m_Rmatrix, &ormqr_work_size), "Failed to get buffersize for QR decom [2]" );
QR_work_size = (geqrf_work_size > ormqr_work_size)? geqrf_work_size : ormqr_work_size;
}
CHECK( hipMalloc((void**)&ws_QR_operation, sizeof(float) * QR_work_size) );
/* compute QR factorization */
CHECK_CUSOLVER( hipsolverDnSgeqrf(cusolverH, m_Rmatrix, m_Rmatrix, Gmatrix, m_Rmatrix, QR_tau, ws_QR_operation, QR_work_size, devInfo),"Failed to compute QR factorization" );
CHECK_CUSOLVER( hipsolverDnSormqr(cusolverH, side, trans, m_Rmatrix, nrhs, m_Rmatrix, Gmatrix, m_Rmatrix, QR_tau, CVector, m_Rmatrix, ws_QR_operation, QR_work_size, devInfo), "Failed to compute Q^T*B" );
CHECK(hipDeviceSynchronize());
CHECK_CUBLAS( hipblasStrsm(handle_cublas, side, uplo_QR, trans_N, cub_diag, m_Rmatrix, nrhs, &alpha, Gmatrix, m_Rmatrix, CVector, m_Rmatrix), "Failed to compute X = R^-1Q^T*B" );
CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( NewtonLikeMethodCopyVector), dim3(numUnknownParamQHP), dim3(1), 0, 0, ansCVector, CVector);
#endif
hipLaunchKernelGGL(( NewtonLikeMethodGetHessianElements), dim3(numUnknownParamHessian), dim3(1), 0, 0, HessianElements, ansCVector);
CHECK(hipDeviceSynchronize());
//
hipLaunchKernelGGL(( NewtonLikeMethodGetHessianOriginal), dim3(HORIZON), dim3(HORIZON), 0, 0, Hessian, HessianElements);
CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( NewtonLikeMethodGetLowerTriangle), dim3(HORIZON), dim3(HORIZON), 0, 0, lowerHessian, Hessian);
CHECK(hipDeviceSynchronize());
// NewtonLikeMethodGetFullHessianLtoU<<<HORIZON, HORIZON>>>(Hessian, lowerHessian);
hipLaunchKernelGGL(( NewtonLikeMethodGetFullHessianUtoL), dim3(HORIZON), dim3(HORIZON), 0, 0, lowerHessian, Hessian);
hipLaunchKernelGGL(( NewtonLikeMethodGetGradient), dim3(HORIZON), dim3(1), 0, 0, Gradient, ansCVector, numUnknownParamHessian);
hipLaunchKernelGGL(( MatrixMultiplyOperation), dim3(HORIZON),dim3(HORIZON), 0, 0, Hessian, 2.0f, lowerHessian);
CHECK_CUSOLVER( hipsolverDnSpotrf_bufferSize(cusolverH, uplo, HORIZON, Hessian, HORIZON, &w_si_hessian), "Failed to get bufferSize of computing the inverse of Hessian");
CHECK( hipMalloc((void**)&w_sp_hessian, sizeof(float) * w_si_hessian) );
CHECK_CUSOLVER( hipsolverDnSpotrf(cusolverH, uplo, HORIZON, Hessian, HORIZON, w_sp_hessian, w_si_hessian, devInfo), "Failed to inverse operation");
hipLaunchKernelGGL(( MatrixSetUpSmallIdentityMatrix), dim3(HORIZON), dim3(HORIZON), 0, 0, invHessian);
CHECK_CUSOLVER( hipsolverDnSpotrs(cusolverH, uplo, HORIZON, HORIZON, Hessian, HORIZON, invHessian, HORIZON, devInfo), "Failed to get inverse of Hessian");
// -1
hipLaunchKernelGGL(( MatrixMultiplyOperation), dim3(HORIZON), dim3(HORIZON), 0, 0, Hessian, -1.0f, invHessian);
CHECK_CUBLAS(hipblasSgemv(handle_cublas, HIPBLAS_OP_N, HORIZON, HORIZON, &alpha, Hessian, HORIZON, Gradient, 1, &beta, deviceTempData, 1), "Failed to get result by proposed method");
CHECK( hipMemcpy(hostTempData, deviceTempData, sizeof(float) * HORIZON, hipMemcpyDeviceToHost) );
NewtonLikeMethodInputSaturation(hostTempData, hostSCV->constraints[1], hostSCV->constraints[0]);
Proposed_F = hostTempData[0]; //
// ->(vs MC)->(by RungeKutta4.5)->
calc_OC_for_Cart_and_SinglePole_hostF(optimumConditions, hostData, hostSCV, hostTol); //MC
calc_OC_for_Cart_and_SinglePole_hostF(optimumCondition_p, hostTempData, hostSCV, hostTol); //
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&process_gpu_time, start, stop);
stop_t = clock();
procedure_all_time = stop_t - start_t;
}
printf("TIME stpe :: %f", t * interval);
printf("MCMPC optimum condition := %f Proposed optimum condition := %f\n", optimumConditions[1], optimumCondition_p[1]);
printf("MCMPC cost value := %f Proposed cost value := %f\n", optimumConditions[0], optimumCondition_p[0]);
//
if(optimumCondition_p[0] < optimumConditions[0] /*&& optimumCondition_p[1] < optimumConditions[1]*/)
{
F_input = Proposed_F;
cost_now = optimumCondition_p[0];
CHECK( hipMemcpy(deviceData, hostTempData, sizeof(float) * HORIZON, hipMemcpyHostToDevice) );
}else{
F_input = MCMPC_F;
cost_now = optimumConditions[0];
CHECK( hipMemcpy(deviceData, hostData, sizeof(float) * HORIZON, hipMemcpyHostToDevice) );
}
Runge_Kutta45_for_SecondaryOderSystem( hostSCV, F_input, interval);
CHECK( hipMemcpy(deviceSCV, hostSCV, sizeof(SystemControlVariable), hipMemcpyHostToDevice) );
fprintf(fp, "%f %f %f %f %f %f %f %f\n", t * interval, F_input, MCMPC_F, Proposed_F, hostSCV->state[0], hostSCV->state[1], hostSCV->state[2], hostSCV->state[3]);
fprintf(opco, "%f %f %f %f %f %f %f %f\n", t * interval, cost_now, optimumConditions[0], optimumCondition_p[0], optimumConditions[1], optimumCondition_p[1], process_gpu_time/10e3, procedure_all_time/CLOCKS_PER_SEC);
}
if(cusolverH) hipsolverDnDestroy(cusolverH);
if(handle_cublas) hipblasDestroy(handle_cublas);
fclose(fp);
fclose(opco);
hipDeviceReset( );
printf("%s\n", hipGetErrorString(hipGetLastError()));
return 0;
} | a7e8eeaba03912279dff1be53b386b1bf83655d3.cu | /*
*/
#include<iostream>
#include <stdio.h>
#include <fstream>
#include <math.h>
#include <cuda.h>
#include <time.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <iomanip>
#include "include/params.cuh"
#include "include/init.cuh"
#include "include/Matrix.cuh"
#include "include/DataStructure.cuh"
#include "include/MCMPC.cuh"
#include "include/NewtonLikeMethod.cuh"
#include "include/optimum_conditions.cuh"
#include "include/dataToFile.cuh"
// #include "include/cudaErrorCheck.cuh"
// Abort with file/line context when a CUDA runtime call does not return
// cudaSuccess. Wrap every cudaMalloc/cudaMemcpy/etc. call site with this.
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// Abort when a cuBLAS call does not return CUBLAS_STATUS_SUCCESS;
// `str` is a caller-supplied description of the failing operation.
#define CHECK_CUBLAS(call,str) \
{ \
if ( call != CUBLAS_STATUS_SUCCESS) \
{ \
printf("CUBLAS Error: %s : %s %d\n", str, __FILE__, __LINE__); \
exit(1); \
} \
}
// Abort when a cuSOLVER call does not return CUSOLVER_STATUS_SUCCESS;
// `str` is a caller-supplied description of the failing operation.
// Fix: the message previously said "CUBLAS Error" (copy-paste from
// CHECK_CUBLAS), misattributing cuSOLVER failures to cuBLAS.
#define CHECK_CUSOLVER(call,str) \
{ \
if ( call != CUSOLVER_STATUS_SUCCESS) \
{ \
printf("CUSOLVER Error: %s : %s %d\n", str, __FILE__, __LINE__); \
exit(1); \
} \
}
int main(int argc, char **argv)
{
/*
 * MCMPC (sampling-based Model Predictive Control) experiment driver for a
 * cart-and-single-pole system.  Each control step runs a Monte Carlo solve
 * plus a Newton-like refinement that fits a quadratic hypersurface to the
 * sampled costs (least squares via Cholesky or QR), compares the two
 * candidate inputs by cost, applies the better one to a Runge-Kutta plant
 * simulation, and logs states/costs/timings to text files.
 */
/* Handles for the matrix-operation libraries (cuSOLVER / cuBLAS) */
cusolverDnHandle_t cusolverH = NULL;
// cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR;
cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER;
CHECK_CUSOLVER( cusolverDnCreate(&cusolverH),"Failed to Create cusolver handle");
cublasHandle_t handle_cublas = 0;
cublasCreate(&handle_cublas);
/* Output files for the experiment data, named after the current date/time */
FILE *fp, *opco;
time_t timeValue;
struct tm *timeObject;
time( &timeValue );
timeObject = localtime( &timeValue );
char filename1[35], filename2[40];
sprintf(filename1,"data_system_%d%d_%d%d.txt",timeObject->tm_mon + 1, timeObject->tm_mday, timeObject->tm_hour,timeObject->tm_min);
sprintf(filename2,"optimum_condition_%d%d_%d%d.txt", timeObject->tm_mon + 1, timeObject->tm_mday, timeObject->tm_hour, timeObject->tm_min);
fp = fopen(filename1,"w");
opco = fopen(filename2,"w");
/* System state/parameter struct mirrored on host and device */
// float hostParams[DIM_OF_PARAMETERS], hostState[DIM_OF_STATES], hostConstraint[NUM_OF_CONSTRAINTS], hostWeightMatrix[DIM_OF_WEIGHT_MATRIX];
SystemControlVariable *hostSCV, *deviceSCV;
hostSCV = (SystemControlVariable*)malloc(sizeof(SystemControlVariable));
init_variables( hostSCV );
CHECK( cudaMalloc(&deviceSCV, sizeof(SystemControlVariable)) );
CHECK( cudaMemcpy(deviceSCV, hostSCV, sizeof(SystemControlVariable), cudaMemcpyHostToDevice) );
/* GPU launch-configuration parameters */
unsigned int numBlocks, /*randomBlocks,*/ randomNums, /*Blocks,*/ dimHessian, numUnknownParamQHP, numUnknownParamHessian;
unsigned int paramsSizeQuadHyperPlane;
randomNums = NUM_OF_SAMPLES * (DIM_OF_INPUT + 1) * HORIZON;
// randomBlocks = countBlocks(randomNums, THREAD_PER_BLOCKS);
numBlocks = countBlocks(NUM_OF_SAMPLES, THREAD_PER_BLOCKS);
// Blocks = numBlocks;
dimHessian = HORIZON * HORIZON;
numUnknownParamQHP = NUM_OF_PARABOLOID_COEFFICIENT;
numUnknownParamHessian = numUnknownParamQHP - (HORIZON + 1);
paramsSizeQuadHyperPlane = numUnknownParamQHP; // local sample size is chosen according to the horizon length
paramsSizeQuadHyperPlane = paramsSizeQuadHyperPlane + addTermForLSM;
// dim3 block(MAX_DIVISOR,1);
dim3 block(1,1);
dim3 grid((numUnknownParamQHP + block.x - 1)/ block.x, (numUnknownParamQHP + block.y -1) / block.y);
printf("#NumBlocks = %d\n", numBlocks);
// NOTE(review): label says "#NumBlocks" but this prints numUnknownParamQHP.
printf("#NumBlocks = %d\n", numUnknownParamQHP);
#ifdef WRITE_MATRIX_INFORMATION
float *WriteHessian, *WriteRegular;
WriteHessian = (float *)malloc(sizeof(float)*dimHessian);
WriteRegular = (float *)malloc(sizeof(float)* NUM_OF_PARABOLOID_COEFFICIENT * NUM_OF_PARABOLOID_COEFFICIENT);
int timerParam[5] = { };
dataName *name;
name = (dataName*)malloc(sizeof(dataName)*3);
#endif
/* Generate the per-thread random seeds used by MCMPC sampling */
curandState *deviceRandomSeed;
cudaMalloc((void **)&deviceRandomSeed, randomNums * sizeof(curandState));
setup_kernel<<<NUM_OF_SAMPLES, (DIM_OF_INPUT + 1) * HORIZON>>>(deviceRandomSeed, rand());
cudaDeviceSynchronize();
/* Structures bundling per-sample input, cost and optimality-residual info */
SampleInfo *deviceSampleInfo, *hostSampleInfo, *hostEliteSampleInfo, *deviceEliteSampleInfo;
hostSampleInfo = (SampleInfo *)malloc(sizeof(SampleInfo) * NUM_OF_SAMPLES);
hostEliteSampleInfo = (SampleInfo*)malloc(sizeof(SampleInfo) * NUM_OF_ELITES);
cudaMalloc(&deviceSampleInfo, sizeof(SampleInfo) * NUM_OF_SAMPLES);
cudaMalloc(&deviceEliteSampleInfo, sizeof(SampleInfo) * NUM_OF_ELITES);
/*SampleInfo *TemporarySampleInfo, *deviceTempSampleInfo;
TemporarySampleInfo = (SampleInfo *)malloc(sizeof(SampleInfo) * paramsSizeQuadHyperPlane);
CHECK(cudaMalloc(&deviceTempSampleInfo, sizeof(SampleInfo) * paramsSizeQuadHyperPlane) );*/
Tolerance *hostTol;
// NOTE(review): this allocates sizeof(Tolerance)*HORIZON + 1 BYTES; the
// intent was almost certainly sizeof(Tolerance)*(HORIZON+1) — confirm.
hostTol = (Tolerance*)malloc(sizeof(Tolerance)*HORIZON+1);
/* Matrices/vectors holding the quadratic-hypersurface fit (also used for the optimum computation) */
float *Hessian, *invHessian, *lowerHessian, *HessianElements;
float *Gradient;
CHECK( cudaMalloc(&Hessian, sizeof(float) * dimHessian) );
CHECK( cudaMalloc(&invHessian, sizeof(float) * dimHessian) );
CHECK( cudaMalloc(&lowerHessian, sizeof(float) * dimHessian) );
CHECK( cudaMalloc(&HessianElements, sizeof(float) * numUnknownParamQHP) );
CHECK( cudaMalloc(&Gradient, sizeof(float) * HORIZON) );
/* Arrays used when fitting the quadratic hypersurface by least squares */
float *Gmatrix, *invGmatrix, *CVector, *ansCVector;
CHECK( cudaMalloc(&CVector, sizeof(float) * numUnknownParamQHP) );
CHECK( cudaMalloc(&ansCVector, sizeof(float) * numUnknownParamQHP) );
CHECK( cudaMalloc(&Gmatrix, sizeof(float) * numUnknownParamQHP * numUnknownParamQHP) );
CHECK( cudaMalloc(&invGmatrix, sizeof(float) * numUnknownParamQHP * numUnknownParamQHP) );
QHP *deviceQHP;
CHECK( cudaMalloc(&deviceQHP, sizeof(QHP) * paramsSizeQuadHyperPlane) );
unsigned int qhpBlocks;
qhpBlocks = countBlocks(paramsSizeQuadHyperPlane, THREAD_PER_BLOCKS);
printf("#qhpBlocks = %d\n", qhpBlocks);
// Variables for the dense linear-algebra library calls
const int m_Rmatrix = numUnknownParamQHP;
int work_size, w_si_hessian;
float *work_space, *w_sp_hessian;
int *devInfo;
CHECK( cudaMalloc((void**)&devInfo, sizeof(int) ) );
/* Host/device vectors for thrust-based cost sorting */
thrust::host_vector<int> indices_host_vec( NUM_OF_SAMPLES );
thrust::device_vector<int> indices_device_vec = indices_host_vec;
thrust::host_vector<float> sort_key_host_vec( NUM_OF_SAMPLES );
thrust::device_vector<float> sort_key_device_vec = sort_key_host_vec;
/* Buffers for transferring / plotting the estimated input sequence */
float *hostData, *deviceData, *hostTempData, *deviceTempData;
hostData = (float *)malloc(sizeof(float) * HORIZON);
hostTempData = (float *)malloc(sizeof(float) * HORIZON);
CHECK(cudaMalloc(&deviceData, sizeof(float) * HORIZON));
cudaMalloc(&deviceTempData, sizeof(float) * HORIZON);
for(int i = 0; i < HORIZON; i++){
hostData[i] = 0.0f;
}
CHECK( cudaMemcpy(deviceData, hostData, sizeof(float) * HORIZON, cudaMemcpyHostToDevice));
/* Start of the control loop */
float F_input = 0.0f;
float MCMPC_F, Proposed_F;
// float costFromMCMPC, costFromProposed, toleranceFromMCMPC, toleranceFromProposed;
float cost_now;
float optimumConditions[2] = { };
float optimumCondition_p[2] = { };
float var;
float process_gpu_time, procedure_all_time;
clock_t start_t, stop_t;
cudaEvent_t start, stop;
dim3 inverseGmatrix(numUnknownParamQHP, numUnknownParamQHP);
dim3 grid_inverse(HORIZON, HORIZON);
dim3 threads((HORIZON + grid_inverse.x -1) / grid_inverse.x, (HORIZON + grid_inverse.y -1) / grid_inverse.y);
#ifdef USING_QR_DECOMPOSITION
// float *QR_work_space = NULL;
float *ws_QR_operation = NULL;
int geqrf_work_size = 0;
int ormqr_work_size = 0;
int QR_work_size = 0;
const int nrhs = 1;
float *QR_tau = NULL;
cublasSideMode_t side = CUBLAS_SIDE_LEFT;
cublasOperation_t trans = CUBLAS_OP_T;
cublasOperation_t trans_N = CUBLAS_OP_N;
cublasFillMode_t uplo_QR = CUBLAS_FILL_MODE_UPPER;
cublasDiagType_t cub_diag = CUBLAS_DIAG_NON_UNIT;
CHECK(cudaMalloc((void**)&QR_tau, sizeof(float) * numUnknownParamQHP));
#endif
for(int t = 0; t < SIM_TIME; t++)
{
// Warm-start: shift last step's input sequence one slot forward.
shift_Input_vec( hostData );
CHECK( cudaMemcpy(deviceData, hostData, sizeof(float) * HORIZON, cudaMemcpyHostToDevice) );
start_t = clock();
if(t == 0)
{
// First step: geometric-cooling MCMPC only (variance shrinks each iteration).
start_t = clock();
for(int iter = 0; iter < ITERATIONS_MAX; iter++)
{
var = variance / sqrt(iter + 1);
// var = variance / 2;
MCMPC_Cart_and_SinglePole<<<numBlocks, THREAD_PER_BLOCKS>>>( deviceSCV, var, deviceRandomSeed, deviceData, deviceSampleInfo, thrust::raw_pointer_cast( sort_key_device_vec.data() ));
cudaDeviceSynchronize();
// Sort sample indices by cost, then gather the elite samples.
thrust::sequence(indices_device_vec.begin(), indices_device_vec.end());
thrust::sort_by_key(sort_key_device_vec.begin(), sort_key_device_vec.end(), indices_device_vec.begin());
getEliteSampleInfo<<<NUM_OF_ELITES, 1>>>(deviceEliteSampleInfo, deviceSampleInfo, thrust::raw_pointer_cast( indices_device_vec.data() ));
CHECK( cudaMemcpy(hostEliteSampleInfo, deviceEliteSampleInfo, sizeof(SampleInfo) * NUM_OF_ELITES, cudaMemcpyDeviceToHost) );
// weighted_mean(hostData, NUM_OF_ELITES, hostSampleInfo);
weighted_mean(hostData, NUM_OF_ELITES, hostEliteSampleInfo);
MCMPC_F = hostData[0];
/*if(iter == 0)
{
sprintf(name[2].inputfile, "initSolution.txt");
name[2].dimSize = HORIZON;
resd_InitSolution_Input(hostData, &name[2]);
}*/
CHECK( cudaMemcpy(deviceData, hostData, sizeof(float) * HORIZON, cudaMemcpyHostToDevice) );
calc_OC_for_Cart_and_SinglePole_hostF(optimumConditions, hostData, hostSCV, hostTol);
printf("cost :: %f KKT_Error :: %f\n", optimumConditions[0], optimumConditions[1]);
}
// NOTE(review): `name` is only declared under WRITE_MATRIX_INFORMATION;
// this block will not compile when that macro is undefined — confirm.
name[1].dimSize = HORIZON;
sprintf(name[1].name,"InitInputData.txt");
write_Vector_Information(hostData, &name[1]);
stop_t = clock();
procedure_all_time = stop_t - start_t;
printf("Geometrical cooling MCMPC computation time :: %f\n", procedure_all_time / CLOCKS_PER_SEC);
}else{
// NOTE(review): events are created every step and never destroyed (leak).
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
start_t = clock();
for(int iter = 0; iter < ITERATIONS; iter++)
{
var = variance / 2.0f;
MCMPC_Cart_and_SinglePole<<<numBlocks, THREAD_PER_BLOCKS>>>( deviceSCV, var, deviceRandomSeed, deviceData, deviceSampleInfo, thrust::raw_pointer_cast( sort_key_device_vec.data() ));
cudaDeviceSynchronize();
thrust::sequence(indices_device_vec.begin(), indices_device_vec.end());
thrust::sort_by_key(sort_key_device_vec.begin(), sort_key_device_vec.end(), indices_device_vec.begin());
// CHECK( cudaMemcpy(hostSampleInfo, deviceSampleInfo, sizeof(SampleInfo) * NUM_OF_SAMPLES, cudaMemcpyDeviceToHost) );
getEliteSampleInfo<<<NUM_OF_ELITES, 1>>>(deviceEliteSampleInfo, deviceSampleInfo, thrust::raw_pointer_cast( indices_device_vec.data() ));
CHECK( cudaMemcpy(hostEliteSampleInfo, deviceEliteSampleInfo, sizeof(SampleInfo) * NUM_OF_ELITES, cudaMemcpyDeviceToHost) );
weighted_mean(hostData, NUM_OF_ELITES, hostEliteSampleInfo);
MCMPC_F = hostData[0];
CHECK( cudaMemcpy(deviceData, hostData, sizeof(float) * HORIZON, cudaMemcpyHostToDevice) );
// Resample tightly around the MC solution to collect fitting points.
var = neighborVar;
MCMPC_Cart_and_SinglePole<<<numBlocks, THREAD_PER_BLOCKS>>>( deviceSCV, var, deviceRandomSeed, deviceData, deviceSampleInfo, thrust::raw_pointer_cast( sort_key_device_vec.data() ));
cudaDeviceSynchronize();
thrust::sequence(indices_device_vec.begin(), indices_device_vec.end());
thrust::sort_by_key(sort_key_device_vec.begin(), sort_key_device_vec.end(), indices_device_vec.begin());
NewtonLikeMethodGetTensorVector<<< qhpBlocks, THREAD_PER_BLOCKS>>>(deviceQHP, deviceSampleInfo, thrust::raw_pointer_cast( indices_device_vec.data() ));
cudaDeviceSynchronize();
// Strategy: use the largest divisor of NUM_OF_PARABOLOID_COEFFICIENT that is <= 1024 as threads/block.
// The following computes each element of the normal-equation matrix.
// NewtonLikeMethodGenNormalizationMatrix<<<grid, block>>>(Gmatrix, deviceQHP, paramsSizeQuadHyperPlane, NUM_OF_PARABOLOID_COEFFICIENT);
/*-----------------Error detect 2021.07.20----------------------------*/
// Following Function has any Error (ThreadId or BlockId) --> it is required to modify original mode.
// NewtonLikeMethodGenNormalEquation<<<grid, block>>>(Gmatrix, CVector, deviceQHP, paramsSizeQuadHyperPlane, NUM_OF_PARABOLOID_COEFFICIENT);
NewtonLikeMethodGetRegularMatrix<<<NUM_OF_PARABOLOID_COEFFICIENT, NUM_OF_PARABOLOID_COEFFICIENT>>>(Gmatrix, deviceQHP, paramsSizeQuadHyperPlane);
NewtonLikeMethodGetRegularVector<<<NUM_OF_PARABOLOID_COEFFICIENT, 1>>>(CVector, deviceQHP, paramsSizeQuadHyperPlane);
cudaDeviceSynchronize();
#ifdef WRITE_MATRIX_INFORMATION
// Periodically dump the normal-equation matrix for offline inspection.
if(t<300){
if(t % 50 == 0){
get_timeParam(timerParam, timeObject->tm_mon+1, timeObject->tm_mday, timeObject->tm_hour, timeObject->tm_min, t);
sprintf(name[0].name, "RegularMatrix");
name[0].dimSize = NUM_OF_PARABOLOID_COEFFICIENT;
CHECK(cudaMemcpy(WriteRegular, Gmatrix, sizeof(float) * NUM_OF_PARABOLOID_COEFFICIENT * NUM_OF_PARABOLOID_COEFFICIENT, cudaMemcpyDeviceToHost));
write_Matrix_Information(WriteRegular, &name[0], timerParam);
}
}else{
if(t % 250 == 0){
get_timeParam(timerParam, timeObject->tm_mon+1, timeObject->tm_mday, timeObject->tm_hour, timeObject->tm_min, t);
sprintf(name[0].name, "RegularMatrix");
name[0].dimSize = NUM_OF_PARABOLOID_COEFFICIENT;
CHECK(cudaMemcpy(WriteRegular, Gmatrix, sizeof(float) * NUM_OF_PARABOLOID_COEFFICIENT * NUM_OF_PARABOLOID_COEFFICIENT, cudaMemcpyDeviceToHost));
write_Matrix_Information(WriteRegular, &name[0], timerParam);
}
}
#endif
#ifndef USING_QR_DECOMPOSITION
// Solve the normal equation Gx = v via Cholesky (potrf/potrs) and cuBLAS.
// NewtonLikeMethodGenNormalizationVector<<<NUM_OF_PARABOLOID_COEFFICIENT, 1>>>(CVector, deviceQHP, paramsSizeQuadHyperPlane);
// cudaDeviceSynchronize();
CHECK_CUSOLVER( cusolverDnSpotrf_bufferSize(cusolverH, uplo, m_Rmatrix, Gmatrix, m_Rmatrix, &work_size), "Failed to get bufferSize");
// NOTE(review): allocated every iteration and never freed (device leak).
CHECK(cudaMalloc((void**)&work_space, sizeof(float) * work_size));
CHECK_CUSOLVER( cusolverDnSpotrf(cusolverH, uplo, m_Rmatrix, Gmatrix, m_Rmatrix, work_space, work_size, devInfo), "Failed to inverse operation for G");
MatrixSetUpLargeIdentityMatrix<<<grid, block>>>(invGmatrix, NUM_OF_PARABOLOID_COEFFICIENT);
cudaDeviceSynchronize();
CHECK_CUSOLVER( cusolverDnSpotrs(cusolverH, uplo, m_Rmatrix, m_Rmatrix, Gmatrix, m_Rmatrix, invGmatrix, m_Rmatrix, devInfo), "Failed to get inverse Matrix G");
// Solve the normal equation with cuBLAS.
CHECK_CUBLAS( cublasSgemv(handle_cublas, CUBLAS_OP_N, m_Rmatrix, m_Rmatrix, &alpha, invGmatrix, m_Rmatrix, CVector, 1, &beta, ansCVector, 1),"Failed to get Estimate Input Sequences");
#else
// QR path: workspace sizes are queried once at t==1 and reused after.
if(t==1){
CHECK_CUSOLVER( cusolverDnSgeqrf_bufferSize(cusolverH, m_Rmatrix, m_Rmatrix, Gmatrix, m_Rmatrix, &geqrf_work_size), "Failed to get buffersize for QR decom [1]" );
CHECK_CUSOLVER( cusolverDnSormqr_bufferSize(cusolverH, side, trans, m_Rmatrix, nrhs, m_Rmatrix, Gmatrix, m_Rmatrix, QR_tau, CVector, m_Rmatrix, &ormqr_work_size), "Failed to get buffersize for QR decom [2]" );
QR_work_size = (geqrf_work_size > ormqr_work_size)? geqrf_work_size : ormqr_work_size;
}
// NOTE(review): allocated every iteration and never freed (device leak).
CHECK( cudaMalloc((void**)&ws_QR_operation, sizeof(float) * QR_work_size) );
/* compute QR factorization */
CHECK_CUSOLVER( cusolverDnSgeqrf(cusolverH, m_Rmatrix, m_Rmatrix, Gmatrix, m_Rmatrix, QR_tau, ws_QR_operation, QR_work_size, devInfo),"Failed to compute QR factorization" );
CHECK_CUSOLVER( cusolverDnSormqr(cusolverH, side, trans, m_Rmatrix, nrhs, m_Rmatrix, Gmatrix, m_Rmatrix, QR_tau, CVector, m_Rmatrix, ws_QR_operation, QR_work_size, devInfo), "Failed to compute Q^T*B" );
CHECK(cudaDeviceSynchronize());
CHECK_CUBLAS( cublasStrsm(handle_cublas, side, uplo_QR, trans_N, cub_diag, m_Rmatrix, nrhs, &alpha, Gmatrix, m_Rmatrix, CVector, m_Rmatrix), "Failed to compute X = R^-1Q^T*B" );
CHECK(cudaDeviceSynchronize());
NewtonLikeMethodCopyVector<<<numUnknownParamQHP, 1>>>(ansCVector, CVector);
#endif
NewtonLikeMethodGetHessianElements<<<numUnknownParamHessian, 1>>>(HessianElements, ansCVector);
CHECK(cudaDeviceSynchronize());
// Extract the upper-triangular elements of the Hessian.
NewtonLikeMethodGetHessianOriginal<<<HORIZON, HORIZON>>>(Hessian, HessianElements);
CHECK(cudaDeviceSynchronize());
NewtonLikeMethodGetLowerTriangle<<<HORIZON, HORIZON>>>(lowerHessian, Hessian);
CHECK(cudaDeviceSynchronize());
// NewtonLikeMethodGetFullHessianLtoU<<<HORIZON, HORIZON>>>(Hessian, lowerHessian);
NewtonLikeMethodGetFullHessianUtoL<<<HORIZON, HORIZON>>>(lowerHessian, Hessian);
NewtonLikeMethodGetGradient<<<HORIZON, 1>>>(Gradient, ansCVector, numUnknownParamHessian);
MatrixMultiplyOperation<<<HORIZON,HORIZON>>>(Hessian, 2.0f, lowerHessian);
CHECK_CUSOLVER( cusolverDnSpotrf_bufferSize(cusolverH, uplo, HORIZON, Hessian, HORIZON, &w_si_hessian), "Failed to get bufferSize of computing the inverse of Hessian");
// NOTE(review): allocated every iteration and never freed (device leak).
CHECK( cudaMalloc((void**)&w_sp_hessian, sizeof(float) * w_si_hessian) );
CHECK_CUSOLVER( cusolverDnSpotrf(cusolverH, uplo, HORIZON, Hessian, HORIZON, w_sp_hessian, w_si_hessian, devInfo), "Failed to inverse operation");
MatrixSetUpSmallIdentityMatrix<<<HORIZON, HORIZON>>>(invHessian);
CHECK_CUSOLVER( cusolverDnSpotrs(cusolverH, uplo, HORIZON, HORIZON, Hessian, HORIZON, invHessian, HORIZON, devInfo), "Failed to get inverse of Hessian");
// Multiply the inverse Hessian by -1.
MatrixMultiplyOperation<<<HORIZON, HORIZON>>>(Hessian, -1.0f, invHessian);
CHECK_CUBLAS(cublasSgemv(handle_cublas, CUBLAS_OP_N, HORIZON, HORIZON, &alpha, Hessian, HORIZON, Gradient, 1, &beta, deviceTempData, 1), "Failed to get result by proposed method");
CHECK( cudaMemcpy(hostTempData, deviceTempData, sizeof(float) * HORIZON, cudaMemcpyDeviceToHost) );
NewtonLikeMethodInputSaturation(hostTempData, hostSCV->constraints[1], hostSCV->constraints[0]);
Proposed_F = hostTempData[0]; // first-time-step input estimated by the proposed method
// Compute optimality conditions for the proposed method -> compare vs the MC
// solution -> physics simulation (Runge-Kutta 4.5) -> save the results.
calc_OC_for_Cart_and_SinglePole_hostF(optimumConditions, hostData, hostSCV, hostTol); // optimality conditions of the MC solution
calc_OC_for_Cart_and_SinglePole_hostF(optimumCondition_p, hostTempData, hostSCV, hostTol); // optimality conditions of the proposed solution
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
// cudaEventElapsedTime reports milliseconds.
cudaEventElapsedTime(&process_gpu_time, start, stop);
stop_t = clock();
procedure_all_time = stop_t - start_t;
}
// NOTE(review): runtime string has a typo ("stpe") and no '\n'; left as-is.
printf("TIME stpe :: %f", t * interval);
printf("MCMPC optimum condition := %f Proposed optimum condition := %f\n", optimumConditions[1], optimumCondition_p[1]);
printf("MCMPC cost value := %f Proposed cost value := %f\n", optimumConditions[0], optimumCondition_p[0]);
// Choose the applied input based on the cost comparison.
if(optimumCondition_p[0] < optimumConditions[0] /*&& optimumCondition_p[1] < optimumConditions[1]*/)
{
F_input = Proposed_F;
cost_now = optimumCondition_p[0];
CHECK( cudaMemcpy(deviceData, hostTempData, sizeof(float) * HORIZON, cudaMemcpyHostToDevice) );
}else{
F_input = MCMPC_F;
cost_now = optimumConditions[0];
CHECK( cudaMemcpy(deviceData, hostData, sizeof(float) * HORIZON, cudaMemcpyHostToDevice) );
}
Runge_Kutta45_for_SecondaryOderSystem( hostSCV, F_input, interval);
CHECK( cudaMemcpy(deviceSCV, hostSCV, sizeof(SystemControlVariable), cudaMemcpyHostToDevice) );
fprintf(fp, "%f %f %f %f %f %f %f %f\n", t * interval, F_input, MCMPC_F, Proposed_F, hostSCV->state[0], hostSCV->state[1], hostSCV->state[2], hostSCV->state[3]);
// NOTE(review): process_gpu_time/10e3 divides milliseconds by 10000;
// converting ms -> s would be /1e3 — confirm intended units.
fprintf(opco, "%f %f %f %f %f %f %f %f\n", t * interval, cost_now, optimumConditions[0], optimumCondition_p[0], optimumConditions[1], optimumCondition_p[1], process_gpu_time/10e3, procedure_all_time/CLOCKS_PER_SEC);
}
if(cusolverH) cusolverDnDestroy(cusolverH);
if(handle_cublas) cublasDestroy(handle_cublas);
fclose(fp);
fclose(opco);
cudaDeviceReset( );
printf("%s\n", cudaGetErrorString(cudaGetLastError()));
return 0;
} |
33c57e0f0660e7bc0405292c2332917832b6931e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "same_num_channels_add_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate (blockDim.x, blockDim.y) configurations swept by the benchmark.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate (XSIZE, YSIZE) problem dimensions swept by the benchmark.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
// Benchmark driver for same_num_channels_add_kernel: argv[1] selects how
// many rows of matrices_ to test; each size is timed (1000 launches)
// against all 20 block configurations in blocks_.
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// Bug fix: buffers were allocated as XSIZE*YSIZE BYTES, i.e. only a
// quarter of the floats the kernel addresses (out-of-bounds access).
float *data_l = NULL;
hipMalloc(&data_l, XSIZE*YSIZE*sizeof(float));
float *data_r = NULL;
hipMalloc(&data_r, XSIZE*YSIZE*sizeof(float));
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE*sizeof(float));
// Round grid dimensions up so the grid covers the whole matrix.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0); // force lazy context initialization before timing
// cold-start launch
hipLaunchKernelGGL(same_num_channels_add_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, data_l, data_r, result);
hipDeviceSynchronize();
// warm-up launches
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(same_num_channels_add_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, data_l, data_r, result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(same_num_channels_add_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, data_l, data_r, result);
}
// Bug fix: kernel launches are asynchronous; without this sync the timer
// measured launch overhead only, not kernel execution.
hipDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Bug fix: buffers were leaked on every iteration of the sweep.
hipFree(data_l);
hipFree(data_r);
hipFree(result);
}
}} | 33c57e0f0660e7bc0405292c2332917832b6931e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "same_num_channels_add_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate (blockDim.x, blockDim.y) configurations swept by the benchmark.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate (XSIZE, YSIZE) problem dimensions swept by the benchmark.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
// Benchmark driver for same_num_channels_add_kernel: argv[1] selects how
// many rows of matrices_ to test; each size is timed (1000 launches)
// against all 20 block configurations in blocks_.
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// Bug fix: buffers were allocated as XSIZE*YSIZE BYTES, i.e. only a
// quarter of the floats the kernel addresses (out-of-bounds access).
float *data_l = NULL;
cudaMalloc(&data_l, XSIZE*YSIZE*sizeof(float));
float *data_r = NULL;
cudaMalloc(&data_r, XSIZE*YSIZE*sizeof(float));
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE*sizeof(float));
// Round grid dimensions up so the grid covers the whole matrix.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0); // force lazy context initialization before timing
// cold-start launch
same_num_channels_add_kernel<<<gridBlock,threadBlock>>>(data_l,data_r,result);
cudaDeviceSynchronize();
// warm-up launches
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
same_num_channels_add_kernel<<<gridBlock,threadBlock>>>(data_l,data_r,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
same_num_channels_add_kernel<<<gridBlock,threadBlock>>>(data_l,data_r,result);
}
// Bug fix: kernel launches are asynchronous; without this sync the timer
// measured launch overhead only, not kernel execution.
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Bug fix: buffers were leaked on every iteration of the sweep.
cudaFree(data_l);
cudaFree(data_r);
cudaFree(result);
}
}} |
2c26aa0b19f3d7978e11f303bb911aef61aa3c14.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2019-2022 by XGBoost Contributors
*
* \file data.cu
* \brief Handles setting metainfo from array interface.
*/
#include "../common/cuda_context.cuh"
#include "../common/device_helpers.cuh"
#include "../common/linalg_op.cuh"
#include "array_interface.h"
#include "device_adapter_hip.cuh"
#include "simple_dmatrix.h"
#include "validation.h"
#include "xgboost/data.h"
#include "xgboost/json.h"
#include "xgboost/logging.h"
namespace xgboost {
namespace {
// Query which device owns `ptr`, make that device current, and return its
// ordinal so callers can place outputs on the same device.
auto SetDeviceToPtr(void const* ptr) {
hipPointerAttribute_t attributes;
dh::safe_cuda(hipPointerGetAttributes(&attributes, ptr));
int32_t const owning_device = attributes.device;
dh::safe_cuda(hipSetDevice(owning_device));
return owning_device;
}
// Copy meta-info tensor data described by an array-interface JSON document
// into *p_out, keeping the data on the device that owns the source pointer.
// Fast path: contiguous source with matching dtype -> single async memcpy.
// Slow path: element-wise transform that converts dtype / strides on device.
template <typename T, int32_t D>
void CopyTensorInfoImpl(CUDAContext const* ctx, Json arr_interface, linalg::Tensor<T, D>* p_out) {
ArrayInterface<D> array(arr_interface);
if (array.n == 0) {
// Empty input: only shape the output (placed on device 0 by default).
p_out->SetDevice(0);
p_out->Reshape(array.shape);
return;
}
CHECK_EQ(array.valid.Capacity(), 0)
<< "Meta info like label or weight can not have missing value.";
auto ptr_device = SetDeviceToPtr(array.data);
p_out->SetDevice(ptr_device);
if (array.is_contiguous && array.type == ToDType<T>::kType) {
p_out->ModifyInplace([&](HostDeviceVector<T>* data, common::Span<size_t, D> shape) {
// set shape
std::copy(array.shape, array.shape + D, shape.data());
// set data
data->Resize(array.n);
// Async copy on the context stream; ModifyInplace is assumed to keep
// the buffer alive until the stream work completes.
dh::safe_cuda(hipMemcpyAsync(data->DevicePointer(), array.data, array.n * sizeof(T),
hipMemcpyDefault, ctx->Stream()));
});
return;
}
// Slow path: per-element gather through the typed array-interface view.
p_out->Reshape(array.shape);
auto t = p_out->View(DeviceOrd::CUDA(ptr_device));
linalg::ElementWiseTransformDevice(
t,
[=] __device__(size_t i, T) {
return linalg::detail::Apply(TypedIndex<T, D>{array},
linalg::UnravelIndex<D>(i, array.shape));
},
ctx->Stream());
}
// Build the cumulative group pointer `out` ([0, n0, n0+n1, ...]) from
// per-group sizes supplied as a device-resident array-interface column.
void CopyGroupInfoImpl(ArrayInterface<1> column, std::vector<bst_group_t>* out) {
// Group sizes must be integral; float columns are rejected.
CHECK(column.type != ArrayInterfaceHandler::kF4 && column.type != ArrayInterfaceHandler::kF8)
<< "Expected integer for group info.";
auto ptr_device = SetDeviceToPtr(column.data);
CHECK_EQ(ptr_device, dh::CurrentDevice());
// Stage the column into a typed device buffer (handles any integer dtype).
dh::TemporaryArray<bst_group_t> temp(column.Shape(0));
auto d_tmp = temp.data().get();
dh::LaunchN(column.Shape(0),
[=] __device__(size_t idx) { d_tmp[idx] = TypedIndex<size_t, 1>{column}(idx); });
auto length = column.Shape(0);
// Leading 0 sentinel, then convert sizes to offsets via prefix sum on host.
out->resize(length + 1);
out->at(0) = 0;
thrust::copy(temp.data(), temp.data() + length, out->begin() + 1);
std::partial_sum(out->begin(), out->end(), out->begin());
}
// Build the group pointer from per-row query ids (`qid`): validate the ids
// are sorted non-decreasing, run-length encode them into group sizes, then
// scan the sizes into cumulative offsets.
void CopyQidImpl(ArrayInterface<1> array_interface, std::vector<bst_group_t>* p_group_ptr) {
auto &group_ptr_ = *p_group_ptr;
auto it = dh::MakeTransformIterator<uint32_t>(
thrust::make_counting_iterator(0ul), [array_interface] __device__(size_t i) {
return TypedIndex<uint32_t, 1>{array_interface}(i);
});
// Single device flag: set false if any adjacent pair is out of order.
dh::caching_device_vector<bool> flag(1);
auto d_flag = dh::ToSpan(flag);
auto d = SetDeviceToPtr(array_interface.data);
dh::LaunchN(1, [=] __device__(size_t) { d_flag[0] = true; });
dh::LaunchN(array_interface.Shape(0) - 1, [=] __device__(size_t i) {
auto typed = TypedIndex<uint32_t, 1>{array_interface};
if (typed(i) > typed(i + 1)) {
d_flag[0] = false;
}
});
bool non_dec = true;
// Blocking copy also synchronizes with the validation kernels above.
dh::safe_cuda(hipMemcpy(&non_dec, flag.data().get(), sizeof(bool),
hipMemcpyDeviceToHost));
CHECK(non_dec) << "`qid` must be sorted in increasing order along with data.";
// Run-length encode the qids: `out` gets unique ids, `cnt` their counts.
size_t bytes = 0;
dh::caching_device_vector<uint32_t> out(array_interface.Shape(0));
dh::caching_device_vector<uint32_t> cnt(array_interface.Shape(0));
HostDeviceVector<int> d_num_runs_out(1, 0, d);
// First call only queries the required temporary-storage size.
hipcub::DeviceRunLengthEncode::Encode(
nullptr, bytes, it, out.begin(), cnt.begin(),
d_num_runs_out.DevicePointer(), array_interface.Shape(0));
dh::caching_device_vector<char> tmp(bytes);
hipcub::DeviceRunLengthEncode::Encode(
tmp.data().get(), bytes, it, out.begin(), cnt.begin(),
d_num_runs_out.DevicePointer(), array_interface.Shape(0));
auto h_num_runs_out = d_num_runs_out.HostSpan()[0];
// Inclusive scan turns run lengths into cumulative group boundaries.
group_ptr_.clear();
group_ptr_.resize(h_num_runs_out + 1, 0);
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::inclusive_scan(thrust::hip::par(alloc), cnt.begin(),
cnt.begin() + h_num_runs_out, cnt.begin());
thrust::copy(cnt.begin(), cnt.begin() + h_num_runs_out,
group_ptr_.begin() + 1);
}
} // namespace
// Set a MetaInfo field (`key`) from device data described by an
// array-interface JSON document, validating values where applicable.
void MetaInfo::SetInfoFromCUDA(Context const& ctx, StringView key, Json array) {
// multi-dim float info
if (key == "base_margin") {
CopyTensorInfoImpl(ctx.CUDACtx(), array, &base_margin_);
return;
} else if (key == "label") {
CopyTensorInfoImpl(ctx.CUDACtx(), array, &labels);
auto ptr = labels.Data()->ConstDevicePointer();
// Reject labels containing NaN/inf/out-of-range values.
auto valid = thrust::none_of(thrust::device, ptr, ptr + labels.Size(), data::LabelsCheck{});
CHECK(valid) << "Label contains NaN, infinity or a value too large.";
return;
}
// uint info: ranking group structure, either as sizes or per-row qids.
if (key == "group") {
ArrayInterface<1> array_interface{array};
CopyGroupInfoImpl(array_interface, &group_ptr_);
data::ValidateQueryGroup(group_ptr_);
return;
} else if (key == "qid") {
ArrayInterface<1> array_interface{array};
CopyQidImpl(array_interface, &group_ptr_);
data::ValidateQueryGroup(group_ptr_);
return;
}
// float info: staged through a 1-D tensor, then moved into the right field.
linalg::Tensor<float, 1> t;
CopyTensorInfoImpl(ctx.CUDACtx(), array, &t);
if (key == "weight") {
this->weights_ = std::move(*t.Data());
auto ptr = weights_.ConstDevicePointer();
auto valid = thrust::none_of(thrust::device, ptr, ptr + weights_.Size(), data::WeightsCheck{});
CHECK(valid) << "Weights must be positive values.";
} else if (key == "label_lower_bound") {
this->labels_lower_bound_ = std::move(*t.Data());
} else if (key == "label_upper_bound") {
this->labels_upper_bound_ = std::move(*t.Data());
} else if (key == "feature_weights") {
this->feature_weights = std::move(*t.Data());
auto d_feature_weights = feature_weights.ConstDeviceSpan();
auto valid =
thrust::none_of(ctx.CUDACtx()->CTP(), d_feature_weights.data(),
d_feature_weights.data() + d_feature_weights.size(), data::WeightsCheck{});
CHECK(valid) << "Feature weight must be greater than 0.";
} else {
LOG(FATAL) << "Unknown key for MetaInfo: " << key;
}
}
// Construct an in-memory SimpleDMatrix from a device data adapter
// (cuDF / CuPy).  External-memory construction (non-empty cache_prefix)
// is not supported for device inputs.
template <typename AdapterT>
DMatrix* DMatrix::Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix, DataSplitMode data_split_mode) {
CHECK_EQ(cache_prefix.size(), 0)
<< "Device memory construction is not currently supported with external "
"memory.";
return new data::SimpleDMatrix(adapter, missing, nthread, data_split_mode);
}
// Explicit instantiations for the supported device adapters.
template DMatrix* DMatrix::Create<data::CudfAdapter>(
data::CudfAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix, DataSplitMode data_split_mode);
template DMatrix* DMatrix::Create<data::CupyAdapter>(
data::CupyAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix, DataSplitMode data_split_mode);
} // namespace xgboost
| 2c26aa0b19f3d7978e11f303bb911aef61aa3c14.cu | /**
* Copyright 2019-2022 by XGBoost Contributors
*
* \file data.cu
* \brief Handles setting metainfo from array interface.
*/
#include "../common/cuda_context.cuh"
#include "../common/device_helpers.cuh"
#include "../common/linalg_op.cuh"
#include "array_interface.h"
#include "device_adapter.cuh"
#include "simple_dmatrix.h"
#include "validation.h"
#include "xgboost/data.h"
#include "xgboost/json.h"
#include "xgboost/logging.h"
namespace xgboost {
namespace {
// Query which device owns `ptr`, make that device current, and return its
// ordinal so callers can place outputs on the same device.
auto SetDeviceToPtr(void const* ptr) {
cudaPointerAttributes attributes;
dh::safe_cuda(cudaPointerGetAttributes(&attributes, ptr));
int32_t const owning_device = attributes.device;
dh::safe_cuda(cudaSetDevice(owning_device));
return owning_device;
}
template <typename T, int32_t D>
void CopyTensorInfoImpl(CUDAContext const* ctx, Json arr_interface, linalg::Tensor<T, D>* p_out) {
ArrayInterface<D> array(arr_interface);
if (array.n == 0) {
p_out->SetDevice(0);
p_out->Reshape(array.shape);
return;
}
CHECK_EQ(array.valid.Capacity(), 0)
<< "Meta info like label or weight can not have missing value.";
auto ptr_device = SetDeviceToPtr(array.data);
p_out->SetDevice(ptr_device);
if (array.is_contiguous && array.type == ToDType<T>::kType) {
p_out->ModifyInplace([&](HostDeviceVector<T>* data, common::Span<size_t, D> shape) {
// set shape
std::copy(array.shape, array.shape + D, shape.data());
// set data
data->Resize(array.n);
dh::safe_cuda(cudaMemcpyAsync(data->DevicePointer(), array.data, array.n * sizeof(T),
cudaMemcpyDefault, ctx->Stream()));
});
return;
}
p_out->Reshape(array.shape);
auto t = p_out->View(DeviceOrd::CUDA(ptr_device));
linalg::ElementWiseTransformDevice(
t,
[=] __device__(size_t i, T) {
return linalg::detail::Apply(TypedIndex<T, D>{array},
linalg::UnravelIndex<D>(i, array.shape));
},
ctx->Stream());
}
// Build the cumulative group pointer `out` ([0, n0, n0+n1, ...]) from
// per-group sizes supplied as a device-resident array-interface column.
void CopyGroupInfoImpl(ArrayInterface<1> column, std::vector<bst_group_t>* out) {
// Group sizes must be integral; float columns are rejected.
CHECK(column.type != ArrayInterfaceHandler::kF4 && column.type != ArrayInterfaceHandler::kF8)
<< "Expected integer for group info.";
auto ptr_device = SetDeviceToPtr(column.data);
CHECK_EQ(ptr_device, dh::CurrentDevice());
// Stage the column into a typed device buffer (handles any integer dtype).
dh::TemporaryArray<bst_group_t> temp(column.Shape(0));
auto d_tmp = temp.data().get();
dh::LaunchN(column.Shape(0),
[=] __device__(size_t idx) { d_tmp[idx] = TypedIndex<size_t, 1>{column}(idx); });
auto length = column.Shape(0);
// Leading 0 sentinel, then convert sizes to offsets via prefix sum on host.
out->resize(length + 1);
out->at(0) = 0;
thrust::copy(temp.data(), temp.data() + length, out->begin() + 1);
std::partial_sum(out->begin(), out->end(), out->begin());
}
// Build the group pointer from per-row query ids (`qid`): validate the ids
// are sorted non-decreasing, run-length encode them into group sizes, then
// scan the sizes into cumulative offsets.
void CopyQidImpl(ArrayInterface<1> array_interface, std::vector<bst_group_t>* p_group_ptr) {
auto &group_ptr_ = *p_group_ptr;
auto it = dh::MakeTransformIterator<uint32_t>(
thrust::make_counting_iterator(0ul), [array_interface] __device__(size_t i) {
return TypedIndex<uint32_t, 1>{array_interface}(i);
});
// Single device flag: set false if any adjacent pair is out of order.
dh::caching_device_vector<bool> flag(1);
auto d_flag = dh::ToSpan(flag);
auto d = SetDeviceToPtr(array_interface.data);
dh::LaunchN(1, [=] __device__(size_t) { d_flag[0] = true; });
dh::LaunchN(array_interface.Shape(0) - 1, [=] __device__(size_t i) {
auto typed = TypedIndex<uint32_t, 1>{array_interface};
if (typed(i) > typed(i + 1)) {
d_flag[0] = false;
}
});
bool non_dec = true;
// Blocking copy also synchronizes with the validation kernels above.
dh::safe_cuda(cudaMemcpy(&non_dec, flag.data().get(), sizeof(bool),
cudaMemcpyDeviceToHost));
CHECK(non_dec) << "`qid` must be sorted in increasing order along with data.";
// Run-length encode the qids: `out` gets unique ids, `cnt` their counts.
size_t bytes = 0;
dh::caching_device_vector<uint32_t> out(array_interface.Shape(0));
dh::caching_device_vector<uint32_t> cnt(array_interface.Shape(0));
HostDeviceVector<int> d_num_runs_out(1, 0, d);
// First call only queries the required temporary-storage size.
cub::DeviceRunLengthEncode::Encode(
nullptr, bytes, it, out.begin(), cnt.begin(),
d_num_runs_out.DevicePointer(), array_interface.Shape(0));
dh::caching_device_vector<char> tmp(bytes);
cub::DeviceRunLengthEncode::Encode(
tmp.data().get(), bytes, it, out.begin(), cnt.begin(),
d_num_runs_out.DevicePointer(), array_interface.Shape(0));
auto h_num_runs_out = d_num_runs_out.HostSpan()[0];
// Inclusive scan turns run lengths into cumulative group boundaries.
group_ptr_.clear();
group_ptr_.resize(h_num_runs_out + 1, 0);
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::inclusive_scan(thrust::cuda::par(alloc), cnt.begin(),
cnt.begin() + h_num_runs_out, cnt.begin());
thrust::copy(cnt.begin(), cnt.begin() + h_num_runs_out,
group_ptr_.begin() + 1);
}
} // namespace
// Set a MetaInfo field (`key`) from device data described by an
// array-interface JSON document, validating values where applicable.
void MetaInfo::SetInfoFromCUDA(Context const& ctx, StringView key, Json array) {
// multi-dim float info
if (key == "base_margin") {
CopyTensorInfoImpl(ctx.CUDACtx(), array, &base_margin_);
return;
} else if (key == "label") {
CopyTensorInfoImpl(ctx.CUDACtx(), array, &labels);
auto ptr = labels.Data()->ConstDevicePointer();
// Reject labels containing NaN/inf/out-of-range values.
auto valid = thrust::none_of(thrust::device, ptr, ptr + labels.Size(), data::LabelsCheck{});
CHECK(valid) << "Label contains NaN, infinity or a value too large.";
return;
}
// uint info: ranking group structure, either as sizes or per-row qids.
if (key == "group") {
ArrayInterface<1> array_interface{array};
CopyGroupInfoImpl(array_interface, &group_ptr_);
data::ValidateQueryGroup(group_ptr_);
return;
} else if (key == "qid") {
ArrayInterface<1> array_interface{array};
CopyQidImpl(array_interface, &group_ptr_);
data::ValidateQueryGroup(group_ptr_);
return;
}
// float info: staged through a 1-D tensor, then moved into the right field.
linalg::Tensor<float, 1> t;
CopyTensorInfoImpl(ctx.CUDACtx(), array, &t);
if (key == "weight") {
this->weights_ = std::move(*t.Data());
auto ptr = weights_.ConstDevicePointer();
auto valid = thrust::none_of(thrust::device, ptr, ptr + weights_.Size(), data::WeightsCheck{});
CHECK(valid) << "Weights must be positive values.";
} else if (key == "label_lower_bound") {
this->labels_lower_bound_ = std::move(*t.Data());
} else if (key == "label_upper_bound") {
this->labels_upper_bound_ = std::move(*t.Data());
} else if (key == "feature_weights") {
this->feature_weights = std::move(*t.Data());
auto d_feature_weights = feature_weights.ConstDeviceSpan();
auto valid =
thrust::none_of(ctx.CUDACtx()->CTP(), d_feature_weights.data(),
d_feature_weights.data() + d_feature_weights.size(), data::WeightsCheck{});
CHECK(valid) << "Feature weight must be greater than 0.";
} else {
LOG(FATAL) << "Unknown key for MetaInfo: " << key;
}
}
// Construct an in-memory DMatrix from a device adapter (cuDF / CuPy).
// External (cached) memory is not supported for device construction, hence
// the empty-prefix requirement.  Caller owns the returned pointer.
template <typename AdapterT>
DMatrix* DMatrix::Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix, DataSplitMode data_split_mode) {
CHECK_EQ(cache_prefix.size(), 0)
<< "Device memory construction is not currently supported with external "
"memory.";
return new data::SimpleDMatrix(adapter, missing, nthread, data_split_mode);
}
// Explicit instantiations for the two supported device adapters.
template DMatrix* DMatrix::Create<data::CudfAdapter>(
data::CudfAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix, DataSplitMode data_split_mode);
template DMatrix* DMatrix::Create<data::CupyAdapter>(
data::CupyAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix, DataSplitMode data_split_mode);
} // namespace xgboost
|
799afdb9206bdf5a4dd15e4e240bdd069a4b25ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cstdio>
#include <cstdint>
#include <cstring>
#include <cassert>
#include <vector>
#include <time.h>
#include <gmp.h>
#include <libff/algebra/curves/mnt753/mnt4753/mnt4753_pp.hpp>
#include <libff/algebra/curves/mnt753/mnt6753/mnt6753_pp.hpp>
#include "play_mul.cu"
using namespace libff;
const unsigned int bytes_per_elem = 128;
const unsigned int io_bytes_per_elem = 96;
// mnt4_q
uint8_t mnt4_modulus[bytes_per_elem] = {1,128,94,36,222,99,144,94,159,17,221,44,82,84,157,227,240,37,196,154,113,16,136,99,164,84,114,118,233,204,90,104,56,126,83,203,165,13,15,184,157,5,24,242,118,231,23,177,157,247,90,161,217,36,209,153,141,237,160,232,37,185,253,7,115,216,151,108,249,232,183,94,237,175,143,91,80,151,249,183,173,205,226,238,34,144,34,16,17,196,146,45,198,196,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
// mnt6_q
uint8_t mnt6_modulus[bytes_per_elem] = {1,0,0,64,226,118,7,217,79,58,161,15,23,153,160,78,151,87,0,63,188,129,195,214,164,58,153,52,118,249,223,185,54,38,33,41,148,202,235,62,155,169,89,200,40,92,108,178,157,247,90,161,217,36,209,153,141,237,160,232,37,185,253,7,115,216,151,108,249,232,183,94,237,175,143,91,80,151,249,183,173,205,226,238,34,144,34,16,17,196,146,45,198,196,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
void write_mnt4_fq(FILE* output, Fq<mnt4753_pp> x) {
fwrite((void *) x.mont_repr.data, libff::mnt4753_q_limbs * sizeof(mp_size_t), 1, output);
}
void write_mnt6_fq(FILE* output, Fq<mnt6753_pp> x) {
fwrite((void *) x.mont_repr.data, libff::mnt6753_q_limbs * sizeof(mp_size_t), 1, output);
}
void write_mnt4_fq2(FILE* output, Fqe<mnt4753_pp> x) {
write_mnt4_fq(output, x.c0);
write_mnt4_fq(output, x.c1);
}
Fq<mnt4753_pp> read_mnt4_fq(FILE* input) {
Fq<mnt4753_pp> x;
fread((void *) x.mont_repr.data, libff::mnt4753_q_limbs * sizeof(mp_size_t), 1, input);
return x;
}
Fq<mnt6753_pp> read_mnt6_fq(FILE* input) {
Fq<mnt6753_pp> x;
fread((void *) x.mont_repr.data, libff::mnt6753_q_limbs * sizeof(mp_size_t), 1, input);
return x;
}
Fqe<mnt4753_pp> read_mnt4_fq2(FILE* input) {
Fq<mnt4753_pp> c0 = read_mnt4_fq(input);
Fq<mnt4753_pp> c1 = read_mnt4_fq(input);
return Fqe<mnt4753_pp>(c0, c1);
}
// Functor to release container elements allocated with malloc/calloc
// (see read_mnt_fq_2, which uses calloc).  The original body used `delete p`,
// which is undefined behaviour for memory that did not come from `new`;
// std::free matches the allocator actually used for these buffers.
struct delete_ptr { // Helper function to ease cleanup of container
  template <typename P>
  void operator () (P p) {
    free(p);
  }
};
// Functor to release container elements that were allocated on the device /
// as managed memory via hipMallocManaged (see read_mnt_fq_2_gpu).
struct delete_ptr_gpu { // Helper function to ease cleanup of container
template <typename P>
void operator () (P p) {
hipFree(p);
}
};
const char* input_a = "/home/arunesh/github/snark-challenge/reference-01-field-arithmetic/inputs";
// Read one io_bytes_per_elem (96-byte) field element from `inputs` into a
// freshly allocated bytes_per_elem (128-byte) host buffer.  Caller frees.
uint8_t* read_mnt_fq_2(FILE* inputs) {
  // calloc zero-fills, so bytes 96..127 stay zero: the input is the
  // montgomery representation x * 2^768 whereas cuda-fixnum expects
  // x * 2^1024, i.e. a shift left by (1024-768)/8 zero bytes.
  uint8_t* elem = static_cast<uint8_t*>(calloc(bytes_per_elem, sizeof(uint8_t)));
  fread(static_cast<void*>(elem), io_bytes_per_elem * sizeof(uint8_t), 1, inputs);
  return elem;
}
// Read one io_bytes_per_elem (96-byte) field element from `inputs` into a
// freshly allocated bytes_per_elem (128-byte) managed buffer accessible from
// host and device.  Returns nullptr on allocation failure.  Caller hipFree's.
uint8_t* read_mnt_fq_2_gpu(FILE* inputs) {
  uint8_t* buf = nullptr;
  // The third parameter of hipMallocManaged is a flags word, not an element
  // size; the original passed sizeof(uint8_t), which only worked by
  // coincidence (it equals hipMemAttachGlobal).  Be explicit and check.
  hipError_t err = hipMallocManaged(&buf, bytes_per_elem, hipMemAttachGlobal);
  if (err != hipSuccess || buf == nullptr) {
    fprintf(stderr, "hipMallocManaged failed: %s\n", hipGetErrorString(err));
    return nullptr;
  }
  // Unlike the calloc path in read_mnt_fq_2, managed memory is not
  // zero-initialised.  The input is montgomery representation x * 2^768
  // whereas cuda-fixnum expects x * 2^1024, so bytes 96..127 must be zero.
  memset(buf, 0, bytes_per_elem);
  fread((void*)buf, io_bytes_per_elem * sizeof(uint8_t), 1, inputs);
  return buf;
}
// Element-wise 64-bit addition a[i] += b[i], one thread per limb.
// Expects a 1-D launch; extra threads beyond `num` are masked out by the
// guard below (the original kernel read/wrote out of bounds whenever
// blockDim.x exceeded num).  Debug printf is serialized and slow.
__global__
void gpu_sum(uint64_t* a, uint64_t* b, int num) {
  int thread_id = threadIdx.x;
  if (thread_id >= num) return;  // bounds guard: launch size need not equal num
  printf("\n Gpu sum\n");
  uint64_t res = a[thread_id] + b[thread_id];
  printf("\nthread_id = %d, a[%08lX] + b[%08lX] = %08lX", thread_id, a[thread_id], b[thread_id], res);
  a[thread_id] = res;
}
// CPU reference for gpu_sum: a[i] += b[i] for every i in [0, num),
// logging each addition.  Wrap-around follows unsigned 64-bit arithmetic.
void cpu_sum(uint64_t* a, uint64_t* b, int num) {
  printf("\n Cpu sum\n");
  for (int i = 0; i < num; i++) {
    uint64_t sum = a[i] + b[i];
    printf("\n i= %d a[%08lX] + b[%08lX] = %08lX", i, a[i], b[i], sum);
    a[i] = sum;
  }
}
// Compare the first `num` bytes of a and b.  Returns true (nonzero) when the
// buffers differ, false when they are byte-identical.
bool check(uint8_t* a, uint8_t* b, int num) {
  return memcmp(a, b, static_cast<size_t>(num) * sizeof(uint8_t)) != 0;
}
Fq<mnt4753_pp> to_fq(uint8_t* data) {
Fq<mnt4753_pp> x;
memcpy((void *) x.mont_repr.data, data, libff::mnt4753_q_limbs * sizeof(mp_size_t));
return x;
}
void fprint_fq(FILE* stream, Fq<mnt4753_pp> x) {
int size = libff::mnt4753_q_limbs * sizeof(mp_size_t);
uint8_t* array = (uint8_t*) x.mont_repr.data;
for (int i = 0; i < size; i ++) {
fprintf(stream, "%02x", array[i]);
}
fprintf(stream, "\n");
}
// Write `size` bytes to `stream` as lower-case two-digit hex, followed by a
// newline.  Byte order is the array order (little-endian limb dump upstream).
void fprint_uint8_array(FILE* stream, uint8_t* array, int size) {
  for (int idx = 0; idx < size; ++idx) {
    fprintf(stream, "%02x", array[idx]);
  }
  fprintf(stream, "\n");
}
void mul_play(std::vector<uint8_t*> x, std::vector<uint8_t*> y, FILE* debug_log) {
mnt4753_pp::init_public_params();
mnt6753_pp::init_public_params();
std::vector<Fq<mnt4753_pp>> x0;
std::vector<Fq<mnt4753_pp>> x1;
int n = x.size();
for (int i = 0; i < n; i++) {
x0.emplace_back(to_fq(x[i]));
x1.emplace_back(to_fq(y[i]));
Fq<mnt4753_pp> out = x0[i] * x1[i];
if (i < 2) {
fprintf(debug_log, "\n X[%d]:", i);
fprint_fq(debug_log, x0[i]);
fprintf(debug_log, "\n Y[%d]:", i);
fprint_fq(debug_log, x1[i]);
fprintf(debug_log, "\n FQ[%d]:", i);
fprint_fq(debug_log, out);
}
}
std::vector<uint8_t*>* result = compute_mont_mulcuda(x, y, mnt4_modulus, bytes_per_elem);
for (int i = 0; i < 2; i++) {
fprintf(debug_log, "\n x[%d]:", i);
fprint_uint8_array(debug_log, x[i], io_bytes_per_elem);
fprintf(debug_log, "\n y[%d]:", i);
fprint_uint8_array(debug_log, y[i], io_bytes_per_elem);
fprintf(debug_log, "\n GPU[%d]:", i);
fprint_uint8_array(debug_log, result->at(i), io_bytes_per_elem);
}
}
void compute_gmp_inverse_32() {
unsigned long l = 1;
mpz_t n;
mpz_init(n);
mpz_set_str(n, "4294967296", 10);
mpz_t m;
mpz_init(m);
unsigned long int m32;
memcpy((void*)&m32, mnt4_modulus, sizeof(uint32_t));
mpz_set_ui(m, m32);
printf("\n setting m32 to %08X\n", m32);
gmp_printf("\n setting n to %ZX\n", n);
gmp_printf("\n setting n to %Zd\n", n);
mpz_t rop;
mpz_init(rop);
mpz_invert(rop, m, n);
gmp_printf (" mpz %Zd\n", rop);
mpz_sub(rop, n, rop);
gmp_printf (" mpz %Zd\n", rop);
gmp_printf (" mpz %ZX\n", rop);
}
void compute_gmp_inverse() {
unsigned long l = 1;
mpz_t n;
mpz_init(n);
mpz_set_str(n, "18446744073709551616", 10);
mpz_t m;
mpz_init(m);
unsigned long int m32;
memcpy((void*)&m32, mnt4_modulus, sizeof(uint64_t));
mpz_set_ui(m, m32);
printf("\n setting m32 to %08X\n", m32);
gmp_printf("\n setting n to %ZX\n", n);
gmp_printf("\n setting n to %Zd\n", n);
mpz_t rop;
mpz_init(rop);
mpz_invert(rop, m, n);
gmp_printf (" mpz %Zd\n", rop);
mpz_sub(rop, n, rop);
gmp_printf (" mpz %Zd\n", rop);
gmp_printf (" mpz %ZX\n", rop);
}
// Driver: reads batches of field elements from `input_a`, sums the first pair
// of elements limb-wise on the GPU and on the CPU, and cross-checks results.
// Batch layout: n, then n mnt4 x elements, n mnt4 y elements, n mnt6 x
// elements, n mnt6 y elements.
int main(int argc, char* argv[]) {
  printf("\nMain program. argc = %d \n", argc);
  // argv[2] = input_a;
  auto inputs = fopen(input_a, "r");
  auto debug_file = fopen("debug_log", "w");
  if (inputs == NULL || debug_file == NULL) {  // original dereferenced NULL on failure
    fprintf(stderr, "Failed to open input or debug_log file.\n");
    return 1;
  }
  printf("\n Opening file %s for reading.\n", input_a);
  size_t n;
  clock_t start, end;
  double time_used = 0.0;
  double time_iter = 0.0;
  fprintf(debug_file, "\n mnt4 modulus:\n");
  fprint_uint8_array(debug_file, mnt4_modulus, bytes_per_elem);
  // %zu: formatting sizeof/size_t with %d is undefined behaviour.
  printf("\n sieze of mplimb_t = %zu, %zu, %zu", sizeof(mp_limb_t), sizeof(mp_size_t),
         (size_t)libff::mnt4753_q_limbs);
  compute_gmp_inverse();
  while (true) {
    size_t array_size = fread((void*)&n, sizeof(size_t), 1, inputs);
    if (array_size == 0) break;
    printf("\n Array size = %zu\n", n);
    std::vector<uint8_t*> x;  // managed-memory operands for the GPU
    std::vector<uint8_t*> y;
    std::vector<uint8_t*> z;  // host copies of x for the CPU reference
    for (size_t i = 0; i < n; ++i) {
      uint8_t* ptr = read_mnt_fq_2_gpu(inputs);
      uint8_t* ptr2 = (uint8_t*)calloc(io_bytes_per_elem, sizeof(uint8_t));
      std::memcpy(ptr2, ptr, io_bytes_per_elem * sizeof(uint8_t));
      x.emplace_back(ptr);
      z.emplace_back(ptr2);
    }
    for (size_t i = 0; i < n; ++i) {
      y.emplace_back(read_mnt_fq_2_gpu(inputs));
    }
    // mnt6 elements are read (to advance the file) but otherwise unused here.
    std::vector<uint8_t*> x6;
    std::vector<uint8_t*> y6;
    for (size_t i = 0; i < n; ++i) {
      x6.emplace_back(read_mnt_fq_2(inputs));
    }
    for (size_t i = 0; i < n; ++i) {
      y6.emplace_back(read_mnt_fq_2(inputs));
    }
    int num_threads = io_bytes_per_elem / 8;  // one thread per 64-bit limb
    mul_play(x, y, debug_file);
    start = clock();
    for (size_t i = 0; i < 1; ++i) {
      hipLaunchKernelGGL(( gpu_sum), dim3(1), dim3(num_threads) , 0, 0, (uint64_t*) x[i], (uint64_t*) y[i], io_bytes_per_elem / 8);
    }
    hipDeviceSynchronize();
    end = clock();
    time_iter = ((double)end - start) * 1000.0 / CLOCKS_PER_SEC;
    time_used += time_iter;
    printf("\n GPU Round N, time = %5.4f ms.\n", time_iter);
    start = clock();
    for (size_t i = 0; i < 1; ++i) {
      cpu_sum((uint64_t*)z[i], (uint64_t*)y[i], io_bytes_per_elem / 8);
    }
    end = clock();
    time_iter = ((double)end - start) * 1000.0 / CLOCKS_PER_SEC;
    time_used += time_iter;  // original measured CPU time but never totalled it
    printf("\n CPU Round N, time = %5.4f ms.\n", time_iter);
    for (size_t i = 0; i < 1; ++i) {
      if (check(x[i], z[i], io_bytes_per_elem) != 0) {
        printf("\n Failed at %zu.\n", i);
      }
    }
    std::for_each(x.begin(), x.end(), delete_ptr_gpu());
    x.clear();
    std::for_each(y.begin(), y.end(), delete_ptr_gpu());
    y.clear();
    // z was previously leaked; its buffers come from calloc, so free them.
    for (uint8_t* host_copy : z) {
      free(host_copy);
    }
    z.clear();
    std::for_each(x6.begin(), x6.end(), delete_ptr());
    x6.clear();
    std::for_each(y6.begin(), y6.end(), delete_ptr());
    y6.clear();
    break;
  }
  printf("\n Total time = %5.4f ms.\n", time_used);
  fclose(inputs);
  fclose(debug_file);
  return 0;
}
| 799afdb9206bdf5a4dd15e4e240bdd069a4b25ae.cu | #include <algorithm>
#include <cstdio>
#include <cstdint>
#include <cstring>
#include <cassert>
#include <vector>
#include <time.h>
#include <gmp.h>
#include <libff/algebra/curves/mnt753/mnt4753/mnt4753_pp.hpp>
#include <libff/algebra/curves/mnt753/mnt6753/mnt6753_pp.hpp>
#include "play_mul.cu"
using namespace libff;
const unsigned int bytes_per_elem = 128;
const unsigned int io_bytes_per_elem = 96;
// mnt4_q
uint8_t mnt4_modulus[bytes_per_elem] = {1,128,94,36,222,99,144,94,159,17,221,44,82,84,157,227,240,37,196,154,113,16,136,99,164,84,114,118,233,204,90,104,56,126,83,203,165,13,15,184,157,5,24,242,118,231,23,177,157,247,90,161,217,36,209,153,141,237,160,232,37,185,253,7,115,216,151,108,249,232,183,94,237,175,143,91,80,151,249,183,173,205,226,238,34,144,34,16,17,196,146,45,198,196,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
// mnt6_q
uint8_t mnt6_modulus[bytes_per_elem] = {1,0,0,64,226,118,7,217,79,58,161,15,23,153,160,78,151,87,0,63,188,129,195,214,164,58,153,52,118,249,223,185,54,38,33,41,148,202,235,62,155,169,89,200,40,92,108,178,157,247,90,161,217,36,209,153,141,237,160,232,37,185,253,7,115,216,151,108,249,232,183,94,237,175,143,91,80,151,249,183,173,205,226,238,34,144,34,16,17,196,146,45,198,196,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
void write_mnt4_fq(FILE* output, Fq<mnt4753_pp> x) {
fwrite((void *) x.mont_repr.data, libff::mnt4753_q_limbs * sizeof(mp_size_t), 1, output);
}
void write_mnt6_fq(FILE* output, Fq<mnt6753_pp> x) {
fwrite((void *) x.mont_repr.data, libff::mnt6753_q_limbs * sizeof(mp_size_t), 1, output);
}
void write_mnt4_fq2(FILE* output, Fqe<mnt4753_pp> x) {
write_mnt4_fq(output, x.c0);
write_mnt4_fq(output, x.c1);
}
Fq<mnt4753_pp> read_mnt4_fq(FILE* input) {
Fq<mnt4753_pp> x;
fread((void *) x.mont_repr.data, libff::mnt4753_q_limbs * sizeof(mp_size_t), 1, input);
return x;
}
Fq<mnt6753_pp> read_mnt6_fq(FILE* input) {
Fq<mnt6753_pp> x;
fread((void *) x.mont_repr.data, libff::mnt6753_q_limbs * sizeof(mp_size_t), 1, input);
return x;
}
Fqe<mnt4753_pp> read_mnt4_fq2(FILE* input) {
Fq<mnt4753_pp> c0 = read_mnt4_fq(input);
Fq<mnt4753_pp> c1 = read_mnt4_fq(input);
return Fqe<mnt4753_pp>(c0, c1);
}
struct delete_ptr { // Helper function to ease cleanup of container
template <typename P>
void operator () (P p) {
delete p;
}
};
struct delete_ptr_gpu { // Helper function to ease cleanup of container
template <typename P>
void operator () (P p) {
cudaFree(p);
}
};
const char* input_a = "/home/arunesh/github/snark-challenge/reference-01-field-arithmetic/inputs";
uint8_t* read_mnt_fq_2(FILE* inputs) {
uint8_t* buf = (uint8_t*)calloc(bytes_per_elem, sizeof(uint8_t));
// the input is montgomery representation x * 2^768 whereas cuda-fixnum expects x * 2^1024 so we shift over by (1024-768)/8 bytes
fread((void*)buf, io_bytes_per_elem*sizeof(uint8_t), 1, inputs);
return buf;
}
// Read one io_bytes_per_elem (96-byte) field element from `inputs` into a
// freshly allocated bytes_per_elem (128-byte) managed buffer accessible from
// host and device.  Returns nullptr on allocation failure.  Caller cudaFree's.
uint8_t* read_mnt_fq_2_gpu(FILE* inputs) {
  uint8_t* buf = nullptr;
  // The third parameter of cudaMallocManaged is a flags word, not an element
  // size; the original passed sizeof(uint8_t), which only worked by
  // coincidence (it equals cudaMemAttachGlobal).  Be explicit and check.
  cudaError_t err = cudaMallocManaged(&buf, bytes_per_elem, cudaMemAttachGlobal);
  if (err != cudaSuccess || buf == nullptr) {
    fprintf(stderr, "cudaMallocManaged failed: %s\n", cudaGetErrorString(err));
    return nullptr;
  }
  // Unlike the calloc path in read_mnt_fq_2, managed memory is not
  // zero-initialised.  The input is montgomery representation x * 2^768
  // whereas cuda-fixnum expects x * 2^1024, so bytes 96..127 must be zero.
  memset(buf, 0, bytes_per_elem);
  fread((void*)buf, io_bytes_per_elem * sizeof(uint8_t), 1, inputs);
  return buf;
}
// Element-wise 64-bit addition a[i] += b[i], one thread per limb.
// Expects a 1-D launch; extra threads beyond `num` are masked out by the
// guard below (the original kernel read/wrote out of bounds whenever
// blockDim.x exceeded num).  Debug printf is serialized and slow.
__global__
void gpu_sum(uint64_t* a, uint64_t* b, int num) {
  int thread_id = threadIdx.x;
  if (thread_id >= num) return;  // bounds guard: launch size need not equal num
  printf("\n Gpu sum\n");
  uint64_t res = a[thread_id] + b[thread_id];
  printf("\nthread_id = %d, a[%08lX] + b[%08lX] = %08lX", thread_id, a[thread_id], b[thread_id], res);
  a[thread_id] = res;
}
void cpu_sum(uint64_t* a, uint64_t* b, int num) {
uint64_t res;
printf("\n Cpu sum\n");
for (int i = 0; i < num; i ++) {
res = a[i] + b[i];
printf("\n i= %d a[%08lX] + b[%08lX] = %08lX", i, a[i], b[i], res);
a[i] = res;
}
}
bool check(uint8_t* a, uint8_t* b, int num) {
return memcmp(a, b, num * sizeof(uint8_t));
}
Fq<mnt4753_pp> to_fq(uint8_t* data) {
Fq<mnt4753_pp> x;
memcpy((void *) x.mont_repr.data, data, libff::mnt4753_q_limbs * sizeof(mp_size_t));
return x;
}
void fprint_fq(FILE* stream, Fq<mnt4753_pp> x) {
int size = libff::mnt4753_q_limbs * sizeof(mp_size_t);
uint8_t* array = (uint8_t*) x.mont_repr.data;
for (int i = 0; i < size; i ++) {
fprintf(stream, "%02x", array[i]);
}
fprintf(stream, "\n");
}
void fprint_uint8_array(FILE* stream, uint8_t* array, int size) {
for (int i = 0; i < size; i ++) {
fprintf(stream, "%02x", array[i]);
}
fprintf(stream, "\n");
}
void mul_play(std::vector<uint8_t*> x, std::vector<uint8_t*> y, FILE* debug_log) {
mnt4753_pp::init_public_params();
mnt6753_pp::init_public_params();
std::vector<Fq<mnt4753_pp>> x0;
std::vector<Fq<mnt4753_pp>> x1;
int n = x.size();
for (int i = 0; i < n; i++) {
x0.emplace_back(to_fq(x[i]));
x1.emplace_back(to_fq(y[i]));
Fq<mnt4753_pp> out = x0[i] * x1[i];
if (i < 2) {
fprintf(debug_log, "\n X[%d]:", i);
fprint_fq(debug_log, x0[i]);
fprintf(debug_log, "\n Y[%d]:", i);
fprint_fq(debug_log, x1[i]);
fprintf(debug_log, "\n FQ[%d]:", i);
fprint_fq(debug_log, out);
}
}
std::vector<uint8_t*>* result = compute_mont_mulcuda(x, y, mnt4_modulus, bytes_per_elem);
for (int i = 0; i < 2; i++) {
fprintf(debug_log, "\n x[%d]:", i);
fprint_uint8_array(debug_log, x[i], io_bytes_per_elem);
fprintf(debug_log, "\n y[%d]:", i);
fprint_uint8_array(debug_log, y[i], io_bytes_per_elem);
fprintf(debug_log, "\n GPU[%d]:", i);
fprint_uint8_array(debug_log, result->at(i), io_bytes_per_elem);
}
}
void compute_gmp_inverse_32() {
unsigned long l = 1;
mpz_t n;
mpz_init(n);
mpz_set_str(n, "4294967296", 10);
mpz_t m;
mpz_init(m);
unsigned long int m32;
memcpy((void*)&m32, mnt4_modulus, sizeof(uint32_t));
mpz_set_ui(m, m32);
printf("\n setting m32 to %08X\n", m32);
gmp_printf("\n setting n to %ZX\n", n);
gmp_printf("\n setting n to %Zd\n", n);
mpz_t rop;
mpz_init(rop);
mpz_invert(rop, m, n);
gmp_printf (" mpz %Zd\n", rop);
mpz_sub(rop, n, rop);
gmp_printf (" mpz %Zd\n", rop);
gmp_printf (" mpz %ZX\n", rop);
}
void compute_gmp_inverse() {
unsigned long l = 1;
mpz_t n;
mpz_init(n);
mpz_set_str(n, "18446744073709551616", 10);
mpz_t m;
mpz_init(m);
unsigned long int m32;
memcpy((void*)&m32, mnt4_modulus, sizeof(uint64_t));
mpz_set_ui(m, m32);
printf("\n setting m32 to %08X\n", m32);
gmp_printf("\n setting n to %ZX\n", n);
gmp_printf("\n setting n to %Zd\n", n);
mpz_t rop;
mpz_init(rop);
mpz_invert(rop, m, n);
gmp_printf (" mpz %Zd\n", rop);
mpz_sub(rop, n, rop);
gmp_printf (" mpz %Zd\n", rop);
gmp_printf (" mpz %ZX\n", rop);
}
int main(int argc, char* argv[]) {
printf("\nMain program. argc = %d \n", argc);
// argv[2] = input_a;
auto inputs = fopen(input_a, "r");
auto debug_file = fopen("debug_log", "w");
printf("\n Opening file %s for reading.\n", input_a);
size_t n;
clock_t start, end;
double time_used = 0.0;
double time_iter = 0.0;
fprintf(debug_file, "\n mnt4 modulus:\n");
fprint_uint8_array(debug_file, mnt4_modulus, bytes_per_elem);
printf("\n sieze of mplimb_t = %d, %d, %d", sizeof(mp_limb_t), sizeof(mp_size_t), libff::mnt4753_q_limbs);
compute_gmp_inverse();
while(true) {
size_t array_size = fread((void*) &n, sizeof(size_t), 1, inputs);
if (array_size == 0) break;
printf("\n Array size = %d\n", n);
std::vector<uint8_t*> x;
std::vector<uint8_t*> y;
std::vector<uint8_t*> z;
for (size_t i = 0; i < n; ++i) {
uint8_t* ptr = read_mnt_fq_2_gpu(inputs);
uint8_t* ptr2 = (uint8_t*)calloc(io_bytes_per_elem, sizeof(uint8_t));
std::memcpy(ptr2, ptr, io_bytes_per_elem*sizeof(uint8_t));
x.emplace_back(ptr);
z.emplace_back(ptr2);
}
for (size_t i = 0; i < n; ++i) {
y.emplace_back(read_mnt_fq_2_gpu(inputs));
}
std::vector<uint8_t*> x6;
std::vector<uint8_t*> y6;
for (size_t i = 0; i < n; ++i) {
x6.emplace_back(read_mnt_fq_2(inputs));
}
for (size_t i = 0; i < n; ++i) {
y6.emplace_back(read_mnt_fq_2(inputs));
}
int num_threads = io_bytes_per_elem / 8;
mul_play(x, y, debug_file);
start = clock();
std::vector<uint8_t*>* result;
for (size_t i = 0; i < 1; ++i) {
gpu_sum<<< 1, num_threads >>>((uint64_t*) x[i], (uint64_t*) y[i], io_bytes_per_elem / 8);
}
cudaDeviceSynchronize();
end = clock();
time_iter = ((double) end-start) * 1000.0 / CLOCKS_PER_SEC;
time_used += time_iter;
printf("\n GPU Round N, time = %5.4f ms.\n", time_iter);
start = clock();
for (size_t i = 0; i < 1; ++i) {
cpu_sum((uint64_t*)z[i], (uint64_t*)y[i], io_bytes_per_elem/8);
}
end = clock();
time_iter = ((double) end-start) * 1000.0 / CLOCKS_PER_SEC;
printf("\n CPU Round N, time = %5.4f ms.\n", time_iter);
for (size_t i = 0; i < 1; ++i) {
if (check(x[i], z[i], io_bytes_per_elem) != 0) {
printf("\n Failed at %d.\n", i);
}
}
std::for_each(x.begin(), x.end(), delete_ptr_gpu());
x.clear();
std::for_each(y.begin(), y.end(), delete_ptr_gpu());
y.clear();
std::for_each(x6.begin(), x6.end(), delete_ptr());
x6.clear();
std::for_each(y6.begin(), y6.end(), delete_ptr());
y6.clear();
break;
}
printf("\n Total time = %5.4f ms.\n", time_used);
fclose(inputs);
fclose(debug_file);
}
|
2fb27864b1797fd90d15fb8382da9a90b11e1b74.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// size of work for a thread
#define THR_M ( BLK_M / DIM_X )
#define THR_N ( BLK_N / DIM_Y )
///////////////////////////////////////////////////////////////////////////////////////////////////
#if (version == trans_nn)
#define kernel_name fermi_gemm_kernel_nn
#elif (version == trans_nt)
#define TRANS_B
#define kernel_name fermi_gemm_kernel_nt
#elif (version == trans_nc)
#define TRANS_B
#define CONJ_B
#define kernel_name fermi_gemm_kernel_nc
#elif (version == trans_tn)
#define TRANS_A
#define kernel_name fermi_gemm_kernel_tn
#elif (version == trans_tt)
#define TRANS_A
#define TRANS_B
#define kernel_name fermi_gemm_kernel_tt
#elif (version == trans_tc)
#define TRANS_A
#define TRANS_B
#define CONJ_B
#define kernel_name fermi_gemm_kernel_tc
#elif (version == trans_cn)
#define TRANS_A
#define CONJ_A
#define kernel_name fermi_gemm_kernel_cn
#elif (version == trans_ct)
#define TRANS_A
#define CONJ_A
#define TRANS_B
#define kernel_name fermi_gemm_kernel_ct
#elif (version == trans_cc)
#define TRANS_A
#define CONJ_A
#define TRANS_B
#define CONJ_B
#define kernel_name fermi_gemm_kernel_cc
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
// Tiled GEMM kernel C = alpha*op(A)*op(B) + beta*C (fermi-style).  The macro
// `version` selects which trans/conjugate variant is compiled (TRANS_A/B,
// CONJ_A/B), and `kernel_name` the resulting symbol.  Each thread block
// computes a BLK_M x BLK_N tile of C, staging BLK_K-deep panels of A and B
// through shared memory; each thread accumulates a THR_M x THR_N register
// sub-tile.  DIM_X/DIM_Y is the thread-block shape; DIM_XA/DIM_YA and
// DIM_XB/DIM_YB are the (possibly different) thread shapes used for the
// dev->shmem copies of A and B.  With TEXTURE_1D, A/B are fetched through
// texture offsets instead of raw pointers (see `fetch`).
extern "C" __global__
void kernel_name (int M, int N, int K,
const FloatingPoint_t *A, int LDA, const FloatingPoint_t *B, int LDB,
FloatingPoint_t *C, int LDC,
FloatingPoint_t alpha, FloatingPoint_t beta,
int offsetA, int offsetB)
{
int idx = threadIdx.x; // thread's m dimension
int idy = threadIdx.y; // thread's n dimension
int idt = DIM_X * idy + idx; // thread's global number
int idxA = idt % DIM_XA; // idx within A
int idyA = idt / DIM_XA; // idy within A
int idxB = idt % DIM_XB; // idx within B
int idyB = idt / DIM_XB; // idy within B
int blx = blockIdx.x; // block's m dimension
int bly = blockIdx.y; // block's n dimension
__shared__ FloatingPoint_t sA[BLK_K][BLK_M+1]; // +1 only required if A is transposed
__shared__ FloatingPoint_t sB[BLK_N][BLK_K+1]; // +1 always required
// Registers for the innermost loop
FloatingPoint_t rC[THR_N][THR_M];
FloatingPoint_t rA[THR_M];
FloatingPoint_t rB[THR_N];
// Registers for the dev->shmem copy (double-buffering the next K-panel)
#ifdef TRANS_A
FloatingPoint_t ra[BLK_M/DIM_YA][BLK_K/DIM_XA];
#else
FloatingPoint_t ra[BLK_K/DIM_YA][BLK_M/DIM_XA];
#endif
#ifdef TRANS_B
FloatingPoint_t rb[BLK_K/DIM_YB][BLK_N/DIM_XB];
#else
FloatingPoint_t rb[BLK_N/DIM_YB][BLK_K/DIM_XB];
#endif
// Starting position of this thread's loads within A and B (texture offset or
// raw pointer, depending on TEXTURE_1D).
#ifdef TEXTURE_1D
#ifdef TRANS_A
int coord_A = offsetA + blx*BLK_M*LDA + idyA*LDA+idxA;
#else
int coord_A = offsetA + blx*BLK_M + idyA*LDA+idxA;
#endif
#ifdef TRANS_B
int coord_B = offsetB + bly*BLK_N + idyB*LDB+idxB;
#else
int coord_B = offsetB + bly*BLK_N*LDB + idyB*LDB+idxB;
#endif
#else
#ifdef TRANS_A
FloatingPoint_t *offs_dA = A + blx*BLK_M*LDA + idyA*LDA+idxA;
#else
FloatingPoint_t *offs_dA = A + blx*BLK_M + idyA*LDA+idxA;
#endif
#ifdef TRANS_B
FloatingPoint_t *offs_dB = B + bly*BLK_N + idyB*LDB+idxB;
#else
FloatingPoint_t *offs_dB = B + bly*BLK_N*LDB + idyB*LDB+idxB;
#endif
#endif
int m, n, k, kk;
// Zero C
#pragma unroll
for (n = 0; n < THR_N; n++)
#pragma unroll
for (m = 0; m < THR_M; m++)
rC[n][m] = make_FloatingPoint(0.0, 0.0);
// Load A dev->shmem (first K-panel; transposed variants store swapped)
#ifdef TRANS_A
#pragma unroll
for (n = 0; n < BLK_M; n += DIM_YA)
#pragma unroll
for (m = 0; m < BLK_K; m += DIM_XA)
sA[m+idxA][n+idyA] = fetch(A, m, n);
#else
#pragma unroll
for (n = 0; n < BLK_K; n += DIM_YA)
#pragma unroll
for (m = 0; m < BLK_M; m += DIM_XA)
sA[n+idyA][m+idxA] = fetch(A, m, n);
#endif
// Load B dev->shmem
#ifdef TRANS_B
#pragma unroll
for (n = 0; n < BLK_K; n += DIM_YB)
#pragma unroll
for (m = 0; m < BLK_N; m += DIM_XB)
sB[m+idxB][n+idyB] = fetch(B, m, n);
#else
#pragma unroll
for (n = 0; n < BLK_N; n += DIM_YB)
#pragma unroll
for (m = 0; m < BLK_K; m += DIM_XB)
sB[n+idyB][m+idxB] = fetch(B, m, n);
#endif
__syncthreads();
// Main loop over K panels: while multiplying the current shared-memory
// panel, the next panel is prefetched into registers (software pipelining).
for (kk = 0; kk < K-BLK_K; kk += BLK_K)
{
// Advance load cursors to the next K-panel.
#ifdef TEXTURE_1D
#ifdef TRANS_A
coord_A += BLK_K;
#else
coord_A += BLK_K*LDA;
#endif
#ifdef TRANS_B
coord_B += BLK_K*LDB;
#else
coord_B += BLK_K;
#endif
#else
#ifdef TRANS_A
offs_dA += BLK_K;
#else
offs_dA += BLK_K*LDA;
#endif
#ifdef TRANS_B
offs_dB += BLK_K*LDB;
#else
offs_dB += BLK_K;
#endif
#endif
// Load A dev->regs (prefetch next panel)
#ifdef TRANS_A
#pragma unroll
for (n = 0; n < BLK_M/DIM_YA; n++)
#pragma unroll
for (m = 0; m < BLK_K/DIM_XA; m++)
ra[n][m] = fetch(A, m*DIM_XA, n*DIM_YA);
#else
#pragma unroll
for (n = 0; n < BLK_K/DIM_YA; n++)
#pragma unroll
for (m = 0; m < BLK_M/DIM_XA; m++)
ra[n][m] = fetch(A, m*DIM_XA, n*DIM_YA);
#endif
// Load B dev->regs (prefetch next panel)
#ifdef TRANS_B
#pragma unroll
for (n = 0; n < BLK_K/DIM_YB; n++)
#pragma unroll
for (m = 0; m < BLK_N/DIM_XB; m++)
rb[n][m] = fetch(B, m*DIM_XB, n*DIM_YB);
#else
#pragma unroll
for (n = 0; n < BLK_N/DIM_YB; n++)
#pragma unroll
for (m = 0; m < BLK_K/DIM_XB; m++)
rb[n][m] = fetch(B, m*DIM_XB, n*DIM_YB);
#endif
// Multiply the current shared-memory panel into the register tile.
#pragma unroll
for (k = 0; k < BLK_K; k++)
{
// Load A shmem->regs
#pragma unroll
for (m = 0; m < THR_M; m++)
rA[m] = sA[k][m*DIM_X+idx];
// Load B shmem->regs
#pragma unroll
for (n = 0; n < THR_N; n++)
rB[n] = sB[n*DIM_Y+idy][k];
// Compute (conjugation applied per CONJ_A/CONJ_B variant)
#pragma unroll
for (n = 0; n < THR_N; n++)
#pragma unroll
for (m = 0; m < THR_M; m++)
#ifdef CONJ_A
#ifdef CONJ_B
fma(conj(rA[m]), conj(rB[n]), rC[n][m]);
#else
fma(conj(rA[m]), rB[n], rC[n][m]);
#endif
#else
#ifdef CONJ_B
fma(rA[m], conj(rB[n]), rC[n][m]);
#else
fma(rA[m], rB[n], rC[n][m]);
#endif
#endif
}
// Barrier before overwriting the shared panels with the prefetched data.
__syncthreads();
// Load A regs->shmem
#ifdef TRANS_A
#pragma unroll
for (n = 0; n < BLK_M/DIM_YA; n++)
#pragma unroll
for (m = 0; m < BLK_K/DIM_XA; m++)
sA[m*DIM_XA+idxA][n*DIM_YA+idyA] = ra[n][m];
#else
#pragma unroll
for (n = 0; n < BLK_K/DIM_YA; n++)
#pragma unroll
for (m = 0; m < BLK_M/DIM_XA; m++)
sA[n*DIM_YA+idyA][m*DIM_XA+idxA] = ra[n][m];
#endif
// Load B regs->shmem
#ifdef TRANS_B
#pragma unroll
for (n = 0; n < BLK_K/DIM_YB; n++)
#pragma unroll
for (m = 0; m < BLK_N/DIM_XB; m++)
sB[m*DIM_XB+idxB][n*DIM_YB+idyB] = rb[n][m];
#else
#pragma unroll
for (n = 0; n < BLK_N/DIM_YB; n++)
#pragma unroll
for (m = 0; m < BLK_K/DIM_XB; m++)
sB[n*DIM_YB+idyB][m*DIM_XB+idxB] = rb[n][m];
#endif
__syncthreads();
}
// Multiply the final (epilogue) K-panel — no further prefetch needed.
#pragma unroll
for (k = 0; k < BLK_K; k++)
{
// Load A shmem->regs
#pragma unroll
for (m = 0; m < THR_M; m++)
rA[m] = sA[k][m*DIM_X+idx];
// Load B shmem->regs
#pragma unroll
for (n = 0; n < THR_N; n++)
rB[n] = sB[n*DIM_Y+idy][k];
// Compute
#pragma unroll
for (n = 0; n < THR_N; n++)
#pragma unroll
for (m = 0; m < THR_M; m++)
#ifdef CONJ_A
#ifdef CONJ_B
fma(conj(rA[m]), conj(rB[n]), rC[n][m]);
#else
fma(conj(rA[m]), rB[n], rC[n][m]);
#endif
#else
#ifdef CONJ_B
fma(rA[m], conj(rB[n]), rC[n][m]);
#else
fma(rA[m], rB[n], rC[n][m]);
#endif
#endif
}
// Store C regs->dev with the alpha/beta update; bounds-checked so partial
// edge tiles (M, N not multiples of BLK_M/BLK_N) stay in range.
#pragma unroll
for (n = 0; n < THR_N; n++) {
int coord_dCn = bly*BLK_N + n*DIM_Y+idy;
#pragma unroll
for (m = 0; m < THR_M; m++) {
int coord_dCm = blx*BLK_M + m*DIM_X+idx;
if (coord_dCm < M && coord_dCn < N) {
int offsC = coord_dCn*LDC + coord_dCm;
FloatingPoint_t &regC = rC[n][m];
FloatingPoint_t &memC = C[offsC];
memC = add(mul(alpha, regC), mul(beta, memC));
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#undef TRANS_A
#undef TRANS_B
#undef CONJ_A
#undef CONJ_B
/*
#undef BLK_M
#undef BLK_N
#undef BLK_K
#undef DIM_X
#undef DIM_Y
#undef DIM_XA
#undef DIM_YA
#undef DIM_XB
#undef DIM_YB
*/
#undef version
#undef THR_M
#undef THR_N
#undef kernel_name
| 2fb27864b1797fd90d15fb8382da9a90b11e1b74.cu | ///////////////////////////////////////////////////////////////////////////////////////////////////
// size of work for a thread
#define THR_M ( BLK_M / DIM_X )
#define THR_N ( BLK_N / DIM_Y )
///////////////////////////////////////////////////////////////////////////////////////////////////
#if (version == trans_nn)
#define kernel_name fermi_gemm_kernel_nn
#elif (version == trans_nt)
#define TRANS_B
#define kernel_name fermi_gemm_kernel_nt
#elif (version == trans_nc)
#define TRANS_B
#define CONJ_B
#define kernel_name fermi_gemm_kernel_nc
#elif (version == trans_tn)
#define TRANS_A
#define kernel_name fermi_gemm_kernel_tn
#elif (version == trans_tt)
#define TRANS_A
#define TRANS_B
#define kernel_name fermi_gemm_kernel_tt
#elif (version == trans_tc)
#define TRANS_A
#define TRANS_B
#define CONJ_B
#define kernel_name fermi_gemm_kernel_tc
#elif (version == trans_cn)
#define TRANS_A
#define CONJ_A
#define kernel_name fermi_gemm_kernel_cn
#elif (version == trans_ct)
#define TRANS_A
#define CONJ_A
#define TRANS_B
#define kernel_name fermi_gemm_kernel_ct
#elif (version == trans_cc)
#define TRANS_A
#define CONJ_A
#define TRANS_B
#define CONJ_B
#define kernel_name fermi_gemm_kernel_cc
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
extern "C" __global__
void kernel_name (int M, int N, int K,
const FloatingPoint_t *A, int LDA, const FloatingPoint_t *B, int LDB,
FloatingPoint_t *C, int LDC,
FloatingPoint_t alpha, FloatingPoint_t beta,
int offsetA, int offsetB)
{
int idx = threadIdx.x; // thread's m dimension
int idy = threadIdx.y; // thread's n dimension
int idt = DIM_X * idy + idx; // thread's global number
int idxA = idt % DIM_XA; // idx within A
int idyA = idt / DIM_XA; // idy within A
int idxB = idt % DIM_XB; // idx within B
int idyB = idt / DIM_XB; // idy within B
int blx = blockIdx.x; // block's m dimension
int bly = blockIdx.y; // block's n dimension
__shared__ FloatingPoint_t sA[BLK_K][BLK_M+1]; // +1 only required if A is transposed
__shared__ FloatingPoint_t sB[BLK_N][BLK_K+1]; // +1 always required
// Registers for the innermost loop
FloatingPoint_t rC[THR_N][THR_M];
FloatingPoint_t rA[THR_M];
FloatingPoint_t rB[THR_N];
// Registers for the dev->shmem copy
#ifdef TRANS_A
FloatingPoint_t ra[BLK_M/DIM_YA][BLK_K/DIM_XA];
#else
FloatingPoint_t ra[BLK_K/DIM_YA][BLK_M/DIM_XA];
#endif
#ifdef TRANS_B
FloatingPoint_t rb[BLK_K/DIM_YB][BLK_N/DIM_XB];
#else
FloatingPoint_t rb[BLK_N/DIM_YB][BLK_K/DIM_XB];
#endif
#ifdef TEXTURE_1D
#ifdef TRANS_A
int coord_A = offsetA + blx*BLK_M*LDA + idyA*LDA+idxA;
#else
int coord_A = offsetA + blx*BLK_M + idyA*LDA+idxA;
#endif
#ifdef TRANS_B
int coord_B = offsetB + bly*BLK_N + idyB*LDB+idxB;
#else
int coord_B = offsetB + bly*BLK_N*LDB + idyB*LDB+idxB;
#endif
#else
#ifdef TRANS_A
FloatingPoint_t *offs_dA = A + blx*BLK_M*LDA + idyA*LDA+idxA;
#else
FloatingPoint_t *offs_dA = A + blx*BLK_M + idyA*LDA+idxA;
#endif
#ifdef TRANS_B
FloatingPoint_t *offs_dB = B + bly*BLK_N + idyB*LDB+idxB;
#else
FloatingPoint_t *offs_dB = B + bly*BLK_N*LDB + idyB*LDB+idxB;
#endif
#endif
int m, n, k, kk;
// Zero C
#pragma unroll
for (n = 0; n < THR_N; n++)
#pragma unroll
for (m = 0; m < THR_M; m++)
rC[n][m] = make_FloatingPoint(0.0, 0.0);
// Load A dev->shmem
#ifdef TRANS_A
#pragma unroll
for (n = 0; n < BLK_M; n += DIM_YA)
#pragma unroll
for (m = 0; m < BLK_K; m += DIM_XA)
sA[m+idxA][n+idyA] = fetch(A, m, n);
#else
#pragma unroll
for (n = 0; n < BLK_K; n += DIM_YA)
#pragma unroll
for (m = 0; m < BLK_M; m += DIM_XA)
sA[n+idyA][m+idxA] = fetch(A, m, n);
#endif
// Load B dev->shmem
#ifdef TRANS_B
#pragma unroll
for (n = 0; n < BLK_K; n += DIM_YB)
#pragma unroll
for (m = 0; m < BLK_N; m += DIM_XB)
sB[m+idxB][n+idyB] = fetch(B, m, n);
#else
#pragma unroll
for (n = 0; n < BLK_N; n += DIM_YB)
#pragma unroll
for (m = 0; m < BLK_K; m += DIM_XB)
sB[n+idyB][m+idxB] = fetch(B, m, n);
#endif
__syncthreads();
for (kk = 0; kk < K-BLK_K; kk += BLK_K)
{
#ifdef TEXTURE_1D
#ifdef TRANS_A
coord_A += BLK_K;
#else
coord_A += BLK_K*LDA;
#endif
#ifdef TRANS_B
coord_B += BLK_K*LDB;
#else
coord_B += BLK_K;
#endif
#else
#ifdef TRANS_A
offs_dA += BLK_K;
#else
offs_dA += BLK_K*LDA;
#endif
#ifdef TRANS_B
offs_dB += BLK_K*LDB;
#else
offs_dB += BLK_K;
#endif
#endif
// Load A dev->regs
#ifdef TRANS_A
#pragma unroll
for (n = 0; n < BLK_M/DIM_YA; n++)
#pragma unroll
for (m = 0; m < BLK_K/DIM_XA; m++)
ra[n][m] = fetch(A, m*DIM_XA, n*DIM_YA);
#else
#pragma unroll
for (n = 0; n < BLK_K/DIM_YA; n++)
#pragma unroll
for (m = 0; m < BLK_M/DIM_XA; m++)
ra[n][m] = fetch(A, m*DIM_XA, n*DIM_YA);
#endif
// Load B dev->regs
#ifdef TRANS_B
#pragma unroll
for (n = 0; n < BLK_K/DIM_YB; n++)
#pragma unroll
for (m = 0; m < BLK_N/DIM_XB; m++)
rb[n][m] = fetch(B, m*DIM_XB, n*DIM_YB);
#else
#pragma unroll
for (n = 0; n < BLK_N/DIM_YB; n++)
#pragma unroll
for (m = 0; m < BLK_K/DIM_XB; m++)
rb[n][m] = fetch(B, m*DIM_XB, n*DIM_YB);
#endif
// Multiply
#pragma unroll
for (k = 0; k < BLK_K; k++)
{
// Load A shmem->regs
#pragma unroll
for (m = 0; m < THR_M; m++)
rA[m] = sA[k][m*DIM_X+idx];
// Load B shmem->regs
#pragma unroll
for (n = 0; n < THR_N; n++)
rB[n] = sB[n*DIM_Y+idy][k];
// Compute
#pragma unroll
for (n = 0; n < THR_N; n++)
#pragma unroll
for (m = 0; m < THR_M; m++)
#ifdef CONJ_A
#ifdef CONJ_B
fma(conj(rA[m]), conj(rB[n]), rC[n][m]);
#else
fma(conj(rA[m]), rB[n], rC[n][m]);
#endif
#else
#ifdef CONJ_B
fma(rA[m], conj(rB[n]), rC[n][m]);
#else
fma(rA[m], rB[n], rC[n][m]);
#endif
#endif
}
__syncthreads();
// Load A regs->shmem
#ifdef TRANS_A
#pragma unroll
for (n = 0; n < BLK_M/DIM_YA; n++)
#pragma unroll
for (m = 0; m < BLK_K/DIM_XA; m++)
sA[m*DIM_XA+idxA][n*DIM_YA+idyA] = ra[n][m];
#else
#pragma unroll
for (n = 0; n < BLK_K/DIM_YA; n++)
#pragma unroll
for (m = 0; m < BLK_M/DIM_XA; m++)
sA[n*DIM_YA+idyA][m*DIM_XA+idxA] = ra[n][m];
#endif
// Load B regs->shmem
#ifdef TRANS_B
#pragma unroll
for (n = 0; n < BLK_K/DIM_YB; n++)
#pragma unroll
for (m = 0; m < BLK_N/DIM_XB; m++)
sB[m*DIM_XB+idxB][n*DIM_YB+idyB] = rb[n][m];
#else
#pragma unroll
for (n = 0; n < BLK_N/DIM_YB; n++)
#pragma unroll
for (m = 0; m < BLK_K/DIM_XB; m++)
sB[n*DIM_YB+idyB][m*DIM_XB+idxB] = rb[n][m];
#endif
__syncthreads();
}
// Multiply
#pragma unroll
for (k = 0; k < BLK_K; k++)
{
// Load A shmem->regs
#pragma unroll
for (m = 0; m < THR_M; m++)
rA[m] = sA[k][m*DIM_X+idx];
// Load B shmem->regs
#pragma unroll
for (n = 0; n < THR_N; n++)
rB[n] = sB[n*DIM_Y+idy][k];
// Compute
#pragma unroll
for (n = 0; n < THR_N; n++)
#pragma unroll
for (m = 0; m < THR_M; m++)
#ifdef CONJ_A
#ifdef CONJ_B
fma(conj(rA[m]), conj(rB[n]), rC[n][m]);
#else
fma(conj(rA[m]), rB[n], rC[n][m]);
#endif
#else
#ifdef CONJ_B
fma(rA[m], conj(rB[n]), rC[n][m]);
#else
fma(rA[m], rB[n], rC[n][m]);
#endif
#endif
}
// Store C regs->dev
#pragma unroll
for (n = 0; n < THR_N; n++) {
int coord_dCn = bly*BLK_N + n*DIM_Y+idy;
#pragma unroll
for (m = 0; m < THR_M; m++) {
int coord_dCm = blx*BLK_M + m*DIM_X+idx;
if (coord_dCm < M && coord_dCn < N) {
int offsC = coord_dCn*LDC + coord_dCm;
FloatingPoint_t ®C = rC[n][m];
FloatingPoint_t &memC = C[offsC];
memC = add(mul(alpha, regC), mul(beta, memC));
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#undef TRANS_A
#undef TRANS_B
#undef CONJ_A
#undef CONJ_B
/*
#undef BLK_M
#undef BLK_N
#undef BLK_K
#undef DIM_X
#undef DIM_Y
#undef DIM_XA
#undef DIM_YA
#undef DIM_XB
#undef DIM_YB
*/
#undef version
#undef THR_M
#undef THR_N
#undef kernel_name
|
9811a52412ce4cbbc0940523ec81bf8495542e43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>
#include <stdio.h>
#include <multfly_ref.h>
#include <multfly_device.cuh>
#define CUDA_CHECK(code) do { \
if (code != hipSuccess) { \
throw std::runtime_error(__FILE__ ":" + std::to_string(__LINE__) + " Cuda error" + std::to_string(code)); \
} \
} while (0)
#define TEST_KEY_NAME "multfly_test"
// Kernel: initialise the device-side multfly key from the compile-time
// literal TEST_KEY_NAME plus the given seed/counter. main() launches this
// with a single thread (<<<1,1>>>); *key is the only output.
__global__ void init_key(multfly_key *key, uint64_t global_seed, uint64_t global_ctr) {
  multfly_device_initkey_fromliteral(key, TEST_KEY_NAME, global_seed, global_ctr);
}
// Kernel: each thread writes two 32-bit generator outputs for its global
// thread index (multfly_device_gen32 with counter 0 and 1) into
// result0[tid] / result1[tid].
// NOTE(review): no bounds check — assumes the launch covers exactly as many
// threads as result slots; confirm against the caller's grid size.
__global__ void generate_u32(const multfly_key *key, uint32_t *result0, uint32_t *result1) {
  uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  result0[tid] = multfly_device_gen32(key, tid, 0);
  result1[tid] = multfly_device_gen32(key, tid, 1);
}
// Self-test: compares device-side multfly key initialisation and 32-bit
// generation against the host reference implementation.
// Returns 0 on success, 1 on any mismatch; throws (via CUDA_CHECK) on a
// HIP runtime error.
int main() {
  int ng = 0;  // becomes 1 once any comparison fails
  CUDA_CHECK(hipSetDevice(0));
  uint64_t global_seed = 0;
  uint64_t global_ctr = 0;
  multfly_key *d_key;
  CUDA_CHECK(hipMalloc(&d_key, sizeof(multfly_key)));
  hipLaunchKernelGGL(( init_key), dim3(1), dim3(1), 0, 0, d_key, global_seed, global_ctr);
  // A kernel launch returns no status; query launch errors explicitly.
  CUDA_CHECK(hipGetLastError());
  CUDA_CHECK(hipDeviceSynchronize());
  multfly_key h_key, h_key_test;
  CUDA_CHECK(hipMemcpy(&h_key, d_key, sizeof(multfly_key), hipMemcpyDeviceToHost));
  multfly_initkey_fromliteral(&h_key_test, TEST_KEY_NAME, global_seed, global_ctr);
  for (int i = 0; i < 8; i++) {
    if (h_key.v_[i] != h_key_test.v_[i]) {
      ng = 1;
      std::cout << "init fail" << std::endl;
      break;
    }
  }
  int len = 8192;
  int n_threads = 128;
  int n_blocks = len / n_threads;  // len is an exact multiple of n_threads
  uint32_t *d_result0;
  uint32_t *d_result1;
  CUDA_CHECK(hipMalloc(&d_result0, sizeof(uint32_t) * len));
  CUDA_CHECK(hipMalloc(&d_result1, sizeof(uint32_t) * len));
  hipLaunchKernelGGL(( generate_u32), dim3(n_blocks), dim3(n_threads), 0, 0, d_key, d_result0, d_result1);
  CUDA_CHECK(hipGetLastError());
  CUDA_CHECK(hipDeviceSynchronize());
  std::vector<uint32_t> h_result0(len);
  std::vector<uint32_t> h_result1(len);
  uint32_t h_result_test0[4];
  uint32_t h_result_test1[4];
  CUDA_CHECK(hipMemcpy(h_result0.data(), d_result0, sizeof(uint32_t) * len, hipMemcpyDeviceToHost));
  CUDA_CHECK(hipMemcpy(h_result1.data(), d_result1, sizeof(uint32_t) * len, hipMemcpyDeviceToHost));
  // The host reference emits 4 values per call, so compare in blocks of 4.
  for (int i = 0; i < len; i += 4) {
    multfly_gen32(&h_key_test, i >> 2, 0, h_result_test0);
    multfly_gen32(&h_key_test, i >> 2, 1, h_result_test1);
    for (int j = 0; j < 4; j++) {
      if (h_result0[i + j] != h_result_test0[j]) {
        ng = 1;
        std::cout << "uint32 result0 fail" << std::endl;
        i = len;  // also terminate the outer loop
        break;
      }
      if (h_result1[i + j] != h_result_test1[j]) {
        ng = 1;
        std::cout << "uint32 result1 fail" << std::endl;
        i = len;
        break;
      }
    }
  }
  // Release device allocations (previously leaked until process exit).
  CUDA_CHECK(hipFree(d_result1));
  CUDA_CHECK(hipFree(d_result0));
  CUDA_CHECK(hipFree(d_key));
  if (!ng) {
    std::cout << "ok" << std::endl;
  }
  return ng;
}
| 9811a52412ce4cbbc0940523ec81bf8495542e43.cu | #include <iostream>
#include <stdexcept>
#include <string>
#include <vector>
#include <stdio.h>
#include <multfly_ref.h>
#include <multfly_device.cuh>
#define CUDA_CHECK(code) do { \
if (code != cudaSuccess) { \
throw std::runtime_error(__FILE__ ":" + std::to_string(__LINE__) + " Cuda error" + std::to_string(code)); \
} \
} while (0)
#define TEST_KEY_NAME "multfly_test"
// Kernel: initialise the device-side multfly key from the compile-time
// literal TEST_KEY_NAME plus the given seed/counter. main() launches this
// with a single thread (<<<1,1>>>); *key is the only output.
__global__ void init_key(multfly_key *key, uint64_t global_seed, uint64_t global_ctr) {
  multfly_device_initkey_fromliteral(key, TEST_KEY_NAME, global_seed, global_ctr);
}
// Kernel: each thread writes two 32-bit generator outputs for its global
// thread index (multfly_device_gen32 with counter 0 and 1) into
// result0[tid] / result1[tid].
// NOTE(review): no bounds check — assumes the launch covers exactly as many
// threads as result slots; confirm against the caller's grid size.
__global__ void generate_u32(const multfly_key *key, uint32_t *result0, uint32_t *result1) {
  uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  result0[tid] = multfly_device_gen32(key, tid, 0);
  result1[tid] = multfly_device_gen32(key, tid, 1);
}
// Self-test: compares device-side multfly key initialisation and 32-bit
// generation against the host reference implementation.
// Returns 0 on success, 1 on any mismatch; throws (via CUDA_CHECK) on a
// CUDA runtime error.
int main() {
  int ng = 0;  // becomes 1 once any comparison fails
  CUDA_CHECK(cudaSetDevice(0));
  uint64_t global_seed = 0;
  uint64_t global_ctr = 0;
  multfly_key *d_key;
  CUDA_CHECK(cudaMalloc(&d_key, sizeof(multfly_key)));
  init_key<<<1, 1>>>(d_key, global_seed, global_ctr);
  // A kernel launch returns no status; query launch errors explicitly.
  CUDA_CHECK(cudaGetLastError());
  CUDA_CHECK(cudaDeviceSynchronize());
  multfly_key h_key, h_key_test;
  CUDA_CHECK(cudaMemcpy(&h_key, d_key, sizeof(multfly_key), cudaMemcpyDeviceToHost));
  multfly_initkey_fromliteral(&h_key_test, TEST_KEY_NAME, global_seed, global_ctr);
  for (int i = 0; i < 8; i++) {
    if (h_key.v_[i] != h_key_test.v_[i]) {
      ng = 1;
      std::cout << "init fail" << std::endl;
      break;
    }
  }
  int len = 8192;
  int n_threads = 128;
  int n_blocks = len / n_threads;  // len is an exact multiple of n_threads
  uint32_t *d_result0;
  uint32_t *d_result1;
  CUDA_CHECK(cudaMalloc(&d_result0, sizeof(uint32_t) * len));
  CUDA_CHECK(cudaMalloc(&d_result1, sizeof(uint32_t) * len));
  generate_u32<<<n_blocks, n_threads>>>(d_key, d_result0, d_result1);
  CUDA_CHECK(cudaGetLastError());
  CUDA_CHECK(cudaDeviceSynchronize());
  std::vector<uint32_t> h_result0(len);
  std::vector<uint32_t> h_result1(len);
  uint32_t h_result_test0[4];
  uint32_t h_result_test1[4];
  CUDA_CHECK(cudaMemcpy(h_result0.data(), d_result0, sizeof(uint32_t) * len, cudaMemcpyDeviceToHost));
  CUDA_CHECK(cudaMemcpy(h_result1.data(), d_result1, sizeof(uint32_t) * len, cudaMemcpyDeviceToHost));
  // The host reference emits 4 values per call, so compare in blocks of 4.
  for (int i = 0; i < len; i += 4) {
    multfly_gen32(&h_key_test, i >> 2, 0, h_result_test0);
    multfly_gen32(&h_key_test, i >> 2, 1, h_result_test1);
    for (int j = 0; j < 4; j++) {
      if (h_result0[i + j] != h_result_test0[j]) {
        ng = 1;
        std::cout << "uint32 result0 fail" << std::endl;
        i = len;  // also terminate the outer loop
        break;
      }
      if (h_result1[i + j] != h_result_test1[j]) {
        ng = 1;
        std::cout << "uint32 result1 fail" << std::endl;
        i = len;
        break;
      }
    }
  }
  // Release device allocations (previously leaked until process exit).
  CUDA_CHECK(cudaFree(d_result1));
  CUDA_CHECK(cudaFree(d_result0));
  CUDA_CHECK(cudaFree(d_key));
  if (!ng) {
    std::cout << "ok" << std::endl;
  }
  return ng;
}
|
e718635555ff4719f07180bd2eb924bea17e42c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "parrots_cuda_helper.hpp"
#include "tin_shift_cuda_kernel.cuh"
// Launcher for the forward TIN-shift kernel.
// Reads the 4-D layout from `input` as (dim0=batch, dim1=time, dim2=channels,
// dim3=spatial); `shift` holds int offsets with shift.dim(1) groups, so each
// group spans channels / group_size channels. One kernel thread is scheduled
// per (batch, spatial, channel) element.
void TINShiftForwardCUDAKernelLauncher(const DArrayLite input,
                                       const DArrayLite shift,
                                       DArrayLite output, hipStream_t stream) {
  int output_size = output.size();
  int batch_size = input.dim(0);
  int t_size = input.dim(1);    // temporal length
  int channels = input.dim(2);
  int hw_size = input.dim(3);   // flattened spatial extent
  int group_size = shift.dim(1);
  int group_channel = channels / group_size;  // channels per shift group
  int num_kernels = batch_size * hw_size * channels;
  // Dispatch the kernel on input's element type (floating types and half).
  PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
      input.elemType().prim(), ([&] {
        hipLaunchKernelGGL(( tin_shift_forward_cuda_kernel<scalar_t>)
            , dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0, stream,
            output_size, input.ptr<scalar_t>(), shift.ptr<int>(),
            output.ptr<scalar_t>(), batch_size, channels, t_size, hw_size,
            group_size, group_channel);
      }));
  // Surface asynchronous launch errors.
  PARROTS_CUDA_CHECK(hipGetLastError());
}
// Launcher for the backward TIN-shift kernel: scatters `grad_output` back
// into `grad_input` using the same group shift offsets as the forward pass.
// Geometry is read from grad_output as (batch, time, channels, spatial).
void TINShiftBackwardCUDAKernelLauncher(const DArrayLite grad_output,
                                        const DArrayLite shift,
                                        DArrayLite grad_input,
                                        hipStream_t stream) {
  int output_size = grad_output.size();
  int batch_size = grad_output.dim(0);
  int t_size = grad_output.dim(1);    // temporal length
  int channels = grad_output.dim(2);
  int hw_size = grad_output.dim(3);   // flattened spatial extent
  int group_size = shift.dim(1);
  int group_channel = channels / group_size;  // channels per shift group
  int num_kernels = batch_size * hw_size * channels;
  // Dispatch the kernel on grad_output's element type.
  PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
      grad_output.elemType().prim(), ([&] {
        hipLaunchKernelGGL(( tin_shift_backward_cuda_kernel<scalar_t>)
            , dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0, stream,
            output_size, grad_output.ptr<scalar_t>(), shift.ptr<int>(),
            grad_input.ptr<scalar_t>(), batch_size, channels, t_size,
            hw_size, group_size, group_channel);
      }));
  // Surface asynchronous launch errors.
  PARROTS_CUDA_CHECK(hipGetLastError());
}
| e718635555ff4719f07180bd2eb924bea17e42c9.cu | #include "parrots_cuda_helper.hpp"
#include "tin_shift_cuda_kernel.cuh"
// Launcher for the forward TIN-shift kernel.
// Reads the 4-D layout from `input` as (dim0=batch, dim1=time, dim2=channels,
// dim3=spatial); `shift` holds int offsets with shift.dim(1) groups, so each
// group spans channels / group_size channels. One kernel thread is scheduled
// per (batch, spatial, channel) element.
void TINShiftForwardCUDAKernelLauncher(const DArrayLite input,
                                       const DArrayLite shift,
                                       DArrayLite output, cudaStream_t stream) {
  int output_size = output.size();
  int batch_size = input.dim(0);
  int t_size = input.dim(1);    // temporal length
  int channels = input.dim(2);
  int hw_size = input.dim(3);   // flattened spatial extent
  int group_size = shift.dim(1);
  int group_channel = channels / group_size;  // channels per shift group
  int num_kernels = batch_size * hw_size * channels;
  // Dispatch the kernel on input's element type (floating types and half).
  PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
      input.elemType().prim(), ([&] {
        tin_shift_forward_cuda_kernel<scalar_t>
            <<<GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, stream>>>(
                output_size, input.ptr<scalar_t>(), shift.ptr<int>(),
                output.ptr<scalar_t>(), batch_size, channels, t_size, hw_size,
                group_size, group_channel);
      }));
  // Surface asynchronous launch errors.
  PARROTS_CUDA_CHECK(cudaGetLastError());
}
// Launcher for the backward TIN-shift kernel: scatters `grad_output` back
// into `grad_input` using the same group shift offsets as the forward pass.
// Geometry is read from grad_output as (batch, time, channels, spatial).
void TINShiftBackwardCUDAKernelLauncher(const DArrayLite grad_output,
                                        const DArrayLite shift,
                                        DArrayLite grad_input,
                                        cudaStream_t stream) {
  int output_size = grad_output.size();
  int batch_size = grad_output.dim(0);
  int t_size = grad_output.dim(1);    // temporal length
  int channels = grad_output.dim(2);
  int hw_size = grad_output.dim(3);   // flattened spatial extent
  int group_size = shift.dim(1);
  int group_channel = channels / group_size;  // channels per shift group
  int num_kernels = batch_size * hw_size * channels;
  // Dispatch the kernel on grad_output's element type.
  PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(
      grad_output.elemType().prim(), ([&] {
        tin_shift_backward_cuda_kernel<scalar_t>
            <<<GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, stream>>>(
                output_size, grad_output.ptr<scalar_t>(), shift.ptr<int>(),
                grad_input.ptr<scalar_t>(), batch_size, channels, t_size,
                hw_size, group_size, group_channel);
      }));
  // Surface asynchronous launch errors.
  PARROTS_CUDA_CHECK(cudaGetLastError());
}
|
083c92d4d0be42be41af970a9d7503541d3f5bbb.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2020 by Contributors
* \file device_dmatrix.cu
* \brief Device-memory version of DMatrix.
*/
#include <thrust/execution_policy.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <xgboost/base.h>
#include <xgboost/data.h>
#include <memory>
#include <utility>
#include "../common/hist_util.h"
#include "adapter.h"
#include "device_adapter_hip.cuh"
#include "ellpack_page.cuh"
#include "device_dmatrix.h"
namespace xgboost {
namespace data {
// Returns maximum row length
// Counts the valid (non-missing) elements of each row into `offset` and
// returns the maximum per-row count, i.e. the required ellpack row stride.
// `offset` is accumulated with atomicAdd, so it must start zeroed.
template <typename AdapterBatchT>
size_t GetRowCounts(const AdapterBatchT& batch, common::Span<size_t> offset,
                    int device_idx, float missing) {
  IsValidFunctor is_valid(missing);
  // Count elements per row
  dh::LaunchN(device_idx, batch.Size(), [=] __device__(size_t idx) {
    auto element = batch.GetElement(idx);
    if (is_valid(element)) {
      atomicAdd(reinterpret_cast<unsigned long long*>(  // NOLINT
                    &offset[element.row_idx]),
                static_cast<unsigned long long>(1));  // NOLINT
    }
  });
  dh::XGBCachingDeviceAllocator<char> alloc;
  // Reduce with maximum to find the longest row.
  size_t row_stride = thrust::reduce(
      thrust::hip::par(alloc), thrust::device_pointer_cast(offset.data()),
      thrust::device_pointer_cast(offset.data()) + offset.size(), size_t(0),
      thrust::maximum<size_t>());
  return row_stride;
}
// Output functor for the segmented inclusive scan in CopyDataRowMajor: for
// every valid input element it writes the element's bin index into the
// compressed ellpack buffer at row_stride * row + (valid count so far - 1).
template <typename AdapterBatchT>
struct WriteCompressedEllpackFunctor {
  WriteCompressedEllpackFunctor(common::CompressedByteT* buffer,
                                const common::CompressedBufferWriter& writer,
                                AdapterBatchT batch,
                                EllpackDeviceAccessor accessor,
                                const IsValidFunctor& is_valid)
      : d_buffer(buffer),
        writer(writer),
        batch(std::move(batch)),
        accessor(std::move(accessor)),
        is_valid(is_valid) {}
  common::CompressedByteT* d_buffer;      // destination compressed buffer
  common::CompressedBufferWriter writer;  // compressed-symbol writer
  AdapterBatchT batch;                    // source elements
  EllpackDeviceAccessor accessor;         // bin search + matrix geometry
  IsValidFunctor is_valid;                // missing-value predicate
  // Tuple = (row index, scanned valid count, index into the input batch).
  using Tuple = thrust::tuple<size_t, size_t, size_t>;
  __device__ size_t operator()(Tuple out) {
    auto e = batch.GetElement(out.get<2>());
    if (is_valid(e)) {
      // -1 because the scan is inclusive
      size_t output_position =
          accessor.row_stride * e.row_idx + out.get<1>() - 1;
      auto bin_idx = accessor.SearchBin(e.value, e.column_idx);
      writer.AtomicWriteSymbol(d_buffer, bin_idx, output_position);
    }
    // Return value is discarded (output iterator is a discard_iterator).
    return 0;
  }
};
// Here the data is already correctly ordered and simply needs to be compacted
// to remove missing data
// Copies the valid elements of a row-major adapter batch into `dst`'s
// compressed ellpack buffer, using a segmented inclusive scan instead of a
// temporary compaction buffer.
template <typename AdapterBatchT>
void CopyDataRowMajor(const AdapterBatchT& batch, EllpackPageImpl* dst,
                      int device_idx, float missing) {
  // Some witchcraft happens here
  // The goal is to copy valid elements out of the input to an ellpack matrix
  // with a given row stride, using no extra working memory Standard stream
  // compaction needs to be modified to do this, so we manually define a
  // segmented stream compaction via operators on an inclusive scan. The output
  // of this inclusive scan is fed to a custom function which works out the
  // correct output position
  auto counting = thrust::make_counting_iterator(0llu);
  IsValidFunctor is_valid(missing);
  // Scan key: the row owning each element (defines scan segments).
  auto key_iter = dh::MakeTransformIterator<size_t>(
      counting,
      [=] __device__(size_t idx) { return batch.GetElement(idx).row_idx; });
  // Scan value: 1 for a valid (non-missing) element, 0 otherwise.
  auto value_iter = dh::MakeTransformIterator<size_t>(
      counting, [=] __device__(size_t idx) -> size_t {
        return is_valid(batch.GetElement(idx));
      });
  auto key_value_index_iter = thrust::make_zip_iterator(
      thrust::make_tuple(key_iter, value_iter, counting));
  // Tuple[0] = The row index of the input, used as a key to define segments
  // Tuple[1] = Scanned flags of valid elements for each row
  // Tuple[2] = The index in the input data
  using Tuple = thrust::tuple<size_t, size_t, size_t>;
  auto device_accessor = dst->GetDeviceAccessor(device_idx);
  common::CompressedBufferWriter writer(device_accessor.NumSymbols());
  auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
  // We redirect the scan output into this functor to do the actual writing
  WriteCompressedEllpackFunctor<AdapterBatchT> functor(
      d_compressed_buffer, writer, batch, device_accessor, is_valid);
  thrust::discard_iterator<size_t> discard;
  thrust::transform_output_iterator<
      WriteCompressedEllpackFunctor<AdapterBatchT>, decltype(discard)>
      out(discard, functor);
  dh::XGBCachingDeviceAllocator<char> alloc;
  // Custom scan operator: the running valid-count accumulates only while the
  // row key stays the same, restarting at each new row (segmented scan).
  thrust::inclusive_scan(thrust::hip::par(alloc), key_value_index_iter,
                         key_value_index_iter + batch.Size(), out,
                         [=] __device__(Tuple a, Tuple b) {
                           // Key equal
                           if (a.get<0>() == b.get<0>()) {
                             b.get<1>() += a.get<1>();
                             return b;
                           }
                           // Not equal
                           return b;
                         });
}
// Copies the valid elements of a column-major adapter batch into `dst`'s
// compressed ellpack buffer. Columns are processed one at a time;
// d_temp_row_ptr[row] tracks the next free slot within each row.
template <typename AdapterT, typename AdapterBatchT>
void CopyDataColumnMajor(AdapterT* adapter, const AdapterBatchT& batch,
                         EllpackPageImpl* dst, float missing) {
  // Step 1: Get the sizes of the input columns
  dh::caching_device_vector<size_t> column_sizes(adapter->NumColumns(), 0);
  auto d_column_sizes = column_sizes.data().get();
  // Populate column sizes
  dh::LaunchN(adapter->DeviceIdx(), batch.Size(), [=] __device__(size_t idx) {
    const auto& e = batch.GetElement(idx);
    atomicAdd(reinterpret_cast<unsigned long long*>(  // NOLINT
                  &d_column_sizes[e.column_idx]),
              static_cast<unsigned long long>(1));  // NOLINT
  });
  thrust::host_vector<size_t> host_column_sizes = column_sizes;
  // Step 2: Iterate over columns, place elements in correct row, increment
  // temporary row pointers
  dh::caching_device_vector<size_t> temp_row_ptr(adapter->NumRows(), 0);
  auto d_temp_row_ptr = temp_row_ptr.data().get();
  auto row_stride = dst->row_stride;
  size_t begin = 0;
  auto device_accessor = dst->GetDeviceAccessor(adapter->DeviceIdx());
  common::CompressedBufferWriter writer(device_accessor.NumSymbols());
  auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
  IsValidFunctor is_valid(missing);
  // NOTE(review): this assumes the batch stores elements grouped by column in
  // column order (so [begin, end) is exactly one column) — confirm against
  // the adapter implementations.
  for (auto size : host_column_sizes) {
    size_t end = begin + size;
    dh::LaunchN(adapter->DeviceIdx(), end - begin, [=] __device__(size_t idx) {
      auto writer_non_const =
          writer;  // For some reason this variable gets captured as const
      const auto& e = batch.GetElement(idx + begin);
      if (!is_valid(e)) return;
      size_t output_position =
          e.row_idx * row_stride + d_temp_row_ptr[e.row_idx];
      auto bin_idx = device_accessor.SearchBin(e.value, e.column_idx);
      writer_non_const.AtomicWriteSymbol(d_compressed_buffer, bin_idx,
                                         output_position);
      d_temp_row_ptr[e.row_idx] += 1;
    });
    begin = end;
  }
}
// Fills the unused tail of every ellpack row (slots at or past the row's
// valid-element count) with the accessor's null symbol so each row occupies
// exactly row_stride slots.
void WriteNullValues(EllpackPageImpl* dst, int device_idx,
                     common::Span<size_t> row_counts) {
  // Write the null values
  auto device_accessor = dst->GetDeviceAccessor(device_idx);
  common::CompressedBufferWriter writer(device_accessor.NumSymbols());
  auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
  auto row_stride = dst->row_stride;
  // One thread per ellpack slot (row_stride * n_rows total).
  dh::LaunchN(device_idx, row_stride * dst->n_rows, [=] __device__(size_t idx) {
    auto writer_non_const =
        writer;  // For some reason this variable gets captured as const
    size_t row_idx = idx / row_stride;
    size_t row_offset = idx % row_stride;
    if (row_offset >= row_counts[row_idx]) {
      writer_non_const.AtomicWriteSymbol(d_compressed_buffer,
                                         device_accessor.NullValue(), idx);
    }
  });
}
// Does not currently support metainfo as no on-device data source contains this
// Current implementation assumes a single batch. More batches can
// be supported in future. Does not currently support inferring row/column size
// Builds a DeviceDMatrix from a single device adapter batch:
// sketch quantile cuts, measure per-row valid counts (-> row stride and
// nnz/rows/cols metadata), then fill the ellpack page from the batch in its
// native (row- or column-major) order and pad rows with null symbols.
template <typename AdapterT>
DeviceDMatrix::DeviceDMatrix(AdapterT* adapter, float missing, int nthread, int max_bin) {
  common::HistogramCuts cuts =
      common::AdapterDeviceSketch(adapter, max_bin, missing);
  auto& batch = adapter->Value();
  // Work out how many valid entries we have in each row
  dh::caching_device_vector<size_t> row_counts(adapter->NumRows() + 1, 0);
  common::Span<size_t> row_counts_span(row_counts.data().get(),
                                       row_counts.size());
  size_t row_stride =
      GetRowCounts(batch, row_counts_span, adapter->DeviceIdx(), missing);
  dh::XGBCachingDeviceAllocator<char> alloc;
  // Total valid elements = sum of the per-row counts.
  info_.num_nonzero_ = thrust::reduce(thrust::hip::par(alloc),
                                      row_counts.begin(), row_counts.end());
  info_.num_col_ = adapter->NumColumns();
  info_.num_row_ = adapter->NumRows();
  ellpack_page_.reset(new EllpackPage());
  *ellpack_page_->Impl() =
      EllpackPageImpl(adapter->DeviceIdx(), cuts, this->IsDense(), row_stride,
                      adapter->NumRows());
  if (adapter->IsRowMajor()) {
    CopyDataRowMajor(batch, ellpack_page_->Impl(), adapter->DeviceIdx(),
                     missing);
  } else {
    CopyDataColumnMajor(adapter, batch, ellpack_page_->Impl(), missing);
  }
  WriteNullValues(ellpack_page_->Impl(), adapter->DeviceIdx(), row_counts_span);
  // Synchronise worker columns
  rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1);
}
template DeviceDMatrix::DeviceDMatrix(CudfAdapter* adapter, float missing,
int nthread, int max_bin);
template DeviceDMatrix::DeviceDMatrix(CupyAdapter* adapter, float missing,
int nthread, int max_bin);
} // namespace data
} // namespace xgboost
| 083c92d4d0be42be41af970a9d7503541d3f5bbb.cu | /*!
* Copyright 2020 by Contributors
* \file device_dmatrix.cu
* \brief Device-memory version of DMatrix.
*/
#include <thrust/execution_policy.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <xgboost/base.h>
#include <xgboost/data.h>
#include <memory>
#include <utility>
#include "../common/hist_util.h"
#include "adapter.h"
#include "device_adapter.cuh"
#include "ellpack_page.cuh"
#include "device_dmatrix.h"
namespace xgboost {
namespace data {
// Returns maximum row length
// Counts the valid (non-missing) elements of each row into `offset` and
// returns the maximum per-row count, i.e. the required ellpack row stride.
// `offset` is accumulated with atomicAdd, so it must start zeroed.
template <typename AdapterBatchT>
size_t GetRowCounts(const AdapterBatchT& batch, common::Span<size_t> offset,
                    int device_idx, float missing) {
  IsValidFunctor is_valid(missing);
  // Count elements per row
  dh::LaunchN(device_idx, batch.Size(), [=] __device__(size_t idx) {
    auto element = batch.GetElement(idx);
    if (is_valid(element)) {
      atomicAdd(reinterpret_cast<unsigned long long*>(  // NOLINT
                    &offset[element.row_idx]),
                static_cast<unsigned long long>(1));  // NOLINT
    }
  });
  dh::XGBCachingDeviceAllocator<char> alloc;
  // Reduce with maximum to find the longest row.
  size_t row_stride = thrust::reduce(
      thrust::cuda::par(alloc), thrust::device_pointer_cast(offset.data()),
      thrust::device_pointer_cast(offset.data()) + offset.size(), size_t(0),
      thrust::maximum<size_t>());
  return row_stride;
}
// Output functor for the segmented inclusive scan in CopyDataRowMajor: for
// every valid input element it writes the element's bin index into the
// compressed ellpack buffer at row_stride * row + (valid count so far - 1).
template <typename AdapterBatchT>
struct WriteCompressedEllpackFunctor {
  WriteCompressedEllpackFunctor(common::CompressedByteT* buffer,
                                const common::CompressedBufferWriter& writer,
                                AdapterBatchT batch,
                                EllpackDeviceAccessor accessor,
                                const IsValidFunctor& is_valid)
      : d_buffer(buffer),
        writer(writer),
        batch(std::move(batch)),
        accessor(std::move(accessor)),
        is_valid(is_valid) {}
  common::CompressedByteT* d_buffer;      // destination compressed buffer
  common::CompressedBufferWriter writer;  // compressed-symbol writer
  AdapterBatchT batch;                    // source elements
  EllpackDeviceAccessor accessor;         // bin search + matrix geometry
  IsValidFunctor is_valid;                // missing-value predicate
  // Tuple = (row index, scanned valid count, index into the input batch).
  using Tuple = thrust::tuple<size_t, size_t, size_t>;
  __device__ size_t operator()(Tuple out) {
    auto e = batch.GetElement(out.get<2>());
    if (is_valid(e)) {
      // -1 because the scan is inclusive
      size_t output_position =
          accessor.row_stride * e.row_idx + out.get<1>() - 1;
      auto bin_idx = accessor.SearchBin(e.value, e.column_idx);
      writer.AtomicWriteSymbol(d_buffer, bin_idx, output_position);
    }
    // Return value is discarded (output iterator is a discard_iterator).
    return 0;
  }
};
// Here the data is already correctly ordered and simply needs to be compacted
// to remove missing data
// Copies the valid elements of a row-major adapter batch into `dst`'s
// compressed ellpack buffer, using a segmented inclusive scan instead of a
// temporary compaction buffer.
template <typename AdapterBatchT>
void CopyDataRowMajor(const AdapterBatchT& batch, EllpackPageImpl* dst,
                      int device_idx, float missing) {
  // Some witchcraft happens here
  // The goal is to copy valid elements out of the input to an ellpack matrix
  // with a given row stride, using no extra working memory Standard stream
  // compaction needs to be modified to do this, so we manually define a
  // segmented stream compaction via operators on an inclusive scan. The output
  // of this inclusive scan is fed to a custom function which works out the
  // correct output position
  auto counting = thrust::make_counting_iterator(0llu);
  IsValidFunctor is_valid(missing);
  // Scan key: the row owning each element (defines scan segments).
  auto key_iter = dh::MakeTransformIterator<size_t>(
      counting,
      [=] __device__(size_t idx) { return batch.GetElement(idx).row_idx; });
  // Scan value: 1 for a valid (non-missing) element, 0 otherwise.
  auto value_iter = dh::MakeTransformIterator<size_t>(
      counting, [=] __device__(size_t idx) -> size_t {
        return is_valid(batch.GetElement(idx));
      });
  auto key_value_index_iter = thrust::make_zip_iterator(
      thrust::make_tuple(key_iter, value_iter, counting));
  // Tuple[0] = The row index of the input, used as a key to define segments
  // Tuple[1] = Scanned flags of valid elements for each row
  // Tuple[2] = The index in the input data
  using Tuple = thrust::tuple<size_t, size_t, size_t>;
  auto device_accessor = dst->GetDeviceAccessor(device_idx);
  common::CompressedBufferWriter writer(device_accessor.NumSymbols());
  auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
  // We redirect the scan output into this functor to do the actual writing
  WriteCompressedEllpackFunctor<AdapterBatchT> functor(
      d_compressed_buffer, writer, batch, device_accessor, is_valid);
  thrust::discard_iterator<size_t> discard;
  thrust::transform_output_iterator<
      WriteCompressedEllpackFunctor<AdapterBatchT>, decltype(discard)>
      out(discard, functor);
  dh::XGBCachingDeviceAllocator<char> alloc;
  // Custom scan operator: the running valid-count accumulates only while the
  // row key stays the same, restarting at each new row (segmented scan).
  thrust::inclusive_scan(thrust::cuda::par(alloc), key_value_index_iter,
                         key_value_index_iter + batch.Size(), out,
                         [=] __device__(Tuple a, Tuple b) {
                           // Key equal
                           if (a.get<0>() == b.get<0>()) {
                             b.get<1>() += a.get<1>();
                             return b;
                           }
                           // Not equal
                           return b;
                         });
}
// Copies the valid elements of a column-major adapter batch into `dst`'s
// compressed ellpack buffer. Columns are processed one at a time;
// d_temp_row_ptr[row] tracks the next free slot within each row.
template <typename AdapterT, typename AdapterBatchT>
void CopyDataColumnMajor(AdapterT* adapter, const AdapterBatchT& batch,
                         EllpackPageImpl* dst, float missing) {
  // Step 1: Get the sizes of the input columns
  dh::caching_device_vector<size_t> column_sizes(adapter->NumColumns(), 0);
  auto d_column_sizes = column_sizes.data().get();
  // Populate column sizes
  dh::LaunchN(adapter->DeviceIdx(), batch.Size(), [=] __device__(size_t idx) {
    const auto& e = batch.GetElement(idx);
    atomicAdd(reinterpret_cast<unsigned long long*>(  // NOLINT
                  &d_column_sizes[e.column_idx]),
              static_cast<unsigned long long>(1));  // NOLINT
  });
  thrust::host_vector<size_t> host_column_sizes = column_sizes;
  // Step 2: Iterate over columns, place elements in correct row, increment
  // temporary row pointers
  dh::caching_device_vector<size_t> temp_row_ptr(adapter->NumRows(), 0);
  auto d_temp_row_ptr = temp_row_ptr.data().get();
  auto row_stride = dst->row_stride;
  size_t begin = 0;
  auto device_accessor = dst->GetDeviceAccessor(adapter->DeviceIdx());
  common::CompressedBufferWriter writer(device_accessor.NumSymbols());
  auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
  IsValidFunctor is_valid(missing);
  // NOTE(review): this assumes the batch stores elements grouped by column in
  // column order (so [begin, end) is exactly one column) — confirm against
  // the adapter implementations.
  for (auto size : host_column_sizes) {
    size_t end = begin + size;
    dh::LaunchN(adapter->DeviceIdx(), end - begin, [=] __device__(size_t idx) {
      auto writer_non_const =
          writer;  // For some reason this variable gets captured as const
      const auto& e = batch.GetElement(idx + begin);
      if (!is_valid(e)) return;
      size_t output_position =
          e.row_idx * row_stride + d_temp_row_ptr[e.row_idx];
      auto bin_idx = device_accessor.SearchBin(e.value, e.column_idx);
      writer_non_const.AtomicWriteSymbol(d_compressed_buffer, bin_idx,
                                         output_position);
      d_temp_row_ptr[e.row_idx] += 1;
    });
    begin = end;
  }
}
// Fills the unused tail of every ellpack row (slots at or past the row's
// valid-element count) with the accessor's null symbol so each row occupies
// exactly row_stride slots.
void WriteNullValues(EllpackPageImpl* dst, int device_idx,
                     common::Span<size_t> row_counts) {
  // Write the null values
  auto device_accessor = dst->GetDeviceAccessor(device_idx);
  common::CompressedBufferWriter writer(device_accessor.NumSymbols());
  auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
  auto row_stride = dst->row_stride;
  // One thread per ellpack slot (row_stride * n_rows total).
  dh::LaunchN(device_idx, row_stride * dst->n_rows, [=] __device__(size_t idx) {
    auto writer_non_const =
        writer;  // For some reason this variable gets captured as const
    size_t row_idx = idx / row_stride;
    size_t row_offset = idx % row_stride;
    if (row_offset >= row_counts[row_idx]) {
      writer_non_const.AtomicWriteSymbol(d_compressed_buffer,
                                         device_accessor.NullValue(), idx);
    }
  });
}
// Does not currently support metainfo as no on-device data source contains this
// Current implementation assumes a single batch. More batches can
// be supported in future. Does not currently support inferring row/column size
// Builds a DeviceDMatrix from a single device adapter batch:
// sketch quantile cuts, measure per-row valid counts (-> row stride and
// nnz/rows/cols metadata), then fill the ellpack page from the batch in its
// native (row- or column-major) order and pad rows with null symbols.
template <typename AdapterT>
DeviceDMatrix::DeviceDMatrix(AdapterT* adapter, float missing, int nthread, int max_bin) {
  common::HistogramCuts cuts =
      common::AdapterDeviceSketch(adapter, max_bin, missing);
  auto& batch = adapter->Value();
  // Work out how many valid entries we have in each row
  dh::caching_device_vector<size_t> row_counts(adapter->NumRows() + 1, 0);
  common::Span<size_t> row_counts_span(row_counts.data().get(),
                                       row_counts.size());
  size_t row_stride =
      GetRowCounts(batch, row_counts_span, adapter->DeviceIdx(), missing);
  dh::XGBCachingDeviceAllocator<char> alloc;
  // Total valid elements = sum of the per-row counts.
  info_.num_nonzero_ = thrust::reduce(thrust::cuda::par(alloc),
                                      row_counts.begin(), row_counts.end());
  info_.num_col_ = adapter->NumColumns();
  info_.num_row_ = adapter->NumRows();
  ellpack_page_.reset(new EllpackPage());
  *ellpack_page_->Impl() =
      EllpackPageImpl(adapter->DeviceIdx(), cuts, this->IsDense(), row_stride,
                      adapter->NumRows());
  if (adapter->IsRowMajor()) {
    CopyDataRowMajor(batch, ellpack_page_->Impl(), adapter->DeviceIdx(),
                     missing);
  } else {
    CopyDataColumnMajor(adapter, batch, ellpack_page_->Impl(), missing);
  }
  WriteNullValues(ellpack_page_->Impl(), adapter->DeviceIdx(), row_counts_span);
  // Synchronise worker columns
  rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1);
}
template DeviceDMatrix::DeviceDMatrix(CudfAdapter* adapter, float missing,
int nthread, int max_bin);
template DeviceDMatrix::DeviceDMatrix(CupyAdapter* adapter, float missing,
int nthread, int max_bin);
} // namespace data
} // namespace xgboost
|
e502a6fc64fddecbc0ad9d48dfe7abc3b340ddaf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define SIZE 1024
// Global functions - kernels. Device code. Run on GPU. Code that run on CPU is host code
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// Generalized to use the global thread index so the kernel also works when
// launched with more than one block; for main()'s <<<1, SIZE>>> launch this
// reduces to threadIdx.x, so existing behaviour is unchanged.
__global__
void VectorAdd(int *a, int *b, int *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard: the grid may contain more threads than elements.
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
// Allocates three managed vectors, runs VectorAdd on the device, and prints
// the first 10 results. Returns non-zero on any HIP error (previously all
// HIP return codes were silently ignored).
int main() {
    int *a, *b, *c;
    // Managed (unified) memory is accessible from both host and device.
    if (hipMallocManaged(&a, SIZE * sizeof(int)) != hipSuccess ||
        hipMallocManaged(&b, SIZE * sizeof(int)) != hipSuccess ||
        hipMallocManaged(&c, SIZE * sizeof(int)) != hipSuccess) {
        fprintf(stderr, "hipMallocManaged failed\n");
        return 1;
    }
    for (int i = 0; i < SIZE; i++) {
        a[i] = i;
        b[i] = i;
        c[i] = 0;
    }
    hipLaunchKernelGGL(( VectorAdd) , dim3(1), dim3(SIZE), 0, 0, a, b, c, SIZE);
    // A kernel launch returns no status; check launch and execution errors.
    hipError_t err = hipGetLastError();
    if (err == hipSuccess) {
        err = hipDeviceSynchronize();
    }
    if (err != hipSuccess) {
        fprintf(stderr, "VectorAdd failed: %s\n", hipGetErrorString(err));
        return 1;
    }
    for (int i = 0; i < 10; ++i) {
        printf("c[%d] = %d\n", i, c[i]);
    }
    hipFree(a);
    hipFree(b);
    hipFree(c);
    return 0;
}
| e502a6fc64fddecbc0ad9d48dfe7abc3b340ddaf.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define SIZE 1024
// Global functions - kernels. Device code. Run on GPU. Code that run on CPU is host code
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// Generalized to use the global thread index so the kernel also works when
// launched with more than one block; for main()'s <<<1, SIZE>>> launch this
// reduces to threadIdx.x, so existing behaviour is unchanged.
__global__
void VectorAdd(int *a, int *b, int *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard: the grid may contain more threads than elements.
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
// Allocates three managed vectors, runs VectorAdd on the device, and prints
// the first 10 results. Returns non-zero on any CUDA error (previously all
// CUDA return codes were silently ignored).
int main() {
    int *a, *b, *c;
    // Managed (unified) memory is accessible from both host and device.
    if (cudaMallocManaged(&a, SIZE * sizeof(int)) != cudaSuccess ||
        cudaMallocManaged(&b, SIZE * sizeof(int)) != cudaSuccess ||
        cudaMallocManaged(&c, SIZE * sizeof(int)) != cudaSuccess) {
        fprintf(stderr, "cudaMallocManaged failed\n");
        return 1;
    }
    for (int i = 0; i < SIZE; i++) {
        a[i] = i;
        b[i] = i;
        c[i] = 0;
    }
    VectorAdd <<<1, SIZE>>> (a, b, c, SIZE);
    // A kernel launch returns no status; check launch and execution errors.
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess) {
        err = cudaDeviceSynchronize();
    }
    if (err != cudaSuccess) {
        fprintf(stderr, "VectorAdd failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    for (int i = 0; i < 10; ++i) {
        printf("c[%d] = %d\n", i, c[i]);
    }
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    return 0;
}
|
59e5f8e21ea4ad42b0ed9d62245c29f0145665ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011-2021, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief
* This file contains postprocessors a common format for computational kernels
* to raw image. It also does color space transformations.
*/
#include "gpujpeg_colorspace.h"
#include "gpujpeg_preprocessor_common.h"
#include "gpujpeg_postprocessor.h"
#include "gpujpeg_util.h"
/**
* Store value to component data buffer in specified position by buffer size and subsampling
*
* @param value
* @param position_x
* @param position_y
* @param comp
*/
/**
 * Load one sample of a (possibly subsampled) component for the raw-image
 * coordinate (position_x, position_y).
 *
 * The sampling factors are template parameters; GPUJPEG_DYNAMIC means the
 * factor is read from `comp` at runtime (slower, fully generic path).
 */
template<
    uint8_t s_samp_factor_h = GPUJPEG_DYNAMIC,
    uint8_t s_samp_factor_v = GPUJPEG_DYNAMIC
>
struct gpujpeg_preprocessor_comp_to_raw_load
{
    static __device__ void
    perform(uint8_t & value, int position_x, int position_y, struct gpujpeg_preprocessor_data_component & comp)
    {
        // Compile-time constant when specialized; runtime lookup otherwise.
        uint8_t samp_factor_h = s_samp_factor_h;
        if ( samp_factor_h == GPUJPEG_DYNAMIC ) {
            samp_factor_h = comp.sampling_factor.horizontal;
        }
        uint8_t samp_factor_v = s_samp_factor_v;
        if ( samp_factor_v == GPUJPEG_DYNAMIC ) {
            samp_factor_v = comp.sampling_factor.vertical;
        }
        // Map raw-image coordinates to component-plane coordinates.
        position_x = position_x / samp_factor_h;
        position_y = position_y / samp_factor_v;
        // data_width is the (possibly padded) row stride of the plane.
        int data_position = position_y * comp.data_width + position_x;
        value = comp.d_data[data_position];
    }
};

/**
 * Fast path for a non-subsampled component (1x1): no divisions needed.
 */
template<>
struct gpujpeg_preprocessor_comp_to_raw_load<1, 1>
{
    static __device__ void
    perform(uint8_t & value, int position_x, int position_y, struct gpujpeg_preprocessor_data_component & comp)
    {
        int data_position = position_y * comp.data_width + position_x;
        value = comp.d_data[data_position];
    }
};
/**
 * Store one pixel (samples r1, r2, r3) into the raw output buffer at linear
 * pixel index image_position, laid out according to the pixel format.
 *
 * NOTE: image_position is taken by reference and is scaled in place by the
 * interleaved variants (x3, x4, x2); callers must not reuse it afterwards.
 */
template<enum gpujpeg_pixel_format pixel_format>
inline __device__ void gpujpeg_comp_to_raw_store(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3);

// Single-component output: only the first sample is written.
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_U8>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    d_data_raw[image_position] = r1;
}

// Interleaved 4:4:4, 3 bytes per pixel.
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P012>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    image_position = image_position * 3;
    d_data_raw[image_position + 0] = r1;
    d_data_raw[image_position + 1] = r2;
    d_data_raw[image_position + 2] = r3;
}

// Interleaved 4:4:4 with a 4th byte set to 0xFF (opaque alpha).
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P012A>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    image_position = image_position * 4;
    d_data_raw[image_position + 0] = r1;
    d_data_raw[image_position + 1] = r2;
    d_data_raw[image_position + 2] = r3;
    d_data_raw[image_position + 3] = 0xFF;
}

// Interleaved 4:4:4 with a 4th byte zeroed.
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P012Z>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    image_position = image_position * 4;
    d_data_raw[image_position + 0] = r1;
    d_data_raw[image_position + 1] = r2;
    d_data_raw[image_position + 2] = r3;
    d_data_raw[image_position + 3] = 0x0;
}

// Planar 4:4:4: three tightly packed full-resolution planes.
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P0P1P2>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    d_data_raw[image_position] = r1;
    d_data_raw[image_width * image_height + image_position] = r2;
    d_data_raw[2 * image_width * image_height + image_position] = r3;
}

// Planar 4:2:2: full-resolution first plane, horizontally halved 2nd/3rd
// planes written only for even x (chroma plane width rounded up).
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_422_U8_P0P1P2>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    d_data_raw[image_position] = r1;
    if ( (x % 2) == 0 ) {
        d_data_raw[image_width * image_height + image_position / 2] = r2;
        d_data_raw[image_width * image_height + image_height * ((image_width + 1) / 2) + image_position / 2] = r3;
    }
}

// Interleaved 4:2:2 (UYVY-style): byte 1 carries the first component, byte 0
// alternates between the 2nd (even x) and 3rd (odd x) components.
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_422_U8_P1020>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    image_position = image_position * 2;
    d_data_raw[image_position + 1] = r1;
    if ( (x % 2) == 0 )
        d_data_raw[image_position + 0] = r2;
    else
        d_data_raw[image_position + 0] = r3;
}

// Planar 4:2:0: full-resolution first plane; 2nd/3rd planes written once per
// 2x2 block (even position and even row), dimensions rounded up.
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_420_U8_P0P1P2>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    d_data_raw[image_position] = r1;
    if ( (image_position % 2) == 0 && (y % 2) == 0 ) {
        d_data_raw[image_width * image_height + y / 2 * ((image_width + 1) / 2) + x / 2] = r2;
        d_data_raw[image_width * image_height + ((image_height + 1) / 2 + y / 2) * ((image_width + 1) / 2) + x / 2] = r3;
    }
}
/**
* Kernel - Copy three separated component buffers into target image data
*
* @param d_c1 First component buffer
* @param d_c2 Second component buffer
* @param d_c3 Third component buffer
* @param d_target Image target data
* @param pixel_count Number of pixels to copy
* @return void
*/
// Signature shared by all instantiations of the decode (postprocessor) kernel.
typedef void (*gpujpeg_preprocessor_decode_kernel)(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height);

/**
 * Postprocessor kernel: for each output pixel, load the three component
 * samples (honouring per-component subsampling), apply the compile-time
 * selected color transform and store the pixel in the requested format.
 *
 * Launched with 1D thread blocks over a grid whose y dimension is used only
 * when x would exceed the maximum grid width (see the launch site in
 * gpujpeg_preprocessor_decode).
 */
template<
    enum gpujpeg_color_space color_space_internal,
    enum gpujpeg_color_space color_space,
    enum gpujpeg_pixel_format pixel_format,
    uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
    uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
    uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v
>
__global__ void
gpujpeg_preprocessor_comp_to_raw_kernel(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height)
{
    int x = threadIdx.x;
    // Flatten the (possibly 2D) grid into a linear block offset.
    int gX = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;
    int image_position = gX + x;
    // Tail guard: the grid may cover more threads than pixels.
    if ( image_position >= (image_width * image_height) )
        return;
    int image_position_x = image_position % image_width;
    int image_position_y = image_position / image_width;

    // Load one sample from each component plane.
    uint8_t r1;
    uint8_t r2;
    uint8_t r3;
    gpujpeg_preprocessor_comp_to_raw_load<s_comp1_samp_factor_h, s_comp1_samp_factor_v>::perform(r1, image_position_x, image_position_y, data.comp[0]);
    gpujpeg_preprocessor_comp_to_raw_load<s_comp2_samp_factor_h, s_comp2_samp_factor_v>::perform(r2, image_position_x, image_position_y, data.comp[1]);
    gpujpeg_preprocessor_comp_to_raw_load<s_comp3_samp_factor_h, s_comp3_samp_factor_v>::perform(r3, image_position_x, image_position_y, data.comp[2]);

    // Color transform (template-selected, in place).
    gpujpeg_color_transform<color_space_internal, color_space>::perform(r1, r2, r3);

    // Store in the output pixel format.
    gpujpeg_comp_to_raw_store<pixel_format>(d_data_raw, image_width, image_height, image_position, image_position_x, image_position_y, r1, r2, r3);
}
/**
* Select preprocessor decode kernel
*
* @param decoder
* @return kernel
*/
template<enum gpujpeg_color_space color_space_internal>
gpujpeg_preprocessor_decode_kernel
gpujpeg_preprocessor_select_decode_kernel(struct gpujpeg_coder* coder)
{
    // Per-component sampling divisors (overall factor / component factor),
    // packed for comparison against the precompiled variants below.
    gpujpeg_preprocessor_sampling_factor_t sampling_factor = gpujpeg_preprocessor_make_sampling_factor(
            coder->sampling_factor.horizontal / coder->component[0].sampling_factor.horizontal,
            coder->sampling_factor.vertical / coder->component[0].sampling_factor.vertical,
            coder->sampling_factor.horizontal / coder->component[1].sampling_factor.horizontal,
            coder->sampling_factor.vertical / coder->component[1].sampling_factor.vertical,
            coder->sampling_factor.horizontal / coder->component[2].sampling_factor.horizontal,
            coder->sampling_factor.vertical / coder->component[2].sampling_factor.vertical
    );

// Fast path: return a kernel instantiated with compile-time sampling factors
// when they match the precompiled combination P1..P6.
#define RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, P1, P2, P3, P4, P5, P6) \
    if ( sampling_factor == gpujpeg_preprocessor_make_sampling_factor(P1, P2, P3, P4, P5, P6) ) { \
        int max_h = max(P1, max(P3, P5)); \
        int max_v = max(P2, max(P4, P6)); \
        if ( coder->param.verbose >= 2 ) { \
            printf("Using faster kernel for postprocessor (precompiled %dx%d, %dx%d, %dx%d).\n", max_h / P1, max_v / P2, max_h / P3, max_v / P4, max_h / P5, max_v / P6); \
        } \
        if ( PIXEL_FORMAT == GPUJPEG_U8 ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_U8, P1, P2, P3, P4, P5, P6>; \
        } else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012 ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012, P1, P2, P3, P4, P5, P6>; \
        } else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012A ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012A, P1, P2, P3, P4, P5, P6>; \
        } else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012Z ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012Z, P1, P2, P3, P4, P5, P6>; \
        } else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P1020, P1, P2, P3, P4, P5, P6>; \
        } else if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P0P1P2 ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P0P1P2, P1, P2, P3, P4, P5, P6>; \
        } else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P0P1P2 ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P0P1P2, P1, P2, P3, P4, P5, P6>; \
        } else if ( coder->param_image.pixel_format == GPUJPEG_420_U8_P0P1P2 ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_420_U8_P0P1P2, P1, P2, P3, P4, P5, P6>; \
        } else { \
            assert(false); \
        } \
    }

// Try every precompiled sampling-factor combination; fall back to the fully
// dynamic kernel (runtime sampling factors) otherwise.
// FIX: the dynamic fallback now also handles GPUJPEG_444_U8_P012A, which was
// present in the precompiled path but missing here, causing assert(false)
// for P012A output with non-precompiled sampling factors.
#define RETURN_KERNEL(PIXEL_FORMAT, COLOR) \
    RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 1, 1, 1, 1) \
    else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 2, 2, 2, 2) \
    else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 1, 2, 1, 2) \
    else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 2, 1, 2, 1) \
    else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 4, 4, 4, 4) \
    else { \
        if ( coder->param.verbose >= 2 ) { \
            printf("Using slower kernel for postprocessor (dynamic %dx%d, %dx%d, %dx%d).\n", coder->component[0].sampling_factor.horizontal, coder->component[0].sampling_factor.vertical, coder->component[1].sampling_factor.horizontal, coder->component[1].sampling_factor.vertical, coder->component[2].sampling_factor.horizontal, coder->component[2].sampling_factor.vertical); \
        } \
        if ( PIXEL_FORMAT == GPUJPEG_U8 ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_U8, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
        } else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012 ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
        } else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012A ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012A, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
        } else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012Z ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012Z, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
        } else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P1020, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
        } else if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P0P1P2 ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P0P1P2, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
        } else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P0P1P2 ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P0P1P2, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
        } else if ( coder->param_image.pixel_format == GPUJPEG_420_U8_P0P1P2 ) { \
            return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_420_U8_P0P1P2, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
        } else { \
            assert(false); \
        } \
    }

    // Dispatch on the raw image's color space.
    // None color space
    if ( coder->param_image.color_space == GPUJPEG_NONE ) {
        RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_NONE)
    }
    // RGB color space
    else if ( coder->param_image.color_space == GPUJPEG_RGB ) {
        RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_RGB)
    }
    // YCbCr color space
    else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601 ) {
        RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT601)
    }
    // YCbCr color space
    else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601_256LVLS ) {
        RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT601_256LVLS)
    }
    // YCbCr color space
    else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT709 ) {
        RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT709)
    }
    // YUV color space
    else if ( coder->param_image.color_space == GPUJPEG_YUV ) {
        RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YUV)
    }
    // Unknown color space
    else {
        assert(false);
    }

#undef RETURN_KERNEL_IF
#undef RETURN_KERNEL

    return NULL;
}
/**
 * Returns 1 when decoded planar data can be copied out without running the
 * postprocessor kernel: no color-space transformation is needed and the JPEG
 * component subsampling already matches the output pixel format; 0 otherwise.
 */
static int gpujpeg_preprocessor_decode_no_transform(struct gpujpeg_coder * coder)
{
    // Color transformation required -> postprocessor must run.
    if (coder->param_image.comp_count == 3 && coder->param_image.color_space != coder->param.color_space_internal) {
        /*fprintf(stderr, "Decoding JPEG to a planar pixel format is supported only when no color transformation is required. "
                "JPEG internal color space is set to \"%s\", image is \"%s\".\n",
                gpujpeg_color_space_get_name(coder->param.color_space_internal),
                gpujpeg_color_space_get_name(coder->param_image.color_space));*/
        return 0;
    }

    // Subsampling change would be required -> postprocessor must run.
    const int *sampling_factors = gpujpeg_pixel_format_get_sampling_factor(coder->param_image.pixel_format);
    for (int i = 0; i < coder->param_image.comp_count; ++i) {
        if (coder->component[i].sampling_factor.horizontal != sampling_factors[i * 2]
                || coder->component[i].sampling_factor.vertical != sampling_factors[i * 2 + 1]) {
            /*fprintf(stderr, "Decoding JPEG to a planar pixel format cannot change subsampling (%s to %s).\n",
                    gpujpeg_subsampling_get_name(coder->param_image.comp_count, coder->component),
                    gpujpeg_pixel_format_get_name(coder->param_image.pixel_format));*/
            return 0;
        }
    }
    return 1;
}
/**
 * Returns 1 when every component plane has no row padding (its buffer row
 * stride equals its visible width), 0 otherwise.
 */
static int gpujpeg_preprocessor_decode_aligned(struct gpujpeg_coder * coder)
{
    int aligned = 1;
    for (int comp = 0; comp < coder->param_image.comp_count; ++comp) {
        if (coder->component[comp].width != coder->component[comp].data_width) {
            aligned = 0;
            break;
        }
    }
    return aligned;
}
/* Documented at declaration */
int
gpujpeg_preprocessor_decoder_init(struct gpujpeg_coder* coder)
{
    coder->preprocessor = NULL;

    // Skip the postprocessor entirely when the decoded planes can be copied
    // out verbatim (planar output, matching color space/subsampling, no row
    // padding); gpujpeg_preprocessor_decode then falls back to memcpy.
    if (!gpujpeg_pixel_format_is_interleaved(coder->param_image.pixel_format) &&
            gpujpeg_preprocessor_decode_no_transform(coder) &&
            gpujpeg_preprocessor_decode_aligned(coder)) {
        if ( coder->param.verbose >= 2 ) {
            printf("Matching format detected - not using postprocessor, using memcpy instead.");
        }
        return 0;
    }

    // The kernel path handles exactly three components.
    assert(coder->param_image.comp_count == 3);

    // Select a kernel instantiation for the JPEG's internal color space.
    if (coder->param.color_space_internal == GPUJPEG_NONE) {
        coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_NONE>(coder);
    }
    else if (coder->param.color_space_internal == GPUJPEG_RGB) {
        coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_RGB>(coder);
    }
    else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601) {
        coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT601>(coder);
    }
    else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601_256LVLS) {
        coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT601_256LVLS>(coder);
    }
    else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT709) {
        coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT709>(coder);
    }
    else {
        assert(false);
    }
    // NULL here means kernel selection failed.
    if (coder->preprocessor == NULL) {
        return -1;
    }
    return 0;
}
/**
* Copies raw data GPU memory without running any postprocessor kernel.
*
* This assumes that the JPEG has same color space as input raw image and
* currently also that the component subsampling correspond between raw and
* JPEG (although at least different horizontal subsampling can be quite
* easily done).
*
* @invariant gpujpeg_preprocessor_decode_no_transform(coder) != 0
*/
/**
 * Copies decoded component planes into the raw output buffer without running
 * a postprocessor kernel (see the invariant documented above the function).
 *
 * @param coder   coder whose component planes are copied to coder->d_data_raw
 * @param stream  stream the async copies are enqueued on
 * @return 0 on success, -1 on CUDA/HIP error
 */
static int
gpujpeg_preprocessor_decoder_copy_planar_data(struct gpujpeg_coder * coder, hipStream_t stream)
{
    assert(coder->param_image.comp_count == 1 ||
            coder->param_image.comp_count == 3);

    size_t data_raw_offset = 0;
    bool needs_stride = false; // true if width is not divisible by MCU width
    for (int i = 0; i < coder->param_image.comp_count; ++i) {
        needs_stride = needs_stride || coder->component[i].width != coder->component[i].data_width;
    }
    if (!needs_stride) {
        // No padding: each plane is one contiguous copy.
        for (int i = 0; i < coder->param_image.comp_count; ++i) {
            size_t component_size = coder->component[i].width * coder->component[i].height;
            hipMemcpyAsync(coder->d_data_raw + data_raw_offset, coder->component[i].d_data, component_size, hipMemcpyDeviceToDevice, stream);
            data_raw_offset += component_size;
        }
    } else {
        // Source rows are padded to data_width; copy row-by-row, dropping the
        // padding so destination planes are tightly packed.
        for (int i = 0; i < coder->param_image.comp_count; ++i) {
            int spitch = coder->component[i].data_width;
            int dpitch = coder->component[i].width;
            // FIX: advance the output offset by the *destination* plane size
            // (dpitch * height). The previous spitch-based offset left gaps
            // between packed planes in d_data_raw.
            size_t component_size = (size_t) dpitch * coder->component[i].height;
            hipMemcpy2DAsync(coder->d_data_raw + data_raw_offset, dpitch, coder->component[i].d_data, spitch, coder->component[i].width, coder->component[i].height, hipMemcpyDeviceToDevice, stream);
            data_raw_offset += component_size;
        }
    }
    gpujpeg_cuda_check_error("Preprocessor copy failed", return -1);
    return 0;
}
/* Documented at declaration */
int
gpujpeg_preprocessor_decode(struct gpujpeg_coder* coder, hipStream_t stream)
{
    // No kernel selected at init time -> planar data can be copied verbatim.
    if (!coder->preprocessor) {
        return gpujpeg_preprocessor_decoder_copy_planar_data(coder, stream);
    }

    assert(coder->param_image.comp_count == 3);

    // Select kernel
    gpujpeg_preprocessor_decode_kernel kernel = (gpujpeg_preprocessor_decode_kernel)coder->preprocessor;
    assert(kernel != NULL);

    int image_width = coder->param_image.width;
    int image_height = coder->param_image.height;

    // When saving 4:2:2 data of odd width, the data should have even width, so round it
    if (coder->param_image.pixel_format == GPUJPEG_422_U8_P1020) {
        image_width = gpujpeg_div_and_round_up(coder->param_image.width, 2) * 2;
    }

    // Prepare unit size
    /// @todo this stuff doesn't look correct - we multiply by unitSize and then divide by it
    int unitSize = gpujpeg_pixel_format_get_unit_size(coder->param_image.pixel_format);
    if (unitSize == 0) {
        unitSize = 1;
    }

    // Prepare kernel launch configuration: one thread per pixel, blocks of
    // RGB_8BIT_THREADS, grid folded into 2D when it exceeds the maximum width.
    int alignedSize = gpujpeg_div_and_round_up(image_width * image_height, RGB_8BIT_THREADS) * RGB_8BIT_THREADS * unitSize;
    dim3 threads (RGB_8BIT_THREADS);
    dim3 grid (alignedSize / (RGB_8BIT_THREADS * unitSize));
    assert(alignedSize % (RGB_8BIT_THREADS * unitSize) == 0);
    if ( grid.x > GPUJPEG_CUDA_MAXIMUM_GRID_SIZE ) {
        grid.y = gpujpeg_div_and_round_up(grid.x, GPUJPEG_CUDA_MAXIMUM_GRID_SIZE);
        grid.x = GPUJPEG_CUDA_MAXIMUM_GRID_SIZE;
    }

    // Gather per-component plane pointers and runtime sampling divisors
    // (used by kernels instantiated with GPUJPEG_DYNAMIC factors).
    struct gpujpeg_preprocessor_data data;
    for ( int comp = 0; comp < 3; comp++ ) {
        assert(coder->sampling_factor.horizontal % coder->component[comp].sampling_factor.horizontal == 0);
        assert(coder->sampling_factor.vertical % coder->component[comp].sampling_factor.vertical == 0);
        data.comp[comp].d_data = coder->component[comp].d_data;
        data.comp[comp].sampling_factor.horizontal = coder->sampling_factor.horizontal / coder->component[comp].sampling_factor.horizontal;
        data.comp[comp].sampling_factor.vertical = coder->sampling_factor.vertical / coder->component[comp].sampling_factor.vertical;
        data.comp[comp].data_width = coder->component[comp].data_width;
    }

    // Run kernel (asynchronously on the given stream).
    hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, stream,
        data,
        coder->d_data_raw,
        image_width,
        image_height
    );
    gpujpeg_cuda_check_error("Preprocessor encoding failed", return -1);

    return 0;
}
/* vi: set expandtab sw=4: */
| 59e5f8e21ea4ad42b0ed9d62245c29f0145665ae.cu | /*
* Copyright (c) 2011-2021, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief
* This file contains postprocessors a common format for computational kernels
* to raw image. It also does color space transformations.
*/
#include "gpujpeg_colorspace.h"
#include "gpujpeg_preprocessor_common.h"
#include "gpujpeg_postprocessor.h"
#include "gpujpeg_util.h"
/**
* Store value to component data buffer in specified position by buffer size and subsampling
*
* @param value
* @param position_x
* @param position_y
* @param comp
*/
/**
 * Load one sample of a (possibly subsampled) component for the raw-image
 * coordinate (position_x, position_y).
 *
 * The sampling factors are template parameters; GPUJPEG_DYNAMIC means the
 * factor is read from `comp` at runtime (slower, fully generic path).
 */
template<
    uint8_t s_samp_factor_h = GPUJPEG_DYNAMIC,
    uint8_t s_samp_factor_v = GPUJPEG_DYNAMIC
>
struct gpujpeg_preprocessor_comp_to_raw_load
{
    static __device__ void
    perform(uint8_t & value, int position_x, int position_y, struct gpujpeg_preprocessor_data_component & comp)
    {
        // Compile-time constant when specialized; runtime lookup otherwise.
        uint8_t samp_factor_h = s_samp_factor_h;
        if ( samp_factor_h == GPUJPEG_DYNAMIC ) {
            samp_factor_h = comp.sampling_factor.horizontal;
        }
        uint8_t samp_factor_v = s_samp_factor_v;
        if ( samp_factor_v == GPUJPEG_DYNAMIC ) {
            samp_factor_v = comp.sampling_factor.vertical;
        }
        // Map raw-image coordinates to component-plane coordinates.
        position_x = position_x / samp_factor_h;
        position_y = position_y / samp_factor_v;
        // data_width is the (possibly padded) row stride of the plane.
        int data_position = position_y * comp.data_width + position_x;
        value = comp.d_data[data_position];
    }
};

/**
 * Fast path for a non-subsampled component (1x1): no divisions needed.
 */
template<>
struct gpujpeg_preprocessor_comp_to_raw_load<1, 1>
{
    static __device__ void
    perform(uint8_t & value, int position_x, int position_y, struct gpujpeg_preprocessor_data_component & comp)
    {
        int data_position = position_y * comp.data_width + position_x;
        value = comp.d_data[data_position];
    }
};
/**
 * Store one pixel (samples r1, r2, r3) into the raw output buffer at linear
 * pixel index image_position, laid out according to the pixel format.
 *
 * NOTE: image_position is taken by reference and is scaled in place by the
 * interleaved variants (x3, x4, x2); callers must not reuse it afterwards.
 */
template<enum gpujpeg_pixel_format pixel_format>
inline __device__ void gpujpeg_comp_to_raw_store(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3);

// Single-component output: only the first sample is written.
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_U8>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    d_data_raw[image_position] = r1;
}

// Interleaved 4:4:4, 3 bytes per pixel.
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P012>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    image_position = image_position * 3;
    d_data_raw[image_position + 0] = r1;
    d_data_raw[image_position + 1] = r2;
    d_data_raw[image_position + 2] = r3;
}

// Interleaved 4:4:4 with a 4th byte set to 0xFF (opaque alpha).
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P012A>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    image_position = image_position * 4;
    d_data_raw[image_position + 0] = r1;
    d_data_raw[image_position + 1] = r2;
    d_data_raw[image_position + 2] = r3;
    d_data_raw[image_position + 3] = 0xFF;
}

// Interleaved 4:4:4 with a 4th byte zeroed.
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P012Z>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    image_position = image_position * 4;
    d_data_raw[image_position + 0] = r1;
    d_data_raw[image_position + 1] = r2;
    d_data_raw[image_position + 2] = r3;
    d_data_raw[image_position + 3] = 0x0;
}

// Planar 4:4:4: three tightly packed full-resolution planes.
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P0P1P2>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    d_data_raw[image_position] = r1;
    d_data_raw[image_width * image_height + image_position] = r2;
    d_data_raw[2 * image_width * image_height + image_position] = r3;
}

// Planar 4:2:2: full-resolution first plane, horizontally halved 2nd/3rd
// planes written only for even x (chroma plane width rounded up).
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_422_U8_P0P1P2>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    d_data_raw[image_position] = r1;
    if ( (x % 2) == 0 ) {
        d_data_raw[image_width * image_height + image_position / 2] = r2;
        d_data_raw[image_width * image_height + image_height * ((image_width + 1) / 2) + image_position / 2] = r3;
    }
}

// Interleaved 4:2:2 (UYVY-style): byte 1 carries the first component, byte 0
// alternates between the 2nd (even x) and 3rd (odd x) components.
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_422_U8_P1020>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    image_position = image_position * 2;
    d_data_raw[image_position + 1] = r1;
    if ( (x % 2) == 0 )
        d_data_raw[image_position + 0] = r2;
    else
        d_data_raw[image_position + 0] = r3;
}

// Planar 4:2:0: full-resolution first plane; 2nd/3rd planes written once per
// 2x2 block (even position and even row), dimensions rounded up.
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_420_U8_P0P1P2>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
    d_data_raw[image_position] = r1;
    if ( (image_position % 2) == 0 && (y % 2) == 0 ) {
        d_data_raw[image_width * image_height + y / 2 * ((image_width + 1) / 2) + x / 2] = r2;
        d_data_raw[image_width * image_height + ((image_height + 1) / 2 + y / 2) * ((image_width + 1) / 2) + x / 2] = r3;
    }
}
/**
* Kernel - Copy three separated component buffers into target image data
*
* @param d_c1 First component buffer
* @param d_c2 Second component buffer
* @param d_c3 Third component buffer
* @param d_target Image target data
* @param pixel_count Number of pixels to copy
* @return void
*/
// Signature shared by all instantiations of the decode (postprocessor) kernel.
typedef void (*gpujpeg_preprocessor_decode_kernel)(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height);

/**
 * Postprocessor kernel: for each output pixel, load the three component
 * samples (honouring per-component subsampling), apply the compile-time
 * selected color transform and store the pixel in the requested format.
 *
 * Launched with 1D thread blocks over a grid whose y dimension is used only
 * when x would exceed the maximum grid width (see the launch site in
 * gpujpeg_preprocessor_decode).
 */
template<
    enum gpujpeg_color_space color_space_internal,
    enum gpujpeg_color_space color_space,
    enum gpujpeg_pixel_format pixel_format,
    uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
    uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
    uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v
>
__global__ void
gpujpeg_preprocessor_comp_to_raw_kernel(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height)
{
    int x = threadIdx.x;
    // Flatten the (possibly 2D) grid into a linear block offset.
    int gX = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;
    int image_position = gX + x;
    // Tail guard: the grid may cover more threads than pixels.
    if ( image_position >= (image_width * image_height) )
        return;
    int image_position_x = image_position % image_width;
    int image_position_y = image_position / image_width;

    // Load one sample from each component plane.
    uint8_t r1;
    uint8_t r2;
    uint8_t r3;
    gpujpeg_preprocessor_comp_to_raw_load<s_comp1_samp_factor_h, s_comp1_samp_factor_v>::perform(r1, image_position_x, image_position_y, data.comp[0]);
    gpujpeg_preprocessor_comp_to_raw_load<s_comp2_samp_factor_h, s_comp2_samp_factor_v>::perform(r2, image_position_x, image_position_y, data.comp[1]);
    gpujpeg_preprocessor_comp_to_raw_load<s_comp3_samp_factor_h, s_comp3_samp_factor_v>::perform(r3, image_position_x, image_position_y, data.comp[2]);

    // Color transform (template-selected, in place).
    gpujpeg_color_transform<color_space_internal, color_space>::perform(r1, r2, r3);

    // Store in the output pixel format.
    gpujpeg_comp_to_raw_store<pixel_format>(d_data_raw, image_width, image_height, image_position, image_position_x, image_position_y, r1, r2, r3);
}
/**
* Select preprocessor decode kernel
*
* @param decoder
* @return kernel
*/
template<enum gpujpeg_color_space color_space_internal>
gpujpeg_preprocessor_decode_kernel
gpujpeg_preprocessor_select_decode_kernel(struct gpujpeg_coder* coder)
{
gpujpeg_preprocessor_sampling_factor_t sampling_factor = gpujpeg_preprocessor_make_sampling_factor(
coder->sampling_factor.horizontal / coder->component[0].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[0].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[1].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[1].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[2].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[2].sampling_factor.vertical
);
#define RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, P1, P2, P3, P4, P5, P6) \
if ( sampling_factor == gpujpeg_preprocessor_make_sampling_factor(P1, P2, P3, P4, P5, P6) ) { \
int max_h = max(P1, max(P3, P5)); \
int max_v = max(P2, max(P4, P6)); \
if ( coder->param.verbose >= 2 ) { \
printf("Using faster kernel for postprocessor (precompiled %dx%d, %dx%d, %dx%d).\n", max_h / P1, max_v / P2, max_h / P3, max_v / P4, max_h / P5, max_v / P6); \
} \
if ( PIXEL_FORMAT == GPUJPEG_U8 ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_U8, P1, P2, P3, P4, P5, P6>; \
} else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012 ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012, P1, P2, P3, P4, P5, P6>; \
} else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012A ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012A, P1, P2, P3, P4, P5, P6>; \
} else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012Z ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012Z, P1, P2, P3, P4, P5, P6>; \
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P1020, P1, P2, P3, P4, P5, P6>; \
} else if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P0P1P2 ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P0P1P2, P1, P2, P3, P4, P5, P6>; \
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P0P1P2 ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P0P1P2, P1, P2, P3, P4, P5, P6>; \
} else if ( coder->param_image.pixel_format == GPUJPEG_420_U8_P0P1P2 ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_420_U8_P0P1P2, P1, P2, P3, P4, P5, P6>; \
} else { \
assert(false); \
} \
}
#define RETURN_KERNEL(PIXEL_FORMAT, COLOR) \
RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 1, 1, 1, 1) \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 2, 2, 2, 2) \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 1, 2, 1, 2) \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 2, 1, 2, 1) \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 4, 4, 4, 4) \
else { \
if ( coder->param.verbose >= 2 ) { \
printf("Using slower kernel for postprocessor (dynamic %dx%d, %dx%d, %dx%d).\n", coder->component[0].sampling_factor.horizontal, coder->component[0].sampling_factor.vertical, coder->component[1].sampling_factor.horizontal, coder->component[1].sampling_factor.vertical, coder->component[2].sampling_factor.horizontal, coder->component[2].sampling_factor.vertical); \
} \
if ( PIXEL_FORMAT == GPUJPEG_U8 ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_U8, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012 ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012Z ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012Z, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P1020, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P0P1P2 ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P0P1P2, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P0P1P2 ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P0P1P2, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else if ( coder->param_image.pixel_format == GPUJPEG_420_U8_P0P1P2 ) { \
return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_420_U8_P0P1P2, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else { \
assert(false); \
} \
} \
// None color space
if ( coder->param_image.color_space == GPUJPEG_NONE ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_NONE)
}
// RGB color space
else if ( coder->param_image.color_space == GPUJPEG_RGB ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_RGB)
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601 ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT601)
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601_256LVLS ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT601_256LVLS)
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT709 ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT709)
}
// YUV color space
else if ( coder->param_image.color_space == GPUJPEG_YUV ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YUV)
}
// Unknown color space
else {
assert(false);
}
#undef RETURN_KERNEL_IF
#undef RETURN_KERNEL
return NULL;
}
/**
 * Checks whether decoded JPEG component planes can be copied to the output
 * without running a postprocessor kernel.
 *
 * @param coder  coder whose image parameters and components are inspected
 * @return 1 when no color transform is needed and every component's
 *         subsampling matches the one implied by the output pixel format;
 *         0 otherwise (a postprocessor kernel is required)
 */
static int gpujpeg_preprocessor_decode_no_transform(struct gpujpeg_coder * coder)
{
    // A 3-component image whose requested color space differs from the
    // JPEG-internal one needs a color conversion kernel.
    if (coder->param_image.comp_count == 3 && coder->param_image.color_space != coder->param.color_space_internal) {
        /*fprintf(stderr, "Decoding JPEG to a planar pixel format is supported only when no color transformation is required. "
                "JPEG internal color space is set to \"%s\", image is \"%s\".\n",
                gpujpeg_color_space_get_name(coder->param.color_space_internal),
                gpujpeg_color_space_get_name(coder->param_image.color_space));*/
        return 0;
    }
    // Each component's subsampling must equal what the output pixel format
    // expects; a plain copy cannot change subsampling.
    const int *sampling_factors = gpujpeg_pixel_format_get_sampling_factor(coder->param_image.pixel_format);
    for (int i = 0; i < coder->param_image.comp_count; ++i) {
        if (coder->component[i].sampling_factor.horizontal != sampling_factors[i * 2]
                || coder->component[i].sampling_factor.vertical != sampling_factors[i * 2 + 1]) {
            /*fprintf(stderr, "Decoding JPEG to a planar pixel format cannot change subsampling (%s to %s).\n",
                    gpujpeg_subsampling_get_name(coder->param_image.comp_count, coder->component),
                    gpujpeg_pixel_format_get_name(coder->param_image.pixel_format));*/
            return 0;
        }
    }
    return 1;
}
/**
 * Returns 1 when every component plane is stored without row padding,
 * i.e. each component's allocated row stride (data_width) equals its
 * visible width; returns 0 as soon as any padded component is found.
 */
static int gpujpeg_preprocessor_decode_aligned(struct gpujpeg_coder * coder)
{
    const int comp_count = coder->param_image.comp_count;
    for (int comp = 0; comp < comp_count; ++comp) {
        if (coder->component[comp].data_width != coder->component[comp].width) {
            return 0; // padded rows present
        }
    }
    return 1;
}
/* Documented at declaration */
int
gpujpeg_preprocessor_decoder_init(struct gpujpeg_coder* coder)
{
    coder->preprocessor = NULL;

    // Fast path: planar output with matching color space, subsampling and
    // alignment needs no postprocessor — decode will memcpy instead.
    if (!gpujpeg_pixel_format_is_interleaved(coder->param_image.pixel_format) &&
            gpujpeg_preprocessor_decode_no_transform(coder) &&
            gpujpeg_preprocessor_decode_aligned(coder)) {
        if ( coder->param.verbose >= 2 ) {
            // Trailing newline added for consistency with the other verbose messages.
            printf("Matching format detected - not using postprocessor, using memcpy instead.\n");
        }
        return 0;
    }

    // The kernel-based postprocessor currently handles 3-component images only.
    assert(coder->param_image.comp_count == 3);

    // Instantiate the decode kernel template for the JPEG-internal color space.
    if (coder->param.color_space_internal == GPUJPEG_NONE) {
        coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_NONE>(coder);
    }
    else if (coder->param.color_space_internal == GPUJPEG_RGB) {
        coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_RGB>(coder);
    }
    else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601) {
        coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT601>(coder);
    }
    else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601_256LVLS) {
        coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT601_256LVLS>(coder);
    }
    else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT709) {
        coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT709>(coder);
    }
    else {
        assert(false);
    }
    if (coder->preprocessor == NULL) {
        return -1;
    }
    return 0;
}
/**
 * Copies raw data GPU memory without running any postprocessor kernel.
 *
 * This assumes that the JPEG has same color space as input raw image and
 * currently also that the component subsampling correspond between raw and
 * JPEG (although at least different horizontal subsampling can be quite
 * easily done).
 *
 * @invariant gpujpeg_preprocessor_decode_no_transform(coder) != 0
 */
static int
gpujpeg_preprocessor_decoder_copy_planar_data(struct gpujpeg_coder * coder, cudaStream_t stream)
{
    assert(coder->param_image.comp_count == 1 ||
            coder->param_image.comp_count == 3);

    size_t data_raw_offset = 0;
    bool needs_stride = false; // true if width is not divisible by MCU width
    for (int i = 0; i < coder->param_image.comp_count; ++i) {
        needs_stride = needs_stride || coder->component[i].width != coder->component[i].data_width;
    }
    if (!needs_stride) {
        // All planes are tightly packed — one linear copy per component.
        for (int i = 0; i < coder->param_image.comp_count; ++i) {
            // size_t math avoids int overflow for very large planes
            size_t component_size = (size_t) coder->component[i].width * coder->component[i].height;
            cudaMemcpyAsync(coder->d_data_raw + data_raw_offset, coder->component[i].d_data, component_size, cudaMemcpyDeviceToDevice, stream);
            data_raw_offset += component_size;
        }
    } else {
        // Source planes are padded to data_width; strip the padding with a
        // pitched 2D copy into the tightly packed destination buffer.
        for (int i = 0; i < coder->param_image.comp_count; ++i) {
            int spitch = coder->component[i].data_width;
            int dpitch = coder->component[i].width;
            // The destination raw buffer is packed, so the next plane begins
            // dpitch * height bytes further on. (Previously advanced by
            // spitch * height, which left gaps and could overrun the packed
            // raw buffer whenever padding was present.)
            size_t component_size = (size_t) dpitch * coder->component[i].height;
            cudaMemcpy2DAsync(coder->d_data_raw + data_raw_offset, dpitch, coder->component[i].d_data, spitch, coder->component[i].width, coder->component[i].height, cudaMemcpyDeviceToDevice, stream);
            data_raw_offset += component_size;
        }
    }
    gpujpeg_cuda_check_error("Preprocessor copy failed", return -1);
    return 0;
}
/* Documented at declaration */
int
gpujpeg_preprocessor_decode(struct gpujpeg_coder* coder, cudaStream_t stream)
{
    // No kernel selected at init time means the fast memcpy path applies.
    if (!coder->preprocessor) {
        return gpujpeg_preprocessor_decoder_copy_planar_data(coder, stream);
    }

    assert(coder->param_image.comp_count == 3);

    // Select kernel
    gpujpeg_preprocessor_decode_kernel kernel = (gpujpeg_preprocessor_decode_kernel)coder->preprocessor;
    assert(kernel != NULL);

    int image_width = coder->param_image.width;
    int image_height = coder->param_image.height;

    // When saving 4:2:2 data of odd width, the data should have even width, so round it
    if (coder->param_image.pixel_format == GPUJPEG_422_U8_P1020) {
        image_width = gpujpeg_div_and_round_up(coder->param_image.width, 2) * 2;
    }

    // Prepare unit size
    /// @todo this stuff doesn't look correct - we multiply by unitSize and then divide by it
    int unitSize = gpujpeg_pixel_format_get_unit_size(coder->param_image.pixel_format);
    if (unitSize == 0) {
        unitSize = 1;
    }

    // Prepare kernel launch configuration; alignedSize is padded to a whole
    // number of RGB_8BIT_THREADS-sized blocks.
    int alignedSize = gpujpeg_div_and_round_up(image_width * image_height, RGB_8BIT_THREADS) * RGB_8BIT_THREADS * unitSize;
    dim3 threads (RGB_8BIT_THREADS);
    dim3 grid (alignedSize / (RGB_8BIT_THREADS * unitSize));
    assert(alignedSize % (RGB_8BIT_THREADS * unitSize) == 0);
    // Fold an over-large 1D grid into 2D to stay within the grid size limit.
    if ( grid.x > GPUJPEG_CUDA_MAXIMUM_GRID_SIZE ) {
        grid.y = gpujpeg_div_and_round_up(grid.x, GPUJPEG_CUDA_MAXIMUM_GRID_SIZE);
        grid.x = GPUJPEG_CUDA_MAXIMUM_GRID_SIZE;
    }

    // Gather per-component pointers and relative sampling factors for the kernel.
    struct gpujpeg_preprocessor_data data;
    for ( int comp = 0; comp < 3; comp++ ) {
        assert(coder->sampling_factor.horizontal % coder->component[comp].sampling_factor.horizontal == 0);
        assert(coder->sampling_factor.vertical % coder->component[comp].sampling_factor.vertical == 0);
        data.comp[comp].d_data = coder->component[comp].d_data;
        data.comp[comp].sampling_factor.horizontal = coder->sampling_factor.horizontal / coder->component[comp].sampling_factor.horizontal;
        data.comp[comp].sampling_factor.vertical = coder->sampling_factor.vertical / coder->component[comp].sampling_factor.vertical;
        data.comp[comp].data_width = coder->component[comp].data_width;
    }

    // Run kernel
    kernel<<<grid, threads, 0, stream>>>(
        data,
        coder->d_data_raw,
        image_width,
        image_height
    );
    // Error message fixed: this is the decode (postprocessor) path, the
    // previous text said "encoding" (copy-paste from the encoder).
    gpujpeg_cuda_check_error("Preprocessor decoding failed", return -1);

    return 0;
}
/* vi: set expandtab sw=4: */
|
4b8366a7dbf269ff2f690a65783cc30bc290ad1b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "vector.h"
// Element-wise vector add: B[i] = A[i] + B[i] for the first numElements entries.
// One thread handles one element; out-of-range threads return immediately.
__global__ void kernal1(int *A, int *B, int numElements)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    B[idx] = A[idx] + B[idx];
}
// Element-wise scalar multiply: A[i] = A[i] * k for the first numElements entries.
// One thread handles one element; out-of-range threads return immediately.
__global__ void kernal2(int *A, int k, int numElements)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    A[idx] = A[idx] * k;
}
// Returns the number of elements currently stored in this vector.
__host__ int my_vector::get_size()
{
return len;
}
/**
 * Element-wise GPU sum: stores a + b into this vector and returns it.
 * On a length mismatch prints an error, empties this vector and returns.
 * On any HIP API failure prints a message and exits with a non-zero status
 * (previously exit(0), which wrongly signalled success to the shell).
 */
__host__ my_vector my_vector::summa(my_vector a, my_vector b)
{
    if (a.len != b.len)
    {
        printf("error summa\n");
        this->len = 0;
        return *this;
    }
    size_t size = a.len * sizeof(int);
    this->len = a.len;
    int i;
    // Stage both operands in host buffers.
    int *h_A = (int *)malloc(size);
    int *h_B = (int *)malloc(size);
    if (h_A == NULL || h_B == NULL) { printf("host malloc error\n"); exit(1); }
    for (i = 0; i < a.len; ++i) h_A[i] = a.X[i];
    for (i = 0; i < a.len; ++i) h_B[i] = b.X[i];
    // Allocate device buffers.
    int *d_A = NULL;
    int *d_B = NULL;
    hipError_t err = hipSuccess;
    err = hipMalloc((void **)&d_A, size);
    if (err != hipSuccess) { printf("malloc A error\n"); exit(1); }
    err = hipMalloc((void **)&d_B, size);
    if (err != hipSuccess) { printf("malloc B error\n"); exit(1); }
    // Copy vector A and vector B to the device.
    err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
    if (err != hipSuccess) { printf("copy error\n"); exit(1); }
    err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    if (err != hipSuccess) { printf("copy error\n"); exit(1); }
    // One block per element, one thread per block (simple but functional).
    hipLaunchKernelGGL(( kernal1), dim3(a.len),dim3(1), 0, 0, d_A, d_B, a.len);
    err = hipGetLastError();
    if (err != hipSuccess) { printf("kernal1 error\n"); exit(1); }
    err = hipMemcpy(h_B, d_B, size, hipMemcpyDeviceToHost);
    if (err != hipSuccess) { printf("copy error\n"); exit(1); }
    // Free device global memory (best effort; the result is already fetched,
    // so the return codes are deliberately ignored instead of being stored
    // in err and never checked).
    (void)hipFree(d_A);
    (void)hipFree(d_B);
    // Publish the result and release host staging buffers.
    for (i = 0; i < a.len; ++i) this->X[i] = h_B[i];
    free(h_A);
    free(h_B);
    return *this;
}
/**
 * Element-wise GPU scale: stores k * a into this vector and returns it.
 * On any HIP API failure prints a message and exits with a non-zero status
 * (previously exit(0), which wrongly signalled success to the shell).
 */
__host__ my_vector my_vector::mult (int k, my_vector a)
{
    // (duplicate "this->len = a.len;" assignment removed)
    size_t size = a.len * sizeof(int);
    this->len = a.len;
    int i;
    // Stage the operand in a host buffer.
    int *h_A = (int *)malloc(size);
    if (h_A == NULL) { printf("host malloc error\n"); exit(1); }
    for (i = 0; i < a.len; ++i) h_A[i] = a.X[i];
    // Allocate the device buffer.
    int *d_A = NULL;
    hipError_t err = hipSuccess;
    err = hipMalloc((void **)&d_A, size);
    if (err != hipSuccess) { printf("malloc A error\n"); exit(1); }
    // Copy vector A to the device.
    err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    if (err != hipSuccess) { printf("copy error\n"); exit(1); }
    // One block per element, one thread per block (simple but functional).
    hipLaunchKernelGGL(( kernal2), dim3(a.len),dim3(1), 0, 0, d_A, k, a.len);
    err = hipGetLastError();
    if (err != hipSuccess) { printf("kernal2 error\n"); exit(1); }
    err = hipMemcpy(h_A, d_A, size, hipMemcpyDeviceToHost);
    if (err != hipSuccess) { printf("copy error\n"); exit(1); }
    // Free device global memory (best effort; result already fetched).
    (void)hipFree(d_A);
    // Publish the result and release the host staging buffer.
    for (i = 0; i < a.len; ++i) this->X[i] = h_A[i];
    free(h_A);
    return *this;
}
// Prints the vector elements separated by spaces, terminated by a newline.
__host__ void my_vector::write ()
{
    for (int idx = 0; idx < len; ++idx)
    {
        printf("%d ", X[idx]);
    }
    printf("\n");
}
| 4b8366a7dbf269ff2f690a65783cc30bc290ad1b.cu | #include "vector.h"
// Element-wise vector add: B[i] = A[i] + B[i] for the first numElements entries.
// One thread handles one element; out-of-range threads return immediately.
__global__ void kernal1(int *A, int *B, int numElements)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    B[idx] = A[idx] + B[idx];
}
// Element-wise scalar multiply: A[i] = A[i] * k for the first numElements entries.
// One thread handles one element; out-of-range threads return immediately.
__global__ void kernal2(int *A, int k, int numElements)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    A[idx] = A[idx] * k;
}
// Returns the number of elements currently stored in this vector.
__host__ int my_vector::get_size()
{
return len;
}
/**
 * Element-wise GPU sum: stores a + b into this vector and returns it.
 * On a length mismatch prints an error, empties this vector and returns.
 * On any CUDA API failure prints a message and exits with a non-zero status
 * (previously exit(0), which wrongly signalled success to the shell).
 */
__host__ my_vector my_vector::summa(my_vector a, my_vector b)
{
    if (a.len != b.len)
    {
        printf("error summa\n");
        this->len = 0;
        return *this;
    }
    size_t size = a.len * sizeof(int);
    this->len = a.len;
    int i;
    // Stage both operands in host buffers.
    int *h_A = (int *)malloc(size);
    int *h_B = (int *)malloc(size);
    if (h_A == NULL || h_B == NULL) { printf("host malloc error\n"); exit(1); }
    for (i = 0; i < a.len; ++i) h_A[i] = a.X[i];
    for (i = 0; i < a.len; ++i) h_B[i] = b.X[i];
    // Allocate device buffers.
    int *d_A = NULL;
    int *d_B = NULL;
    cudaError_t err = cudaSuccess;  // cudaError_t is the idiomatic typedef
    err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess) { printf("malloc A error\n"); exit(1); }
    err = cudaMalloc((void **)&d_B, size);
    if (err != cudaSuccess) { printf("malloc B error\n"); exit(1); }
    // Copy vector A and vector B to the device.
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { printf("copy error\n"); exit(1); }
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { printf("copy error\n"); exit(1); }
    // One block per element, one thread per block (simple but functional).
    kernal1<<<a.len,1>>>(d_A, d_B, a.len);
    err = cudaGetLastError();
    if (err != cudaSuccess) { printf("kernal1 error\n"); exit(1); }
    err = cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) { printf("copy error\n"); exit(1); }
    // Free device global memory (best effort; the result is already fetched,
    // so the return codes are deliberately ignored instead of being stored
    // in err and never checked).
    (void)cudaFree(d_A);
    (void)cudaFree(d_B);
    // Publish the result and release host staging buffers.
    for (i = 0; i < a.len; ++i) this->X[i] = h_B[i];
    free(h_A);
    free(h_B);
    return *this;
}
/**
 * Element-wise GPU scale: stores k * a into this vector and returns it.
 * On any CUDA API failure prints a message and exits with a non-zero status
 * (previously exit(0), which wrongly signalled success to the shell).
 */
__host__ my_vector my_vector::mult (int k, my_vector a)
{
    // (duplicate "this->len = a.len;" assignment removed)
    size_t size = a.len * sizeof(int);
    this->len = a.len;
    int i;
    // Stage the operand in a host buffer.
    int *h_A = (int *)malloc(size);
    if (h_A == NULL) { printf("host malloc error\n"); exit(1); }
    for (i = 0; i < a.len; ++i) h_A[i] = a.X[i];
    // Allocate the device buffer.
    int *d_A = NULL;
    cudaError_t err = cudaSuccess;  // cudaError_t is the idiomatic typedef
    err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess) { printf("malloc A error\n"); exit(1); }
    // Copy vector A to the device.
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { printf("copy error\n"); exit(1); }
    // One block per element, one thread per block (simple but functional).
    kernal2<<<a.len,1>>>(d_A, k, a.len);
    err = cudaGetLastError();
    if (err != cudaSuccess) { printf("kernal2 error\n"); exit(1); }
    err = cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) { printf("copy error\n"); exit(1); }
    // Free device global memory (best effort; result already fetched).
    (void)cudaFree(d_A);
    // Publish the result and release the host staging buffer.
    for (i = 0; i < a.len; ++i) this->X[i] = h_A[i];
    free(h_A);
    return *this;
}
// Prints the vector elements separated by spaces, terminated by a newline.
__host__ void my_vector::write ()
{
    for (int idx = 0; idx < len; ++idx)
    {
        printf("%d ", X[idx]);
    }
    printf("\n");
}
|
b45e95fefc1cb56b07721303a28399d0578fa9b5.hip | // !!! This is a file automatically generated by hipify!!!
// -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: $
// $Date: $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt in
// the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* test_scan.cu
*
* @brief Host testrig routines to exercise cudpp's scan functionality.
*/
#include <stdio.h>
#include <cutil.h>
#include <time.h>
#include <limits.h>
#include "cudpp.h"
#include "cudpp_testrig_options.h"
extern "C"
void computeSumScanGold(float *reference, const float *idata,
const unsigned int len,
const CUDPPConfiguration &config);
extern "C"
void computeMultiplyScanGold(float *reference, const float *idata,
const unsigned int len,
const CUDPPConfiguration &config);
extern "C"
void computeMultiRowSumScanGold(float *reference, const float *idata,
const unsigned int len,
const unsigned int rows,
const CUDPPConfiguration &config);
extern "C"
void computeMaxScanGold( float *reference, const float *idata,
const unsigned int len,
const CUDPPConfiguration &config);
extern "C"
void computeMinScanGold( float *reference, const float *idata,
const unsigned int len,
const CUDPPConfiguration &config);
extern "C"
void computeSumSegmentedScanGold(float *reference, const float *idata,
const unsigned int* iflags,
const unsigned int len,
const CUDPPConfiguration &config);
extern "C"
void
computeMaxSegmentedScanGold(float* reference, const float* idata,
const unsigned int *iflag,
const unsigned int len,
const CUDPPConfiguration & config);
extern "C"
void
computeMultiplySegmentedScanGold(float* reference, const float* idata,
const unsigned int *iflag,
const unsigned int len,
const CUDPPConfiguration & config);
extern "C"
void
computeMinSegmentedScanGold(float* reference, const float* idata,
const unsigned int *iflag,
const unsigned int len,
const CUDPPConfiguration & config);
/**
* testScan exercises cudpp's unsegmented scan functionality.
* Possible command line arguments:
* - --op=OP: sets scan operation to OP (sum, max, min and multiply.)
* - --forward, --backward: sets direction of scan
* - --exclusive, --inclusive: sets exclusivity of scan
* - --n=#: number of elements in scan
* - Also "global" options (see setOptions)
* @param argc Number of arguments on the command line, passed
* directly from main
* @param argv Array of arguments on the command line, passed directly
* from main
* @param configPtr Configuration for scan, set by caller
* @return Number of tests that failed regression (0 for all pass)
* @see CUDPPConfiguration, setOptions, cudppScan
*/
int testScan(int argc, const char **argv, CUDPPConfiguration *configPtr)
{
// Counts the number of failed sub-tests; 0 means all passed.
int retval = 0;
testrigOptions testOptions;
setOptions(argc, argv, testOptions);
// cutil timer is reused for both the CPU reference and the GPU runs.
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUDPPConfiguration config;
config.algorithm = CUDPP_SCAN;
// Caller-supplied configuration wins; otherwise build one from the command line.
if (configPtr != NULL)
{
config = *configPtr;
}
else
{
CUDPPOption direction = CUDPP_OPTION_FORWARD;
CUDPPOption inclusivity = CUDPP_OPTION_EXCLUSIVE;
//default sum scan
config.op = CUDPP_ADD;
config.datatype = CUDPP_FLOAT;
if (testOptions.op && !strcmp(testOptions.op, "max"))
{
config.op = CUDPP_MAX;
}
else if (testOptions.op && !strcmp(testOptions.op, "min"))
{
config.op = CUDPP_MIN;
}
else if (testOptions.op && !strcmp(testOptions.op, "multiply"))
{
config.op = CUDPP_MULTIPLY;
}
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "backward"))
{
direction = CUDPP_OPTION_BACKWARD;
}
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "exclusive"))
{
inclusivity = CUDPP_OPTION_EXCLUSIVE;
}
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "inclusive"))
{
inclusivity = CUDPP_OPTION_INCLUSIVE;
}
config.options = direction | inclusivity;
}
int numElements = 8388608; // maximum test size
bool quiet = (CUTTrue == cutCheckCmdLineFlag(argc, (const char**) argv, "quiet"));
// --n=# switches from the built-in size sweep to a single test size.
bool oneTest = false;
if (CUTTrue == cutGetCmdLineArgumenti(argc, (const char**) argv, "n",
&numElements))
{
oneTest = true;
}
// Problem sizes swept when no explicit --n=# is given.
unsigned int test[] = {39, 128, 256, 512, 1000, 1024, 1025, 32768, 45537, 65536, 131072,
262144, 500001, 524288, 1048577, 1048576, 1048581, 2097152, 4194304, 8388608};
int numTests = sizeof(test) / sizeof(test[0]);
if (oneTest)
{
test[0] = numElements;
numTests = 1;
}
// One plan sized for the largest test serves all smaller sizes too.
CUDPPHandle scanPlan;
CUDPPResult result = CUDPP_SUCCESS;
result = cudppPlan(&scanPlan, config, numElements, 1, 0);
if (result != CUDPP_SUCCESS)
{
fprintf(stderr, "Error creating plan for Scan\n");
retval = (oneTest) ? 1 : numTests;
return retval;
}
unsigned int memSize = sizeof(float) * numElements;
// allocate host memory to store the input data
float* i_data = (float*) malloc( memSize);
// allocate host memory to store the output data
float* o_data = (float*) malloc( memSize);
// host memory to store input flags
// initialize the input data on the host
for(int i = 0; i < numElements; ++i)
{
i_data[i] = (float)(rand() & 1);
}
// allocate and compute reference solution
float* reference = (float*) malloc( memSize);
// allocate device memory input and output arrays
float* d_idata = NULL;
float* d_odata = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, memSize));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, memSize));
// copy host memory to device input array
CUDA_SAFE_CALL( hipMemcpy(d_idata, i_data, memSize,
hipMemcpyHostToDevice) );
// initialize all the other device arrays to be safe
CUDA_SAFE_CALL( hipMemcpy(d_odata, o_data, memSize,
hipMemcpyHostToDevice) );
for (int k = 0; k < numTests; ++k)
{
// Human-readable operator name for the progress message.
char op[10];
switch (config.op)
{
case CUDPP_ADD:
strcpy(op, "sum");
break;
case CUDPP_MULTIPLY:
strcpy(op, "multiply");
break;
case CUDPP_MAX:
strcpy(op, "max");
break;
case CUDPP_MIN:
strcpy(op, "min");
break;
}
if (!quiet)
{
printf("Running a%s%s %s-scan of %d elements\n",
(config.options & CUDPP_OPTION_BACKWARD) ? " backward" : "",
(config.options & CUDPP_OPTION_INCLUSIVE) ? " inclusive" : "",
op,
test[k]);
fflush(stdout);
}
// Time the CPU reference implementation for this operator.
cutResetTimer(timer);
cutStartTimer(timer);
if (config.op == CUDPP_ADD)
computeSumScanGold( reference, i_data, test[k], config);
else if (config.op == CUDPP_MULTIPLY)
computeMultiplyScanGold( reference, i_data, test[k], config);
else if (config.op == CUDPP_MAX)
computeMaxScanGold( reference, i_data, test[k], config);
else if (config.op == CUDPP_MIN)
computeMinScanGold( reference, i_data, test[k], config);
cutStopTimer(timer);
if (!quiet)
printf("CPU execution time = %f\n", cutGetTimerValue(timer));
cutResetTimer(timer);
// Run the scan
// run once to avoid timing startup overhead.
#ifndef __DEVICE_EMULATION__
cudppScan(scanPlan, d_odata, d_idata, test[k]);
#endif
cutStartTimer(timer);
for (int i = 0; i < testOptions.numIterations; i++)
{
cudppScan(scanPlan, d_odata, d_idata, test[k]);
}
hipDeviceSynchronize();
cutStopTimer(timer);
// copy result from device to host
CUDA_SAFE_CALL(hipMemcpy( o_data, d_odata, sizeof(float) * test[k],
hipMemcpyDeviceToHost));
// check if the result is equivalent to the expected soluion
CUTBoolean result = cutComparefe( reference, o_data, test[k], 0.001f);
retval += (CUTTrue == result) ? 0 : 1;
if (!quiet)
{
printf("%s test %s\n", testOptions.runMode,
(CUTTrue == result) ? "PASSED" : "FAILED");
printf("Average execution time: %f ms\n",
cutGetTimerValue(timer) / testOptions.numIterations);
}
else
{
printf("\t%10d\t%0.4f\n", test[k], cutGetTimerValue(timer) / testOptions.numIterations);
}
if (testOptions.debug)
{
for (int i = 0; i < numElements; ++i)
{
printf("%f ", o_data[i]);
}
printf("\n");
}
cutResetTimer(timer);
}
if (!quiet)
printf("\n");
result = cudppDestroyPlan(scanPlan);
if (result != CUDPP_SUCCESS)
{
printf("Error destroying CUDPPPlan for Scan\n");
}
// cleanup memory
cutDeleteTimer(timer);
free(i_data);
free(o_data);
free(reference);
hipFree(d_odata);
hipFree(d_idata);
return retval;
}
/**
* testSegmentedScan exercises cudpp's unsegmented scan functionality.
* Possible command line arguments:
* - --op=OP: sets scan operation to OP (sum, max, min and multiply.)
* - --forward: sets direction of scan
* - --exclusive, --inclusive: sets exclusivity of scan
* - --n=#: number of elements in scan
* - Also "global" options (see setOptions)
* @param argc Number of arguments on the command line, passed
* directly from main
* @param argv Array of arguments on the command line, passed directly
* from main
* @param configPtr Configuration for scan, set by caller
* @return Number of tests that failed regression (0 for all pass)
* @see CUDPPConfiguration, setOptions, cudppSegmentedScan
*/
int testSegmentedScan(int argc, const char **argv, CUDPPConfiguration *configPtr)
{
// Counts the number of failed sub-tests; 0 means all passed.
int retval = 0;
testrigOptions testOptions;
setOptions(argc, argv, testOptions);
// cutil timer is reused for both the CPU reference and the GPU runs.
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUDPPConfiguration config;
config.algorithm = CUDPP_SEGMENTED_SCAN;
// Caller-supplied configuration wins; otherwise build one from the command line.
if (configPtr != NULL)
{
config = *configPtr;
}
else
{
CUDPPOption direction = CUDPP_OPTION_FORWARD;
CUDPPOption inclusivity = CUDPP_OPTION_EXCLUSIVE;
//default segmented sum scan
config.op = CUDPP_ADD;
config.datatype = CUDPP_FLOAT;
if (testOptions.op && !strcmp(testOptions.op, "max"))
{
config.op = CUDPP_MAX;
}
if (testOptions.op && !strcmp(testOptions.op, "multiply"))
{
config.op = CUDPP_MULTIPLY;
}
if (testOptions.op && !strcmp(testOptions.op, "min"))
{
config.op = CUDPP_MIN;
}
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "backward"))
{
direction = CUDPP_OPTION_BACKWARD;
}
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "exclusive"))
{
inclusivity = CUDPP_OPTION_EXCLUSIVE;
}
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "inclusive"))
{
inclusivity = CUDPP_OPTION_INCLUSIVE;
}
config.options = direction | inclusivity;
}
int numElements = 8388608; // maximum test size
// Number of random segment-start flags set per test.
int numFlags = 4;
bool quiet = (CUTTrue == cutCheckCmdLineFlag(argc, (const char**) argv, "quiet"));
// --n=# switches from the built-in size sweep to a single test size.
bool oneTest = false;
if (CUTTrue == cutGetCmdLineArgumenti(argc, (const char**) argv, "n",
&numElements))
{
oneTest = true;
}
// Problem sizes swept when no explicit --n=# is given.
unsigned int test[] = {32, 128, 256, 512, 1024, 1025, 32768, 45537, 65536, 131072,
262144, 500001, 524288, 1048577, 1048576, 1048581, 2097152, 4194304, 8388608};
int numTests = sizeof(test) / sizeof(test[0]);
if (oneTest)
{
test[0] = numElements;
numTests = 1;
}
// One plan sized for the largest test serves all smaller sizes too.
CUDPPHandle segmentedScanPlan;
CUDPPResult result = CUDPP_SUCCESS;
result = cudppPlan(&segmentedScanPlan, config, numElements, 1, 0);
if (result != CUDPP_SUCCESS)
{
fprintf(stderr, "Error creating plan for Segmented Scan\n");
retval = (oneTest) ? 1 : numTests;
return retval;
}
unsigned int memSize = sizeof(float) * numElements;
// allocate host memory to store the input data
float* i_data = (float*) malloc( memSize);
// allocate host memory to store the input data
unsigned int* i_flags =
(unsigned int*) malloc(sizeof(unsigned int) * numElements);
// Set all flags to 0
memset(i_flags, 0, sizeof(unsigned int) * numElements);
// allocate host memory to store the output data
float* o_data = (float*) malloc( memSize);
// host memory to store input flags
// initialize the input data on the host
for(int i = 0; i < numElements; ++i)
{
i_data[i] = (float) 1; // (rand() & 1);
}
// allocate and compute reference solution
float* reference = (float*) malloc( memSize);
// allocate device memory input and output arrays
float* d_idata = NULL;
unsigned int *d_iflags = NULL;
float* d_odata = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, memSize));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_iflags,
sizeof(unsigned int) * numElements));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, memSize));
// copy host memory to device input array
CUDA_SAFE_CALL( hipMemcpy(d_idata, i_data, memSize,
hipMemcpyHostToDevice) );
// initialize all the other device arrays to be safe
CUDA_SAFE_CALL( hipMemcpy(d_odata, o_data, memSize,
hipMemcpyHostToDevice) );
for (int k = 0; k < numTests; ++k)
{
// Generate flags
for(int i = 0; i < numFlags; ++i)
{
unsigned int idx;
// The flag at the first position is implicitly set
// so try to generate non-zero positions
while((idx = (unsigned int)
((test[k] - 1) * (rand() / (float)RAND_MAX)))
== 0)
{
}
// printf("Setting flag at pos %d\n", idx);
i_flags[idx] = 1;
}
// i_flags[5]=1;
// Copy flags to GPU
CUDA_SAFE_CALL( hipMemcpy(d_iflags, i_flags,
sizeof(unsigned int) * test[k],
hipMemcpyHostToDevice) );
// Human-readable operator name for the progress message.
char op[10];
switch (config.op)
{
case CUDPP_ADD:
strcpy(op, "sum");
break;
case CUDPP_MULTIPLY:
strcpy(op, "multiply");
break;
case CUDPP_MAX:
strcpy(op, "max");
break;
case CUDPP_MIN:
strcpy(op, "min");
break;
}
if (!quiet)
{
printf("Running a%s%s %s-segmented scan of %d elements\n",
(config.options & CUDPP_OPTION_BACKWARD) ? " backward" : "",
(config.options & CUDPP_OPTION_INCLUSIVE) ? " inclusive" : "",
op,
test[k]);
fflush(stdout);
}
fflush(stdout);
// Time the CPU reference implementation for this operator.
cutResetTimer(timer);
cutStartTimer(timer);
if(config.op == CUDPP_ADD)
computeSumSegmentedScanGold(reference, i_data, i_flags, test[k], config);
else if (config.op == CUDPP_MAX)
computeMaxSegmentedScanGold(reference, i_data, i_flags, test[k], config);
else if (config.op == CUDPP_MULTIPLY)
computeMultiplySegmentedScanGold(reference, i_data, i_flags, test[k], config);
else if (config.op == CUDPP_MIN)
computeMinSegmentedScanGold(reference, i_data, i_flags, test[k], config);
cutStopTimer(timer);
if (!quiet)
{
printf("CPU execution time = %f\n", cutGetTimerValue(timer));
}
cutResetTimer(timer);
// Run the scan
// run once to avoid timing startup overhead.
#ifndef __DEVICE_EMULATION__
cudppSegmentedScan(segmentedScanPlan, d_odata, d_idata, d_iflags, test[k]);
#endif
cutStartTimer(timer);
for (int i = 0; i < testOptions.numIterations; i++)
{
cudppSegmentedScan(segmentedScanPlan, d_odata, d_idata, d_iflags, test[k]);
}
hipDeviceSynchronize();
cutStopTimer(timer);
// copy result from device to host
CUDA_SAFE_CALL(hipMemcpy( o_data, d_odata, sizeof(float) * test[k],
hipMemcpyDeviceToHost));
// check if the result is equivalent to the expected soluion
CUTBoolean result = cutComparefe( reference, o_data, test[k], 0.001f);
retval += (CUTTrue == result) ? 0 : 1;
if (!quiet)
{
printf("%s test %s\n", testOptions.runMode,
(CUTTrue == result) ? "PASSED" : "FAILED");
printf("Average execution time: %f ms\n",
cutGetTimerValue(timer) / testOptions.numIterations);
}
else
{
printf("\t%10d\t%0.4f\n", test[k], cutGetTimerValue(timer) / testOptions.numIterations);
}
if (testOptions.debug)
{
for (unsigned int i = 0; i < test[k]; ++i)
{
if (reference[i] != o_data[i]) printf("%d %f %f\n", i, o_data[i], reference[i]);
// printf("%f %f\n", reference[i], o_data[i]);
}
// printf("\n");
// for (unsigned int i = 0; i < test[k]; ++i)
// {
// printf("%f ", reference[i]);
//}
// printf("\n");
}
cutResetTimer(timer); // needed after CUT alpha2
}
if (!quiet)
printf("\n");
result = cudppDestroyPlan(segmentedScanPlan);
if (result != CUDPP_SUCCESS)
{
printf("Error destroying CUDPPPlan for Scan\n");
}
// cleanup memory
cutDeleteTimer(timer);
free(i_data);
free(i_flags);
free(o_data);
free(reference);
hipFree(d_odata);
hipFree(d_idata);
hipFree(d_iflags);
return retval;
}
/**
* testMultiSumScan exercises cudpp's multiple-unsegmented-scan functionality.
* @param argc Number of arguments on the command line, passed
* directly from main
* @param argv Array of arguments on the command line, passed directly
* from main
* @return Number of tests that failed regression (0 for all pass)
* @see cudppMultiScan
*/
int testMultiSumScan(int argc, const char **argv)
{
int retval = 0; // number of failed subtests (0 == all pass)
// parse "global" testrig options (run mode, iteration count, ...) from argv
testrigOptions testOptions;
setOptions(argc, argv, testOptions);
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
// configure a forward exclusive float sum-scan; only the direction is
// overridable from the command line (--backward)
CUDPPConfiguration config;
CUDPPOption direction = CUDPP_OPTION_FORWARD;
CUDPPOption inclusivity = CUDPP_OPTION_EXCLUSIVE;
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "backward"))
{
direction = CUDPP_OPTION_BACKWARD;
}
config.algorithm = CUDPP_SCAN;
config.options = direction | inclusivity;
config.op = CUDPP_ADD;
config.datatype = CUDPP_FLOAT;
// problem size: --n=<elements per row>, --r=<rows>
int numElements = 1024; // maximum test size
int numRows = 1024;
//bool oneTest = false;
if (CUTTrue == cutGetCmdLineArgumenti(argc, (const char**) argv, "n",
&numElements))
{
// oneTest = true;
}
if (CUTTrue == cutGetCmdLineArgumenti(argc, (const char**) argv, "r",
&numRows))
{
// oneTest = true;
}
size_t myPitch = numElements * sizeof(float); // host row pitch in bytes
size_t hmemSize = numRows * myPitch;
// allocate host memory to store the input data
float* i_data = (float*) malloc( hmemSize);
// allocate host memory to store the output data
float* o_data = (float*) malloc( hmemSize);
// random input in [0, 31]; output poisoned with -1 so untouched elements
// are detectable after the copy back
for( int i = 0; i < numElements * numRows; ++i)
{
i_data[i] = (float)(rand() & 31);
o_data[i] = -1;
}
// allocate and compute the CPU reference solution
float* reference = (float*) malloc(hmemSize);
computeMultiRowSumScanGold( reference, i_data, numElements, numRows, config);
// allocate device memory input and output arrays (pitched, so each row is
// aligned; the returned device pitch may be larger than the host pitch)
float* d_idata = NULL;
float* d_odata = NULL;
size_t d_ipitch = 0;
size_t d_opitch = 0;
CUDA_SAFE_CALL( hipMallocPitch( (void**) &d_idata, &d_ipitch,
myPitch, numRows));
CUDA_SAFE_CALL( hipMallocPitch( (void**) &d_odata, &d_opitch,
myPitch, numRows));
// copy host memory to device input array
CUDA_SAFE_CALL( hipMemcpy2D(d_idata, d_ipitch, i_data, myPitch, myPitch,
numRows, hipMemcpyHostToDevice) );
// initialize all the other device arrays to be safe
// NOTE(review): this copy passes d_ipitch as the pitch of d_odata; that is
// only correct if d_opitch == d_ipitch — confirm (same allocation shape, so
// it likely holds, but d_opitch would be the safe choice).
CUDA_SAFE_CALL( hipMemcpy2D(d_odata, d_ipitch, o_data, myPitch, myPitch,
numRows, hipMemcpyHostToDevice) );
// cudppPlan wants the row pitch in elements, not bytes
size_t rowPitch = d_ipitch / sizeof(float);
CUDPPHandle multiscanPlan = 0;
CUDPPResult ret = cudppPlan(&multiscanPlan, config, numElements, numRows, rowPitch);
if (ret != CUDPP_SUCCESS)
{
// NOTE(review): early return leaks the host/device buffers above
fprintf(stderr, "Error creating CUDPP Plan for multi-row Scan.\n");
retval = 1;
return retval;
}
printf("Running a%s sum-scan of %d rows of %d elements\n",
(config.options & CUDPP_OPTION_BACKWARD) ? " backward" : "",
numRows,
numElements);
fflush(stdout);
// run once to avoid timing startup overhead.
#ifndef __DEVICE_EMULATION__
cudppMultiScan(multiscanPlan, d_odata, d_idata, numElements, numRows);
#endif
// timed runs; the average per-iteration time is reported below
cutStartTimer(timer);
for (int i = 0; i < testOptions.numIterations; i++)
{
cudppMultiScan(multiscanPlan, d_odata, d_idata, numElements, numRows);
}
hipDeviceSynchronize();
cutStopTimer(timer);
// copy result from device to host
CUDA_SAFE_CALL(hipMemcpy2D( o_data, myPitch, d_odata, d_opitch,
myPitch, numRows, hipMemcpyDeviceToHost));
// check if the result is equivalent to the expected solution (abs tolerance)
CUTBoolean result = cutComparefe( reference, o_data, numElements*numRows,
0.001f);
retval += (CUTTrue == result) ? 0 : 1;
printf("%s test %s\n", testOptions.runMode,
(CUTTrue == result) ? "PASSED" : "FAILED");
printf("Average execution time: %f ms\n",
cutGetTimerValue(timer) / testOptions.numIterations);
printf("\n");
cudppDestroyPlan(multiscanPlan);
// cleanup memory
cutDeleteTimer(timer);
free( i_data);
free(o_data);
free( reference);
hipFree( d_odata);
hipFree( d_idata);
return retval;
}
| b45e95fefc1cb56b07721303a28399d0578fa9b5.cu | // -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: $
// $Date: $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt in
// the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* test_scan.cu
*
* @brief Host testrig routines to exercise cudpp's scan functionality.
*/
#include <stdio.h>
#include <cutil.h>
#include <time.h>
#include <limits.h>
#include "cudpp.h"
#include "cudpp_testrig_options.h"
extern "C"
void computeSumScanGold(float *reference, const float *idata,
const unsigned int len,
const CUDPPConfiguration &config);
extern "C"
void computeMultiplyScanGold(float *reference, const float *idata,
const unsigned int len,
const CUDPPConfiguration &config);
extern "C"
void computeMultiRowSumScanGold(float *reference, const float *idata,
const unsigned int len,
const unsigned int rows,
const CUDPPConfiguration &config);
extern "C"
void computeMaxScanGold( float *reference, const float *idata,
const unsigned int len,
const CUDPPConfiguration &config);
extern "C"
void computeMinScanGold( float *reference, const float *idata,
const unsigned int len,
const CUDPPConfiguration &config);
extern "C"
void computeSumSegmentedScanGold(float *reference, const float *idata,
const unsigned int* iflags,
const unsigned int len,
const CUDPPConfiguration &config);
extern "C"
void
computeMaxSegmentedScanGold(float* reference, const float* idata,
const unsigned int *iflag,
const unsigned int len,
const CUDPPConfiguration & config);
extern "C"
void
computeMultiplySegmentedScanGold(float* reference, const float* idata,
const unsigned int *iflag,
const unsigned int len,
const CUDPPConfiguration & config);
extern "C"
void
computeMinSegmentedScanGold(float* reference, const float* idata,
const unsigned int *iflag,
const unsigned int len,
const CUDPPConfiguration & config);
/**
* testScan exercises cudpp's unsegmented scan functionality.
* Possible command line arguments:
* - --op=OP: sets scan operation to OP (sum, max, min and multiply.)
* - --forward, --backward: sets direction of scan
* - --exclusive, --inclusive: sets exclusivity of scan
* - --n=#: number of elements in scan
* - Also "global" options (see setOptions)
* @param argc Number of arguments on the command line, passed
* directly from main
* @param argv Array of arguments on the command line, passed directly
* from main
* @param configPtr Configuration for scan, set by caller
* @return Number of tests that failed regression (0 for all pass)
* @see CUDPPConfiguration, setOptions, cudppScan
*/
int testScan(int argc, const char **argv, CUDPPConfiguration *configPtr)
{
int retval = 0;
testrigOptions testOptions;
setOptions(argc, argv, testOptions);
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUDPPConfiguration config;
config.algorithm = CUDPP_SCAN;
if (configPtr != NULL)
{
config = *configPtr;
}
else
{
CUDPPOption direction = CUDPP_OPTION_FORWARD;
CUDPPOption inclusivity = CUDPP_OPTION_EXCLUSIVE;
//default sum scan
config.op = CUDPP_ADD;
config.datatype = CUDPP_FLOAT;
if (testOptions.op && !strcmp(testOptions.op, "max"))
{
config.op = CUDPP_MAX;
}
else if (testOptions.op && !strcmp(testOptions.op, "min"))
{
config.op = CUDPP_MIN;
}
else if (testOptions.op && !strcmp(testOptions.op, "multiply"))
{
config.op = CUDPP_MULTIPLY;
}
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "backward"))
{
direction = CUDPP_OPTION_BACKWARD;
}
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "exclusive"))
{
inclusivity = CUDPP_OPTION_EXCLUSIVE;
}
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "inclusive"))
{
inclusivity = CUDPP_OPTION_INCLUSIVE;
}
config.options = direction | inclusivity;
}
int numElements = 8388608; // maximum test size
bool quiet = (CUTTrue == cutCheckCmdLineFlag(argc, (const char**) argv, "quiet"));
bool oneTest = false;
if (CUTTrue == cutGetCmdLineArgumenti(argc, (const char**) argv, "n",
&numElements))
{
oneTest = true;
}
unsigned int test[] = {39, 128, 256, 512, 1000, 1024, 1025, 32768, 45537, 65536, 131072,
262144, 500001, 524288, 1048577, 1048576, 1048581, 2097152, 4194304, 8388608};
int numTests = sizeof(test) / sizeof(test[0]);
if (oneTest)
{
test[0] = numElements;
numTests = 1;
}
CUDPPHandle scanPlan;
CUDPPResult result = CUDPP_SUCCESS;
result = cudppPlan(&scanPlan, config, numElements, 1, 0);
if (result != CUDPP_SUCCESS)
{
fprintf(stderr, "Error creating plan for Scan\n");
retval = (oneTest) ? 1 : numTests;
return retval;
}
unsigned int memSize = sizeof(float) * numElements;
// allocate host memory to store the input data
float* i_data = (float*) malloc( memSize);
// allocate host memory to store the output data
float* o_data = (float*) malloc( memSize);
// host memory to store input flags
// initialize the input data on the host
for(int i = 0; i < numElements; ++i)
{
i_data[i] = (float)(rand() & 1);
}
// allocate and compute reference solution
float* reference = (float*) malloc( memSize);
// allocate device memory input and output arrays
float* d_idata = NULL;
float* d_odata = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, memSize));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, memSize));
// copy host memory to device input array
CUDA_SAFE_CALL( cudaMemcpy(d_idata, i_data, memSize,
cudaMemcpyHostToDevice) );
// initialize all the other device arrays to be safe
CUDA_SAFE_CALL( cudaMemcpy(d_odata, o_data, memSize,
cudaMemcpyHostToDevice) );
for (int k = 0; k < numTests; ++k)
{
char op[10];
switch (config.op)
{
case CUDPP_ADD:
strcpy(op, "sum");
break;
case CUDPP_MULTIPLY:
strcpy(op, "multiply");
break;
case CUDPP_MAX:
strcpy(op, "max");
break;
case CUDPP_MIN:
strcpy(op, "min");
break;
}
if (!quiet)
{
printf("Running a%s%s %s-scan of %d elements\n",
(config.options & CUDPP_OPTION_BACKWARD) ? " backward" : "",
(config.options & CUDPP_OPTION_INCLUSIVE) ? " inclusive" : "",
op,
test[k]);
fflush(stdout);
}
cutResetTimer(timer);
cutStartTimer(timer);
if (config.op == CUDPP_ADD)
computeSumScanGold( reference, i_data, test[k], config);
else if (config.op == CUDPP_MULTIPLY)
computeMultiplyScanGold( reference, i_data, test[k], config);
else if (config.op == CUDPP_MAX)
computeMaxScanGold( reference, i_data, test[k], config);
else if (config.op == CUDPP_MIN)
computeMinScanGold( reference, i_data, test[k], config);
cutStopTimer(timer);
if (!quiet)
printf("CPU execution time = %f\n", cutGetTimerValue(timer));
cutResetTimer(timer);
// Run the scan
// run once to avoid timing startup overhead.
#ifndef __DEVICE_EMULATION__
cudppScan(scanPlan, d_odata, d_idata, test[k]);
#endif
cutStartTimer(timer);
for (int i = 0; i < testOptions.numIterations; i++)
{
cudppScan(scanPlan, d_odata, d_idata, test[k]);
}
cudaThreadSynchronize();
cutStopTimer(timer);
// copy result from device to host
CUDA_SAFE_CALL(cudaMemcpy( o_data, d_odata, sizeof(float) * test[k],
cudaMemcpyDeviceToHost));
// check if the result is equivalent to the expected soluion
CUTBoolean result = cutComparefe( reference, o_data, test[k], 0.001f);
retval += (CUTTrue == result) ? 0 : 1;
if (!quiet)
{
printf("%s test %s\n", testOptions.runMode,
(CUTTrue == result) ? "PASSED" : "FAILED");
printf("Average execution time: %f ms\n",
cutGetTimerValue(timer) / testOptions.numIterations);
}
else
{
printf("\t%10d\t%0.4f\n", test[k], cutGetTimerValue(timer) / testOptions.numIterations);
}
if (testOptions.debug)
{
for (int i = 0; i < numElements; ++i)
{
printf("%f ", o_data[i]);
}
printf("\n");
}
cutResetTimer(timer);
}
if (!quiet)
printf("\n");
result = cudppDestroyPlan(scanPlan);
if (result != CUDPP_SUCCESS)
{
printf("Error destroying CUDPPPlan for Scan\n");
}
// cleanup memory
cutDeleteTimer(timer);
free(i_data);
free(o_data);
free(reference);
cudaFree(d_odata);
cudaFree(d_idata);
return retval;
}
/**
* testSegmentedScan exercises cudpp's unsegmented scan functionality.
* Possible command line arguments:
* - --op=OP: sets scan operation to OP (sum, max, min and multiply.)
* - --forward: sets direction of scan
* - --exclusive, --inclusive: sets exclusivity of scan
* - --n=#: number of elements in scan
* - Also "global" options (see setOptions)
* @param argc Number of arguments on the command line, passed
* directly from main
* @param argv Array of arguments on the command line, passed directly
* from main
* @param configPtr Configuration for scan, set by caller
* @return Number of tests that failed regression (0 for all pass)
* @see CUDPPConfiguration, setOptions, cudppSegmentedScan
*/
int testSegmentedScan(int argc, const char **argv, CUDPPConfiguration *configPtr)
{
int retval = 0;
testrigOptions testOptions;
setOptions(argc, argv, testOptions);
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUDPPConfiguration config;
config.algorithm = CUDPP_SEGMENTED_SCAN;
if (configPtr != NULL)
{
config = *configPtr;
}
else
{
CUDPPOption direction = CUDPP_OPTION_FORWARD;
CUDPPOption inclusivity = CUDPP_OPTION_EXCLUSIVE;
//default segmented sum scan
config.op = CUDPP_ADD;
config.datatype = CUDPP_FLOAT;
if (testOptions.op && !strcmp(testOptions.op, "max"))
{
config.op = CUDPP_MAX;
}
if (testOptions.op && !strcmp(testOptions.op, "multiply"))
{
config.op = CUDPP_MULTIPLY;
}
if (testOptions.op && !strcmp(testOptions.op, "min"))
{
config.op = CUDPP_MIN;
}
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "backward"))
{
direction = CUDPP_OPTION_BACKWARD;
}
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "exclusive"))
{
inclusivity = CUDPP_OPTION_EXCLUSIVE;
}
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "inclusive"))
{
inclusivity = CUDPP_OPTION_INCLUSIVE;
}
config.options = direction | inclusivity;
}
int numElements = 8388608; // maximum test size
int numFlags = 4;
bool quiet = (CUTTrue == cutCheckCmdLineFlag(argc, (const char**) argv, "quiet"));
bool oneTest = false;
if (CUTTrue == cutGetCmdLineArgumenti(argc, (const char**) argv, "n",
&numElements))
{
oneTest = true;
}
unsigned int test[] = {32, 128, 256, 512, 1024, 1025, 32768, 45537, 65536, 131072,
262144, 500001, 524288, 1048577, 1048576, 1048581, 2097152, 4194304, 8388608};
int numTests = sizeof(test) / sizeof(test[0]);
if (oneTest)
{
test[0] = numElements;
numTests = 1;
}
CUDPPHandle segmentedScanPlan;
CUDPPResult result = CUDPP_SUCCESS;
result = cudppPlan(&segmentedScanPlan, config, numElements, 1, 0);
if (result != CUDPP_SUCCESS)
{
fprintf(stderr, "Error creating plan for Segmented Scan\n");
retval = (oneTest) ? 1 : numTests;
return retval;
}
unsigned int memSize = sizeof(float) * numElements;
// allocate host memory to store the input data
float* i_data = (float*) malloc( memSize);
// allocate host memory to store the input data
unsigned int* i_flags =
(unsigned int*) malloc(sizeof(unsigned int) * numElements);
// Set all flags to 0
memset(i_flags, 0, sizeof(unsigned int) * numElements);
// allocate host memory to store the output data
float* o_data = (float*) malloc( memSize);
// host memory to store input flags
// initialize the input data on the host
for(int i = 0; i < numElements; ++i)
{
i_data[i] = (float) 1; // (rand() & 1);
}
// allocate and compute reference solution
float* reference = (float*) malloc( memSize);
// allocate device memory input and output arrays
float* d_idata = NULL;
unsigned int *d_iflags = NULL;
float* d_odata = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, memSize));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_iflags,
sizeof(unsigned int) * numElements));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, memSize));
// copy host memory to device input array
CUDA_SAFE_CALL( cudaMemcpy(d_idata, i_data, memSize,
cudaMemcpyHostToDevice) );
// initialize all the other device arrays to be safe
CUDA_SAFE_CALL( cudaMemcpy(d_odata, o_data, memSize,
cudaMemcpyHostToDevice) );
for (int k = 0; k < numTests; ++k)
{
// Generate flags
for(int i = 0; i < numFlags; ++i)
{
unsigned int idx;
// The flag at the first position is implicitly set
// so try to generate non-zero positions
while((idx = (unsigned int)
((test[k] - 1) * (rand() / (float)RAND_MAX)))
== 0)
{
}
// printf("Setting flag at pos %d\n", idx);
i_flags[idx] = 1;
}
// i_flags[5]=1;
// Copy flags to GPU
CUDA_SAFE_CALL( cudaMemcpy(d_iflags, i_flags,
sizeof(unsigned int) * test[k],
cudaMemcpyHostToDevice) );
char op[10];
switch (config.op)
{
case CUDPP_ADD:
strcpy(op, "sum");
break;
case CUDPP_MULTIPLY:
strcpy(op, "multiply");
break;
case CUDPP_MAX:
strcpy(op, "max");
break;
case CUDPP_MIN:
strcpy(op, "min");
break;
}
if (!quiet)
{
printf("Running a%s%s %s-segmented scan of %d elements\n",
(config.options & CUDPP_OPTION_BACKWARD) ? " backward" : "",
(config.options & CUDPP_OPTION_INCLUSIVE) ? " inclusive" : "",
op,
test[k]);
fflush(stdout);
}
fflush(stdout);
cutResetTimer(timer);
cutStartTimer(timer);
if(config.op == CUDPP_ADD)
computeSumSegmentedScanGold(reference, i_data, i_flags, test[k], config);
else if (config.op == CUDPP_MAX)
computeMaxSegmentedScanGold(reference, i_data, i_flags, test[k], config);
else if (config.op == CUDPP_MULTIPLY)
computeMultiplySegmentedScanGold(reference, i_data, i_flags, test[k], config);
else if (config.op == CUDPP_MIN)
computeMinSegmentedScanGold(reference, i_data, i_flags, test[k], config);
cutStopTimer(timer);
if (!quiet)
{
printf("CPU execution time = %f\n", cutGetTimerValue(timer));
}
cutResetTimer(timer);
// Run the scan
// run once to avoid timing startup overhead.
#ifndef __DEVICE_EMULATION__
cudppSegmentedScan(segmentedScanPlan, d_odata, d_idata, d_iflags, test[k]);
#endif
cutStartTimer(timer);
for (int i = 0; i < testOptions.numIterations; i++)
{
cudppSegmentedScan(segmentedScanPlan, d_odata, d_idata, d_iflags, test[k]);
}
cudaThreadSynchronize();
cutStopTimer(timer);
// copy result from device to host
CUDA_SAFE_CALL(cudaMemcpy( o_data, d_odata, sizeof(float) * test[k],
cudaMemcpyDeviceToHost));
// check if the result is equivalent to the expected soluion
CUTBoolean result = cutComparefe( reference, o_data, test[k], 0.001f);
retval += (CUTTrue == result) ? 0 : 1;
if (!quiet)
{
printf("%s test %s\n", testOptions.runMode,
(CUTTrue == result) ? "PASSED" : "FAILED");
printf("Average execution time: %f ms\n",
cutGetTimerValue(timer) / testOptions.numIterations);
}
else
{
printf("\t%10d\t%0.4f\n", test[k], cutGetTimerValue(timer) / testOptions.numIterations);
}
if (testOptions.debug)
{
for (unsigned int i = 0; i < test[k]; ++i)
{
if (reference[i] != o_data[i]) printf("%d %f %f\n", i, o_data[i], reference[i]);
// printf("%f %f\n", reference[i], o_data[i]);
}
// printf("\n");
// for (unsigned int i = 0; i < test[k]; ++i)
// {
// printf("%f ", reference[i]);
//}
// printf("\n");
}
cutResetTimer(timer); // needed after CUT alpha2
}
if (!quiet)
printf("\n");
result = cudppDestroyPlan(segmentedScanPlan);
if (result != CUDPP_SUCCESS)
{
printf("Error destroying CUDPPPlan for Scan\n");
}
// cleanup memory
cutDeleteTimer(timer);
free(i_data);
free(i_flags);
free(o_data);
free(reference);
cudaFree(d_odata);
cudaFree(d_idata);
cudaFree(d_iflags);
return retval;
}
/**
* testMultiSumScan exercises cudpp's multiple-unsegmented-scan functionality.
* @param argc Number of arguments on the command line, passed
* directly from main
* @param argv Array of arguments on the command line, passed directly
* from main
* @return Number of tests that failed regression (0 for all pass)
* @see cudppMultiScan
*/
int testMultiSumScan(int argc, const char **argv)
{
int retval = 0; // number of failed subtests (0 == all pass)
// parse "global" testrig options (run mode, iteration count, ...) from argv
testrigOptions testOptions;
setOptions(argc, argv, testOptions);
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
// configure a forward exclusive float sum-scan; only the direction is
// overridable from the command line (--backward)
CUDPPConfiguration config;
CUDPPOption direction = CUDPP_OPTION_FORWARD;
CUDPPOption inclusivity = CUDPP_OPTION_EXCLUSIVE;
if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "backward"))
{
direction = CUDPP_OPTION_BACKWARD;
}
config.algorithm = CUDPP_SCAN;
config.options = direction | inclusivity;
config.op = CUDPP_ADD;
config.datatype = CUDPP_FLOAT;
// problem size: --n=<elements per row>, --r=<rows>
int numElements = 1024; // maximum test size
int numRows = 1024;
//bool oneTest = false;
if (CUTTrue == cutGetCmdLineArgumenti(argc, (const char**) argv, "n",
&numElements))
{
// oneTest = true;
}
if (CUTTrue == cutGetCmdLineArgumenti(argc, (const char**) argv, "r",
&numRows))
{
// oneTest = true;
}
size_t myPitch = numElements * sizeof(float); // host row pitch in bytes
size_t hmemSize = numRows * myPitch;
// allocate host memory to store the input data
float* i_data = (float*) malloc( hmemSize);
// allocate host memory to store the output data
float* o_data = (float*) malloc( hmemSize);
// random input in [0, 31]; output poisoned with -1 so untouched elements
// are detectable after the copy back
for( int i = 0; i < numElements * numRows; ++i)
{
i_data[i] = (float)(rand() & 31);
o_data[i] = -1;
}
// allocate and compute the CPU reference solution
float* reference = (float*) malloc(hmemSize);
computeMultiRowSumScanGold( reference, i_data, numElements, numRows, config);
// allocate device memory input and output arrays (pitched, so each row is
// aligned; the returned device pitch may be larger than the host pitch)
float* d_idata = NULL;
float* d_odata = NULL;
size_t d_ipitch = 0;
size_t d_opitch = 0;
CUDA_SAFE_CALL( cudaMallocPitch( (void**) &d_idata, &d_ipitch,
myPitch, numRows));
CUDA_SAFE_CALL( cudaMallocPitch( (void**) &d_odata, &d_opitch,
myPitch, numRows));
// copy host memory to device input array
CUDA_SAFE_CALL( cudaMemcpy2D(d_idata, d_ipitch, i_data, myPitch, myPitch,
numRows, cudaMemcpyHostToDevice) );
// initialize all the other device arrays to be safe
// NOTE(review): this copy passes d_ipitch as the pitch of d_odata; that is
// only correct if d_opitch == d_ipitch — confirm (same allocation shape, so
// it likely holds, but d_opitch would be the safe choice).
CUDA_SAFE_CALL( cudaMemcpy2D(d_odata, d_ipitch, o_data, myPitch, myPitch,
numRows, cudaMemcpyHostToDevice) );
// cudppPlan wants the row pitch in elements, not bytes
size_t rowPitch = d_ipitch / sizeof(float);
CUDPPHandle multiscanPlan = 0;
CUDPPResult ret = cudppPlan(&multiscanPlan, config, numElements, numRows, rowPitch);
if (ret != CUDPP_SUCCESS)
{
// NOTE(review): early return leaks the host/device buffers above
fprintf(stderr, "Error creating CUDPP Plan for multi-row Scan.\n");
retval = 1;
return retval;
}
printf("Running a%s sum-scan of %d rows of %d elements\n",
(config.options & CUDPP_OPTION_BACKWARD) ? " backward" : "",
numRows,
numElements);
fflush(stdout);
// run once to avoid timing startup overhead.
#ifndef __DEVICE_EMULATION__
cudppMultiScan(multiscanPlan, d_odata, d_idata, numElements, numRows);
#endif
// timed runs; the average per-iteration time is reported below
cutStartTimer(timer);
for (int i = 0; i < testOptions.numIterations; i++)
{
cudppMultiScan(multiscanPlan, d_odata, d_idata, numElements, numRows);
}
// NOTE(review): cudaThreadSynchronize is deprecated in favor of
// cudaDeviceSynchronize; kept as-is for this legacy harness.
cudaThreadSynchronize();
cutStopTimer(timer);
// copy result from device to host
CUDA_SAFE_CALL(cudaMemcpy2D( o_data, myPitch, d_odata, d_opitch,
myPitch, numRows, cudaMemcpyDeviceToHost));
// check if the result is equivalent to the expected solution (abs tolerance)
CUTBoolean result = cutComparefe( reference, o_data, numElements*numRows,
0.001f);
retval += (CUTTrue == result) ? 0 : 1;
printf("%s test %s\n", testOptions.runMode,
(CUTTrue == result) ? "PASSED" : "FAILED");
printf("Average execution time: %f ms\n",
cutGetTimerValue(timer) / testOptions.numIterations);
printf("\n");
cudppDestroyPlan(multiscanPlan);
// cleanup memory
cutDeleteTimer(timer);
free( i_data);
free(o_data);
free( reference);
cudaFree( d_odata);
cudaFree( d_idata);
return retval;
}
|
6dff39cdbe198c9a45b81e6ffc2fa1562b7058c3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* bvg.cu
*
* Created on: Mar 19, 2012
* Author: u0332192
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
// Kernel stub: currently a no-op — both parameters are ignored and nothing
// is read or written. Presumably a placeholder for per-block work; confirm
// intended behavior before relying on outv.
__global__ void get_block_value(int* inv, int* outv)
{
return;
}
// Host driver: builds a 16-element input vector, mirrors it to the device and
// launches the (currently no-op) get_block_value kernel with one block per
// element and a single thread per block.
// Fixes over the original: ill-formed `void main` -> `int main` (C++ requires
// main to return int), integer shift instead of double-precision pow(2,4),
// a device synchronize after the launch, and release of all allocations.
int main(int argc, char** argv)
{
    const unsigned int N = 1u << 4;        // 16 elements (was pow(2,4))
    const size_t sz = sizeof(int) * N;
    int* inv = (int*)malloc(sz);
    if (inv == NULL) return 1;             // host allocation failed
    for (unsigned int i = 0; i < N; i++) inv[i] = (int)i;  // 0..N-1
    dim3 grd(N, 1);                        // one block per element
    dim3 blk(1, 1);                        // single thread per block
    int* ginv = NULL;
    hipMalloc(&ginv, sz);
    hipMemcpy(ginv, inv, sz, hipMemcpyHostToDevice);
    int* goutv = NULL;
    hipMalloc(&goutv, sz);
    hipMemset(goutv, 0, sz);
    hipLaunchKernelGGL(( get_block_value), dim3(grd), dim3(blk), 0, 0, ginv, goutv);
    hipDeviceSynchronize();                // surface launch/exec errors before teardown
    hipFree(goutv);
    hipFree(ginv);
    free(inv);
    return 0;
}
| 6dff39cdbe198c9a45b81e6ffc2fa1562b7058c3.cu | /*
* bvg.cu
*
* Created on: Mar 19, 2012
* Author: u0332192
*/
#include <stdio.h>
#include <cuda.h>
// Kernel stub: currently a no-op — both parameters are ignored and nothing
// is read or written. Presumably a placeholder for per-block work; confirm
// intended behavior before relying on outv.
__global__ void get_block_value(int* inv, int* outv)
{
return;
}
// Host driver: builds a 16-element input vector, mirrors it to the device and
// launches the (currently no-op) get_block_value kernel with one block per
// element and a single thread per block.
// Fixes over the original: ill-formed `void main` -> `int main` (C++ requires
// main to return int), integer shift instead of double-precision pow(2,4),
// a device synchronize after the launch, and release of all allocations.
int main(int argc, char** argv)
{
    const unsigned int N = 1u << 4;        // 16 elements (was pow(2,4))
    const size_t sz = sizeof(int) * N;
    int* inv = (int*)malloc(sz);
    if (inv == NULL) return 1;             // host allocation failed
    for (unsigned int i = 0; i < N; i++) inv[i] = (int)i;  // 0..N-1
    dim3 grd(N, 1);                        // one block per element
    dim3 blk(1, 1);                        // single thread per block
    int* ginv = NULL;
    cudaMalloc(&ginv, sz);
    cudaMemcpy(ginv, inv, sz, cudaMemcpyHostToDevice);
    int* goutv = NULL;
    cudaMalloc(&goutv, sz);
    cudaMemset(goutv, 0, sz);
    get_block_value<<<grd, blk>>>(ginv, goutv);
    cudaDeviceSynchronize();               // surface launch/exec errors before teardown
    cudaFree(goutv);
    cudaFree(ginv);
    free(inv);
    return 0;
}
|
270e02b86dc5cc69db883d63285fdc245381be3c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <assert.h>
#define N 2//(64*64)//(2048*2048)
#define THREADS_PER_BLOCK 2//512
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// NOTE(review): no bounds guard — assumes gridDim.x * blockDim.x equals the
// array length exactly (true for the N/THREADS_PER_BLOCK launch in main);
// confirm before reusing with other launch configurations.
__global__ void Asum(int *a, int *b, int *c){
int index = threadIdx.x + blockIdx.x*blockDim.x;
c[index] = a[index] + b[index];
}
// Host test driver: fills two N-element vectors with 10, adds them on the
// GPU via Asum, prints both inputs and the result, and asserts every output
// element equals a[i] + b[i].
int main(void){
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int size = N*sizeof(int); // bytes per vector
hipMalloc((void**)&dev_a, size);
hipMalloc((void**)&dev_b, size);
hipMalloc((void**)&dev_c,size);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = 10;
for (int i = 0; i < N; i++)
b[i] = 10;
hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,size, hipMemcpyHostToDevice);
printf("a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
printf("\nb: ");
for (int i = 0; i < N; i++)
printf("%d ", b[i]);
// exactly one thread per element; N must stay a multiple of
// THREADS_PER_BLOCK because Asum has no bounds guard
hipLaunchKernelGGL(( Asum), dim3(N/THREADS_PER_BLOCK),dim3(THREADS_PER_BLOCK), 0, 0, dev_a,dev_b,dev_c);
//ESBMC_verify_kernel(Asum, N/THREADS_PER_BLOCK,THREADS_PER_BLOCK,dev_a,dev_b,dev_c);
// blocking copy; also orders the read of c after the kernel completes
hipMemcpy(c,dev_c,size,hipMemcpyDeviceToHost);
printf("\nResultado da soma de a e b eh:\n ");
for (int i = 0; i < N; i++){
printf("%d ", c[i]);
assert(c[i]==a[i]+b[i]);
}
free(a); free(b); free(c);
hipFree(dev_a);
hipFree(dev_c);
hipFree(dev_b);
return 0;
}
| 270e02b86dc5cc69db883d63285fdc245381be3c.cu | #include <cuda.h>
#include <assert.h>
#define N 2//(64*64)//(2048*2048)
#define THREADS_PER_BLOCK 2//512
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// NOTE(review): no bounds guard — assumes gridDim.x * blockDim.x equals the
// array length exactly (true for the N/THREADS_PER_BLOCK launch in main);
// confirm before reusing with other launch configurations.
__global__ void Asum(int *a, int *b, int *c){
int index = threadIdx.x + blockIdx.x*blockDim.x;
c[index] = a[index] + b[index];
}
// Host test driver: fills two N-element vectors with 10, adds them on the
// GPU via Asum, prints both inputs and the result, and asserts every output
// element equals a[i] + b[i].
int main(void){
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int size = N*sizeof(int); // bytes per vector
cudaMalloc((void**)&dev_a, size);
cudaMalloc((void**)&dev_b, size);
cudaMalloc((void**)&dev_c,size);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = 10;
for (int i = 0; i < N; i++)
b[i] = 10;
cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size, cudaMemcpyHostToDevice);
printf("a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
printf("\nb: ");
for (int i = 0; i < N; i++)
printf("%d ", b[i]);
// exactly one thread per element; N must stay a multiple of
// THREADS_PER_BLOCK because Asum has no bounds guard
Asum<<<N/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(dev_a,dev_b,dev_c);
//ESBMC_verify_kernel(Asum, N/THREADS_PER_BLOCK,THREADS_PER_BLOCK,dev_a,dev_b,dev_c);
// blocking copy; also orders the read of c after the kernel completes
cudaMemcpy(c,dev_c,size,cudaMemcpyDeviceToHost);
printf("\nResultado da soma de a e b eh:\n ");
for (int i = 0; i < N; i++){
printf("%d ", c[i]);
assert(c[i]==a[i]+b[i]);
}
free(a); free(b); free(c);
cudaFree(dev_a);
cudaFree(dev_c);
cudaFree(dev_b);
return 0;
}
|
ed82052cd85970425f383bd00b1755ba9d2260f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include <math.h>
#include "SyncedMemory.h"
#include <ctype.h>
#define CHECK {\
auto e = hipDeviceSynchronize();\
if (e != hipSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
abort();\
}\
}
// Uppercase ASCII letters in place, one thread per byte.
// Bytes past fsize and newline characters are left untouched.
__global__ void SomeTransform(char *input_gpu, int fsize) {
    const int pos = blockIdx.x * blockDim.x + threadIdx.x;
    if (pos >= fsize)
        return;                     // tail guard: grid may overshoot fsize
    const char ch = input_gpu[pos];
    if (ch == '\n')
        return;                     // preserve line breaks
    if ('a' <= ch && ch <= 'z')
        input_gpu[pos] = ch - 32;   // 'a'..'z' -> 'A'..'Z'
}
// Reads the text file named on the command line into a host/device synced
// buffer, uppercases it on the GPU via SomeTransform, and prints the result.
int main(int argc, char **argv)
{
// init, and check
if (argc != 2) {
printf("Usage %s <input text file>\n", argv[0]);
abort();
}
FILE *fp = fopen(argv[1], "r");
if (!fp) {
printf("Cannot open %s", argv[1]);
abort();
}
// get file size
fseek(fp, 0, SEEK_END);
size_t fsize = ftell(fp);
fseek(fp, 0, SEEK_SET);
// read the file into a synced host/device buffer (+1 for the terminator)
MemoryBuffer<char> text(fsize+1);
auto text_smem = text.CreateSync(fsize);
CHECK;
fread(text_smem.get_cpu_wo(), 1, fsize, fp);
text_smem.get_cpu_wo()[fsize] = '\0';
fclose(fp);
// run the transform on the GPU copy of the buffer
char *input_gpu = text_smem.get_gpu_rw();
// SomeTransform uppercases a-z in place, skipping line breaks; threads
// past fsize are guarded inside the kernel
int blocksize = 8;
int nblock = fsize/blocksize + (fsize % blocksize == 0 ? 0 : 1); // ceil(fsize/blocksize)
hipLaunchKernelGGL(( SomeTransform) , dim3(nblock),dim3(blocksize) , 0, 0, input_gpu, fsize);
//printf("%d\n",fsize);
// NOTE(review): get_cpu_ro() presumably syncs device->host before the read —
// confirm against MemoryBuffer/SyncedMemory semantics
puts(text_smem.get_cpu_ro());
return 0;
}
| ed82052cd85970425f383bd00b1755ba9d2260f0.cu | #include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include <math.h>
#include "SyncedMemory.h"
#include <ctype.h>
#define CHECK {\
auto e = cudaDeviceSynchronize();\
if (e != cudaSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
abort();\
}\
}
// Uppercase ASCII letters in place, one thread per byte.
// Bytes past fsize and newline characters are left untouched.
__global__ void SomeTransform(char *input_gpu, int fsize) {
    const int pos = blockIdx.x * blockDim.x + threadIdx.x;
    if (pos >= fsize)
        return;                     // tail guard: grid may overshoot fsize
    const char ch = input_gpu[pos];
    if (ch == '\n')
        return;                     // preserve line breaks
    if ('a' <= ch && ch <= 'z')
        input_gpu[pos] = ch - 32;   // 'a'..'z' -> 'A'..'Z'
}
// Reads the text file named on the command line into a host/device synced
// buffer, uppercases it on the GPU via SomeTransform, and prints the result.
int main(int argc, char **argv)
{
// init, and check
if (argc != 2) {
printf("Usage %s <input text file>\n", argv[0]);
abort();
}
FILE *fp = fopen(argv[1], "r");
if (!fp) {
printf("Cannot open %s", argv[1]);
abort();
}
// get file size
fseek(fp, 0, SEEK_END);
size_t fsize = ftell(fp);
fseek(fp, 0, SEEK_SET);
// read the file into a synced host/device buffer (+1 for the terminator)
MemoryBuffer<char> text(fsize+1);
auto text_smem = text.CreateSync(fsize);
CHECK;
fread(text_smem.get_cpu_wo(), 1, fsize, fp);
text_smem.get_cpu_wo()[fsize] = '\0';
fclose(fp);
// run the transform on the GPU copy of the buffer
char *input_gpu = text_smem.get_gpu_rw();
// SomeTransform uppercases a-z in place, skipping line breaks; threads
// past fsize are guarded inside the kernel
int blocksize = 8;
int nblock = fsize/blocksize + (fsize % blocksize == 0 ? 0 : 1); // ceil(fsize/blocksize)
SomeTransform <<< nblock,blocksize >>>(input_gpu, fsize);
//printf("%d\n",fsize);
// NOTE(review): get_cpu_ro() presumably syncs device->host before the read —
// confirm against MemoryBuffer/SyncedMemory semantics
puts(text_smem.get_cpu_ro());
return 0;
}
|
883f67d04713135f2493bd14a6e77d3ae201b810.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Separable Gaussian blur, Z-pass: for each (x, z) thread at the fixed row y,
// convolve src along the z axis with a normalized Gaussian of the given sigma
// and (odd) kernel_size. Border slices are clamped (edge replication) and the
// result is divided by the weight actually accumulated, keeping the truncated
// and clamped kernel normalized. stride/slice_stride are element strides (the
// index arithmetic is applied directly to float pointers).
// Fixes: expf() instead of exp() to avoid a double-precision promotion in a
// float kernel, and integer min/max instead of a float fmaxf/fminf round-trip
// for the index clamp.
__global__ void cuFilterGaussZKernel_32f_C1(float* dst, float* src, const int y, const int width, const int depth, const size_t stride, const size_t slice_stride, float sigma, int kernel_size)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int z = blockIdx.y*blockDim.y + threadIdx.y;
    // thread indices are never negative, so only the upper bounds need checking
    if (x < width && z < depth)
    {
        int half_kernel_elements = (kernel_size - 1) / 2;
        // incremental Gaussian evaluation: after i updates, g0 holds
        // norm * exp(-i^2 / (2 sigma^2)); g1/g2 are the running update factors
        float g0 = 1.0f / (sqrtf(2.0f * 3.141592653589793f) * sigma);
        float g1 = expf(-0.5f / (sigma * sigma));
        float g2 = g1 * g1;
        // center tap, then convolve outward along z
        float sum = g0 * src[z*slice_stride + y*stride + x];
        float sum_coeff = g0;
        for (int i = 1; i <= half_kernel_elements; i++)
        {
            g0 *= g1;
            g1 *= g2;
            int cur_z = min(depth - 1, max(0, z + i)); // clamp: replicate border slice
            sum += g0 * src[cur_z*slice_stride + y*stride + x];
            cur_z = min(depth - 1, max(0, z - i));
            sum += g0 * src[cur_z*slice_stride + y*stride + x];
            sum_coeff += 2.0f*g0;
        }
        // renormalize by the accumulated weight
        dst[z*slice_stride + y*stride + x] = sum/sum_coeff;
    }
} | 883f67d04713135f2493bd14a6e77d3ae201b810.cu | #include "includes.h"
// Separable Gaussian blur, Z-pass: for each (x, z) thread at the fixed row y,
// convolve src along the z axis with a normalized Gaussian of the given sigma
// and (odd) kernel_size. Border slices are clamped (edge replication) and the
// result is divided by the weight actually accumulated, keeping the truncated
// and clamped kernel normalized. stride/slice_stride are element strides (the
// index arithmetic is applied directly to float pointers).
// Fixes: expf() instead of exp() to avoid a double-precision promotion in a
// float kernel, and integer min/max instead of a float fmaxf/fminf round-trip
// for the index clamp.
__global__ void cuFilterGaussZKernel_32f_C1(float* dst, float* src, const int y, const int width, const int depth, const size_t stride, const size_t slice_stride, float sigma, int kernel_size)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int z = blockIdx.y*blockDim.y + threadIdx.y;
    // thread indices are never negative, so only the upper bounds need checking
    if (x < width && z < depth)
    {
        int half_kernel_elements = (kernel_size - 1) / 2;
        // incremental Gaussian evaluation: after i updates, g0 holds
        // norm * exp(-i^2 / (2 sigma^2)); g1/g2 are the running update factors
        float g0 = 1.0f / (sqrtf(2.0f * 3.141592653589793f) * sigma);
        float g1 = expf(-0.5f / (sigma * sigma));
        float g2 = g1 * g1;
        // center tap, then convolve outward along z
        float sum = g0 * src[z*slice_stride + y*stride + x];
        float sum_coeff = g0;
        for (int i = 1; i <= half_kernel_elements; i++)
        {
            g0 *= g1;
            g1 *= g2;
            int cur_z = min(depth - 1, max(0, z + i)); // clamp: replicate border slice
            sum += g0 * src[cur_z*slice_stride + y*stride + x];
            cur_z = min(depth - 1, max(0, z - i));
            sum += g0 * src[cur_z*slice_stride + y*stride + x];
            sum_coeff += 2.0f*g0;
        }
        // renormalize by the accumulated weight
        dst[z*slice_stride + y*stride + x] = sum/sum_coeff;
    }
} |
229bf9a9d3a5b2e7dac49befbc8b394d6188eb00.hip | // !!! This is a file automatically generated by hipify!!!
/**************************************************************************
*
* COMP 193
* GPU programming
* Exercise 2 template
*
**************************************************************************/
#include <hip/hip_runtime.h>
//#include <hiprand/hiprand.h> // includes random num stuff
//#include <hiprand/hiprand_kernel.h> // more rand stuff
//#include <hip/hip_texture_types.h>
#include <stdio.h>
#include "gpu_main.h"
#include "params.h"
// define texture memory
//texture<float, 2> texGray;
texture<float, 2> texBlue;
//texture<float, 2> texGreen;
//texture<float, 2> texBlue;
/*************************************************************************/
// Advance the animation one frame: amplify *inPtr, clip it to [0,1], and use
// the result to modulate fresh per-pixel noise in all three color planes.
// Returns 0 on success, -1 if a kernel launch was rejected by the runtime.
int updatePalette(GPU_Palette* P, const float* inPtr){
  const float GAIN = 10.0f;      // input amplification (float literal, was double)
  float val = inPtr[0] * GAIN;
  if (val > 1.0f) val = 1.0f;    // clip so the color planes stay in [0,1]

  hipLaunchKernelGGL(( updateReds) , dim3(P->gBlocks), dim3(P->gThreads) , 0, 0, P->red, P->rand, val);
  hipLaunchKernelGGL(( updateGreens) , dim3(P->gBlocks), dim3(P->gThreads) , 0, 0, P->green, P->rand, val);
  hipLaunchKernelGGL(( updateBlues) , dim3(P->gBlocks), dim3(P->gThreads) , 0, 0, P->blue, P->rand, val);

  // Kernel launches are asynchronous and previously went unchecked; surface
  // bad-configuration errors here instead of letting a sticky error poison
  // later runtime calls.
  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    fprintf(stderr, "updatePalette: kernel launch failed: %s\n", hipGetErrorString(err));
    return -1;
  }
  return 0;
}
/*************************************************************************/
/* Fill the red plane with uniform noise scaled by amp; one thread per pixel. */
__global__ void updateReds(float* red, hiprandState_t* gRand, float amp){
  // Flatten the 2D grid coordinate of this thread into a linear pixel index.
  const int col = threadIdx.x + blockIdx.x * blockDim.x;
  const int row = threadIdx.y + blockIdx.y * blockDim.y;
  const int idx = col + row * blockDim.x * gridDim.x;

  // Pull this pixel's generator state, draw one uniform sample, then store
  // the state back so the sequence advances on the next frame.
  hiprandState_t localState = gRand[idx];
  const float noise = hiprand_uniform(&localState);
  gRand[idx] = localState;

  red[idx] = noise * amp;
}
/*************************************************************************/
/* Fill the green plane with uniform noise scaled by amp; one thread per pixel. */
__global__ void updateGreens(float* green, hiprandState_t* gRand, float amp){
  // Flatten the 2D grid coordinate of this thread into a linear pixel index.
  const int col = threadIdx.x + blockIdx.x * blockDim.x;
  const int row = threadIdx.y + blockIdx.y * blockDim.y;
  const int idx = col + row * blockDim.x * gridDim.x;

  // Pull this pixel's generator state, draw one uniform sample, then store
  // the state back so the sequence advances on the next frame.
  hiprandState_t localState = gRand[idx];
  const float noise = hiprand_uniform(&localState);
  gRand[idx] = localState;

  green[idx] = noise * amp;
}
/*************************************************************************/
/* Fill the blue plane with uniform noise scaled by amp; one thread per pixel. */
__global__ void updateBlues(float* blue, hiprandState_t* gRand, float amp){
  // Flatten the 2D grid coordinate of this thread into a linear pixel index.
  const int col = threadIdx.x + blockIdx.x * blockDim.x;
  const int row = threadIdx.y + blockIdx.y * blockDim.y;
  const int idx = col + row * blockDim.x * gridDim.x;

  // Pull this pixel's generator state, draw one uniform sample, then store
  // the state back so the sequence advances on the next frame.
  hiprandState_t localState = gRand[idx];
  const float noise = hiprand_uniform(&localState);
  gRand[idx] = localState;

  blue[idx] = noise * amp;
}
/*************************************************************************/
// use this for initializing random num generator
// Seed one RNG state per pixel: every thread initializes its own generator
// from the shared seed, using its linear pixel index as the subsequence so
// pixels produce independent streams.
__global__ void setup_kernel(hiprandState_t* state, unsigned long seed) {
  const int col = threadIdx.x + blockIdx.x * blockDim.x;
  const int row = threadIdx.y + blockIdx.y * blockDim.y;
  const int idx = col + row * blockDim.x * gridDim.x;
  hiprand_init(seed, idx, 0, &state[idx]);
}
/*************************************************************************/
/* Build a GPU_Palette for an 800x800 image: configure the launch geometry,
   allocate per-pixel RNG states and the four float color planes on the
   device, bind the blue plane to 2D texture memory, and seed the generators.
   (PARAMS is currently unused; kept for interface stability.) */
GPU_Palette initGPUPalette(AParams* PARAMS){
  GPU_Palette P;

  // Launch geometry: 32x32 thread blocks tiling the 800x800 image,
  // i.e. a 25x25 grid of blocks.
  P.gTPB = 32;
  P.gDIM = 800;
  P.gSize = P.gDIM * P.gDIM * sizeof(float);
  P.gThreads.x = P.gTPB;
  P.gThreads.y = P.gTPB;
  P.gThreads.z = 1;          // 3D of threads allowed; only 2D used
  P.gBlocks.x = P.gDIM / P.gTPB;
  P.gBlocks.y = P.gDIM / P.gTPB;
  P.gBlocks.z = 1;           // only 2D of blocks allowed

  // One RNG state per pixel.
  unsigned long randSize = P.gDIM * P.gDIM * sizeof(hiprandState_t);
  hipMalloc((void**) &P.rand, randSize);

  // One float plane per channel, plus a grayscale plane.
  hipMalloc((void**) &P.gray, P.gSize);
  hipMalloc((void**) &P.red, P.gSize);
  hipMalloc((void**) &P.green, P.gSize);
  hipMalloc((void**) &P.blue, P.gSize);

  // Expose the blue plane through the legacy 2D texture reference.
  hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
  hipBindTexture2D(NULL, texBlue, P.blue, desc, P.gDIM,
                   P.gDIM, sizeof(float) * P.gDIM);

  // Seed every pixel's generator from the wall clock.
  hipLaunchKernelGGL(( setup_kernel) , dim3(P.gBlocks), dim3(P.gThreads) , 0, 0, P.rand, time(NULL));

  return P;
}
/*************************************************************************/
/* Release every device resource owned by the palette. Always returns 0. */
int freeGPUPalette(GPU_Palette* P) {
  // Drop the texture binding on the blue plane before freeing its backing store.
  hipUnbindTexture(texBlue);

  hipFree(P->gray);
  hipFree(P->red);
  hipFree(P->green);
  hipFree(P->blue);
  hipFree(P->rand);

  return 0;
}
/*************************************************************************/
| 229bf9a9d3a5b2e7dac49befbc8b394d6188eb00.cu | /**************************************************************************
*
* COMP 193
* GPU programming
* Exercise 2 template
*
**************************************************************************/
#include <cuda.h>
//#include <curand.h> // includes random num stuff
//#include <curand_kernel.h> // more rand stuff
//#include <cuda_texture_types.h>
#include <stdio.h>
#include "gpu_main.h"
#include "params.h"
// define texture memory
//texture<float, 2> texGray;
texture<float, 2> texBlue;
//texture<float, 2> texGreen;
//texture<float, 2> texBlue;
/*************************************************************************/
// Advance the animation one frame: amplify *inPtr, clip it to [0,1], and use
// the result to modulate fresh per-pixel noise in all three color planes.
// Returns 0 on success, -1 if a kernel launch was rejected by the runtime.
int updatePalette(GPU_Palette* P, const float* inPtr){
  const float GAIN = 10.0f;      // input amplification (float literal, was double)
  float val = inPtr[0] * GAIN;
  if (val > 1.0f) val = 1.0f;    // clip so the color planes stay in [0,1]

  updateReds <<< P->gBlocks, P->gThreads >>> (P->red, P->rand, val);
  updateGreens <<< P->gBlocks, P->gThreads >>> (P->green, P->rand, val);
  updateBlues <<< P->gBlocks, P->gThreads >>> (P->blue, P->rand, val);

  // Kernel launches are asynchronous and previously went unchecked; surface
  // bad-configuration errors here instead of letting a sticky error poison
  // later runtime calls.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "updatePalette: kernel launch failed: %s\n", cudaGetErrorString(err));
    return -1;
  }
  return 0;
}
/*************************************************************************/
/* Fill the red plane with uniform noise scaled by amp; one thread per pixel. */
__global__ void updateReds(float* red, curandState* gRand, float amp){
  // Flatten the 2D grid coordinate of this thread into a linear pixel index.
  const int col = threadIdx.x + blockIdx.x * blockDim.x;
  const int row = threadIdx.y + blockIdx.y * blockDim.y;
  const int idx = col + row * blockDim.x * gridDim.x;

  // Pull this pixel's generator state, draw one uniform sample, then store
  // the state back so the sequence advances on the next frame.
  curandState localState = gRand[idx];
  const float noise = curand_uniform(&localState);
  gRand[idx] = localState;

  red[idx] = noise * amp;
}
/*************************************************************************/
/* Fill the green plane with uniform noise scaled by amp; one thread per pixel. */
__global__ void updateGreens(float* green, curandState* gRand, float amp){
  // Flatten the 2D grid coordinate of this thread into a linear pixel index.
  const int col = threadIdx.x + blockIdx.x * blockDim.x;
  const int row = threadIdx.y + blockIdx.y * blockDim.y;
  const int idx = col + row * blockDim.x * gridDim.x;

  // Pull this pixel's generator state, draw one uniform sample, then store
  // the state back so the sequence advances on the next frame.
  curandState localState = gRand[idx];
  const float noise = curand_uniform(&localState);
  gRand[idx] = localState;

  green[idx] = noise * amp;
}
/*************************************************************************/
/* Fill the blue plane with uniform noise scaled by amp; one thread per pixel. */
__global__ void updateBlues(float* blue, curandState* gRand, float amp){
  // Flatten the 2D grid coordinate of this thread into a linear pixel index.
  const int col = threadIdx.x + blockIdx.x * blockDim.x;
  const int row = threadIdx.y + blockIdx.y * blockDim.y;
  const int idx = col + row * blockDim.x * gridDim.x;

  // Pull this pixel's generator state, draw one uniform sample, then store
  // the state back so the sequence advances on the next frame.
  curandState localState = gRand[idx];
  const float noise = curand_uniform(&localState);
  gRand[idx] = localState;

  blue[idx] = noise * amp;
}
/*************************************************************************/
// use this for initializing random num generator
// Seed one RNG state per pixel: every thread initializes its own generator
// from the shared seed, using its linear pixel index as the subsequence so
// pixels produce independent streams.
__global__ void setup_kernel(curandState* state, unsigned long seed) {
  const int col = threadIdx.x + blockIdx.x * blockDim.x;
  const int row = threadIdx.y + blockIdx.y * blockDim.y;
  const int idx = col + row * blockDim.x * gridDim.x;
  curand_init(seed, idx, 0, &state[idx]);
}
/*************************************************************************/
/* Build a GPU_Palette for an 800x800 image: configure the launch geometry,
   allocate per-pixel RNG states and the four float color planes on the
   device, bind the blue plane to 2D texture memory, and seed the generators.
   (PARAMS is currently unused; kept for interface stability.) */
GPU_Palette initGPUPalette(AParams* PARAMS){
  GPU_Palette P;

  // Launch geometry: 32x32 thread blocks tiling the 800x800 image,
  // i.e. a 25x25 grid of blocks.
  P.gTPB = 32;
  P.gDIM = 800;
  P.gSize = P.gDIM * P.gDIM * sizeof(float);
  P.gThreads.x = P.gTPB;
  P.gThreads.y = P.gTPB;
  P.gThreads.z = 1;          // 3D of threads allowed; only 2D used
  P.gBlocks.x = P.gDIM / P.gTPB;
  P.gBlocks.y = P.gDIM / P.gTPB;
  P.gBlocks.z = 1;           // only 2D of blocks allowed

  // One RNG state per pixel.
  unsigned long randSize = P.gDIM * P.gDIM * sizeof(curandState);
  cudaMalloc((void**) &P.rand, randSize);

  // One float plane per channel, plus a grayscale plane.
  cudaMalloc((void**) &P.gray, P.gSize);
  cudaMalloc((void**) &P.red, P.gSize);
  cudaMalloc((void**) &P.green, P.gSize);
  cudaMalloc((void**) &P.blue, P.gSize);

  // Expose the blue plane through the legacy 2D texture reference.
  cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
  cudaBindTexture2D(NULL, texBlue, P.blue, desc, P.gDIM,
                    P.gDIM, sizeof(float) * P.gDIM);

  // Seed every pixel's generator from the wall clock.
  setup_kernel <<< P.gBlocks, P.gThreads >>> (P.rand, time(NULL));

  return P;
}
/*************************************************************************/
/* Release every device resource owned by the palette. Always returns 0. */
int freeGPUPalette(GPU_Palette* P) {
  // Drop the texture binding on the blue plane before freeing its backing store.
  cudaUnbindTexture(texBlue);

  cudaFree(P->gray);
  cudaFree(P->red);
  cudaFree(P->green);
  cudaFree(P->blue);
  cudaFree(P->rand);

  return 0;
}
/*************************************************************************/
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.